6a764f03ecf3beac475c0125b0f83ab328dd301b
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.nsr import (
38 get_deployed_kdu,
39 get_deployed_vca,
40 get_deployed_vca_list,
41 get_nsd,
42 )
43 from osm_lcm.data_utils.vca import (
44 DeployedComponent,
45 DeployedK8sResource,
46 DeployedVCA,
47 EELevel,
48 Relation,
49 EERelation,
50 safe_get_ee_relation,
51 )
52 from osm_lcm.ng_ro import NgRoClient, NgRoException
53 from osm_lcm.lcm_utils import (
54 LcmException,
55 LcmExceptionNoMgmtIP,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 )
63 from osm_lcm.data_utils.nsd import (
64 get_ns_configuration_relation_list,
65 get_vnf_profile,
66 get_vnf_profiles,
67 )
68 from osm_lcm.data_utils.vnfd import (
69 get_kdu,
70 get_kdu_services,
71 get_relation_list,
72 get_vdu_list,
73 get_vdu_profile,
74 get_ee_sorted_initial_config_primitive_list,
75 get_ee_sorted_terminate_config_primitive_list,
76 get_kdu_list,
77 get_virtual_link_profiles,
78 get_vdu,
79 get_configuration,
80 get_vdu_index,
81 get_scaling_aspect,
82 get_number_of_instances,
83 get_juju_ee_ref,
84 get_kdu_resource_profile,
85 find_software_version,
86 )
87 from osm_lcm.data_utils.list_utils import find_in_list
88 from osm_lcm.data_utils.vnfr import (
89 get_osm_params,
90 get_vdur_index,
91 get_kdur,
92 get_volumes_from_instantiation_params,
93 )
94 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
95 from osm_lcm.data_utils.database.vim_account import VimAccountDB
96 from n2vc.definitions import RelationEndpoint
97 from n2vc.k8s_helm_conn import K8sHelmConnector
98 from n2vc.k8s_helm3_conn import K8sHelm3Connector
99 from n2vc.k8s_juju_conn import K8sJujuConnector
100
101 from osm_common.dbbase import DbException
102 from osm_common.fsbase import FsException
103
104 from osm_lcm.data_utils.database.database import Database
105 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
106
107 from n2vc.n2vc_juju_conn import N2VCJujuConnector
108 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
109
110 from osm_lcm.lcm_helm_conn import LCMHelmConn
111 from osm_lcm.osm_config import OsmConfigBuilder
112 from osm_lcm.prometheus import parse_job
113
114 from copy import copy, deepcopy
115 from time import time
116 from uuid import uuid4
117
118 from random import randint
119
# Module author contact, exposed via the conventional dunder attribute.
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
121
122
class NsLcm(LcmBase):
    """Network Service lifecycle manager.

    Handles NS-level LCM operations (instantiate, terminate, scale, heal,
    migrate, update, ...) talking to RO, N2VC/juju and the K8s connectors,
    and persisting every state change in the common database.
    """

    # All timeouts below are expressed in seconds.
    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_ns_heal = 1800  # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for migrating vnfs
    timeout_verticalscale = 1800  # default global timeout for Vertical Scaling
    # Sentinel codes returned by the sub-operation lookup helpers
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Display name used when registering the VCA-deployment asyncio task
    task_name_deploy_vca = "Deploying VCA"
143
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler
        :param lcm_tasks: registry of running LCM tasks shared with the LCM main module
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all async connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # DB/FS singletons are expected to be already initialized elsewhere
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so local modifications do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # execution-environment connector for helm-based charms
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm v2 connector
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # helm v3 connector
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju-bundle connector; it is the only one with a DB-update callback
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # kdu deployment-type string -> k8s connector
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # execution-environment type -> VCA connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # LCM operation type -> RO coroutine used to poll that operation's status
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
232
233 @staticmethod
234 def increment_ip_mac(ip_mac, vm_index=1):
235 if not isinstance(ip_mac, str):
236 return ip_mac
237 try:
238 # try with ipv4 look for last dot
239 i = ip_mac.rfind(".")
240 if i > 0:
241 i += 1
242 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
243 # try with ipv6 or mac look for last colon. Operate in hex
244 i = ip_mac.rfind(":")
245 if i > 0:
246 i += 1
247 # format in hex, len can be 2 for mac or 4 for ipv6
248 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
249 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
250 )
251 except Exception:
252 pass
253 return None
254
255 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
256
257 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
258
259 try:
260 # TODO filter RO descriptor fields...
261
262 # write to database
263 db_dict = dict()
264 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
265 db_dict["deploymentStatus"] = ro_descriptor
266 self.update_db_2("nsrs", nsrs_id, db_dict)
267
268 except Exception as e:
269 self.logger.warn(
270 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
271 )
272
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Refresh vcaStatus / nsState of an nsr after an N2VC change.

        Registered as the ``on_update_db`` callback of the N2VC connector.
        :param table: collection of the changed record (value not used here)
        :param filter: db filter; only its "_id" (the nsr id) is used
        :param path: dotted path of the changed element; the last dotted
            component is expected to be the VCA index
        :param updated_data: changed data (unused; status is re-read from juju)
        :param vca_id: optional VCA id forwarded to n2vc.get_status
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # last dotted component of the path is the VCA index
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so both assignments below raise KeyError, which is
                # swallowed by the except Exception just after — it looks like
                # this reconciliation never takes effect; confirm intent.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
374
375 async def _on_update_k8s_db(
376 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
377 ):
378 """
379 Updating vca status in NSR record
380 :param cluster_uuid: UUID of a k8s cluster
381 :param kdu_instance: The unique name of the KDU instance
382 :param filter: To get nsr_id
383 :cluster_type: The cluster type (juju, k8s)
384 :return: none
385 """
386
387 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
388 # .format(cluster_uuid, kdu_instance, filter))
389
390 nsr_id = filter.get("_id")
391 try:
392 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
393 cluster_uuid=cluster_uuid,
394 kdu_instance=kdu_instance,
395 yaml_format=False,
396 complete_status=True,
397 vca_id=vca_id,
398 )
399
400 # vcaStatus
401 db_dict = dict()
402 db_dict["vcaStatus"] = {nsr_id: vca_status}
403
404 self.logger.debug(
405 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
406 )
407
408 # write to database
409 self.update_db_2("nsrs", nsr_id, db_dict)
410 except (asyncio.CancelledError, asyncio.TimeoutError):
411 raise
412 except Exception as e:
413 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
414
415 @staticmethod
416 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
417 try:
418 env = Environment(
419 undefined=StrictUndefined,
420 autoescape=select_autoescape(default_for_string=True, default=True),
421 )
422 template = env.from_string(cloud_init_text)
423 return template.render(additional_params or {})
424 except UndefinedError as e:
425 raise LcmException(
426 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
427 "file, must be provided in the instantiation parameters inside the "
428 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
429 )
430 except (TemplateError, TemplateNotFound) as e:
431 raise LcmException(
432 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
433 vnfd_id, vdu_id, e
434 )
435 )
436
437 def _get_vdu_cloud_init_content(self, vdu, vnfd):
438 cloud_init_content = cloud_init_file = None
439 try:
440 if vdu.get("cloud-init-file"):
441 base_folder = vnfd["_admin"]["storage"]
442 if base_folder["pkg-dir"]:
443 cloud_init_file = "{}/{}/cloud_init/{}".format(
444 base_folder["folder"],
445 base_folder["pkg-dir"],
446 vdu["cloud-init-file"],
447 )
448 else:
449 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
450 base_folder["folder"],
451 vdu["cloud-init-file"],
452 )
453 with self.fs.file_open(cloud_init_file, "r") as ci_file:
454 cloud_init_content = ci_file.read()
455 elif vdu.get("cloud-init"):
456 cloud_init_content = vdu["cloud-init"]
457
458 return cloud_init_content
459 except FsException as e:
460 raise LcmException(
461 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
462 vnfd["id"], vdu["id"], cloud_init_file, e
463 )
464 )
465
466 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
467 vdur = next(
468 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
469 )
470 additional_params = vdur.get("additionalParams")
471 return parse_yaml_strings(additional_params)
472
473 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
474 """
475 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
476 :param vnfd: input vnfd
477 :param new_id: overrides vnf id if provided
478 :param additionalParams: Instantiation params for VNFs provided
479 :param nsrId: Id of the NSR
480 :return: copy of vnfd
481 """
482 vnfd_RO = deepcopy(vnfd)
483 # remove unused by RO configuration, monitoring, scaling and internal keys
484 vnfd_RO.pop("_id", None)
485 vnfd_RO.pop("_admin", None)
486 vnfd_RO.pop("monitoring-param", None)
487 vnfd_RO.pop("scaling-group-descriptor", None)
488 vnfd_RO.pop("kdu", None)
489 vnfd_RO.pop("k8s-cluster", None)
490 if new_id:
491 vnfd_RO["id"] = new_id
492
493 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
494 for vdu in get_iterable(vnfd_RO, "vdu"):
495 vdu.pop("cloud-init-file", None)
496 vdu.pop("cloud-init", None)
497 return vnfd_RO
498
499 @staticmethod
500 def ip_profile_2_RO(ip_profile):
501 RO_ip_profile = deepcopy(ip_profile)
502 if "dns-server" in RO_ip_profile:
503 if isinstance(RO_ip_profile["dns-server"], list):
504 RO_ip_profile["dns-address"] = []
505 for ds in RO_ip_profile.pop("dns-server"):
506 RO_ip_profile["dns-address"].append(ds["address"])
507 else:
508 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
509 if RO_ip_profile.get("ip-version") == "ipv4":
510 RO_ip_profile["ip-version"] = "IPv4"
511 if RO_ip_profile.get("ip-version") == "ipv6":
512 RO_ip_profile["ip-version"] = "IPv6"
513 if "dhcp-params" in RO_ip_profile:
514 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
515 return RO_ip_profile
516
517 def _get_ro_vim_id_for_vim_account(self, vim_account):
518 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
519 if db_vim["_admin"]["operationalState"] != "ENABLED":
520 raise LcmException(
521 "VIM={} is not available. operationalState={}".format(
522 vim_account, db_vim["_admin"]["operationalState"]
523 )
524 )
525 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
526 return RO_vim_id
527
528 def get_ro_wim_id_for_wim_account(self, wim_account):
529 if isinstance(wim_account, str):
530 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
531 if db_wim["_admin"]["operationalState"] != "ENABLED":
532 raise LcmException(
533 "WIM={} is not available. operationalState={}".format(
534 wim_account, db_wim["_admin"]["operationalState"]
535 )
536 )
537 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
538 return RO_wim_id
539 else:
540 return wim_account
541
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Add or remove vdur entries of a vnfr in the database (scale out/in).

        :param db_vnfr: vnfr content; its 'vdur' list is re-read from the DB
            before returning, so the caller sees the final state
        :param vdu_create: dict vdu-id-ref -> number of new instances to add
        :param vdu_delete: dict vdu-id-ref -> number of instances to remove
        :param vdu_delete: when scaling to 0 the last vdur is kept as a
            'vdur-template' so a later scale-out can clone it
        :param mark_delete: when True instances are only marked DELETING in
            the DB; otherwise they are pulled from the vdur list one by one
        :return: None; db_vnfr is modified in place
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone the most recent existing vdur of this vdu
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    vdur_copy = deepcopy(vdur)
                    # new instance starts building with a fresh identity
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica, dynamic
                        # ones are cleared so the VIM assigns new values
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    # mark the last vdu_count matching instances as DELETING
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
653
654 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
655 """
656 Updates database nsr with the RO info for the created vld
657 :param ns_update_nsr: dictionary to be filled with the updated info
658 :param db_nsr: content of db_nsr. This is also modified
659 :param nsr_desc_RO: nsr descriptor from RO
660 :return: Nothing, LcmException is raised on errors
661 """
662
663 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
664 for net_RO in get_iterable(nsr_desc_RO, "nets"):
665 if vld["id"] != net_RO.get("ns_net_osm_id"):
666 continue
667 vld["vim-id"] = net_RO.get("vim_net_id")
668 vld["name"] = net_RO.get("vim_name")
669 vld["status"] = net_RO.get("status")
670 vld["status-detailed"] = net_RO.get("error_msg")
671 ns_update_nsr["vld.{}".format(vld_index)] = vld
672 break
673 else:
674 raise LcmException(
675 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
676 )
677
678 def set_vnfr_at_error(self, db_vnfrs, error_text):
679 try:
680 for db_vnfr in db_vnfrs.values():
681 vnfr_update = {"status": "ERROR"}
682 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
683 if "status" not in vdur:
684 vdur["status"] = "ERROR"
685 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
686 if error_text:
687 vdur["status-detailed"] = str(error_text)
688 vnfr_update[
689 "vdur.{}.status-detailed".format(vdu_index)
690 ] = "ERROR"
691 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
692 except DbException as e:
693 self.logger.error("Cannot update vnf. {}".format(e))
694
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';' — keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # physical deployment units are not managed by the VIM
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        # skip RO vms of the same vdu until the expected replica
                        # (count-index) is reached
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addresses, matching by internal name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        # no RO vm matched this vdur replica
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
791
792 def _get_ns_config_info(self, nsr_id):
793 """
794 Generates a mapping between vnf,vdu elements and the N2VC id
795 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
796 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
797 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
798 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
799 """
800 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
801 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
802 mapping = {}
803 ns_config_info = {"osm-config-mapping": mapping}
804 for vca in vca_deployed_list:
805 if not vca["member-vnf-index"]:
806 continue
807 if not vca["vdu_id"]:
808 mapping[vca["member-vnf-index"]] = vca["application"]
809 else:
810 mapping[
811 "{}.{}.{}".format(
812 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
813 )
814 ] = vca["application"]
815 return ns_config_info
816
817 async def _instantiate_ng_ro(
818 self,
819 logging_text,
820 nsr_id,
821 nsd,
822 db_nsr,
823 db_nslcmop,
824 db_vnfrs,
825 db_vnfds,
826 n2vc_key_list,
827 stage,
828 start_deploy,
829 timeout_ns_deploy,
830 ):
831
832 db_vims = {}
833
834 def get_vim_account(vim_account_id):
835 nonlocal db_vims
836 if vim_account_id in db_vims:
837 return db_vims[vim_account_id]
838 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
839 db_vims[vim_account_id] = db_vim
840 return db_vim
841
842 # modify target_vld info with instantiation parameters
843 def parse_vld_instantiation_params(
844 target_vim, target_vld, vld_params, target_sdn
845 ):
846 if vld_params.get("ip-profile"):
847 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
848 "ip-profile"
849 ]
850 if vld_params.get("provider-network"):
851 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
852 "provider-network"
853 ]
854 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
855 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
856 "provider-network"
857 ]["sdn-ports"]
858 if vld_params.get("wimAccountId"):
859 target_wim = "wim:{}".format(vld_params["wimAccountId"])
860 target_vld["vim_info"][target_wim] = {}
861 for param in ("vim-network-name", "vim-network-id"):
862 if vld_params.get(param):
863 if isinstance(vld_params[param], dict):
864 for vim, vim_net in vld_params[param].items():
865 other_target_vim = "vim:" + vim
866 populate_dict(
867 target_vld["vim_info"],
868 (other_target_vim, param.replace("-", "_")),
869 vim_net,
870 )
871 else: # isinstance str
872 target_vld["vim_info"][target_vim][
873 param.replace("-", "_")
874 ] = vld_params[param]
875 if vld_params.get("common_id"):
876 target_vld["common_id"] = vld_params.get("common_id")
877
878 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
879 def update_ns_vld_target(target, ns_params):
880 for vnf_params in ns_params.get("vnf", ()):
881 if vnf_params.get("vimAccountId"):
882 target_vnf = next(
883 (
884 vnfr
885 for vnfr in db_vnfrs.values()
886 if vnf_params["member-vnf-index"]
887 == vnfr["member-vnf-index-ref"]
888 ),
889 None,
890 )
891 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
892 if not vdur:
893 return
894 for a_index, a_vld in enumerate(target["ns"]["vld"]):
895 target_vld = find_in_list(
896 get_iterable(vdur, "interfaces"),
897 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
898 )
899
900 vld_params = find_in_list(
901 get_iterable(ns_params, "vld"),
902 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
903 )
904 if target_vld:
905
906 if vnf_params.get("vimAccountId") not in a_vld.get(
907 "vim_info", {}
908 ):
909 target_vim_network_list = [
910 v for _, v in a_vld.get("vim_info").items()
911 ]
912 target_vim_network_name = next(
913 (
914 item.get("vim_network_name", "")
915 for item in target_vim_network_list
916 ),
917 "",
918 )
919
920 target["ns"]["vld"][a_index].get("vim_info").update(
921 {
922 "vim:{}".format(vnf_params["vimAccountId"]): {
923 "vim_network_name": target_vim_network_name,
924 }
925 }
926 )
927
928 if vld_params:
929 for param in ("vim-network-name", "vim-network-id"):
930 if vld_params.get(param) and isinstance(
931 vld_params[param], dict
932 ):
933 for vim, vim_net in vld_params[
934 param
935 ].items():
936 other_target_vim = "vim:" + vim
937 populate_dict(
938 target["ns"]["vld"][a_index].get(
939 "vim_info"
940 ),
941 (
942 other_target_vim,
943 param.replace("-", "_"),
944 ),
945 vim_net,
946 )
947
948 nslcmop_id = db_nslcmop["_id"]
949 target = {
950 "name": db_nsr["name"],
951 "ns": {"vld": []},
952 "vnf": [],
953 "image": deepcopy(db_nsr["image"]),
954 "flavor": deepcopy(db_nsr["flavor"]),
955 "action_id": nslcmop_id,
956 "cloud_init_content": {},
957 }
958 for image in target["image"]:
959 image["vim_info"] = {}
960 for flavor in target["flavor"]:
961 flavor["vim_info"] = {}
962 if db_nsr.get("affinity-or-anti-affinity-group"):
963 target["affinity-or-anti-affinity-group"] = deepcopy(
964 db_nsr["affinity-or-anti-affinity-group"]
965 )
966 for affinity_or_anti_affinity_group in target[
967 "affinity-or-anti-affinity-group"
968 ]:
969 affinity_or_anti_affinity_group["vim_info"] = {}
970
971 if db_nslcmop.get("lcmOperationType") != "instantiate":
972 # get parameters of instantiation:
973 db_nslcmop_instantiate = self.db.get_list(
974 "nslcmops",
975 {
976 "nsInstanceId": db_nslcmop["nsInstanceId"],
977 "lcmOperationType": "instantiate",
978 },
979 )[-1]
980 ns_params = db_nslcmop_instantiate.get("operationParams")
981 else:
982 ns_params = db_nslcmop.get("operationParams")
983 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
984 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
985
986 cp2target = {}
987 for vld_index, vld in enumerate(db_nsr.get("vld")):
988 target_vim = "vim:{}".format(ns_params["vimAccountId"])
989 target_vld = {
990 "id": vld["id"],
991 "name": vld["name"],
992 "mgmt-network": vld.get("mgmt-network", False),
993 "type": vld.get("type"),
994 "vim_info": {
995 target_vim: {
996 "vim_network_name": vld.get("vim-network-name"),
997 "vim_account_id": ns_params["vimAccountId"],
998 }
999 },
1000 }
1001 # check if this network needs SDN assist
1002 if vld.get("pci-interfaces"):
1003 db_vim = get_vim_account(ns_params["vimAccountId"])
1004 sdnc_id = db_vim["config"].get("sdn-controller")
1005 if sdnc_id:
1006 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1007 target_sdn = "sdn:{}".format(sdnc_id)
1008 target_vld["vim_info"][target_sdn] = {
1009 "sdn": True,
1010 "target_vim": target_vim,
1011 "vlds": [sdn_vld],
1012 "type": vld.get("type"),
1013 }
1014
1015 nsd_vnf_profiles = get_vnf_profiles(nsd)
1016 for nsd_vnf_profile in nsd_vnf_profiles:
1017 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1018 if cp["virtual-link-profile-id"] == vld["id"]:
1019 cp2target[
1020 "member_vnf:{}.{}".format(
1021 cp["constituent-cpd-id"][0][
1022 "constituent-base-element-id"
1023 ],
1024 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1025 )
1026 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1027
1028 # check at nsd descriptor, if there is an ip-profile
1029 vld_params = {}
1030 nsd_vlp = find_in_list(
1031 get_virtual_link_profiles(nsd),
1032 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1033 == vld["id"],
1034 )
1035 if (
1036 nsd_vlp
1037 and nsd_vlp.get("virtual-link-protocol-data")
1038 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1039 ):
1040 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1041 "l3-protocol-data"
1042 ]
1043 ip_profile_dest_data = {}
1044 if "ip-version" in ip_profile_source_data:
1045 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1046 "ip-version"
1047 ]
1048 if "cidr" in ip_profile_source_data:
1049 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1050 "cidr"
1051 ]
1052 if "gateway-ip" in ip_profile_source_data:
1053 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1054 "gateway-ip"
1055 ]
1056 if "dhcp-enabled" in ip_profile_source_data:
1057 ip_profile_dest_data["dhcp-params"] = {
1058 "enabled": ip_profile_source_data["dhcp-enabled"]
1059 }
1060 vld_params["ip-profile"] = ip_profile_dest_data
1061
1062 # update vld_params with instantiation params
1063 vld_instantiation_params = find_in_list(
1064 get_iterable(ns_params, "vld"),
1065 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1066 )
1067 if vld_instantiation_params:
1068 vld_params.update(vld_instantiation_params)
1069 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1070 target["ns"]["vld"].append(target_vld)
1071 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1072 update_ns_vld_target(target, ns_params)
1073
1074 for vnfr in db_vnfrs.values():
1075 vnfd = find_in_list(
1076 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1077 )
1078 vnf_params = find_in_list(
1079 get_iterable(ns_params, "vnf"),
1080 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1081 )
1082 target_vnf = deepcopy(vnfr)
1083 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1084 for vld in target_vnf.get("vld", ()):
1085 # check if connected to a ns.vld, to fill target'
1086 vnf_cp = find_in_list(
1087 vnfd.get("int-virtual-link-desc", ()),
1088 lambda cpd: cpd.get("id") == vld["id"],
1089 )
1090 if vnf_cp:
1091 ns_cp = "member_vnf:{}.{}".format(
1092 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1093 )
1094 if cp2target.get(ns_cp):
1095 vld["target"] = cp2target[ns_cp]
1096
1097 vld["vim_info"] = {
1098 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1099 }
1100 # check if this network needs SDN assist
1101 target_sdn = None
1102 if vld.get("pci-interfaces"):
1103 db_vim = get_vim_account(vnfr["vim-account-id"])
1104 sdnc_id = db_vim["config"].get("sdn-controller")
1105 if sdnc_id:
1106 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1107 target_sdn = "sdn:{}".format(sdnc_id)
1108 vld["vim_info"][target_sdn] = {
1109 "sdn": True,
1110 "target_vim": target_vim,
1111 "vlds": [sdn_vld],
1112 "type": vld.get("type"),
1113 }
1114
1115 # check at vnfd descriptor, if there is an ip-profile
1116 vld_params = {}
1117 vnfd_vlp = find_in_list(
1118 get_virtual_link_profiles(vnfd),
1119 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1120 )
1121 if (
1122 vnfd_vlp
1123 and vnfd_vlp.get("virtual-link-protocol-data")
1124 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1125 ):
1126 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1127 "l3-protocol-data"
1128 ]
1129 ip_profile_dest_data = {}
1130 if "ip-version" in ip_profile_source_data:
1131 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1132 "ip-version"
1133 ]
1134 if "cidr" in ip_profile_source_data:
1135 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1136 "cidr"
1137 ]
1138 if "gateway-ip" in ip_profile_source_data:
1139 ip_profile_dest_data[
1140 "gateway-address"
1141 ] = ip_profile_source_data["gateway-ip"]
1142 if "dhcp-enabled" in ip_profile_source_data:
1143 ip_profile_dest_data["dhcp-params"] = {
1144 "enabled": ip_profile_source_data["dhcp-enabled"]
1145 }
1146
1147 vld_params["ip-profile"] = ip_profile_dest_data
1148 # update vld_params with instantiation params
1149 if vnf_params:
1150 vld_instantiation_params = find_in_list(
1151 get_iterable(vnf_params, "internal-vld"),
1152 lambda i_vld: i_vld["name"] == vld["id"],
1153 )
1154 if vld_instantiation_params:
1155 vld_params.update(vld_instantiation_params)
1156 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1157
1158 vdur_list = []
1159 for vdur in target_vnf.get("vdur", ()):
1160 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1161 continue # This vdu must not be created
1162 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1163
1164 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1165
1166 if ssh_keys_all:
1167 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1168 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1169 if (
1170 vdu_configuration
1171 and vdu_configuration.get("config-access")
1172 and vdu_configuration.get("config-access").get("ssh-access")
1173 ):
1174 vdur["ssh-keys"] = ssh_keys_all
1175 vdur["ssh-access-required"] = vdu_configuration[
1176 "config-access"
1177 ]["ssh-access"]["required"]
1178 elif (
1179 vnf_configuration
1180 and vnf_configuration.get("config-access")
1181 and vnf_configuration.get("config-access").get("ssh-access")
1182 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1183 ):
1184 vdur["ssh-keys"] = ssh_keys_all
1185 vdur["ssh-access-required"] = vnf_configuration[
1186 "config-access"
1187 ]["ssh-access"]["required"]
1188 elif ssh_keys_instantiation and find_in_list(
1189 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1190 ):
1191 vdur["ssh-keys"] = ssh_keys_instantiation
1192
1193 self.logger.debug("NS > vdur > {}".format(vdur))
1194
1195 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1196 # cloud-init
1197 if vdud.get("cloud-init-file"):
1198 vdur["cloud-init"] = "{}:file:{}".format(
1199 vnfd["_id"], vdud.get("cloud-init-file")
1200 )
1201 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1202 if vdur["cloud-init"] not in target["cloud_init_content"]:
1203 base_folder = vnfd["_admin"]["storage"]
1204 if base_folder["pkg-dir"]:
1205 cloud_init_file = "{}/{}/cloud_init/{}".format(
1206 base_folder["folder"],
1207 base_folder["pkg-dir"],
1208 vdud.get("cloud-init-file"),
1209 )
1210 else:
1211 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1212 base_folder["folder"],
1213 vdud.get("cloud-init-file"),
1214 )
1215 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1216 target["cloud_init_content"][
1217 vdur["cloud-init"]
1218 ] = ci_file.read()
1219 elif vdud.get("cloud-init"):
1220 vdur["cloud-init"] = "{}:vdu:{}".format(
1221 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1222 )
1223 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1224 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1225 "cloud-init"
1226 ]
1227 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1228 deploy_params_vdu = self._format_additional_params(
1229 vdur.get("additionalParams") or {}
1230 )
1231 deploy_params_vdu["OSM"] = get_osm_params(
1232 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1233 )
1234 vdur["additionalParams"] = deploy_params_vdu
1235
1236 # flavor
1237 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1238 if target_vim not in ns_flavor["vim_info"]:
1239 ns_flavor["vim_info"][target_vim] = {}
1240
1241 # deal with images
1242 # in case alternative images are provided we must check if they should be applied
1243 # for the vim_type, modify the vim_type taking into account
1244 ns_image_id = int(vdur["ns-image-id"])
1245 if vdur.get("alt-image-ids"):
1246 db_vim = get_vim_account(vnfr["vim-account-id"])
1247 vim_type = db_vim["vim_type"]
1248 for alt_image_id in vdur.get("alt-image-ids"):
1249 ns_alt_image = target["image"][int(alt_image_id)]
1250 if vim_type == ns_alt_image.get("vim-type"):
1251 # must use alternative image
1252 self.logger.debug(
1253 "use alternative image id: {}".format(alt_image_id)
1254 )
1255 ns_image_id = alt_image_id
1256 vdur["ns-image-id"] = ns_image_id
1257 break
1258 ns_image = target["image"][int(ns_image_id)]
1259 if target_vim not in ns_image["vim_info"]:
1260 ns_image["vim_info"][target_vim] = {}
1261
1262 # Affinity groups
1263 if vdur.get("affinity-or-anti-affinity-group-id"):
1264 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1265 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1266 if target_vim not in ns_ags["vim_info"]:
1267 ns_ags["vim_info"][target_vim] = {}
1268
1269 vdur["vim_info"] = {target_vim: {}}
1270 # instantiation parameters
1271 if vnf_params:
1272 vdu_instantiation_params = find_in_list(
1273 get_iterable(vnf_params, "vdu"),
1274 lambda i_vdu: i_vdu["id"] == vdud["id"],
1275 )
1276 if vdu_instantiation_params:
1277 # Parse the vdu_volumes from the instantiation params
1278 vdu_volumes = get_volumes_from_instantiation_params(
1279 vdu_instantiation_params, vdud
1280 )
1281 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1282 vdur_list.append(vdur)
1283 target_vnf["vdur"] = vdur_list
1284 target["vnf"].append(target_vnf)
1285
1286 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1287 desc = await self.RO.deploy(nsr_id, target)
1288 self.logger.debug("RO return > {}".format(desc))
1289 action_id = desc["action_id"]
1290 await self._wait_ng_ro(
1291 nsr_id,
1292 action_id,
1293 nslcmop_id,
1294 start_deploy,
1295 timeout_ns_deploy,
1296 stage,
1297 operation="instantiation",
1298 )
1299
1300 # Updating NSR
1301 db_nsr_update = {
1302 "_admin.deployed.RO.operational-status": "running",
1303 "detailed-status": " ".join(stage),
1304 }
1305 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1306 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1307 self._write_op_status(nslcmop_id, stage)
1308 self.logger.debug(
1309 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1310 )
1311 return
1312
1313 async def _wait_ng_ro(
1314 self,
1315 nsr_id,
1316 action_id,
1317 nslcmop_id=None,
1318 start_time=None,
1319 timeout=600,
1320 stage=None,
1321 operation=None,
1322 ):
1323 detailed_status_old = None
1324 db_nsr_update = {}
1325 start_time = start_time or time()
1326 while time() <= start_time + timeout:
1327 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1328 self.logger.debug("Wait NG RO > {}".format(desc_status))
1329 if desc_status["status"] == "FAILED":
1330 raise NgRoException(desc_status["details"])
1331 elif desc_status["status"] == "BUILD":
1332 if stage:
1333 stage[2] = "VIM: ({})".format(desc_status["details"])
1334 elif desc_status["status"] == "DONE":
1335 if stage:
1336 stage[2] = "Deployed at VIM"
1337 break
1338 else:
1339 assert False, "ROclient.check_ns_status returns unknown {}".format(
1340 desc_status["status"]
1341 )
1342 if stage and nslcmop_id and stage[2] != detailed_status_old:
1343 detailed_status_old = stage[2]
1344 db_nsr_update["detailed-status"] = " ".join(stage)
1345 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1346 self._write_op_status(nslcmop_id, stage)
1347 await asyncio.sleep(15, loop=self.loop)
1348 else: # timeout_ns_deploy
1349 raise NgRoException("Timeout waiting ns to deploy")
1350
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminate an NS at NG-RO: deploy an empty target (which undeploys every
        item of the NS), wait for completion, then delete the nsr at RO.

        :param logging_text: prefix text to use at logging
        :param nsr_deployed: nsr "_admin.deployed" content (not used directly here)
        :param nsr_id: ns record identity
        :param nslcmop_id: ns operation identity; also sent to RO as action_id
        :param stage: list with 3 items; stage[2] is overwritten with the result
        :raises LcmException: when RO reports a conflict or an unexpected error
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target makes NG-RO remove everything deployed for this NS
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            # record the delete action so it can be resumed/diagnosed on restart
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                # already gone at RO: treat as successfully deleted, not a failure
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        # always persist the final status, whether the delete succeeded or not
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1423
1424 async def instantiate_RO(
1425 self,
1426 logging_text,
1427 nsr_id,
1428 nsd,
1429 db_nsr,
1430 db_nslcmop,
1431 db_vnfrs,
1432 db_vnfds,
1433 n2vc_key_list,
1434 stage,
1435 ):
1436 """
1437 Instantiate at RO
1438 :param logging_text: preffix text to use at logging
1439 :param nsr_id: nsr identity
1440 :param nsd: database content of ns descriptor
1441 :param db_nsr: database content of ns record
1442 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1443 :param db_vnfrs:
1444 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1445 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1446 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1447 :return: None or exception
1448 """
1449 try:
1450 start_deploy = time()
1451 ns_params = db_nslcmop.get("operationParams")
1452 if ns_params and ns_params.get("timeout_ns_deploy"):
1453 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1454 else:
1455 timeout_ns_deploy = self.timeout.get(
1456 "ns_deploy", self.timeout_ns_deploy
1457 )
1458
1459 # Check for and optionally request placement optimization. Database will be updated if placement activated
1460 stage[2] = "Waiting for Placement."
1461 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1462 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1463 for vnfr in db_vnfrs.values():
1464 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1465 break
1466 else:
1467 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1468
1469 return await self._instantiate_ng_ro(
1470 logging_text,
1471 nsr_id,
1472 nsd,
1473 db_nsr,
1474 db_nslcmop,
1475 db_vnfrs,
1476 db_vnfds,
1477 n2vc_key_list,
1478 stage,
1479 start_deploy,
1480 timeout_ns_deploy,
1481 )
1482 except Exception as e:
1483 stage[2] = "ERROR deploying at VIM"
1484 self.set_vnfr_at_error(db_vnfrs, str(e))
1485 self.logger.error(
1486 "Error deploying at VIM {}".format(e),
1487 exc_info=not isinstance(
1488 e,
1489 (
1490 ROclient.ROClientException,
1491 LcmException,
1492 DbException,
1493 NgRoException,
1494 ),
1495 ),
1496 )
1497 raise
1498
1499 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1500 """
1501 Wait for kdu to be up, get ip address
1502 :param logging_text: prefix use for logging
1503 :param nsr_id:
1504 :param vnfr_id:
1505 :param kdu_name:
1506 :return: IP address, K8s services
1507 """
1508
1509 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1510 nb_tries = 0
1511
1512 while nb_tries < 360:
1513 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1514 kdur = next(
1515 (
1516 x
1517 for x in get_iterable(db_vnfr, "kdur")
1518 if x.get("kdu-name") == kdu_name
1519 ),
1520 None,
1521 )
1522 if not kdur:
1523 raise LcmException(
1524 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1525 )
1526 if kdur.get("status"):
1527 if kdur["status"] in ("READY", "ENABLED"):
1528 return kdur.get("ip-address"), kdur.get("services")
1529 else:
1530 raise LcmException(
1531 "target KDU={} is in error state".format(kdu_name)
1532 )
1533
1534 await asyncio.sleep(10, loop=self.loop)
1535 nb_tries += 1
1536 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1537
1538 async def wait_vm_up_insert_key_ro(
1539 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1540 ):
1541 """
1542 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1543 :param logging_text: prefix use for logging
1544 :param nsr_id:
1545 :param vnfr_id:
1546 :param vdu_id:
1547 :param vdu_index:
1548 :param pub_key: public ssh key to inject, None to skip
1549 :param user: user to apply the public ssh key
1550 :return: IP address
1551 """
1552
1553 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1554 ro_nsr_id = None
1555 ip_address = None
1556 nb_tries = 0
1557 target_vdu_id = None
1558 ro_retries = 0
1559
1560 while True:
1561
1562 ro_retries += 1
1563 if ro_retries >= 360: # 1 hour
1564 raise LcmException(
1565 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1566 )
1567
1568 await asyncio.sleep(10, loop=self.loop)
1569
1570 # get ip address
1571 if not target_vdu_id:
1572 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1573
1574 if not vdu_id: # for the VNF case
1575 if db_vnfr.get("status") == "ERROR":
1576 raise LcmException(
1577 "Cannot inject ssh-key because target VNF is in error state"
1578 )
1579 ip_address = db_vnfr.get("ip-address")
1580 if not ip_address:
1581 continue
1582 vdur = next(
1583 (
1584 x
1585 for x in get_iterable(db_vnfr, "vdur")
1586 if x.get("ip-address") == ip_address
1587 ),
1588 None,
1589 )
1590 else: # VDU case
1591 vdur = next(
1592 (
1593 x
1594 for x in get_iterable(db_vnfr, "vdur")
1595 if x.get("vdu-id-ref") == vdu_id
1596 and x.get("count-index") == vdu_index
1597 ),
1598 None,
1599 )
1600
1601 if (
1602 not vdur and len(db_vnfr.get("vdur", ())) == 1
1603 ): # If only one, this should be the target vdu
1604 vdur = db_vnfr["vdur"][0]
1605 if not vdur:
1606 raise LcmException(
1607 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1608 vnfr_id, vdu_id, vdu_index
1609 )
1610 )
1611 # New generation RO stores information at "vim_info"
1612 ng_ro_status = None
1613 target_vim = None
1614 if vdur.get("vim_info"):
1615 target_vim = next(
1616 t for t in vdur["vim_info"]
1617 ) # there should be only one key
1618 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1619 if (
1620 vdur.get("pdu-type")
1621 or vdur.get("status") == "ACTIVE"
1622 or ng_ro_status == "ACTIVE"
1623 ):
1624 ip_address = vdur.get("ip-address")
1625 if not ip_address:
1626 continue
1627 target_vdu_id = vdur["vdu-id-ref"]
1628 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1629 raise LcmException(
1630 "Cannot inject ssh-key because target VM is in error state"
1631 )
1632
1633 if not target_vdu_id:
1634 continue
1635
1636 # inject public key into machine
1637 if pub_key and user:
1638 self.logger.debug(logging_text + "Inserting RO key")
1639 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1640 if vdur.get("pdu-type"):
1641 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1642 return ip_address
1643 try:
1644 ro_vm_id = "{}-{}".format(
1645 db_vnfr["member-vnf-index-ref"], target_vdu_id
1646 ) # TODO add vdu_index
1647 if self.ng_ro:
1648 target = {
1649 "action": {
1650 "action": "inject_ssh_key",
1651 "key": pub_key,
1652 "user": user,
1653 },
1654 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1655 }
1656 desc = await self.RO.deploy(nsr_id, target)
1657 action_id = desc["action_id"]
1658 await self._wait_ng_ro(
1659 nsr_id, action_id, timeout=600, operation="instantiation"
1660 )
1661 break
1662 else:
1663 # wait until NS is deployed at RO
1664 if not ro_nsr_id:
1665 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1666 ro_nsr_id = deep_get(
1667 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1668 )
1669 if not ro_nsr_id:
1670 continue
1671 result_dict = await self.RO.create_action(
1672 item="ns",
1673 item_id_name=ro_nsr_id,
1674 descriptor={
1675 "add_public_key": pub_key,
1676 "vms": [ro_vm_id],
1677 "user": user,
1678 },
1679 )
1680 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1681 if not result_dict or not isinstance(result_dict, dict):
1682 raise LcmException(
1683 "Unknown response from RO when injecting key"
1684 )
1685 for result in result_dict.values():
1686 if result.get("vim_result") == 200:
1687 break
1688 else:
1689 raise ROclient.ROClientException(
1690 "error injecting key: {}".format(
1691 result.get("description")
1692 )
1693 )
1694 break
1695 except NgRoException as e:
1696 raise LcmException(
1697 "Reaching max tries injecting key. Error: {}".format(e)
1698 )
1699 except ROclient.ROClientException as e:
1700 if not nb_tries:
1701 self.logger.debug(
1702 logging_text
1703 + "error injecting key: {}. Retrying until {} seconds".format(
1704 e, 20 * 10
1705 )
1706 )
1707 nb_tries += 1
1708 if nb_tries >= 20:
1709 raise LcmException(
1710 "Reaching max tries injecting key. Error: {}".format(e)
1711 )
1712 else:
1713 break
1714
1715 return ip_address
1716
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: ns record identity, used to re-read configuration status each poll
        :param vca_deployed_list: "_admin.deployed.VCA" list; only the entry at
            vca_index is used here (the full list is re-read from DB in the loop)
        :param vca_index: index of the VCA whose dependencies must become READY
        :raises LcmException: if a dependent charm is BROKEN or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): "timeout" counts iterations, each followed by a 10 s sleep,
        # so the effective wait is up to ~3000 s; confirm 300 s was not intended
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # a dependency is any VCA of the same member-vnf-index, or any
                # VCA at all when this one has no member-vnf-index (NS level)
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # some dependency still in progress: stop scanning, sleep
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1754
1755 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1756 vca_id = None
1757 if db_vnfr:
1758 vca_id = deep_get(db_vnfr, ("vca-id",))
1759 elif db_nsr:
1760 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1761 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1762 return vca_id
1763
1764 async def instantiate_N2VC(
1765 self,
1766 logging_text,
1767 vca_index,
1768 nsi_id,
1769 db_nsr,
1770 db_vnfr,
1771 vdu_id,
1772 kdu_name,
1773 vdu_index,
1774 config_descriptor,
1775 deploy_params,
1776 base_folder,
1777 nslcmop_id,
1778 stage,
1779 vca_type,
1780 vca_name,
1781 ee_config_descriptor,
1782 ):
1783 nsr_id = db_nsr["_id"]
1784 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1785 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1786 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1787 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1788 db_dict = {
1789 "collection": "nsrs",
1790 "filter": {"_id": nsr_id},
1791 "path": db_update_entry,
1792 }
1793 step = ""
1794 try:
1795
1796 element_type = "NS"
1797 element_under_configuration = nsr_id
1798
1799 vnfr_id = None
1800 if db_vnfr:
1801 vnfr_id = db_vnfr["_id"]
1802 osm_config["osm"]["vnf_id"] = vnfr_id
1803
1804 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1805
1806 if vca_type == "native_charm":
1807 index_number = 0
1808 else:
1809 index_number = vdu_index or 0
1810
1811 if vnfr_id:
1812 element_type = "VNF"
1813 element_under_configuration = vnfr_id
1814 namespace += ".{}-{}".format(vnfr_id, index_number)
1815 if vdu_id:
1816 namespace += ".{}-{}".format(vdu_id, index_number)
1817 element_type = "VDU"
1818 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1819 osm_config["osm"]["vdu_id"] = vdu_id
1820 elif kdu_name:
1821 namespace += ".{}".format(kdu_name)
1822 element_type = "KDU"
1823 element_under_configuration = kdu_name
1824 osm_config["osm"]["kdu_name"] = kdu_name
1825
1826 # Get artifact path
1827 if base_folder["pkg-dir"]:
1828 artifact_path = "{}/{}/{}/{}".format(
1829 base_folder["folder"],
1830 base_folder["pkg-dir"],
1831 "charms"
1832 if vca_type
1833 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1834 else "helm-charts",
1835 vca_name,
1836 )
1837 else:
1838 artifact_path = "{}/Scripts/{}/{}/".format(
1839 base_folder["folder"],
1840 "charms"
1841 if vca_type
1842 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1843 else "helm-charts",
1844 vca_name,
1845 )
1846
1847 self.logger.debug("Artifact path > {}".format(artifact_path))
1848
1849 # get initial_config_primitive_list that applies to this element
1850 initial_config_primitive_list = config_descriptor.get(
1851 "initial-config-primitive"
1852 )
1853
1854 self.logger.debug(
1855 "Initial config primitive list > {}".format(
1856 initial_config_primitive_list
1857 )
1858 )
1859
1860 # add config if not present for NS charm
1861 ee_descriptor_id = ee_config_descriptor.get("id")
1862 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1863 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1864 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1865 )
1866
1867 self.logger.debug(
1868 "Initial config primitive list #2 > {}".format(
1869 initial_config_primitive_list
1870 )
1871 )
1872 # n2vc_redesign STEP 3.1
1873 # find old ee_id if exists
1874 ee_id = vca_deployed.get("ee_id")
1875
1876 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1877 # create or register execution environment in VCA
1878 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1879
1880 self._write_configuration_status(
1881 nsr_id=nsr_id,
1882 vca_index=vca_index,
1883 status="CREATING",
1884 element_under_configuration=element_under_configuration,
1885 element_type=element_type,
1886 )
1887
1888 step = "create execution environment"
1889 self.logger.debug(logging_text + step)
1890
1891 ee_id = None
1892 credentials = None
1893 if vca_type == "k8s_proxy_charm":
1894 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1895 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1896 namespace=namespace,
1897 artifact_path=artifact_path,
1898 db_dict=db_dict,
1899 vca_id=vca_id,
1900 )
1901 elif vca_type == "helm" or vca_type == "helm-v3":
1902 ee_id, credentials = await self.vca_map[
1903 vca_type
1904 ].create_execution_environment(
1905 namespace=namespace,
1906 reuse_ee_id=ee_id,
1907 db_dict=db_dict,
1908 config=osm_config,
1909 artifact_path=artifact_path,
1910 chart_model=vca_name,
1911 vca_type=vca_type,
1912 )
1913 else:
1914 ee_id, credentials = await self.vca_map[
1915 vca_type
1916 ].create_execution_environment(
1917 namespace=namespace,
1918 reuse_ee_id=ee_id,
1919 db_dict=db_dict,
1920 vca_id=vca_id,
1921 )
1922
1923 elif vca_type == "native_charm":
1924 step = "Waiting to VM being up and getting IP address"
1925 self.logger.debug(logging_text + step)
1926 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1927 logging_text,
1928 nsr_id,
1929 vnfr_id,
1930 vdu_id,
1931 vdu_index,
1932 user=None,
1933 pub_key=None,
1934 )
1935 credentials = {"hostname": rw_mgmt_ip}
1936 # get username
1937 username = deep_get(
1938 config_descriptor, ("config-access", "ssh-access", "default-user")
1939 )
1940 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1941 # merged. Meanwhile let's get username from initial-config-primitive
1942 if not username and initial_config_primitive_list:
1943 for config_primitive in initial_config_primitive_list:
1944 for param in config_primitive.get("parameter", ()):
1945 if param["name"] == "ssh-username":
1946 username = param["value"]
1947 break
1948 if not username:
1949 raise LcmException(
1950 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1951 "'config-access.ssh-access.default-user'"
1952 )
1953 credentials["username"] = username
1954 # n2vc_redesign STEP 3.2
1955
1956 self._write_configuration_status(
1957 nsr_id=nsr_id,
1958 vca_index=vca_index,
1959 status="REGISTERING",
1960 element_under_configuration=element_under_configuration,
1961 element_type=element_type,
1962 )
1963
1964 step = "register execution environment {}".format(credentials)
1965 self.logger.debug(logging_text + step)
1966 ee_id = await self.vca_map[vca_type].register_execution_environment(
1967 credentials=credentials,
1968 namespace=namespace,
1969 db_dict=db_dict,
1970 vca_id=vca_id,
1971 )
1972
1973 # for compatibility with MON/POL modules, the need model and application name at database
1974 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1975 ee_id_parts = ee_id.split(".")
1976 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1977 if len(ee_id_parts) >= 2:
1978 model_name = ee_id_parts[0]
1979 application_name = ee_id_parts[1]
1980 db_nsr_update[db_update_entry + "model"] = model_name
1981 db_nsr_update[db_update_entry + "application"] = application_name
1982
1983 # n2vc_redesign STEP 3.3
1984 step = "Install configuration Software"
1985
1986 self._write_configuration_status(
1987 nsr_id=nsr_id,
1988 vca_index=vca_index,
1989 status="INSTALLING SW",
1990 element_under_configuration=element_under_configuration,
1991 element_type=element_type,
1992 other_update=db_nsr_update,
1993 )
1994
1995 # TODO check if already done
1996 self.logger.debug(logging_text + step)
1997 config = None
1998 if vca_type == "native_charm":
1999 config_primitive = next(
2000 (p for p in initial_config_primitive_list if p["name"] == "config"),
2001 None,
2002 )
2003 if config_primitive:
2004 config = self._map_primitive_params(
2005 config_primitive, {}, deploy_params
2006 )
2007 num_units = 1
2008 if vca_type == "lxc_proxy_charm":
2009 if element_type == "NS":
2010 num_units = db_nsr.get("config-units") or 1
2011 elif element_type == "VNF":
2012 num_units = db_vnfr.get("config-units") or 1
2013 elif element_type == "VDU":
2014 for v in db_vnfr["vdur"]:
2015 if vdu_id == v["vdu-id-ref"]:
2016 num_units = v.get("config-units") or 1
2017 break
2018 if vca_type != "k8s_proxy_charm":
2019 await self.vca_map[vca_type].install_configuration_sw(
2020 ee_id=ee_id,
2021 artifact_path=artifact_path,
2022 db_dict=db_dict,
2023 config=config,
2024 num_units=num_units,
2025 vca_id=vca_id,
2026 vca_type=vca_type,
2027 )
2028
2029 # write in db flag of configuration_sw already installed
2030 self.update_db_2(
2031 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2032 )
2033
2034 # add relations for this VCA (wait for other peers related with this VCA)
2035 await self._add_vca_relations(
2036 logging_text=logging_text,
2037 nsr_id=nsr_id,
2038 vca_type=vca_type,
2039 vca_index=vca_index,
2040 )
2041
2042 # if SSH access is required, then get execution environment SSH public
2043 # if native charm we have waited already to VM be UP
2044 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2045 pub_key = None
2046 user = None
2047 # self.logger.debug("get ssh key block")
2048 if deep_get(
2049 config_descriptor, ("config-access", "ssh-access", "required")
2050 ):
2051 # self.logger.debug("ssh key needed")
2052 # Needed to inject a ssh key
2053 user = deep_get(
2054 config_descriptor,
2055 ("config-access", "ssh-access", "default-user"),
2056 )
2057 step = "Install configuration Software, getting public ssh key"
2058 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2059 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2060 )
2061
2062 step = "Insert public key into VM user={} ssh_key={}".format(
2063 user, pub_key
2064 )
2065 else:
2066 # self.logger.debug("no need to get ssh key")
2067 step = "Waiting to VM being up and getting IP address"
2068 self.logger.debug(logging_text + step)
2069
2070 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2071 rw_mgmt_ip = None
2072
2073 # n2vc_redesign STEP 5.1
2074 # wait for RO (ip-address) Insert pub_key into VM
2075 if vnfr_id:
2076 if kdu_name:
2077 rw_mgmt_ip, services = await self.wait_kdu_up(
2078 logging_text, nsr_id, vnfr_id, kdu_name
2079 )
2080 vnfd = self.db.get_one(
2081 "vnfds_revisions",
2082 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2083 )
2084 kdu = get_kdu(vnfd, kdu_name)
2085 kdu_services = [
2086 service["name"] for service in get_kdu_services(kdu)
2087 ]
2088 exposed_services = []
2089 for service in services:
2090 if any(s in service["name"] for s in kdu_services):
2091 exposed_services.append(service)
2092 await self.vca_map[vca_type].exec_primitive(
2093 ee_id=ee_id,
2094 primitive_name="config",
2095 params_dict={
2096 "osm-config": json.dumps(
2097 OsmConfigBuilder(
2098 k8s={"services": exposed_services}
2099 ).build()
2100 )
2101 },
2102 vca_id=vca_id,
2103 )
2104
2105 # This verification is needed in order to avoid trying to add a public key
2106 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2107 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2108 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2109 # or it is a KNF)
2110 elif db_vnfr.get("vdur"):
2111 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2112 logging_text,
2113 nsr_id,
2114 vnfr_id,
2115 vdu_id,
2116 vdu_index,
2117 user=user,
2118 pub_key=pub_key,
2119 )
2120
2121 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2122
2123 # store rw_mgmt_ip in deploy params for later replacement
2124 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2125
2126 # n2vc_redesign STEP 6 Execute initial config primitive
2127 step = "execute initial config primitive"
2128
2129 # wait for dependent primitives execution (NS -> VNF -> VDU)
2130 if initial_config_primitive_list:
2131 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2132
2133 # stage, in function of element type: vdu, kdu, vnf or ns
2134 my_vca = vca_deployed_list[vca_index]
2135 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2136 # VDU or KDU
2137 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2138 elif my_vca.get("member-vnf-index"):
2139 # VNF
2140 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2141 else:
2142 # NS
2143 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2144
2145 self._write_configuration_status(
2146 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2147 )
2148
2149 self._write_op_status(op_id=nslcmop_id, stage=stage)
2150
2151 check_if_terminated_needed = True
2152 for initial_config_primitive in initial_config_primitive_list:
2153 # adding information on the vca_deployed if it is a NS execution environment
2154 if not vca_deployed["member-vnf-index"]:
2155 deploy_params["ns_config_info"] = json.dumps(
2156 self._get_ns_config_info(nsr_id)
2157 )
2158 # TODO check if already done
2159 primitive_params_ = self._map_primitive_params(
2160 initial_config_primitive, {}, deploy_params
2161 )
2162
2163 step = "execute primitive '{}' params '{}'".format(
2164 initial_config_primitive["name"], primitive_params_
2165 )
2166 self.logger.debug(logging_text + step)
2167 await self.vca_map[vca_type].exec_primitive(
2168 ee_id=ee_id,
2169 primitive_name=initial_config_primitive["name"],
2170 params_dict=primitive_params_,
2171 db_dict=db_dict,
2172 vca_id=vca_id,
2173 vca_type=vca_type,
2174 )
2175 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2176 if check_if_terminated_needed:
2177 if config_descriptor.get("terminate-config-primitive"):
2178 self.update_db_2(
2179 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2180 )
2181 check_if_terminated_needed = False
2182
2183 # TODO register in database that primitive is done
2184
2185 # STEP 7 Configure metrics
2186 if vca_type == "helm" or vca_type == "helm-v3":
2187 # TODO: review for those cases where the helm chart is a reference and
2188 # is not part of the NF package
2189 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2190 ee_id=ee_id,
2191 artifact_path=artifact_path,
2192 ee_config_descriptor=ee_config_descriptor,
2193 vnfr_id=vnfr_id,
2194 nsr_id=nsr_id,
2195 target_ip=rw_mgmt_ip,
2196 )
2197 if prometheus_jobs:
2198 self.update_db_2(
2199 "nsrs",
2200 nsr_id,
2201 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2202 )
2203
2204 for job in prometheus_jobs:
2205 self.db.set_one(
2206 "prometheus_jobs",
2207 {"job_name": job["job_name"]},
2208 job,
2209 upsert=True,
2210 fail_on_empty=False,
2211 )
2212
2213 step = "instantiated at VCA"
2214 self.logger.debug(logging_text + step)
2215
2216 self._write_configuration_status(
2217 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2218 )
2219
2220 except Exception as e: # TODO not use Exception but N2VC exception
2221 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2222 if not isinstance(
2223 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2224 ):
2225 self.logger.error(
2226 "Exception while {} : {}".format(step, e), exc_info=True
2227 )
2228 self._write_configuration_status(
2229 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2230 )
2231 raise LcmException("{} {}".format(step, e)) from e
2232
2233 def _write_ns_status(
2234 self,
2235 nsr_id: str,
2236 ns_state: str,
2237 current_operation: str,
2238 current_operation_id: str,
2239 error_description: str = None,
2240 error_detail: str = None,
2241 other_update: dict = None,
2242 ):
2243 """
2244 Update db_nsr fields.
2245 :param nsr_id:
2246 :param ns_state:
2247 :param current_operation:
2248 :param current_operation_id:
2249 :param error_description:
2250 :param error_detail:
2251 :param other_update: Other required changes at database if provided, will be cleared
2252 :return:
2253 """
2254 try:
2255 db_dict = other_update or {}
2256 db_dict[
2257 "_admin.nslcmop"
2258 ] = current_operation_id # for backward compatibility
2259 db_dict["_admin.current-operation"] = current_operation_id
2260 db_dict["_admin.operation-type"] = (
2261 current_operation if current_operation != "IDLE" else None
2262 )
2263 db_dict["currentOperation"] = current_operation
2264 db_dict["currentOperationID"] = current_operation_id
2265 db_dict["errorDescription"] = error_description
2266 db_dict["errorDetail"] = error_detail
2267
2268 if ns_state:
2269 db_dict["nsState"] = ns_state
2270 self.update_db_2("nsrs", nsr_id, db_dict)
2271 except DbException as e:
2272 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2273
2274 def _write_op_status(
2275 self,
2276 op_id: str,
2277 stage: list = None,
2278 error_message: str = None,
2279 queuePosition: int = 0,
2280 operation_state: str = None,
2281 other_update: dict = None,
2282 ):
2283 try:
2284 db_dict = other_update or {}
2285 db_dict["queuePosition"] = queuePosition
2286 if isinstance(stage, list):
2287 db_dict["stage"] = stage[0]
2288 db_dict["detailed-status"] = " ".join(stage)
2289 elif stage is not None:
2290 db_dict["stage"] = str(stage)
2291
2292 if error_message is not None:
2293 db_dict["errorMessage"] = error_message
2294 if operation_state is not None:
2295 db_dict["operationState"] = operation_state
2296 db_dict["statusEnteredTime"] = time()
2297 self.update_db_2("nslcmops", op_id, db_dict)
2298 except DbException as e:
2299 self.logger.warn(
2300 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2301 )
2302
2303 def _write_all_config_status(self, db_nsr: dict, status: str):
2304 try:
2305 nsr_id = db_nsr["_id"]
2306 # configurationStatus
2307 config_status = db_nsr.get("configurationStatus")
2308 if config_status:
2309 db_nsr_update = {
2310 "configurationStatus.{}.status".format(index): status
2311 for index, v in enumerate(config_status)
2312 if v
2313 }
2314 # update status
2315 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2316
2317 except DbException as e:
2318 self.logger.warn(
2319 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2320 )
2321
2322 def _write_configuration_status(
2323 self,
2324 nsr_id: str,
2325 vca_index: int,
2326 status: str = None,
2327 element_under_configuration: str = None,
2328 element_type: str = None,
2329 other_update: dict = None,
2330 ):
2331
2332 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2333 # .format(vca_index, status))
2334
2335 try:
2336 db_path = "configurationStatus.{}.".format(vca_index)
2337 db_dict = other_update or {}
2338 if status:
2339 db_dict[db_path + "status"] = status
2340 if element_under_configuration:
2341 db_dict[
2342 db_path + "elementUnderConfiguration"
2343 ] = element_under_configuration
2344 if element_type:
2345 db_dict[db_path + "elementType"] = element_type
2346 self.update_db_2("nsrs", nsr_id, db_dict)
2347 except DbException as e:
2348 self.logger.warn(
2349 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2350 status, nsr_id, vca_index, e
2351 )
2352 )
2353
2354 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2355 """
2356 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2357 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2358 Database is used because the result can be obtained from a different LCM worker in case of HA.
2359 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2360 :param db_nslcmop: database content of nslcmop
2361 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2362 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2363 computed 'vim-account-id'
2364 """
2365 modified = False
2366 nslcmop_id = db_nslcmop["_id"]
2367 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2368 if placement_engine == "PLA":
2369 self.logger.debug(
2370 logging_text + "Invoke and wait for placement optimization"
2371 )
2372 await self.msg.aiowrite(
2373 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2374 )
2375 db_poll_interval = 5
2376 wait = db_poll_interval * 10
2377 pla_result = None
2378 while not pla_result and wait >= 0:
2379 await asyncio.sleep(db_poll_interval)
2380 wait -= db_poll_interval
2381 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2382 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2383
2384 if not pla_result:
2385 raise LcmException(
2386 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2387 )
2388
2389 for pla_vnf in pla_result["vnf"]:
2390 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2391 if not pla_vnf.get("vimAccountId") or not vnfr:
2392 continue
2393 modified = True
2394 self.db.set_one(
2395 "vnfrs",
2396 {"_id": vnfr["_id"]},
2397 {"vim-account-id": pla_vnf["vimAccountId"]},
2398 )
2399 # Modifies db_vnfrs
2400 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2401 return modified
2402
2403 def update_nsrs_with_pla_result(self, params):
2404 try:
2405 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2406 self.update_db_2(
2407 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2408 )
2409 except Exception as e:
2410 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2411
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Deploy a NS instance: KDUs, VMs at VIM and execution environments (N2VC).

        Locks the operation for HA, reads every involved record (nslcmop, nsr,
        nsd, vnfrs, vnfds), launches the KDU / RO / N2VC deployment tasks and,
        in the finally block, waits for them and writes the resulting
        operational-status, config-status and detailed-status at database,
        notifying the final operation state via kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Result is reported at database and via kafka
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do here
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored as a JSON string; decode it once here
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            # operation-level timeout overrides the configured/default one
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                # kdur additionalParams are stored as JSON strings; decode them
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts (appended below), so
                # testing the id string against it never matches and the vnfd is
                # re-read from db for every vnfr sharing it — confirm if caching
                # by id was intended here.
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs as a background task; it is awaited at finally
            # via _wait_for_tasks together with the rest of tasks_dict_info
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if any
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # Deploy charms for each KDU that supports one.
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            # NOTE(review): 'as exc' rebinds — and, per Python semantics, then
            # deletes — the outer 'exc'; harmless today because 'exc' is not
            # read again below, but fragile if that changes.
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )

            # notify NBI/subscribers about the final operation state
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2897
2898 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2899 if vnfd_id not in cached_vnfds:
2900 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2901 return cached_vnfds[vnfd_id]
2902
2903 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2904 if vnf_profile_id not in cached_vnfrs:
2905 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2906 "vnfrs",
2907 {
2908 "member-vnf-index-ref": vnf_profile_id,
2909 "nsr-id-ref": nsr_id,
2910 },
2911 )
2912 return cached_vnfrs[vnf_profile_id]
2913
2914 def _is_deployed_vca_in_relation(
2915 self, vca: DeployedVCA, relation: Relation
2916 ) -> bool:
2917 found = False
2918 for endpoint in (relation.provider, relation.requirer):
2919 if endpoint["kdu-resource-profile-id"]:
2920 continue
2921 found = (
2922 vca.vnf_profile_id == endpoint.vnf_profile_id
2923 and vca.vdu_profile_id == endpoint.vdu_profile_id
2924 and vca.execution_environment_ref == endpoint.execution_environment_ref
2925 )
2926 if found:
2927 break
2928 return found
2929
2930 def _update_ee_relation_data_with_implicit_data(
2931 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2932 ):
2933 ee_relation_data = safe_get_ee_relation(
2934 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2935 )
2936 ee_relation_level = EELevel.get_level(ee_relation_data)
2937 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2938 "execution-environment-ref"
2939 ]:
2940 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2941 vnfd_id = vnf_profile["vnfd-id"]
2942 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2943 entity_id = (
2944 vnfd_id
2945 if ee_relation_level == EELevel.VNF
2946 else ee_relation_data["vdu-profile-id"]
2947 )
2948 ee = get_juju_ee_ref(db_vnfd, entity_id)
2949 if not ee:
2950 raise Exception(
2951 f"not execution environments found for ee_relation {ee_relation_data}"
2952 )
2953 ee_relation_data["execution-environment-ref"] = ee["id"]
2954 return ee_relation_data
2955
def _get_ns_relations(
    self,
    nsr_id: str,
    nsd: Dict[str, Any],
    vca: DeployedVCA,
    cached_vnfds: Dict[str, Any],
) -> List[Relation]:
    """Return the NS-level relations declared in *nsd* that involve *vca*."""

    def _endpoint_from_entity(entity):
        # Build the endpoint dict for one side of an "entities"-style
        # relation; endpoints other than the NS itself carry a
        # vnf-profile-id.
        endpoint = {"nsr-id": nsr_id, "endpoint": entity["endpoint"]}
        if entity["id"] != nsd["id"]:
            endpoint["vnf-profile-id"] = entity["id"]
        return endpoint

    relations = []
    for db_relation in get_ns_configuration_relation_list(nsd):
        if all(side in db_relation for side in ("provider", "requirer")):
            # New-style relation: both endpoints spelled out explicitly.
            provider_data = db_relation["provider"]
            requirer_data = db_relation["requirer"]
        elif "entities" in db_relation:
            # Legacy "entities" relation: first entry provides, second requires.
            provider_data = _endpoint_from_entity(db_relation["entities"][0])
            requirer_data = _endpoint_from_entity(db_relation["entities"][1])
        else:
            raise Exception(
                "provider/requirer or entities must be included in the relation."
            )
        provider = EERelation(
            self._update_ee_relation_data_with_implicit_data(
                nsr_id, nsd, provider_data, cached_vnfds
            )
        )
        requirer = EERelation(
            self._update_ee_relation_data_with_implicit_data(
                nsr_id, nsd, requirer_data, cached_vnfds
            )
        )
        relation = Relation(db_relation["name"], provider, requirer)
        # Keep only the relations in which this deployed VCA participates.
        if self._is_deployed_vca_in_relation(vca, relation):
            relations.append(relation)
    return relations
3003
def _get_vnf_relations(
    self,
    nsr_id: str,
    nsd: Dict[str, Any],
    vca: DeployedVCA,
    cached_vnfds: Dict[str, Any],
) -> List[Relation]:
    """Return the VNF-level relations of the VCA's VNF profile that involve it."""
    vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
    vnf_profile_id = vnf_profile["id"]
    vnfd_id = vnf_profile["vnfd-id"]
    db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)

    def _endpoint_from_entity(entity):
        # Build the endpoint dict for one side of an "entities"-style
        # relation; endpoints other than the VNF itself carry a
        # vdu-profile-id.
        endpoint = {
            "nsr-id": nsr_id,
            "vnf-profile-id": vnf_profile_id,
            "endpoint": entity["endpoint"],
        }
        if entity["id"] != vnfd_id:
            endpoint["vdu-profile-id"] = entity["id"]
        return endpoint

    relations = []
    for db_relation in get_relation_list(db_vnfd, vnfd_id):
        if all(side in db_relation for side in ("provider", "requirer")):
            # New-style relation: both endpoints spelled out explicitly.
            provider_data = db_relation["provider"]
            requirer_data = db_relation["requirer"]
        elif "entities" in db_relation:
            # Legacy "entities" relation: first entry provides, second requires.
            provider_data = _endpoint_from_entity(db_relation["entities"][0])
            requirer_data = _endpoint_from_entity(db_relation["entities"][1])
        else:
            raise Exception(
                "provider/requirer or entities must be included in the relation."
            )
        provider = EERelation(
            self._update_ee_relation_data_with_implicit_data(
                nsr_id, nsd, provider_data, cached_vnfds, vnf_profile_id=vnf_profile_id
            )
        )
        requirer = EERelation(
            self._update_ee_relation_data_with_implicit_data(
                nsr_id, nsd, requirer_data, cached_vnfds, vnf_profile_id=vnf_profile_id
            )
        )
        relation = Relation(db_relation["name"], provider, requirer)
        # Keep only the relations in which this deployed VCA participates.
        if self._is_deployed_vca_in_relation(vca, relation):
            relations.append(relation)
    return relations
3057
def _get_kdu_resource_data(
    self,
    ee_relation: EERelation,
    db_nsr: Dict[str, Any],
    cached_vnfds: Dict[str, Any],
) -> DeployedK8sResource:
    """Resolve the deployed KDU resource referenced by a KDU-level endpoint.

    Looks up the VNFD of the endpoint's vnf-profile, reads the KDU resource
    profile it references and returns the matching deployed KDU record,
    enriched with the "resource-name" of the profile.

    :param ee_relation: KDU-level relation endpoint.
    :param db_nsr: NS record containing the deployed KDU list.
    :param cached_vnfds: cache (vnfd id -> vnfd) to avoid repeated DB reads.
    :return: the deployed KDU record with "resource-name" added.
    """
    nsd = get_nsd(db_nsr)
    vnf_profiles = get_vnf_profiles(nsd)
    vnfd_id = find_in_list(
        vnf_profiles,
        lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
    )["vnfd-id"]
    db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
    kdu_resource_profile = get_kdu_resource_profile(
        db_vnfd, ee_relation.kdu_resource_profile_id
    )
    kdu_name = kdu_resource_profile["kdu-name"]
    # Use dict defaults here: the previous tuple defaults broke the chained
    # .get() call with AttributeError when "_admin" was missing.
    deployed_kdu, _ = get_deployed_kdu(
        db_nsr.get("_admin", {}).get("deployed", {}),
        kdu_name,
        ee_relation.vnf_profile_id,
    )
    deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
    return deployed_kdu
3082
def _get_deployed_component(
    self,
    ee_relation: EERelation,
    db_nsr: Dict[str, Any],
    cached_vnfds: Dict[str, Any],
) -> DeployedComponent:
    """Locate the deployed component (VCA or K8s resource) of an endpoint.

    Returns None when nothing matching the endpoint is deployed yet.
    """
    nsr_id = db_nsr["_id"]
    ee_level = EELevel.get_level(ee_relation)
    if ee_level == EELevel.KDU:
        # KDU-level endpoints map to a K8s resource instead of a VCA.
        kdu_resource_data = self._get_kdu_resource_data(
            ee_relation, db_nsr, cached_vnfds
        )
        if kdu_resource_data:
            return DeployedK8sResource(kdu_resource_data)
        return None
    # VCA-backed endpoints: build the filter that identifies the VCA record.
    if ee_level == EELevel.NS:
        vca_filter = {"vdu_id": None, "member-vnf-index": None}
    elif ee_level == EELevel.VNF:
        vca_filter = {
            "vdu_id": None,
            "member-vnf-index": ee_relation.vnf_profile_id,
            "ee_descriptor_id": ee_relation.execution_environment_ref,
        }
    elif ee_level == EELevel.VDU:
        vca_filter = {
            "vdu_id": ee_relation.vdu_profile_id,
            "member-vnf-index": ee_relation.vnf_profile_id,
            "ee_descriptor_id": ee_relation.execution_environment_ref,
        }
    else:
        # Unknown level: mirror the original fall-through (nothing found).
        return None
    vca = get_deployed_vca(db_nsr, vca_filter)
    return DeployedVCA(nsr_id, vca) if vca else None
3125
async def _add_relation(
    self,
    relation: Relation,
    vca_type: str,
    db_nsr: Dict[str, Any],
    cached_vnfds: Dict[str, Any],
    cached_vnfrs: Dict[str, Any],
) -> bool:
    """Try to create one juju relation between its two deployed endpoints.

    :return: True when the relation was added; False when one of the peers is
        not yet deployed or configured (the caller retries later).
    """
    deployed_provider = self._get_deployed_component(
        relation.provider, db_nsr, cached_vnfds
    )
    deployed_requirer = self._get_deployed_component(
        relation.requirer, db_nsr, cached_vnfds
    )
    # Both peers must exist and have their configuration software installed
    # before the relation can be established.
    if not (
        deployed_provider
        and deployed_requirer
        and deployed_provider.config_sw_installed
        and deployed_requirer.config_sw_installed
    ):
        return False

    def _vnfr_for(endpoint):
        # NS-level endpoints have no vnf-profile and therefore no VNFR.
        if not endpoint.vnf_profile_id:
            return None
        return self._get_vnfr(
            endpoint.nsr_id, endpoint.vnf_profile_id, cached_vnfrs
        )

    provider_db_vnfr = _vnfr_for(relation.provider)
    requirer_db_vnfr = _vnfr_for(relation.requirer)
    provider_relation_endpoint = RelationEndpoint(
        deployed_provider.ee_id,
        self.get_vca_id(provider_db_vnfr, db_nsr),
        relation.provider.endpoint,
    )
    requirer_relation_endpoint = RelationEndpoint(
        deployed_requirer.ee_id,
        self.get_vca_id(requirer_db_vnfr, db_nsr),
        relation.requirer.endpoint,
    )
    await self.vca_map[vca_type].add_relation(
        provider=provider_relation_endpoint,
        requirer=requirer_relation_endpoint,
    )
    # the caller removes the relation from its pending list
    return True
3183
async def _add_vca_relations(
    self,
    logging_text,
    nsr_id,
    vca_type: str,
    vca_index: int,
    timeout: int = 3600,
) -> bool:
    """Find and establish every relation that involves one deployed VCA.

    Steps:
      1. find all relations (NS and VNF level) for this VCA
      2. poll, adding each relation as soon as both peers are deployed
      3. give up after *timeout* seconds

    :param logging_text: prefix for log messages.
    :param nsr_id: NS record id.
    :param vca_type: key of self.vca_map used to add the relations.
    :param vca_index: index of this VCA inside _admin.deployed.VCA.
    :param timeout: maximum seconds to wait for all peers.
    :return: True when every relation was added (or none exist); False on
        timeout or error.
    """
    try:
        # STEP 1: find all relations for this VCA

        # read nsr record
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        nsd = get_nsd(db_nsr)

        # this VCA data
        deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
        my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

        cached_vnfds = {}
        cached_vnfrs = {}
        relations = []
        relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
        relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

        # if no relations, terminate
        if not relations:
            self.logger.debug(logging_text + " No relations")
            return True

        self.logger.debug(logging_text + " adding relations {}".format(relations))

        # STEP 2/3: add all relations, polling until the peers are deployed
        start = time()
        while True:
            # check timeout
            now = time()
            if now - start >= timeout:
                self.logger.error(logging_text + " : timeout adding relations")
                return False

            # reload nsr from database (we need to update record: _admin.deployed.VCA)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            # for each relation, find the VCA's related
            # (iterate over a copy: added relations are removed in place)
            for relation in relations.copy():
                added = await self._add_relation(
                    relation,
                    vca_type,
                    db_nsr,
                    cached_vnfds,
                    cached_vnfrs,
                )
                if added:
                    relations.remove(relation)

            if not relations:
                self.logger.debug("Relations added")
                break
            await asyncio.sleep(5.0)

        return True

    except Exception as e:
        # logger.warn is a deprecated alias of logger.warning
        self.logger.warning(logging_text + " ERROR adding relations: {}".format(e))
        return False
3256
async def _install_kdu(
    self,
    nsr_id: str,
    nsr_db_path: str,
    vnfr_data: dict,
    kdu_index: int,
    kdud: dict,
    vnfd: dict,
    k8s_instance_info: dict,
    k8params: dict = None,
    timeout: int = 600,
    vca_id: str = None,
):
    """Install one KDU on its K8s cluster and record the result in the DB.

    Generates (or reuses) the kdu-instance name, installs the helm chart or
    juju bundle, stores the exposed services and management IPs in the VNFR,
    and runs the KDU initial-config-primitives when the KDU has no juju
    execution environment associated.

    :param nsr_id: NS record id.
    :param nsr_db_path: path inside the NS record (_admin.deployed.K8s.<idx>).
    :param vnfr_data: VNFR record of the owning VNF.
    :param kdu_index: index of this KDU in the VNFR "kdur" list.
    :param kdud: KDU descriptor from the VNFD.
    :param vnfd: VNFD record.
    :param k8s_instance_info: cluster/model/namespace deployment info.
    :param k8params: KDU instantiation parameters.
    :param timeout: max seconds for the install and for each primitive.
    :param vca_id: id of the VCA used by the K8s connector, if any.
    :return: the kdu instance name.
    :raises Exception: re-raises any failure after recording it in the DB.
    """
    try:
        k8sclustertype = k8s_instance_info["k8scluster-type"]
        # Instantiate kdu
        db_dict_install = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": nsr_db_path,
        }

        if k8s_instance_info.get("kdu-deployment-name"):
            kdu_instance = k8s_instance_info.get("kdu-deployment-name")
        else:
            kdu_instance = self.k8scluster_map[
                k8sclustertype
            ].generate_kdu_instance_name(
                db_dict=db_dict_install,
                kdu_model=k8s_instance_info["kdu-model"],
                kdu_name=k8s_instance_info["kdu-name"],
            )

        # Update the nsrs table with the kdu-instance value
        self.update_db_2(
            item="nsrs",
            _id=nsr_id,
            _desc={nsr_db_path + ".kdu-instance": kdu_instance},
        )

        # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
        # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
        # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
        # namespace, this first verification could be removed, and the next step would be done for any kind
        # of KNF.
        # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
        # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
        if k8sclustertype in ("juju", "juju-bundle"):
            # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
            # that the user passed a namespace which he wants its KDU to be deployed in)
            if (
                self.db.count(
                    table="nsrs",
                    q_filter={
                        "_id": nsr_id,
                        "_admin.projects_write": k8s_instance_info["namespace"],
                        "_admin.projects_read": k8s_instance_info["namespace"],
                    },
                )
                > 0
            ):
                self.logger.debug(
                    f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                )
                self.update_db_2(
                    item="nsrs",
                    _id=nsr_id,
                    _desc={f"{nsr_db_path}.namespace": kdu_instance},
                )
                k8s_instance_info["namespace"] = kdu_instance

        await self.k8scluster_map[k8sclustertype].install(
            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
            kdu_model=k8s_instance_info["kdu-model"],
            atomic=True,
            params=k8params,
            db_dict=db_dict_install,
            timeout=timeout,
            kdu_name=k8s_instance_info["kdu-name"],
            namespace=k8s_instance_info["namespace"],
            kdu_instance=kdu_instance,
            vca_id=vca_id,
        )

        # Obtain services to obtain management service ip
        services = await self.k8scluster_map[k8sclustertype].get_services(
            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
            kdu_instance=kdu_instance,
            namespace=k8s_instance_info["namespace"],
        )

        # Obtain management service info (if exists)
        vnfr_update_dict = {}
        kdu_config = get_configuration(vnfd, kdud["name"])
        if kdu_config:
            target_ee_list = kdu_config.get("execution-environment-list", [])
        else:
            target_ee_list = []

        if services:
            vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
            mgmt_services = [
                service
                for service in kdud.get("service", [])
                if service.get("mgmt-service")
            ]
            for mgmt_service in mgmt_services:
                for service in services:
                    if service["name"].startswith(mgmt_service["name"]):
                        # Mgmt service found, Obtain service ip
                        ip = service.get("external_ip", service.get("cluster_ip"))
                        if isinstance(ip, list) and len(ip) == 1:
                            ip = ip[0]

                        vnfr_update_dict[
                            "kdur.{}.ip-address".format(kdu_index)
                        ] = ip

                        # Check if must update also mgmt ip at the vnf
                        service_external_cp = mgmt_service.get(
                            "external-connection-point-ref"
                        )
                        if service_external_cp:
                            if (
                                deep_get(vnfd, ("mgmt-interface", "cp"))
                                == service_external_cp
                            ):
                                vnfr_update_dict["ip-address"] = ip

                            if find_in_list(
                                target_ee_list,
                                lambda ee: ee.get(
                                    "external-connection-point-ref", ""
                                )
                                == service_external_cp,
                            ):
                                vnfr_update_dict[
                                    "kdur.{}.ip-address".format(kdu_index)
                                ] = ip
                        break
                else:
                    # no deployed service matched this mgmt-service name
                    # (logger.warn is a deprecated alias of logger.warning)
                    self.logger.warning(
                        "Mgmt service name: {} not found".format(
                            mgmt_service["name"]
                        )
                    )

        vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
        self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

        kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
        if (
            kdu_config
            and kdu_config.get("initial-config-primitive")
            and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
        ):
            initial_config_primitive_list = kdu_config.get(
                "initial-config-primitive"
            )
            initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

            for initial_config_primitive in initial_config_primitive_list:
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, {}
                )

                await asyncio.wait_for(
                    self.k8scluster_map[k8sclustertype].exec_primitive(
                        cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                        kdu_instance=kdu_instance,
                        primitive_name=initial_config_primitive["name"],
                        params=primitive_params_,
                        db_dict=db_dict_install,
                        vca_id=vca_id,
                    ),
                    timeout=timeout,
                )

    except Exception as e:
        # Prepare update db with error and raise exception
        try:
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
            )
            self.update_db_2(
                "vnfrs",
                vnfr_data.get("_id"),
                {"kdur.{}.status".format(kdu_index): "ERROR"},
            )
        except Exception:
            # ignore to keep original exception
            pass
        # reraise original error
        raise

    return kdu_instance
3454
async def deploy_kdus(
    self,
    logging_text,
    nsr_id,
    nslcmop_id,
    db_vnfrs,
    db_vnfds,
    task_instantiation_info,
):
    """Schedule an installation task for every KDU declared in the VNFRs.

    For each "kdur" entry of each VNFR: resolves the target K8s cluster
    (initializing helm-v3 clusters on demand for backward compatibility),
    synchronizes the cluster's helm repos once per cluster, records the
    deployment under _admin.deployed.K8s.<index> of the NS record and
    launches _install_kdu() as an asyncio task registered in
    *task_instantiation_info*.

    :param logging_text: prefix for log messages.
    :param nsr_id: NS record id.
    :param nslcmop_id: NS LCM operation id, used for task registration.
    :param db_vnfrs: dict of VNFR records of this NS.
    :param db_vnfds: list of VNFD records of this NS.
    :param task_instantiation_info: dict (task -> description) filled here.
    :raises LcmException: when a cluster cannot be found/initialized or the
        KDU type is neither helm-chart nor juju-bundle.
    """
    # Launch kdus if present in the descriptor

    # cache: cluster type -> {cluster id -> backend uuid}
    k8scluster_id_2_uuic = {
        "helm-chart-v3": {},
        "helm-chart": {},
        "juju-bundle": {},
    }

    async def _get_cluster_id(cluster_id, cluster_type):
        # Resolve (and cache) the backend uuid of a K8s cluster for the given
        # connector type, waiting first for any in-flight cluster task.
        nonlocal k8scluster_id_2_uuic
        if cluster_id in k8scluster_id_2_uuic[cluster_type]:
            return k8scluster_id_2_uuic[cluster_type][cluster_id]

        # check if K8scluster is creating and wait look if previous tasks in process
        task_name, task_dependency = self.lcm_tasks.lookfor_related(
            "k8scluster", cluster_id
        )
        if task_dependency:
            text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                task_name, cluster_id
            )
            self.logger.debug(logging_text + text)
            await asyncio.wait(task_dependency, timeout=3600)

        db_k8scluster = self.db.get_one(
            "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
        )
        if not db_k8scluster:
            raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

        k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
        if not k8s_id:
            if cluster_type == "helm-chart-v3":
                try:
                    # backward compatibility for existing clusters that have not been initialized for helm v3
                    k8s_credentials = yaml.safe_dump(
                        db_k8scluster.get("credentials")
                    )
                    k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                        k8s_credentials, reuse_cluster_uuid=cluster_id
                    )
                    db_k8scluster_update = {}
                    db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                    db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                    db_k8scluster_update[
                        "_admin.helm-chart-v3.created"
                    ] = uninstall_sw
                    db_k8scluster_update[
                        "_admin.helm-chart-v3.operationalState"
                    ] = "ENABLED"
                    self.update_db_2(
                        "k8sclusters", cluster_id, db_k8scluster_update
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text
                        + "error initializing helm-v3 cluster: {}".format(str(e))
                    )
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            else:
                raise LcmException(
                    "K8s cluster '{}' has not been initialized for '{}'".format(
                        cluster_id, cluster_type
                    )
                )
        k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
        return k8s_id

    logging_text += "Deploy kdus: "
    step = ""
    try:
        db_nsr_update = {"_admin.deployed.K8s": []}
        self.update_db_2("nsrs", nsr_id, db_nsr_update)

        index = 0
        # clusters whose helm repos were already synchronized in this call
        updated_cluster_list = []
        updated_v3_cluster_list = []

        for vnfr_data in db_vnfrs.values():
            vca_id = self.get_vca_id(vnfr_data, {})
            for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                # Step 0: Prepare and set parameters
                desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                vnfd_id = vnfr_data.get("vnfd-id")
                vnfd_with_id = find_in_list(
                    db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                )
                kdud = next(
                    kdud
                    for kdud in vnfd_with_id["kdu"]
                    if kdud["name"] == kdur["kdu-name"]
                )
                namespace = kdur.get("k8s-namespace")
                kdu_deployment_name = kdur.get("kdu-deployment-name")
                if kdur.get("helm-chart"):
                    kdumodel = kdur["helm-chart"]
                    # Default version: helm3, if helm-version is v2 assign v2
                    k8sclustertype = "helm-chart-v3"
                    self.logger.debug("kdur: {}".format(kdur))
                    if (
                        kdur.get("helm-version")
                        and kdur.get("helm-version") == "v2"
                    ):
                        k8sclustertype = "helm-chart"
                elif kdur.get("juju-bundle"):
                    kdumodel = kdur["juju-bundle"]
                    k8sclustertype = "juju-bundle"
                else:
                    raise LcmException(
                        "kdu type for kdu='{}.{}' is neither helm-chart nor "
                        "juju-bundle. Maybe an old NBI version is running".format(
                            vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                        )
                    )
                # check if kdumodel is a file and exists
                try:
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                    if storage:  # may be not present if vnfd has not artifacts
                        # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                        if storage["pkg-dir"]:
                            filename = "{}/{}/{}s/{}".format(
                                storage["folder"],
                                storage["pkg-dir"],
                                k8sclustertype,
                                kdumodel,
                            )
                        else:
                            filename = "{}/Scripts/{}s/{}".format(
                                storage["folder"],
                                k8sclustertype,
                                kdumodel,
                            )
                        if self.fs.file_exists(
                            filename, mode="file"
                        ) or self.fs.file_exists(filename, mode="dir"):
                            # artifact packaged with the VNFD: use local path
                            kdumodel = self.fs.path + filename
                except (asyncio.TimeoutError, asyncio.CancelledError):
                    raise
                except Exception:  # it is not a file
                    pass

                k8s_cluster_id = kdur["k8s-cluster"]["id"]
                step = "Synchronize repos for k8s cluster '{}'".format(
                    k8s_cluster_id
                )
                cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                # Synchronize repos
                if (
                    k8sclustertype == "helm-chart"
                    and cluster_uuid not in updated_cluster_list
                ) or (
                    k8sclustertype == "helm-chart-v3"
                    and cluster_uuid not in updated_v3_cluster_list
                ):
                    del_repo_list, added_repo_dict = await asyncio.ensure_future(
                        self.k8scluster_map[k8sclustertype].synchronize_repos(
                            cluster_uuid=cluster_uuid
                        )
                    )
                    if del_repo_list or added_repo_dict:
                        if k8sclustertype == "helm-chart":
                            unset = {
                                "_admin.helm_charts_added." + item: None
                                for item in del_repo_list
                            }
                            updated = {
                                "_admin.helm_charts_added." + item: name
                                for item, name in added_repo_dict.items()
                            }
                            updated_cluster_list.append(cluster_uuid)
                        elif k8sclustertype == "helm-chart-v3":
                            unset = {
                                "_admin.helm_charts_v3_added." + item: None
                                for item in del_repo_list
                            }
                            updated = {
                                "_admin.helm_charts_v3_added." + item: name
                                for item, name in added_repo_dict.items()
                            }
                            updated_v3_cluster_list.append(cluster_uuid)
                        self.logger.debug(
                            logging_text + "repos synchronized on k8s cluster "
                            "'{}' to_delete: {}, to_add: {}".format(
                                k8s_cluster_id, del_repo_list, added_repo_dict
                            )
                        )
                        self.db.set_one(
                            "k8sclusters",
                            {"_id": k8s_cluster_id},
                            updated,
                            unset=unset,
                        )

                # Instantiate kdu
                step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                    vnfr_data["member-vnf-index-ref"],
                    kdur["kdu-name"],
                    k8s_cluster_id,
                )
                k8s_instance_info = {
                    "kdu-instance": None,
                    "k8scluster-uuid": cluster_uuid,
                    "k8scluster-type": k8sclustertype,
                    "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                    "kdu-name": kdur["kdu-name"],
                    "kdu-model": kdumodel,
                    "namespace": namespace,
                    "kdu-deployment-name": kdu_deployment_name,
                }
                db_path = "_admin.deployed.K8s.{}".format(index)
                db_nsr_update[db_path] = k8s_instance_info
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                vnfd_with_id = find_in_list(
                    db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                )
                task = asyncio.ensure_future(
                    self._install_kdu(
                        nsr_id,
                        db_path,
                        vnfr_data,
                        kdu_index,
                        kdud,
                        vnfd_with_id,
                        k8s_instance_info,
                        k8params=desc_params,
                        timeout=1800,
                        vca_id=vca_id,
                    )
                )
                self.lcm_tasks.register(
                    "ns",
                    nsr_id,
                    nslcmop_id,
                    "instantiate_KDU-{}".format(index),
                    task,
                )
                task_instantiation_info[task] = "Deploying KDU {}".format(
                    kdur["kdu-name"]
                )

                index += 1

    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
        if isinstance(e, (N2VCException, DbException)):
            self.logger.error(logging_text + msg)
        else:
            self.logger.critical(logging_text + msg, exc_info=True)
        raise LcmException(msg)
    finally:
        # persist whatever was recorded, even on failure
        if db_nsr_update:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
3726
def _deploy_n2vc(
    self,
    logging_text,
    db_nsr,
    db_vnfr,
    nslcmop_id,
    nsr_id,
    nsi_id,
    vnfd_id,
    vdu_id,
    kdu_name,
    member_vnf_index,
    vdu_index,
    vdu_name,
    deploy_params,
    descriptor_config,
    base_folder,
    task_instantiation_info,
    stage,
):
    """Schedule instantiate_N2VC tasks for each execution environment of a descriptor.

    For every juju/helm execution environment found in *descriptor_config*,
    reuses or creates the corresponding _admin.deployed.VCA entry of the NS
    record and launches instantiate_N2VC as an asyncio task registered in
    *task_instantiation_info*.
    """
    # launch instantiate_N2VC in a asyncio task and register task object
    # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
    # if not found, create one entry and update database
    # fill db_nsr._admin.deployed.VCA.<index>

    self.logger.debug(
        logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
    )

    charm_name = ""
    get_charm_name = False
    if "execution-environment-list" in descriptor_config:
        ee_list = descriptor_config.get("execution-environment-list", [])
    elif "juju" in descriptor_config:
        ee_list = [descriptor_config]  # ns charms
        if "execution-environment-list" not in descriptor_config:
            # charm name is only required for ns charms
            get_charm_name = True
    else:  # other types as script are not supported
        ee_list = []

    for ee_item in ee_list:
        self.logger.debug(
            logging_text
            + "_deploy_n2vc ee_item juju={}, helm={}".format(
                ee_item.get("juju"), ee_item.get("helm-chart")
            )
        )
        ee_descriptor_id = ee_item.get("id")
        if ee_item.get("juju"):
            vca_name = ee_item["juju"].get("charm")
            if get_charm_name:
                charm_name = self.find_charm_name(db_nsr, str(vca_name))
            # a charm artifact implies a proxy charm in an lxc machine;
            # "cloud: k8s" or "proxy: false" override the type below
            vca_type = (
                "lxc_proxy_charm"
                if ee_item["juju"].get("charm") is not None
                else "native_charm"
            )
            if ee_item["juju"].get("cloud") == "k8s":
                vca_type = "k8s_proxy_charm"
            elif ee_item["juju"].get("proxy") is False:
                vca_type = "native_charm"
        elif ee_item.get("helm-chart"):
            vca_name = ee_item["helm-chart"]
            if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                vca_type = "helm"
            else:
                vca_type = "helm-v3"
        else:
            self.logger.debug(
                logging_text + "skipping non juju neither charm configuration"
            )
            continue

        # look for an existing VCA record matching this execution environment
        vca_index = -1
        for vca_index, vca_deployed in enumerate(
            db_nsr["_admin"]["deployed"]["VCA"]
        ):
            if not vca_deployed:
                continue
            if (
                vca_deployed.get("member-vnf-index") == member_vnf_index
                and vca_deployed.get("vdu_id") == vdu_id
                and vca_deployed.get("kdu_name") == kdu_name
                and vca_deployed.get("vdu_count_index", 0) == vdu_index
                and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
            ):
                break
        else:
            # not found, create one.
            target = (
                "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
            )
            if vdu_id:
                target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
            elif kdu_name:
                target += "/kdu/{}".format(kdu_name)
            vca_deployed = {
                "target_element": target,
                # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                "member-vnf-index": member_vnf_index,
                "vdu_id": vdu_id,
                "kdu_name": kdu_name,
                "vdu_count_index": vdu_index,
                "operational-status": "init",  # TODO revise
                "detailed-status": "",  # TODO revise
                "step": "initial-deploy",  # TODO revise
                "vnfd_id": vnfd_id,
                "vdu_name": vdu_name,
                "type": vca_type,
                "ee_descriptor_id": ee_descriptor_id,
                "charm_name": charm_name,
            }
            # after the for/else, vca_index is the last index (or -1 when the
            # list was empty); the new entry goes right after it
            vca_index += 1

            # create VCA and configurationStatus in db
            db_dict = {
                "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                "configurationStatus.{}".format(vca_index): dict(),
            }
            self.update_db_2("nsrs", nsr_id, db_dict)

            db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

        self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
        self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
        self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

        # Launch task
        task_n2vc = asyncio.ensure_future(
            self.instantiate_N2VC(
                logging_text=logging_text,
                vca_index=vca_index,
                nsi_id=nsi_id,
                db_nsr=db_nsr,
                db_vnfr=db_vnfr,
                vdu_id=vdu_id,
                kdu_name=kdu_name,
                vdu_index=vdu_index,
                deploy_params=deploy_params,
                config_descriptor=descriptor_config,
                base_folder=base_folder,
                nslcmop_id=nslcmop_id,
                stage=stage,
                vca_type=vca_type,
                vca_name=vca_name,
                ee_config_descriptor=ee_item,
            )
        )
        self.lcm_tasks.register(
            "ns",
            nsr_id,
            nslcmop_id,
            "instantiate_N2VC-{}".format(vca_index),
            task_n2vc,
        )
        task_instantiation_info[
            task_n2vc
        ] = self.task_name_deploy_vca + " {}.{}".format(
            member_vnf_index or "", vdu_id or ""
        )
3888
3889 @staticmethod
3890 def _create_nslcmop(nsr_id, operation, params):
3891 """
3892 Creates a ns-lcm-opp content to be stored at database.
3893 :param nsr_id: internal id of the instance
3894 :param operation: instantiate, terminate, scale, action, ...
3895 :param params: user parameters for the operation
3896 :return: dictionary following SOL005 format
3897 """
3898 # Raise exception if invalid arguments
3899 if not (nsr_id and operation and params):
3900 raise LcmException(
3901 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3902 )
3903 now = time()
3904 _id = str(uuid4())
3905 nslcmop = {
3906 "id": _id,
3907 "_id": _id,
3908 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3909 "operationState": "PROCESSING",
3910 "statusEnteredTime": now,
3911 "nsInstanceId": nsr_id,
3912 "lcmOperationType": operation,
3913 "startTime": now,
3914 "isAutomaticInvocation": False,
3915 "operationParams": params,
3916 "isCancelPending": False,
3917 "links": {
3918 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3919 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3920 },
3921 }
3922 return nslcmop
3923
3924 def _format_additional_params(self, params):
3925 params = params or {}
3926 for key, value in params.items():
3927 if str(value).startswith("!!yaml "):
3928 params[key] = yaml.safe_load(value[7:])
3929 return params
3930
3931 def _get_terminate_primitive_params(self, seq, vnf_index):
3932 primitive = seq.get("name")
3933 primitive_params = {}
3934 params = {
3935 "member_vnf_index": vnf_index,
3936 "primitive": primitive,
3937 "primitive_params": primitive_params,
3938 }
3939 desc_params = {}
3940 return self._map_primitive_params(seq, params, desc_params)
3941
3942 # sub-operations
3943
def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
    """Decide whether an existing sub-operation is skipped or retried.

    A sub-operation already in 'COMPLETED' state is skipped; otherwise its
    status is reset to 'PROCESSING' and its index returned so the caller
    re-executes it.
    """
    op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
    if op.get("operationState") == "COMPLETED":
        # b. Skip sub-operation:
        # _ns_execute_primitive() or RO.create_action() will NOT be executed
        return self.SUBOPERATION_STATUS_SKIP
    # c. Retry executing sub-operation: it exists with
    # operationState != 'COMPLETED'; mark it 'PROCESSING' again.
    self._update_suboperation_status(
        db_nslcmop, op_index, "PROCESSING", "In progress"
    )
    # _ns_execute_primitive() or RO.create_action() will be called from
    # scale() with arguments extracted from this sub-operation.
    return op_index
3963
3964 # Find a sub-operation where all keys in a matching dictionary must match
3965 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3966 def _find_suboperation(self, db_nslcmop, match):
3967 if db_nslcmop and match:
3968 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3969 for i, op in enumerate(op_list):
3970 if all(op.get(k) == match[k] for k in match):
3971 return i
3972 return self.SUBOPERATION_STATUS_NOT_FOUND
3973
3974 # Update status for a sub-operation given its index
3975 def _update_suboperation_status(
3976 self, db_nslcmop, op_index, operationState, detailed_status
3977 ):
3978 # Update DB for HA tasks
3979 q_filter = {"_id": db_nslcmop["_id"]}
3980 update_dict = {
3981 "_admin.operations.{}.operationState".format(op_index): operationState,
3982 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3983 }
3984 self.db.set_one(
3985 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3986 )
3987
3988 # Add sub-operation, return the index of the added sub-operation
3989 # Optionally, set operationState, detailed-status, and operationType
3990 # Status and type are currently set for 'scale' sub-operations:
3991 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3992 # 'detailed-status' : status message
3993 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3994 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3995 def _add_suboperation(
3996 self,
3997 db_nslcmop,
3998 vnf_index,
3999 vdu_id,
4000 vdu_count_index,
4001 vdu_name,
4002 primitive,
4003 mapped_primitive_params,
4004 operationState=None,
4005 detailed_status=None,
4006 operationType=None,
4007 RO_nsr_id=None,
4008 RO_scaling_info=None,
4009 ):
4010 if not db_nslcmop:
4011 return self.SUBOPERATION_STATUS_NOT_FOUND
4012 # Get the "_admin.operations" list, if it exists
4013 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4014 op_list = db_nslcmop_admin.get("operations")
4015 # Create or append to the "_admin.operations" list
4016 new_op = {
4017 "member_vnf_index": vnf_index,
4018 "vdu_id": vdu_id,
4019 "vdu_count_index": vdu_count_index,
4020 "primitive": primitive,
4021 "primitive_params": mapped_primitive_params,
4022 }
4023 if operationState:
4024 new_op["operationState"] = operationState
4025 if detailed_status:
4026 new_op["detailed-status"] = detailed_status
4027 if operationType:
4028 new_op["lcmOperationType"] = operationType
4029 if RO_nsr_id:
4030 new_op["RO_nsr_id"] = RO_nsr_id
4031 if RO_scaling_info:
4032 new_op["RO_scaling_info"] = RO_scaling_info
4033 if not op_list:
4034 # No existing operations, create key 'operations' with current operation as first list element
4035 db_nslcmop_admin.update({"operations": [new_op]})
4036 op_list = db_nslcmop_admin.get("operations")
4037 else:
4038 # Existing operations, append operation to list
4039 op_list.append(new_op)
4040
4041 db_nslcmop_update = {"_admin.operations": op_list}
4042 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4043 op_index = len(op_list) - 1
4044 return op_index
4045
4046 # Helper methods for scale() sub-operations
4047
4048 # pre-scale/post-scale:
4049 # Check for 3 different cases:
4050 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4051 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4052 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4053 def _check_or_add_scale_suboperation(
4054 self,
4055 db_nslcmop,
4056 vnf_index,
4057 vnf_config_primitive,
4058 primitive_params,
4059 operationType,
4060 RO_nsr_id=None,
4061 RO_scaling_info=None,
4062 ):
4063 # Find this sub-operation
4064 if RO_nsr_id and RO_scaling_info:
4065 operationType = "SCALE-RO"
4066 match = {
4067 "member_vnf_index": vnf_index,
4068 "RO_nsr_id": RO_nsr_id,
4069 "RO_scaling_info": RO_scaling_info,
4070 }
4071 else:
4072 match = {
4073 "member_vnf_index": vnf_index,
4074 "primitive": vnf_config_primitive,
4075 "primitive_params": primitive_params,
4076 "lcmOperationType": operationType,
4077 }
4078 op_index = self._find_suboperation(db_nslcmop, match)
4079 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4080 # a. New sub-operation
4081 # The sub-operation does not exist, add it.
4082 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4083 # The following parameters are set to None for all kind of scaling:
4084 vdu_id = None
4085 vdu_count_index = None
4086 vdu_name = None
4087 if RO_nsr_id and RO_scaling_info:
4088 vnf_config_primitive = None
4089 primitive_params = None
4090 else:
4091 RO_nsr_id = None
4092 RO_scaling_info = None
4093 # Initial status for sub-operation
4094 operationState = "PROCESSING"
4095 detailed_status = "In progress"
4096 # Add sub-operation for pre/post-scaling (zero or more operations)
4097 self._add_suboperation(
4098 db_nslcmop,
4099 vnf_index,
4100 vdu_id,
4101 vdu_count_index,
4102 vdu_name,
4103 vnf_config_primitive,
4104 primitive_params,
4105 operationState,
4106 detailed_status,
4107 operationType,
4108 RO_nsr_id,
4109 RO_scaling_info,
4110 )
4111 return self.SUBOPERATION_STATUS_NEW
4112 else:
4113 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4114 # or op_index (operationState != 'COMPLETED')
4115 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4116
4117 # Function to return execution_environment id
4118
4119 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4120 # TODO vdu_index_count
4121 for vca in vca_deployed_list:
4122 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4123 return vca["ee_id"]
4124
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and (optionally) destroy the execution environment.

        :param logging_text: prefix prepended to every log message
        :param db_nslcmop: database content of the nslcmop driving this termination
        :param vca_deployed: dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to skip destroying the execution environment here,
            because all of them will be destroyed at once later
        :param exec_primitives: False to skip the terminate primitives, because the config
            was not completed or did not execute properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA identifier, forwarded to the VCA connector calls
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default type for backward compatibility with records stored without "type"
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation so the primitive execution is tracked for HA
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4230
4231 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4232 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4233 namespace = "." + db_nsr["_id"]
4234 try:
4235 await self.n2vc.delete_namespace(
4236 namespace=namespace,
4237 total_timeout=self.timeout_charm_delete,
4238 vca_id=vca_id,
4239 )
4240 except N2VCNotFound: # already deleted. Skip
4241 pass
4242 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4243
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO (classic, non-NG flavor).

        Deletes, in order: the ns from the VIM (polling until it is gone), the
        nsd from RO, and every vnfd from RO. Errors are accumulated in
        failed_detail; a 404 from RO is treated as "already deleted".

        :param logging_text: prefix prepended to every log message
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation id
        :param stage: list of strings with the content to write on db_nslcmop.detailed-status.
            this method will update only index 2, but it writes on database the concatenated
            content of the whole list
        :return: None; raises LcmException when any deletion failed
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # a delete action id may already exist from a previous (interrupted) attempt
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                # persist the action id so a retry after restart can resume polling
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        # deletion still in progress at the VIM
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # ACTIVE here means the delete action completed
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # only write to DB when the status text actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only when the ns deletion above succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete each vnfd from RO (only when everything above succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4443
    async def terminate(self, nsr_id, nslcmop_id):
        """
        NS termination task: executes terminate primitives, deletes execution
        environments and KDUs, and removes the deployment from RO/VIM, updating
        the NS and operation status in the database as it progresses.

        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation id driving this termination
        :return: None; all outcomes are reported through the database and kafka
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so DB updates done by subtasks do not interfere
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was ever deployed; the finally clause still runs and
                # marks the operation COMPLETED
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, caching each distinct VNFD by id
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the config descriptor matching this VCA's level (ns / vdu / kdu / vnf)
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # stop here; the finally clause reports the failure
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                if ns_state == "NOT_INSTANTIATED":
                    try:
                        # propagate the final state to every VNFR of this NS
                        self.db.set_list(
                            "vnfrs",
                            {"nsr-id-ref": nsr_id},
                            {"_admin.nsState": "NOT_INSTANTIATED"},
                        )
                    except DbException as e:
                        self.logger.warn(
                            logging_text
                            + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                                nsr_id, e
                            )
                        )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify subscribers (e.g. NBI) about the termination outcome
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4772
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Await the given tasks until all complete or 'timeout' seconds elapse,
        updating stage[1] with a "done/total" progress counter after each batch.

        :param logging_text: prefix prepended to every log message
        :param created_tasks_info: dict mapping task -> human-readable description
        :param timeout: overall timeout in seconds for the whole set of tasks
        :param stage: status list whose index 1 is updated with progress/errors
        :param nslcmop_id: operation id used to write operation status
        :param nsr_id: when provided, errors are also written to the nsr record
        :return: list of error detail strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining budget; shrinks as iterations consume wall-clock time
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout: mark every still-pending task as timed out
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known exception types get a one-line error; anything else
                    # logs the full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4849
4850 @staticmethod
4851 def _map_primitive_params(primitive_desc, params, instantiation_params):
4852 """
4853 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4854 The default-value is used. If it is between < > it look for a value at instantiation_params
4855 :param primitive_desc: portion of VNFD/NSD that describes primitive
4856 :param params: Params provided by user
4857 :param instantiation_params: Instantiation params provided by user
4858 :return: a dictionary with the calculated params
4859 """
4860 calculated_params = {}
4861 for parameter in primitive_desc.get("parameter", ()):
4862 param_name = parameter["name"]
4863 if param_name in params:
4864 calculated_params[param_name] = params[param_name]
4865 elif "default-value" in parameter or "value" in parameter:
4866 if "value" in parameter:
4867 calculated_params[param_name] = parameter["value"]
4868 else:
4869 calculated_params[param_name] = parameter["default-value"]
4870 if (
4871 isinstance(calculated_params[param_name], str)
4872 and calculated_params[param_name].startswith("<")
4873 and calculated_params[param_name].endswith(">")
4874 ):
4875 if calculated_params[param_name][1:-1] in instantiation_params:
4876 calculated_params[param_name] = instantiation_params[
4877 calculated_params[param_name][1:-1]
4878 ]
4879 else:
4880 raise LcmException(
4881 "Parameter {} needed to execute primitive {} not provided".format(
4882 calculated_params[param_name], primitive_desc["name"]
4883 )
4884 )
4885 else:
4886 raise LcmException(
4887 "Parameter {} needed to execute primitive {} not provided".format(
4888 param_name, primitive_desc["name"]
4889 )
4890 )
4891
4892 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4893 calculated_params[param_name] = yaml.safe_dump(
4894 calculated_params[param_name], default_flow_style=True, width=256
4895 )
4896 elif isinstance(calculated_params[param_name], str) and calculated_params[
4897 param_name
4898 ].startswith("!!yaml "):
4899 calculated_params[param_name] = calculated_params[param_name][7:]
4900 if parameter.get("data-type") == "INTEGER":
4901 try:
4902 calculated_params[param_name] = int(calculated_params[param_name])
4903 except ValueError: # error converting string to int
4904 raise LcmException(
4905 "Parameter {} of primitive {} must be integer".format(
4906 param_name, primitive_desc["name"]
4907 )
4908 )
4909 elif parameter.get("data-type") == "BOOLEAN":
4910 calculated_params[param_name] = not (
4911 (str(calculated_params[param_name])).lower() == "false"
4912 )
4913
4914 # add always ns_config_info if primitive name is config
4915 if primitive_desc["name"] == "config":
4916 if "ns_config_info" in instantiation_params:
4917 calculated_params["ns_config_info"] = instantiation_params[
4918 "ns_config_info"
4919 ]
4920 return calculated_params
4921
4922 def _look_for_deployed_vca(
4923 self,
4924 deployed_vca,
4925 member_vnf_index,
4926 vdu_id,
4927 vdu_count_index,
4928 kdu_name=None,
4929 ee_descriptor_id=None,
4930 ):
4931 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4932 for vca in deployed_vca:
4933 if not vca:
4934 continue
4935 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4936 continue
4937 if (
4938 vdu_count_index is not None
4939 and vdu_count_index != vca["vdu_count_index"]
4940 ):
4941 continue
4942 if kdu_name and kdu_name != vca["kdu_name"]:
4943 continue
4944 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4945 continue
4946 break
4947 else:
4948 # vca_deployed not found
4949 raise LcmException(
4950 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4951 " is not deployed".format(
4952 member_vnf_index,
4953 vdu_id,
4954 vdu_count_index,
4955 kdu_name,
4956 ee_descriptor_id,
4957 )
4958 )
4959 # get ee_id
4960 ee_id = vca.get("ee_id")
4961 vca_type = vca.get(
4962 "type", "lxc_proxy_charm"
4963 ) # default value for backward compatibility - proxy charm
4964 if not ee_id:
4965 raise LcmException(
4966 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4967 "execution environment".format(
4968 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4969 )
4970 )
4971 return ee_id, vca_type
4972
4973 async def _ns_execute_primitive(
4974 self,
4975 ee_id,
4976 primitive,
4977 primitive_params,
4978 retries=0,
4979 retries_interval=30,
4980 timeout=None,
4981 vca_type=None,
4982 db_dict=None,
4983 vca_id: str = None,
4984 ) -> (str, str):
4985 try:
4986 if primitive == "config":
4987 primitive_params = {"params": primitive_params}
4988
4989 vca_type = vca_type or "lxc_proxy_charm"
4990
4991 while retries >= 0:
4992 try:
4993 output = await asyncio.wait_for(
4994 self.vca_map[vca_type].exec_primitive(
4995 ee_id=ee_id,
4996 primitive_name=primitive,
4997 params_dict=primitive_params,
4998 progress_timeout=self.timeout_progress_primitive,
4999 total_timeout=self.timeout_primitive,
5000 db_dict=db_dict,
5001 vca_id=vca_id,
5002 vca_type=vca_type,
5003 ),
5004 timeout=timeout or self.timeout_primitive,
5005 )
5006 # execution was OK
5007 break
5008 except asyncio.CancelledError:
5009 raise
5010 except Exception as e:
5011 retries -= 1
5012 if retries >= 0:
5013 self.logger.debug(
5014 "Error executing action {} on {} -> {}".format(
5015 primitive, ee_id, e
5016 )
5017 )
5018 # wait and retry
5019 await asyncio.sleep(retries_interval, loop=self.loop)
5020 else:
5021 if isinstance(e, asyncio.TimeoutError):
5022 e = N2VCException(
5023 message="Timed out waiting for action to complete"
5024 )
5025 return "FAILED", getattr(e, "message", repr(e))
5026
5027 return "COMPLETED", output
5028
5029 except (LcmException, asyncio.CancelledError):
5030 raise
5031 except Exception as e:
5032 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5033
5034 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5035 """
5036 Updating the vca_status with latest juju information in nsrs record
5037 :param: nsr_id: Id of the nsr
5038 :param: nslcmop_id: Id of the nslcmop
5039 :return: None
5040 """
5041
5042 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5043 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5044 vca_id = self.get_vca_id({}, db_nsr)
5045 if db_nsr["_admin"]["deployed"]["K8s"]:
5046 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5047 cluster_uuid, kdu_instance, cluster_type = (
5048 k8s["k8scluster-uuid"],
5049 k8s["kdu-instance"],
5050 k8s["k8scluster-type"],
5051 )
5052 await self._on_update_k8s_db(
5053 cluster_uuid=cluster_uuid,
5054 kdu_instance=kdu_instance,
5055 filter={"_id": nsr_id},
5056 vca_id=vca_id,
5057 cluster_type=cluster_type,
5058 )
5059 else:
5060 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5061 table, filter = "nsrs", {"_id": nsr_id}
5062 path = "_admin.deployed.VCA.{}.".format(vca_index)
5063 await self._on_update_n2vc_db(table, filter, path, {})
5064
5065 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5066 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5067
    async def action(self, nsr_id, nslcmop_id):
        """Run an nslcmop "action" (primitive execution) on a NS.

        Reads the operation parameters from the nslcmop record, locates the
        primitive in the NS/VNF/VDU descriptor configuration, and executes it
        either on a deployed KDU (through the k8s cluster connector) or on a
        deployed VCA (charm). Final status is always written to the database
        in the ``finally`` block and notified over kafka.

        :param nsr_id: id of the NS instance record
        :param nslcmop_id: id of the NS LCM operation record
        :return: (operation_state, detailed_status) tuple, or None on the
            success path (the database update happens inside ``finally``).
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params are stored JSON-encoded in the operation record
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                if db_vnfr.get("kdur"):
                    # kdur additionalParams are stored JSON-encoded; decode them
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned in the vnf_index branch
            # above; if operationParams carries no member_vnf_index this line
            # would raise NameError — presumably vnf_index is always set for
            # VNF-level actions. TODO confirm against the NBI contract.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU built-in operations do not need a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # resolve the descriptor additionalParams at the right scope
            # (VDU, KDU, VNF or NS level)
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loop variables shadow the outer
                # "primitive" name; later code only uses primitive_name, so
                # behavior is unaffected, but keep this in mind when editing.
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                # a declared kdu action is executed directly unless the KDU
                # runs on a helm cluster (helm actions go through the charm)
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model can be overridden via params; otherwise take it
                    # from the deployed record, stripping a ":version" suffix
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get("kdu_atomic_upgrade").lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # declared kdu action: execute it through the k8s connector
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # VCA (charm) primitive execution
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # always persist the final NS and operation status, even on error
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify the operation result over kafka (best effort)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5436
5437 async def terminate_vdus(
5438 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5439 ):
5440 """This method terminates VDUs
5441
5442 Args:
5443 db_vnfr: VNF instance record
5444 member_vnf_index: VNF index to identify the VDUs to be removed
5445 db_nsr: NS instance record
5446 update_db_nslcmops: Nslcmop update record
5447 """
5448 vca_scaling_info = []
5449 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5450 scaling_info["scaling_direction"] = "IN"
5451 scaling_info["vdu-delete"] = {}
5452 scaling_info["kdu-delete"] = {}
5453 db_vdur = db_vnfr.get("vdur")
5454 vdur_list = copy(db_vdur)
5455 count_index = 0
5456 for index, vdu in enumerate(vdur_list):
5457 vca_scaling_info.append(
5458 {
5459 "osm_vdu_id": vdu["vdu-id-ref"],
5460 "member-vnf-index": member_vnf_index,
5461 "type": "delete",
5462 "vdu_index": count_index,
5463 }
5464 )
5465 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5466 scaling_info["vdu"].append(
5467 {
5468 "name": vdu.get("name") or vdu.get("vdu-name"),
5469 "vdu_id": vdu["vdu-id-ref"],
5470 "interface": [],
5471 }
5472 )
5473 for interface in vdu["interfaces"]:
5474 scaling_info["vdu"][index]["interface"].append(
5475 {
5476 "name": interface["name"],
5477 "ip_address": interface["ip-address"],
5478 "mac_address": interface.get("mac-address"),
5479 }
5480 )
5481 self.logger.info("NS update scaling info{}".format(scaling_info))
5482 stage[2] = "Terminating VDUs"
5483 if scaling_info.get("vdu-delete"):
5484 # scale_process = "RO"
5485 if self.ro_config.get("ng"):
5486 await self._scale_ng_ro(
5487 logging_text,
5488 db_nsr,
5489 update_db_nslcmops,
5490 db_vnfr,
5491 scaling_info,
5492 stage,
5493 )
5494
5495 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5496 """This method is to Remove VNF instances from NS.
5497
5498 Args:
5499 nsr_id: NS instance id
5500 nslcmop_id: nslcmop id of update
5501 vnf_instance_id: id of the VNF instance to be removed
5502
5503 Returns:
5504 result: (str, str) COMPLETED/FAILED, details
5505 """
5506 try:
5507 db_nsr_update = {}
5508 logging_text = "Task ns={} update ".format(nsr_id)
5509 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5510 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5511 if check_vnfr_count > 1:
5512 stage = ["", "", ""]
5513 step = "Getting nslcmop from database"
5514 self.logger.debug(
5515 step + " after having waited for previous tasks to be completed"
5516 )
5517 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5518 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5519 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5520 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5521 """ db_vnfr = self.db.get_one(
5522 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5523
5524 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5525 await self.terminate_vdus(
5526 db_vnfr,
5527 member_vnf_index,
5528 db_nsr,
5529 update_db_nslcmops,
5530 stage,
5531 logging_text,
5532 )
5533
5534 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5535 constituent_vnfr.remove(db_vnfr.get("_id"))
5536 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5537 "constituent-vnfr-ref"
5538 )
5539 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5540 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5541 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5542 return "COMPLETED", "Done"
5543 else:
5544 step = "Terminate VNF Failed with"
5545 raise LcmException(
5546 "{} Cannot terminate the last VNF in this NS.".format(
5547 vnf_instance_id
5548 )
5549 )
5550 except (LcmException, asyncio.CancelledError):
5551 raise
5552 except Exception as e:
5553 self.logger.debug("Error removing VNF {}".format(e))
5554 return "FAILED", "Error removing VNF {}".format(e)
5555
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDUs, rewrites the vnfr with the new
        descriptor's connection points / vdur (taken from the operation
        params), and instantiates the new resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # build the vnfr connection points from the descriptor's ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # new vdur list is precomputed by the caller and carried in the
            # operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            # NOTE(review): count_index is never incremented, so every VDU is
            # created with index 0 — presumably one instance per VDU; confirm
            # against _scale_ng_ro expectations.
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                # NOTE(review): cloud_init_list is filled but not consumed
                # afterwards in this method — looks vestigial; verify.
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5681
5682 async def _ns_charm_upgrade(
5683 self,
5684 ee_id,
5685 charm_id,
5686 charm_type,
5687 path,
5688 timeout: float = None,
5689 ) -> (str, str):
5690 """This method upgrade charms in VNF instances
5691
5692 Args:
5693 ee_id: Execution environment id
5694 path: Local path to the charm
5695 charm_id: charm-id
5696 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5697 timeout: (Float) Timeout for the ns update operation
5698
5699 Returns:
5700 result: (str, str) COMPLETED/FAILED, details
5701 """
5702 try:
5703 charm_type = charm_type or "lxc_proxy_charm"
5704 output = await self.vca_map[charm_type].upgrade_charm(
5705 ee_id=ee_id,
5706 path=path,
5707 charm_id=charm_id,
5708 charm_type=charm_type,
5709 timeout=timeout or self.timeout_ns_update,
5710 )
5711
5712 if output:
5713 return "COMPLETED", output
5714
5715 except (LcmException, asyncio.CancelledError):
5716 raise
5717
5718 except Exception as e:
5719
5720 self.logger.debug("Error upgrading charm {}".format(path))
5721
5722 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5723
5724 async def update(self, nsr_id, nslcmop_id):
5725 """Update NS according to different update types
5726
5727 This method performs upgrade of VNF instances then updates the revision
5728 number in VNF record
5729
5730 Args:
5731 nsr_id: Network service will be updated
5732 nslcmop_id: ns lcm operation id
5733
5734 Returns:
5735 It may raise DbException, LcmException, N2VCException, K8sException
5736
5737 """
5738 # Try to lock HA task here
5739 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5740 if not task_is_locked_by_me:
5741 return
5742
5743 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5744 self.logger.debug(logging_text + "Enter")
5745
5746 # Set the required variables to be filled up later
5747 db_nsr = None
5748 db_nslcmop_update = {}
5749 vnfr_update = {}
5750 nslcmop_operation_state = None
5751 db_nsr_update = {}
5752 error_description_nslcmop = ""
5753 exc = None
5754 change_type = "updated"
5755 detailed_status = ""
5756
5757 try:
5758 # wait for any previous tasks in process
5759 step = "Waiting for previous operations to terminate"
5760 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5761 self._write_ns_status(
5762 nsr_id=nsr_id,
5763 ns_state=None,
5764 current_operation="UPDATING",
5765 current_operation_id=nslcmop_id,
5766 )
5767
5768 step = "Getting nslcmop from database"
5769 db_nslcmop = self.db.get_one(
5770 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5771 )
5772 update_type = db_nslcmop["operationParams"]["updateType"]
5773
5774 step = "Getting nsr from database"
5775 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5776 old_operational_status = db_nsr["operational-status"]
5777 db_nsr_update["operational-status"] = "updating"
5778 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5779 nsr_deployed = db_nsr["_admin"].get("deployed")
5780
5781 if update_type == "CHANGE_VNFPKG":
5782
5783 # Get the input parameters given through update request
5784 vnf_instance_id = db_nslcmop["operationParams"][
5785 "changeVnfPackageData"
5786 ].get("vnfInstanceId")
5787
5788 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5789 "vnfdId"
5790 )
5791 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5792
5793 step = "Getting vnfr from database"
5794 db_vnfr = self.db.get_one(
5795 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5796 )
5797
5798 step = "Getting vnfds from database"
5799 # Latest VNFD
5800 latest_vnfd = self.db.get_one(
5801 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5802 )
5803 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5804
5805 # Current VNFD
5806 current_vnf_revision = db_vnfr.get("revision", 1)
5807 current_vnfd = self.db.get_one(
5808 "vnfds_revisions",
5809 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5810 fail_on_empty=False,
5811 )
5812 # Charm artifact paths will be filled up later
5813 (
5814 current_charm_artifact_path,
5815 target_charm_artifact_path,
5816 charm_artifact_paths,
5817 ) = ([], [], [])
5818
5819 step = "Checking if revision has changed in VNFD"
5820 if current_vnf_revision != latest_vnfd_revision:
5821
5822 change_type = "policy_updated"
5823
5824 # There is new revision of VNFD, update operation is required
5825 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5826 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5827
5828 step = "Removing the VNFD packages if they exist in the local path"
5829 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5830 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5831
5832 step = "Get the VNFD packages from FSMongo"
5833 self.fs.sync(from_path=latest_vnfd_path)
5834 self.fs.sync(from_path=current_vnfd_path)
5835
5836 step = (
5837 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5838 )
5839 base_folder = latest_vnfd["_admin"]["storage"]
5840
5841 for charm_index, charm_deployed in enumerate(
5842 get_iterable(nsr_deployed, "VCA")
5843 ):
5844 vnf_index = db_vnfr.get("member-vnf-index-ref")
5845
5846 # Getting charm-id and charm-type
5847 if charm_deployed.get("member-vnf-index") == vnf_index:
5848 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5849 charm_type = charm_deployed.get("type")
5850
5851 # Getting ee-id
5852 ee_id = charm_deployed.get("ee_id")
5853
5854 step = "Getting descriptor config"
5855 descriptor_config = get_configuration(
5856 current_vnfd, current_vnfd["id"]
5857 )
5858
5859 if "execution-environment-list" in descriptor_config:
5860 ee_list = descriptor_config.get(
5861 "execution-environment-list", []
5862 )
5863 else:
5864 ee_list = []
5865
5866 # There could be several charm used in the same VNF
5867 for ee_item in ee_list:
5868 if ee_item.get("juju"):
5869
5870 step = "Getting charm name"
5871 charm_name = ee_item["juju"].get("charm")
5872
5873 step = "Setting Charm artifact paths"
5874 current_charm_artifact_path.append(
5875 get_charm_artifact_path(
5876 base_folder,
5877 charm_name,
5878 charm_type,
5879 current_vnf_revision,
5880 )
5881 )
5882 target_charm_artifact_path.append(
5883 get_charm_artifact_path(
5884 base_folder,
5885 charm_name,
5886 charm_type,
5887 latest_vnfd_revision,
5888 )
5889 )
5890
5891 charm_artifact_paths = zip(
5892 current_charm_artifact_path, target_charm_artifact_path
5893 )
5894
5895 step = "Checking if software version has changed in VNFD"
5896 if find_software_version(current_vnfd) != find_software_version(
5897 latest_vnfd
5898 ):
5899
5900 step = "Checking if existing VNF has charm"
5901 for current_charm_path, target_charm_path in list(
5902 charm_artifact_paths
5903 ):
5904 if current_charm_path:
5905 raise LcmException(
5906 "Software version change is not supported as VNF instance {} has charm.".format(
5907 vnf_instance_id
5908 )
5909 )
5910
5911 # There is no change in the charm package, then redeploy the VNF
5912 # based on new descriptor
5913 step = "Redeploying VNF"
5914 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5915 (result, detailed_status) = await self._ns_redeploy_vnf(
5916 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5917 )
5918 if result == "FAILED":
5919 nslcmop_operation_state = result
5920 error_description_nslcmop = detailed_status
5921 db_nslcmop_update["detailed-status"] = detailed_status
5922 self.logger.debug(
5923 logging_text
5924 + " step {} Done with result {} {}".format(
5925 step, nslcmop_operation_state, detailed_status
5926 )
5927 )
5928
5929 else:
5930 step = "Checking if any charm package has changed or not"
5931 for current_charm_path, target_charm_path in list(
5932 charm_artifact_paths
5933 ):
5934 if (
5935 current_charm_path
5936 and target_charm_path
5937 and self.check_charm_hash_changed(
5938 current_charm_path, target_charm_path
5939 )
5940 ):
5941
5942 step = "Checking whether VNF uses juju bundle"
5943 if check_juju_bundle_existence(current_vnfd):
5944
5945 raise LcmException(
5946 "Charm upgrade is not supported for the instance which"
5947 " uses juju-bundle: {}".format(
5948 check_juju_bundle_existence(current_vnfd)
5949 )
5950 )
5951
5952 step = "Upgrading Charm"
5953 (
5954 result,
5955 detailed_status,
5956 ) = await self._ns_charm_upgrade(
5957 ee_id=ee_id,
5958 charm_id=charm_id,
5959 charm_type=charm_type,
5960 path=self.fs.path + target_charm_path,
5961 timeout=timeout_seconds,
5962 )
5963
5964 if result == "FAILED":
5965 nslcmop_operation_state = result
5966 error_description_nslcmop = detailed_status
5967
5968 db_nslcmop_update["detailed-status"] = detailed_status
5969 self.logger.debug(
5970 logging_text
5971 + " step {} Done with result {} {}".format(
5972 step, nslcmop_operation_state, detailed_status
5973 )
5974 )
5975
5976 step = "Updating policies"
5977 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5978 result = "COMPLETED"
5979 detailed_status = "Done"
5980 db_nslcmop_update["detailed-status"] = "Done"
5981
5982 # If nslcmop_operation_state is None, no operation has failed.
5983 if not nslcmop_operation_state:
5984 nslcmop_operation_state = "COMPLETED"
5985
5986 # If update CHANGE_VNFPKG nslcmop_operation is successful
5987 # vnf revision need to be updated
5988 vnfr_update["revision"] = latest_vnfd_revision
5989 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5990
5991 self.logger.debug(
5992 logging_text
5993 + " task Done with result {} {}".format(
5994 nslcmop_operation_state, detailed_status
5995 )
5996 )
5997 elif update_type == "REMOVE_VNF":
5998 # This part is included in https://osm.etsi.org/gerrit/11876
5999 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6000 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6001 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6002 step = "Removing VNF"
6003 (result, detailed_status) = await self.remove_vnf(
6004 nsr_id, nslcmop_id, vnf_instance_id
6005 )
6006 if result == "FAILED":
6007 nslcmop_operation_state = result
6008 error_description_nslcmop = detailed_status
6009 db_nslcmop_update["detailed-status"] = detailed_status
6010 change_type = "vnf_terminated"
6011 if not nslcmop_operation_state:
6012 nslcmop_operation_state = "COMPLETED"
6013 self.logger.debug(
6014 logging_text
6015 + " task Done with result {} {}".format(
6016 nslcmop_operation_state, detailed_status
6017 )
6018 )
6019
6020 elif update_type == "OPERATE_VNF":
6021 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6022 "vnfInstanceId"
6023 ]
6024 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6025 "changeStateTo"
6026 ]
6027 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6028 "additionalParam"
6029 ]
6030 (result, detailed_status) = await self.rebuild_start_stop(
6031 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6032 )
6033 if result == "FAILED":
6034 nslcmop_operation_state = result
6035 error_description_nslcmop = detailed_status
6036 db_nslcmop_update["detailed-status"] = detailed_status
6037 if not nslcmop_operation_state:
6038 nslcmop_operation_state = "COMPLETED"
6039 self.logger.debug(
6040 logging_text
6041 + " task Done with result {} {}".format(
6042 nslcmop_operation_state, detailed_status
6043 )
6044 )
6045
6046 # If nslcmop_operation_state is None, none of the operations failed,
6047 # i.e. all operations completed successfully overall.
6048 if not nslcmop_operation_state:
6049 nslcmop_operation_state = "COMPLETED"
6050 db_nsr_update["operational-status"] = old_operational_status
6051
6052 except (DbException, LcmException, N2VCException, K8sException) as e:
6053 self.logger.error(logging_text + "Exit Exception {}".format(e))
6054 exc = e
6055 except asyncio.CancelledError:
6056 self.logger.error(
6057 logging_text + "Cancelled Exception while '{}'".format(step)
6058 )
6059 exc = "Operation was cancelled"
6060 except asyncio.TimeoutError:
6061 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6062 exc = "Timeout"
6063 except Exception as e:
6064 exc = traceback.format_exc()
6065 self.logger.critical(
6066 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6067 exc_info=True,
6068 )
6069 finally:
6070 if exc:
6071 db_nslcmop_update[
6072 "detailed-status"
6073 ] = (
6074 detailed_status
6075 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6076 nslcmop_operation_state = "FAILED"
6077 db_nsr_update["operational-status"] = old_operational_status
6078 if db_nsr:
6079 self._write_ns_status(
6080 nsr_id=nsr_id,
6081 ns_state=db_nsr["nsState"],
6082 current_operation="IDLE",
6083 current_operation_id=None,
6084 other_update=db_nsr_update,
6085 )
6086
6087 self._write_op_status(
6088 op_id=nslcmop_id,
6089 stage="",
6090 error_message=error_description_nslcmop,
6091 operation_state=nslcmop_operation_state,
6092 other_update=db_nslcmop_update,
6093 )
6094
6095 if nslcmop_operation_state:
6096 try:
6097 msg = {
6098 "nsr_id": nsr_id,
6099 "nslcmop_id": nslcmop_id,
6100 "operationState": nslcmop_operation_state,
6101 }
6102 if change_type in ("vnf_terminated", "policy_updated"):
6103 msg.update({"vnf_member_index": member_vnf_index})
6104 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6105 except Exception as e:
6106 self.logger.error(
6107 logging_text + "kafka_write notification Exception {}".format(e)
6108 )
6109 self.logger.debug(logging_text + "Exit")
6110 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6111 return nslcmop_operation_state, detailed_status
6112
6113 async def scale(self, nsr_id, nslcmop_id):
6114 # Try to lock HA task here
6115 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6116 if not task_is_locked_by_me:
6117 return
6118
6119 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6120 stage = ["", "", ""]
6121 tasks_dict_info = {}
6122 # ^ stage, step, VIM progress
6123 self.logger.debug(logging_text + "Enter")
6124 # get all needed from database
6125 db_nsr = None
6126 db_nslcmop_update = {}
6127 db_nsr_update = {}
6128 exc = None
6129 # in case of error, indicates what part of scale was failed to put nsr at error status
6130 scale_process = None
6131 old_operational_status = ""
6132 old_config_status = ""
6133 nsi_id = None
6134 try:
6135 # wait for any previous tasks in process
6136 step = "Waiting for previous operations to terminate"
6137 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6138 self._write_ns_status(
6139 nsr_id=nsr_id,
6140 ns_state=None,
6141 current_operation="SCALING",
6142 current_operation_id=nslcmop_id,
6143 )
6144
6145 step = "Getting nslcmop from database"
6146 self.logger.debug(
6147 step + " after having waited for previous tasks to be completed"
6148 )
6149 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6150
6151 step = "Getting nsr from database"
6152 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6153 old_operational_status = db_nsr["operational-status"]
6154 old_config_status = db_nsr["config-status"]
6155
6156 step = "Parsing scaling parameters"
6157 db_nsr_update["operational-status"] = "scaling"
6158 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6159 nsr_deployed = db_nsr["_admin"].get("deployed")
6160
6161 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6162 "scaleByStepData"
6163 ]["member-vnf-index"]
6164 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6165 "scaleByStepData"
6166 ]["scaling-group-descriptor"]
6167 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6168 # for backward compatibility
6169 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6170 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6171 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6172 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6173
6174 step = "Getting vnfr from database"
6175 db_vnfr = self.db.get_one(
6176 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6177 )
6178
6179 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6180
6181 step = "Getting vnfd from database"
6182 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6183
6184 base_folder = db_vnfd["_admin"]["storage"]
6185
6186 step = "Getting scaling-group-descriptor"
6187 scaling_descriptor = find_in_list(
6188 get_scaling_aspect(db_vnfd),
6189 lambda scale_desc: scale_desc["name"] == scaling_group,
6190 )
6191 if not scaling_descriptor:
6192 raise LcmException(
6193 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6194 "at vnfd:scaling-group-descriptor".format(scaling_group)
6195 )
6196
6197 step = "Sending scale order to VIM"
6198 # TODO check if ns is in a proper status
6199 nb_scale_op = 0
6200 if not db_nsr["_admin"].get("scaling-group"):
6201 self.update_db_2(
6202 "nsrs",
6203 nsr_id,
6204 {
6205 "_admin.scaling-group": [
6206 {"name": scaling_group, "nb-scale-op": 0}
6207 ]
6208 },
6209 )
6210 admin_scale_index = 0
6211 else:
6212 for admin_scale_index, admin_scale_info in enumerate(
6213 db_nsr["_admin"]["scaling-group"]
6214 ):
6215 if admin_scale_info["name"] == scaling_group:
6216 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6217 break
6218 else: # not found, set index one plus last element and add new entry with the name
6219 admin_scale_index += 1
6220 db_nsr_update[
6221 "_admin.scaling-group.{}.name".format(admin_scale_index)
6222 ] = scaling_group
6223
6224 vca_scaling_info = []
6225 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6226 if scaling_type == "SCALE_OUT":
6227 if "aspect-delta-details" not in scaling_descriptor:
6228 raise LcmException(
6229 "Aspect delta details not fount in scaling descriptor {}".format(
6230 scaling_descriptor["name"]
6231 )
6232 )
6233 # count if max-instance-count is reached
6234 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6235
6236 scaling_info["scaling_direction"] = "OUT"
6237 scaling_info["vdu-create"] = {}
6238 scaling_info["kdu-create"] = {}
6239 for delta in deltas:
6240 for vdu_delta in delta.get("vdu-delta", {}):
6241 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6242 # vdu_index also provides the number of instance of the targeted vdu
6243 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6244 cloud_init_text = self._get_vdu_cloud_init_content(
6245 vdud, db_vnfd
6246 )
6247 if cloud_init_text:
6248 additional_params = (
6249 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6250 or {}
6251 )
6252 cloud_init_list = []
6253
6254 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6255 max_instance_count = 10
6256 if vdu_profile and "max-number-of-instances" in vdu_profile:
6257 max_instance_count = vdu_profile.get(
6258 "max-number-of-instances", 10
6259 )
6260
6261 default_instance_num = get_number_of_instances(
6262 db_vnfd, vdud["id"]
6263 )
6264 instances_number = vdu_delta.get("number-of-instances", 1)
6265 nb_scale_op += instances_number
6266
6267 new_instance_count = nb_scale_op + default_instance_num
6268 # Check whether the new count exceeds the max while the vdu count is still below it.
6269 # If so, assign the new instance count
6270 if new_instance_count > max_instance_count > vdu_count:
6271 instances_number = new_instance_count - max_instance_count
6272 else:
6273 instances_number = instances_number
6274
6275 if new_instance_count > max_instance_count:
6276 raise LcmException(
6277 "reached the limit of {} (max-instance-count) "
6278 "scaling-out operations for the "
6279 "scaling-group-descriptor '{}'".format(
6280 nb_scale_op, scaling_group
6281 )
6282 )
6283 for x in range(vdu_delta.get("number-of-instances", 1)):
6284 if cloud_init_text:
6285 # TODO Information of its own ip is not available because db_vnfr is not updated.
6286 additional_params["OSM"] = get_osm_params(
6287 db_vnfr, vdu_delta["id"], vdu_index + x
6288 )
6289 cloud_init_list.append(
6290 self._parse_cloud_init(
6291 cloud_init_text,
6292 additional_params,
6293 db_vnfd["id"],
6294 vdud["id"],
6295 )
6296 )
6297 vca_scaling_info.append(
6298 {
6299 "osm_vdu_id": vdu_delta["id"],
6300 "member-vnf-index": vnf_index,
6301 "type": "create",
6302 "vdu_index": vdu_index + x,
6303 }
6304 )
6305 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6306 for kdu_delta in delta.get("kdu-resource-delta", {}):
6307 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6308 kdu_name = kdu_profile["kdu-name"]
6309 resource_name = kdu_profile.get("resource-name", "")
6310
6311 # Might have different kdus in the same delta
6312 # Should have list for each kdu
6313 if not scaling_info["kdu-create"].get(kdu_name, None):
6314 scaling_info["kdu-create"][kdu_name] = []
6315
6316 kdur = get_kdur(db_vnfr, kdu_name)
6317 if kdur.get("helm-chart"):
6318 k8s_cluster_type = "helm-chart-v3"
6319 self.logger.debug("kdur: {}".format(kdur))
6320 if (
6321 kdur.get("helm-version")
6322 and kdur.get("helm-version") == "v2"
6323 ):
6324 k8s_cluster_type = "helm-chart"
6325 elif kdur.get("juju-bundle"):
6326 k8s_cluster_type = "juju-bundle"
6327 else:
6328 raise LcmException(
6329 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6330 "juju-bundle. Maybe an old NBI version is running".format(
6331 db_vnfr["member-vnf-index-ref"], kdu_name
6332 )
6333 )
6334
6335 max_instance_count = 10
6336 if kdu_profile and "max-number-of-instances" in kdu_profile:
6337 max_instance_count = kdu_profile.get(
6338 "max-number-of-instances", 10
6339 )
6340
6341 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6342 deployed_kdu, _ = get_deployed_kdu(
6343 nsr_deployed, kdu_name, vnf_index
6344 )
6345 if deployed_kdu is None:
6346 raise LcmException(
6347 "KDU '{}' for vnf '{}' not deployed".format(
6348 kdu_name, vnf_index
6349 )
6350 )
6351 kdu_instance = deployed_kdu.get("kdu-instance")
6352 instance_num = await self.k8scluster_map[
6353 k8s_cluster_type
6354 ].get_scale_count(
6355 resource_name,
6356 kdu_instance,
6357 vca_id=vca_id,
6358 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6359 kdu_model=deployed_kdu.get("kdu-model"),
6360 )
6361 kdu_replica_count = instance_num + kdu_delta.get(
6362 "number-of-instances", 1
6363 )
6364
6365 # Check whether the new count exceeds the max while instance_num is still below it.
6366 # If so, cap the kdu replica count at the max instance number
6367 if kdu_replica_count > max_instance_count > instance_num:
6368 kdu_replica_count = max_instance_count
6369 if kdu_replica_count > max_instance_count:
6370 raise LcmException(
6371 "reached the limit of {} (max-instance-count) "
6372 "scaling-out operations for the "
6373 "scaling-group-descriptor '{}'".format(
6374 instance_num, scaling_group
6375 )
6376 )
6377
6378 for x in range(kdu_delta.get("number-of-instances", 1)):
6379 vca_scaling_info.append(
6380 {
6381 "osm_kdu_id": kdu_name,
6382 "member-vnf-index": vnf_index,
6383 "type": "create",
6384 "kdu_index": instance_num + x - 1,
6385 }
6386 )
6387 scaling_info["kdu-create"][kdu_name].append(
6388 {
6389 "member-vnf-index": vnf_index,
6390 "type": "create",
6391 "k8s-cluster-type": k8s_cluster_type,
6392 "resource-name": resource_name,
6393 "scale": kdu_replica_count,
6394 }
6395 )
6396 elif scaling_type == "SCALE_IN":
6397 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6398
6399 scaling_info["scaling_direction"] = "IN"
6400 scaling_info["vdu-delete"] = {}
6401 scaling_info["kdu-delete"] = {}
6402
6403 for delta in deltas:
6404 for vdu_delta in delta.get("vdu-delta", {}):
6405 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6406 min_instance_count = 0
6407 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6408 if vdu_profile and "min-number-of-instances" in vdu_profile:
6409 min_instance_count = vdu_profile["min-number-of-instances"]
6410
6411 default_instance_num = get_number_of_instances(
6412 db_vnfd, vdu_delta["id"]
6413 )
6414 instance_num = vdu_delta.get("number-of-instances", 1)
6415 nb_scale_op -= instance_num
6416
6417 new_instance_count = nb_scale_op + default_instance_num
6418
6419 if new_instance_count < min_instance_count < vdu_count:
6420 instances_number = min_instance_count - new_instance_count
6421 else:
6422 instances_number = instance_num
6423
6424 if new_instance_count < min_instance_count:
6425 raise LcmException(
6426 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6427 "scaling-group-descriptor '{}'".format(
6428 nb_scale_op, scaling_group
6429 )
6430 )
6431 for x in range(vdu_delta.get("number-of-instances", 1)):
6432 vca_scaling_info.append(
6433 {
6434 "osm_vdu_id": vdu_delta["id"],
6435 "member-vnf-index": vnf_index,
6436 "type": "delete",
6437 "vdu_index": vdu_index - 1 - x,
6438 }
6439 )
6440 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6441 for kdu_delta in delta.get("kdu-resource-delta", {}):
6442 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6443 kdu_name = kdu_profile["kdu-name"]
6444 resource_name = kdu_profile.get("resource-name", "")
6445
6446 if not scaling_info["kdu-delete"].get(kdu_name, None):
6447 scaling_info["kdu-delete"][kdu_name] = []
6448
6449 kdur = get_kdur(db_vnfr, kdu_name)
6450 if kdur.get("helm-chart"):
6451 k8s_cluster_type = "helm-chart-v3"
6452 self.logger.debug("kdur: {}".format(kdur))
6453 if (
6454 kdur.get("helm-version")
6455 and kdur.get("helm-version") == "v2"
6456 ):
6457 k8s_cluster_type = "helm-chart"
6458 elif kdur.get("juju-bundle"):
6459 k8s_cluster_type = "juju-bundle"
6460 else:
6461 raise LcmException(
6462 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6463 "juju-bundle. Maybe an old NBI version is running".format(
6464 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6465 )
6466 )
6467
6468 min_instance_count = 0
6469 if kdu_profile and "min-number-of-instances" in kdu_profile:
6470 min_instance_count = kdu_profile["min-number-of-instances"]
6471
6472 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6473 deployed_kdu, _ = get_deployed_kdu(
6474 nsr_deployed, kdu_name, vnf_index
6475 )
6476 if deployed_kdu is None:
6477 raise LcmException(
6478 "KDU '{}' for vnf '{}' not deployed".format(
6479 kdu_name, vnf_index
6480 )
6481 )
6482 kdu_instance = deployed_kdu.get("kdu-instance")
6483 instance_num = await self.k8scluster_map[
6484 k8s_cluster_type
6485 ].get_scale_count(
6486 resource_name,
6487 kdu_instance,
6488 vca_id=vca_id,
6489 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6490 kdu_model=deployed_kdu.get("kdu-model"),
6491 )
6492 kdu_replica_count = instance_num - kdu_delta.get(
6493 "number-of-instances", 1
6494 )
6495
6496 if kdu_replica_count < min_instance_count < instance_num:
6497 kdu_replica_count = min_instance_count
6498 if kdu_replica_count < min_instance_count:
6499 raise LcmException(
6500 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6501 "scaling-group-descriptor '{}'".format(
6502 instance_num, scaling_group
6503 )
6504 )
6505
6506 for x in range(kdu_delta.get("number-of-instances", 1)):
6507 vca_scaling_info.append(
6508 {
6509 "osm_kdu_id": kdu_name,
6510 "member-vnf-index": vnf_index,
6511 "type": "delete",
6512 "kdu_index": instance_num - x - 1,
6513 }
6514 )
6515 scaling_info["kdu-delete"][kdu_name].append(
6516 {
6517 "member-vnf-index": vnf_index,
6518 "type": "delete",
6519 "k8s-cluster-type": k8s_cluster_type,
6520 "resource-name": resource_name,
6521 "scale": kdu_replica_count,
6522 }
6523 )
6524
6525 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6526 vdu_delete = copy(scaling_info.get("vdu-delete"))
6527 if scaling_info["scaling_direction"] == "IN":
6528 for vdur in reversed(db_vnfr["vdur"]):
6529 if vdu_delete.get(vdur["vdu-id-ref"]):
6530 vdu_delete[vdur["vdu-id-ref"]] -= 1
6531 scaling_info["vdu"].append(
6532 {
6533 "name": vdur.get("name") or vdur.get("vdu-name"),
6534 "vdu_id": vdur["vdu-id-ref"],
6535 "interface": [],
6536 }
6537 )
6538 for interface in vdur["interfaces"]:
6539 scaling_info["vdu"][-1]["interface"].append(
6540 {
6541 "name": interface["name"],
6542 "ip_address": interface["ip-address"],
6543 "mac_address": interface.get("mac-address"),
6544 }
6545 )
6546 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6547
6548 # PRE-SCALE BEGIN
6549 step = "Executing pre-scale vnf-config-primitive"
6550 if scaling_descriptor.get("scaling-config-action"):
6551 for scaling_config_action in scaling_descriptor[
6552 "scaling-config-action"
6553 ]:
6554 if (
6555 scaling_config_action.get("trigger") == "pre-scale-in"
6556 and scaling_type == "SCALE_IN"
6557 ) or (
6558 scaling_config_action.get("trigger") == "pre-scale-out"
6559 and scaling_type == "SCALE_OUT"
6560 ):
6561 vnf_config_primitive = scaling_config_action[
6562 "vnf-config-primitive-name-ref"
6563 ]
6564 step = db_nslcmop_update[
6565 "detailed-status"
6566 ] = "executing pre-scale scaling-config-action '{}'".format(
6567 vnf_config_primitive
6568 )
6569
6570 # look for primitive
6571 for config_primitive in (
6572 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6573 ).get("config-primitive", ()):
6574 if config_primitive["name"] == vnf_config_primitive:
6575 break
6576 else:
6577 raise LcmException(
6578 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6579 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6580 "primitive".format(scaling_group, vnf_config_primitive)
6581 )
6582
6583 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6584 if db_vnfr.get("additionalParamsForVnf"):
6585 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6586
6587 scale_process = "VCA"
6588 db_nsr_update["config-status"] = "configuring pre-scaling"
6589 primitive_params = self._map_primitive_params(
6590 config_primitive, {}, vnfr_params
6591 )
6592
6593 # Pre-scale retry check: Check if this sub-operation has been executed before
6594 op_index = self._check_or_add_scale_suboperation(
6595 db_nslcmop,
6596 vnf_index,
6597 vnf_config_primitive,
6598 primitive_params,
6599 "PRE-SCALE",
6600 )
6601 if op_index == self.SUBOPERATION_STATUS_SKIP:
6602 # Skip sub-operation
6603 result = "COMPLETED"
6604 result_detail = "Done"
6605 self.logger.debug(
6606 logging_text
6607 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6608 vnf_config_primitive, result, result_detail
6609 )
6610 )
6611 else:
6612 if op_index == self.SUBOPERATION_STATUS_NEW:
6613 # New sub-operation: Get index of this sub-operation
6614 op_index = (
6615 len(db_nslcmop.get("_admin", {}).get("operations"))
6616 - 1
6617 )
6618 self.logger.debug(
6619 logging_text
6620 + "vnf_config_primitive={} New sub-operation".format(
6621 vnf_config_primitive
6622 )
6623 )
6624 else:
6625 # retry: Get registered params for this existing sub-operation
6626 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6627 op_index
6628 ]
6629 vnf_index = op.get("member_vnf_index")
6630 vnf_config_primitive = op.get("primitive")
6631 primitive_params = op.get("primitive_params")
6632 self.logger.debug(
6633 logging_text
6634 + "vnf_config_primitive={} Sub-operation retry".format(
6635 vnf_config_primitive
6636 )
6637 )
6638 # Execute the primitive, either with new (first-time) or registered (reintent) args
6639 ee_descriptor_id = config_primitive.get(
6640 "execution-environment-ref"
6641 )
6642 primitive_name = config_primitive.get(
6643 "execution-environment-primitive", vnf_config_primitive
6644 )
6645 ee_id, vca_type = self._look_for_deployed_vca(
6646 nsr_deployed["VCA"],
6647 member_vnf_index=vnf_index,
6648 vdu_id=None,
6649 vdu_count_index=None,
6650 ee_descriptor_id=ee_descriptor_id,
6651 )
6652 result, result_detail = await self._ns_execute_primitive(
6653 ee_id,
6654 primitive_name,
6655 primitive_params,
6656 vca_type=vca_type,
6657 vca_id=vca_id,
6658 )
6659 self.logger.debug(
6660 logging_text
6661 + "vnf_config_primitive={} Done with result {} {}".format(
6662 vnf_config_primitive, result, result_detail
6663 )
6664 )
6665 # Update operationState = COMPLETED | FAILED
6666 self._update_suboperation_status(
6667 db_nslcmop, op_index, result, result_detail
6668 )
6669
6670 if result == "FAILED":
6671 raise LcmException(result_detail)
6672 db_nsr_update["config-status"] = old_config_status
6673 scale_process = None
6674 # PRE-SCALE END
6675
6676 db_nsr_update[
6677 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6678 ] = nb_scale_op
6679 db_nsr_update[
6680 "_admin.scaling-group.{}.time".format(admin_scale_index)
6681 ] = time()
6682
6683 # SCALE-IN VCA - BEGIN
6684 if vca_scaling_info:
6685 step = db_nslcmop_update[
6686 "detailed-status"
6687 ] = "Deleting the execution environments"
6688 scale_process = "VCA"
6689 for vca_info in vca_scaling_info:
6690 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6691 member_vnf_index = str(vca_info["member-vnf-index"])
6692 self.logger.debug(
6693 logging_text + "vdu info: {}".format(vca_info)
6694 )
6695 if vca_info.get("osm_vdu_id"):
6696 vdu_id = vca_info["osm_vdu_id"]
6697 vdu_index = int(vca_info["vdu_index"])
6698 stage[
6699 1
6700 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6701 member_vnf_index, vdu_id, vdu_index
6702 )
6703 stage[2] = step = "Scaling in VCA"
6704 self._write_op_status(op_id=nslcmop_id, stage=stage)
6705 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6706 config_update = db_nsr["configurationStatus"]
6707 for vca_index, vca in enumerate(vca_update):
6708 if (
6709 (vca or vca.get("ee_id"))
6710 and vca["member-vnf-index"] == member_vnf_index
6711 and vca["vdu_count_index"] == vdu_index
6712 ):
6713 if vca.get("vdu_id"):
6714 config_descriptor = get_configuration(
6715 db_vnfd, vca.get("vdu_id")
6716 )
6717 elif vca.get("kdu_name"):
6718 config_descriptor = get_configuration(
6719 db_vnfd, vca.get("kdu_name")
6720 )
6721 else:
6722 config_descriptor = get_configuration(
6723 db_vnfd, db_vnfd["id"]
6724 )
6725 operation_params = (
6726 db_nslcmop.get("operationParams") or {}
6727 )
6728 exec_terminate_primitives = not operation_params.get(
6729 "skip_terminate_primitives"
6730 ) and vca.get("needed_terminate")
6731 task = asyncio.ensure_future(
6732 asyncio.wait_for(
6733 self.destroy_N2VC(
6734 logging_text,
6735 db_nslcmop,
6736 vca,
6737 config_descriptor,
6738 vca_index,
6739 destroy_ee=True,
6740 exec_primitives=exec_terminate_primitives,
6741 scaling_in=True,
6742 vca_id=vca_id,
6743 ),
6744 timeout=self.timeout_charm_delete,
6745 )
6746 )
6747 tasks_dict_info[task] = "Terminating VCA {}".format(
6748 vca.get("ee_id")
6749 )
6750 del vca_update[vca_index]
6751 del config_update[vca_index]
6752 # wait for pending tasks of terminate primitives
6753 if tasks_dict_info:
6754 self.logger.debug(
6755 logging_text
6756 + "Waiting for tasks {}".format(
6757 list(tasks_dict_info.keys())
6758 )
6759 )
6760 error_list = await self._wait_for_tasks(
6761 logging_text,
6762 tasks_dict_info,
6763 min(
6764 self.timeout_charm_delete, self.timeout_ns_terminate
6765 ),
6766 stage,
6767 nslcmop_id,
6768 )
6769 tasks_dict_info.clear()
6770 if error_list:
6771 raise LcmException("; ".join(error_list))
6772
6773 db_vca_and_config_update = {
6774 "_admin.deployed.VCA": vca_update,
6775 "configurationStatus": config_update,
6776 }
6777 self.update_db_2(
6778 "nsrs", db_nsr["_id"], db_vca_and_config_update
6779 )
6780 scale_process = None
6781 # SCALE-IN VCA - END
6782
6783 # SCALE RO - BEGIN
6784 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6785 scale_process = "RO"
6786 if self.ro_config.get("ng"):
6787 await self._scale_ng_ro(
6788 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6789 )
6790 scaling_info.pop("vdu-create", None)
6791 scaling_info.pop("vdu-delete", None)
6792
6793 scale_process = None
6794 # SCALE RO - END
6795
6796 # SCALE KDU - BEGIN
6797 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6798 scale_process = "KDU"
6799 await self._scale_kdu(
6800 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6801 )
6802 scaling_info.pop("kdu-create", None)
6803 scaling_info.pop("kdu-delete", None)
6804
6805 scale_process = None
6806 # SCALE KDU - END
6807
6808 if db_nsr_update:
6809 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6810
6811 # SCALE-UP VCA - BEGIN
6812 if vca_scaling_info:
6813 step = db_nslcmop_update[
6814 "detailed-status"
6815 ] = "Creating new execution environments"
6816 scale_process = "VCA"
6817 for vca_info in vca_scaling_info:
6818 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6819 member_vnf_index = str(vca_info["member-vnf-index"])
6820 self.logger.debug(
6821 logging_text + "vdu info: {}".format(vca_info)
6822 )
6823 vnfd_id = db_vnfr["vnfd-ref"]
6824 if vca_info.get("osm_vdu_id"):
6825 vdu_index = int(vca_info["vdu_index"])
6826 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6827 if db_vnfr.get("additionalParamsForVnf"):
6828 deploy_params.update(
6829 parse_yaml_strings(
6830 db_vnfr["additionalParamsForVnf"].copy()
6831 )
6832 )
6833 descriptor_config = get_configuration(
6834 db_vnfd, db_vnfd["id"]
6835 )
6836 if descriptor_config:
6837 vdu_id = None
6838 vdu_name = None
6839 kdu_name = None
6840 self._deploy_n2vc(
6841 logging_text=logging_text
6842 + "member_vnf_index={} ".format(member_vnf_index),
6843 db_nsr=db_nsr,
6844 db_vnfr=db_vnfr,
6845 nslcmop_id=nslcmop_id,
6846 nsr_id=nsr_id,
6847 nsi_id=nsi_id,
6848 vnfd_id=vnfd_id,
6849 vdu_id=vdu_id,
6850 kdu_name=kdu_name,
6851 member_vnf_index=member_vnf_index,
6852 vdu_index=vdu_index,
6853 vdu_name=vdu_name,
6854 deploy_params=deploy_params,
6855 descriptor_config=descriptor_config,
6856 base_folder=base_folder,
6857 task_instantiation_info=tasks_dict_info,
6858 stage=stage,
6859 )
6860 vdu_id = vca_info["osm_vdu_id"]
6861 vdur = find_in_list(
6862 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6863 )
6864 descriptor_config = get_configuration(db_vnfd, vdu_id)
6865 if vdur.get("additionalParams"):
6866 deploy_params_vdu = parse_yaml_strings(
6867 vdur["additionalParams"]
6868 )
6869 else:
6870 deploy_params_vdu = deploy_params
6871 deploy_params_vdu["OSM"] = get_osm_params(
6872 db_vnfr, vdu_id, vdu_count_index=vdu_index
6873 )
6874 if descriptor_config:
6875 vdu_name = None
6876 kdu_name = None
6877 stage[
6878 1
6879 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6880 member_vnf_index, vdu_id, vdu_index
6881 )
6882 stage[2] = step = "Scaling out VCA"
6883 self._write_op_status(op_id=nslcmop_id, stage=stage)
6884 self._deploy_n2vc(
6885 logging_text=logging_text
6886 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6887 member_vnf_index, vdu_id, vdu_index
6888 ),
6889 db_nsr=db_nsr,
6890 db_vnfr=db_vnfr,
6891 nslcmop_id=nslcmop_id,
6892 nsr_id=nsr_id,
6893 nsi_id=nsi_id,
6894 vnfd_id=vnfd_id,
6895 vdu_id=vdu_id,
6896 kdu_name=kdu_name,
6897 member_vnf_index=member_vnf_index,
6898 vdu_index=vdu_index,
6899 vdu_name=vdu_name,
6900 deploy_params=deploy_params_vdu,
6901 descriptor_config=descriptor_config,
6902 base_folder=base_folder,
6903 task_instantiation_info=tasks_dict_info,
6904 stage=stage,
6905 )
6906 # SCALE-UP VCA - END
6907 scale_process = None
6908
6909 # POST-SCALE BEGIN
6910 # execute primitive service POST-SCALING
6911 step = "Executing post-scale vnf-config-primitive"
6912 if scaling_descriptor.get("scaling-config-action"):
6913 for scaling_config_action in scaling_descriptor[
6914 "scaling-config-action"
6915 ]:
6916 if (
6917 scaling_config_action.get("trigger") == "post-scale-in"
6918 and scaling_type == "SCALE_IN"
6919 ) or (
6920 scaling_config_action.get("trigger") == "post-scale-out"
6921 and scaling_type == "SCALE_OUT"
6922 ):
6923 vnf_config_primitive = scaling_config_action[
6924 "vnf-config-primitive-name-ref"
6925 ]
6926 step = db_nslcmop_update[
6927 "detailed-status"
6928 ] = "executing post-scale scaling-config-action '{}'".format(
6929 vnf_config_primitive
6930 )
6931
6932 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6933 if db_vnfr.get("additionalParamsForVnf"):
6934 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6935
6936 # look for primitive
6937 for config_primitive in (
6938 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6939 ).get("config-primitive", ()):
6940 if config_primitive["name"] == vnf_config_primitive:
6941 break
6942 else:
6943 raise LcmException(
6944 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6945 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6946 "config-primitive".format(
6947 scaling_group, vnf_config_primitive
6948 )
6949 )
6950 scale_process = "VCA"
6951 db_nsr_update["config-status"] = "configuring post-scaling"
6952 primitive_params = self._map_primitive_params(
6953 config_primitive, {}, vnfr_params
6954 )
6955
6956 # Post-scale retry check: Check if this sub-operation has been executed before
6957 op_index = self._check_or_add_scale_suboperation(
6958 db_nslcmop,
6959 vnf_index,
6960 vnf_config_primitive,
6961 primitive_params,
6962 "POST-SCALE",
6963 )
6964 if op_index == self.SUBOPERATION_STATUS_SKIP:
6965 # Skip sub-operation
6966 result = "COMPLETED"
6967 result_detail = "Done"
6968 self.logger.debug(
6969 logging_text
6970 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6971 vnf_config_primitive, result, result_detail
6972 )
6973 )
6974 else:
6975 if op_index == self.SUBOPERATION_STATUS_NEW:
6976 # New sub-operation: Get index of this sub-operation
6977 op_index = (
6978 len(db_nslcmop.get("_admin", {}).get("operations"))
6979 - 1
6980 )
6981 self.logger.debug(
6982 logging_text
6983 + "vnf_config_primitive={} New sub-operation".format(
6984 vnf_config_primitive
6985 )
6986 )
6987 else:
6988 # retry: Get registered params for this existing sub-operation
6989 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6990 op_index
6991 ]
6992 vnf_index = op.get("member_vnf_index")
6993 vnf_config_primitive = op.get("primitive")
6994 primitive_params = op.get("primitive_params")
6995 self.logger.debug(
6996 logging_text
6997 + "vnf_config_primitive={} Sub-operation retry".format(
6998 vnf_config_primitive
6999 )
7000 )
7001 # Execute the primitive, either with new (first-time) or registered (reintent) args
7002 ee_descriptor_id = config_primitive.get(
7003 "execution-environment-ref"
7004 )
7005 primitive_name = config_primitive.get(
7006 "execution-environment-primitive", vnf_config_primitive
7007 )
7008 ee_id, vca_type = self._look_for_deployed_vca(
7009 nsr_deployed["VCA"],
7010 member_vnf_index=vnf_index,
7011 vdu_id=None,
7012 vdu_count_index=None,
7013 ee_descriptor_id=ee_descriptor_id,
7014 )
7015 result, result_detail = await self._ns_execute_primitive(
7016 ee_id,
7017 primitive_name,
7018 primitive_params,
7019 vca_type=vca_type,
7020 vca_id=vca_id,
7021 )
7022 self.logger.debug(
7023 logging_text
7024 + "vnf_config_primitive={} Done with result {} {}".format(
7025 vnf_config_primitive, result, result_detail
7026 )
7027 )
7028 # Update operationState = COMPLETED | FAILED
7029 self._update_suboperation_status(
7030 db_nslcmop, op_index, result, result_detail
7031 )
7032
7033 if result == "FAILED":
7034 raise LcmException(result_detail)
7035 db_nsr_update["config-status"] = old_config_status
7036 scale_process = None
7037 # POST-SCALE END
7038
7039 db_nsr_update[
7040 "detailed-status"
7041 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7042 db_nsr_update["operational-status"] = (
7043 "running"
7044 if old_operational_status == "failed"
7045 else old_operational_status
7046 )
7047 db_nsr_update["config-status"] = old_config_status
7048 return
7049 except (
7050 ROclient.ROClientException,
7051 DbException,
7052 LcmException,
7053 NgRoException,
7054 ) as e:
7055 self.logger.error(logging_text + "Exit Exception {}".format(e))
7056 exc = e
7057 except asyncio.CancelledError:
7058 self.logger.error(
7059 logging_text + "Cancelled Exception while '{}'".format(step)
7060 )
7061 exc = "Operation was cancelled"
7062 except Exception as e:
7063 exc = traceback.format_exc()
7064 self.logger.critical(
7065 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7066 exc_info=True,
7067 )
7068 finally:
7069 self._write_ns_status(
7070 nsr_id=nsr_id,
7071 ns_state=None,
7072 current_operation="IDLE",
7073 current_operation_id=None,
7074 )
7075 if tasks_dict_info:
7076 stage[1] = "Waiting for instantiate pending tasks."
7077 self.logger.debug(logging_text + stage[1])
7078 exc = await self._wait_for_tasks(
7079 logging_text,
7080 tasks_dict_info,
7081 self.timeout_ns_deploy,
7082 stage,
7083 nslcmop_id,
7084 nsr_id=nsr_id,
7085 )
7086 if exc:
7087 db_nslcmop_update[
7088 "detailed-status"
7089 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7090 nslcmop_operation_state = "FAILED"
7091 if db_nsr:
7092 db_nsr_update["operational-status"] = old_operational_status
7093 db_nsr_update["config-status"] = old_config_status
7094 db_nsr_update["detailed-status"] = ""
7095 if scale_process:
7096 if "VCA" in scale_process:
7097 db_nsr_update["config-status"] = "failed"
7098 if "RO" in scale_process:
7099 db_nsr_update["operational-status"] = "failed"
7100 db_nsr_update[
7101 "detailed-status"
7102 ] = "FAILED scaling nslcmop={} {}: {}".format(
7103 nslcmop_id, step, exc
7104 )
7105 else:
7106 error_description_nslcmop = None
7107 nslcmop_operation_state = "COMPLETED"
7108 db_nslcmop_update["detailed-status"] = "Done"
7109
7110 self._write_op_status(
7111 op_id=nslcmop_id,
7112 stage="",
7113 error_message=error_description_nslcmop,
7114 operation_state=nslcmop_operation_state,
7115 other_update=db_nslcmop_update,
7116 )
7117 if db_nsr:
7118 self._write_ns_status(
7119 nsr_id=nsr_id,
7120 ns_state=None,
7121 current_operation="IDLE",
7122 current_operation_id=None,
7123 other_update=db_nsr_update,
7124 )
7125
7126 if nslcmop_operation_state:
7127 try:
7128 msg = {
7129 "nsr_id": nsr_id,
7130 "nslcmop_id": nslcmop_id,
7131 "operationState": nslcmop_operation_state,
7132 }
7133 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7134 except Exception as e:
7135 self.logger.error(
7136 logging_text + "kafka_write notification Exception {}".format(e)
7137 )
7138 self.logger.debug(logging_text + "Exit")
7139 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7140
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDU applications of an NS according to scaling_info.

        For each kdu entry: run terminate config primitives (on delete), then
        scale the K8s application, then run initial config primitives (on create).

        :param logging_text: prefix for log messages
        :param nsr_id: NS record _id (used to address the nsrs document)
        :param nsr_deployed: nsr _admin.deployed content (holds the K8s deployment list)
        :param db_vnfd: vnfd of the vnf that owns the kdus
        :param vca_id: id of the VCA used when executing config primitives
        :param scaling_info: dict with "kdu-create" or "kdu-delete" entries, each
            mapping kdu_name -> list of scaling data (member-vnf-index, scale,
            k8s-cluster-type, resource-name, type)
        """
        # NOTE(review): only one of kdu-create/kdu-delete is processed per call
        # (the first non-empty one) — confirm callers never set both at once
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # location inside the nsrs document where the K8s connector
                # writes progress/status updates for this deployment
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run terminate primitives directly only when there is no
                    # juju execution environment in charge of them
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives execute in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # scale the application itself (both create and delete paths)
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run initial primitives directly only when there is no
                    # juju execution environment in charge of them
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives execute in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7246
7247 async def _scale_ng_ro(
7248 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7249 ):
7250 nsr_id = db_nslcmop["nsInstanceId"]
7251 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7252 db_vnfrs = {}
7253
7254 # read from db: vnfd's for every vnf
7255 db_vnfds = []
7256
7257 # for each vnf in ns, read vnfd
7258 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7259 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7260 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7261 # if we haven't this vnfd, read it from db
7262 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7263 # read from db
7264 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7265 db_vnfds.append(vnfd)
7266 n2vc_key = self.n2vc.get_public_key()
7267 n2vc_key_list = [n2vc_key]
7268 self.scale_vnfr(
7269 db_vnfr,
7270 vdu_scaling_info.get("vdu-create"),
7271 vdu_scaling_info.get("vdu-delete"),
7272 mark_delete=True,
7273 )
7274 # db_vnfr has been updated, update db_vnfrs to use it
7275 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7276 await self._instantiate_ng_ro(
7277 logging_text,
7278 nsr_id,
7279 db_nsd,
7280 db_nsr,
7281 db_nslcmop,
7282 db_vnfrs,
7283 db_vnfds,
7284 n2vc_key_list,
7285 stage=stage,
7286 start_deploy=time(),
7287 timeout_ns_deploy=self.timeout_ns_deploy,
7288 )
7289 if vdu_scaling_info.get("vdu-delete"):
7290 self.scale_vnfr(
7291 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7292 )
7293
7294 async def extract_prometheus_scrape_jobs(
7295 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7296 ):
7297 # look if exist a file called 'prometheus*.j2' and
7298 artifact_content = self.fs.dir_ls(artifact_path)
7299 job_file = next(
7300 (
7301 f
7302 for f in artifact_content
7303 if f.startswith("prometheus") and f.endswith(".j2")
7304 ),
7305 None,
7306 )
7307 if not job_file:
7308 return
7309 with self.fs.file_open((artifact_path, job_file), "r") as f:
7310 job_data = f.read()
7311
7312 # TODO get_service
7313 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7314 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7315 host_port = "80"
7316 vnfr_id = vnfr_id.replace("-", "")
7317 variables = {
7318 "JOB_NAME": vnfr_id,
7319 "TARGET_IP": target_ip,
7320 "EXPORTER_POD_IP": host_name,
7321 "EXPORTER_POD_PORT": host_port,
7322 }
7323 job_list = parse_job(job_data, variables)
7324 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7325 for job in job_list:
7326 if (
7327 not isinstance(job.get("job_name"), str)
7328 or vnfr_id not in job["job_name"]
7329 ):
7330 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7331 job["nsr_id"] = nsr_id
7332 job["vnfr_id"] = vnfr_id
7333 return job_list
7334
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild a single VDU instance through NG-RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation id (used for HA locking and status reporting)
        :param vnf_id: vnfr _id of the vnf that owns the target vdu
        :param additional_param: dict with at least "vdu_id" and "count-index"
            selecting the target vdu instance
        :param operation_type: RO operate action name (start/stop/rebuild)
        :return: ("COMPLETED", "Done") on success, ("FAILED", <error text>) on error
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            # locate the target vdur by vdu-id-ref and count-index
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first key of vim_info identifies the target VIM
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO reports the action finished (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # only reached on error: every except branch above sets exc
        return "FAILED", "Error in operate VNF {}".format(exc)
7421
7422 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7423 """
7424 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7425
7426 :param: vim_account_id: VIM Account ID
7427
7428 :return: (cloud_name, cloud_credential)
7429 """
7430 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7431 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7432
7433 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7434 """
7435 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7436
7437 :param: vim_account_id: VIM Account ID
7438
7439 :return: (cloud_name, cloud_credential)
7440 """
7441 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7442 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7443
7444 async def migrate(self, nsr_id, nslcmop_id):
7445 """
7446 Migrate VNFs and VDUs instances in a NS
7447
7448 :param: nsr_id: NS Instance ID
7449 :param: nslcmop_id: nslcmop ID of migrate
7450
7451 """
7452 # Try to lock HA task here
7453 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7454 if not task_is_locked_by_me:
7455 return
7456 logging_text = "Task ns={} migrate ".format(nsr_id)
7457 self.logger.debug(logging_text + "Enter")
7458 # get all needed from database
7459 db_nslcmop = None
7460 db_nslcmop_update = {}
7461 nslcmop_operation_state = None
7462 db_nsr_update = {}
7463 target = {}
7464 exc = None
7465 # in case of error, indicates what part of scale was failed to put nsr at error status
7466 start_deploy = time()
7467
7468 try:
7469 # wait for any previous tasks in process
7470 step = "Waiting for previous operations to terminate"
7471 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7472
7473 self._write_ns_status(
7474 nsr_id=nsr_id,
7475 ns_state=None,
7476 current_operation="MIGRATING",
7477 current_operation_id=nslcmop_id,
7478 )
7479 step = "Getting nslcmop from database"
7480 self.logger.debug(
7481 step + " after having waited for previous tasks to be completed"
7482 )
7483 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7484 migrate_params = db_nslcmop.get("operationParams")
7485
7486 target = {}
7487 target.update(migrate_params)
7488 desc = await self.RO.migrate(nsr_id, target)
7489 self.logger.debug("RO return > {}".format(desc))
7490 action_id = desc["action_id"]
7491 await self._wait_ng_ro(
7492 nsr_id,
7493 action_id,
7494 nslcmop_id,
7495 start_deploy,
7496 self.timeout_migrate,
7497 operation="migrate",
7498 )
7499 except (ROclient.ROClientException, DbException, LcmException) as e:
7500 self.logger.error("Exit Exception {}".format(e))
7501 exc = e
7502 except asyncio.CancelledError:
7503 self.logger.error("Cancelled Exception while '{}'".format(step))
7504 exc = "Operation was cancelled"
7505 except Exception as e:
7506 exc = traceback.format_exc()
7507 self.logger.critical(
7508 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7509 )
7510 finally:
7511 self._write_ns_status(
7512 nsr_id=nsr_id,
7513 ns_state=None,
7514 current_operation="IDLE",
7515 current_operation_id=None,
7516 )
7517 if exc:
7518 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7519 nslcmop_operation_state = "FAILED"
7520 else:
7521 nslcmop_operation_state = "COMPLETED"
7522 db_nslcmop_update["detailed-status"] = "Done"
7523 db_nsr_update["detailed-status"] = "Done"
7524
7525 self._write_op_status(
7526 op_id=nslcmop_id,
7527 stage="",
7528 error_message="",
7529 operation_state=nslcmop_operation_state,
7530 other_update=db_nslcmop_update,
7531 )
7532 if nslcmop_operation_state:
7533 try:
7534 msg = {
7535 "nsr_id": nsr_id,
7536 "nslcmop_id": nslcmop_id,
7537 "operationState": nslcmop_operation_state,
7538 }
7539 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7540 except Exception as e:
7541 self.logger.error(
7542 logging_text + "kafka_write notification Exception {}".format(e)
7543 )
7544 self.logger.debug(logging_text + "Exit")
7545 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7546
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Launches RO healing as a parallel task, then (re)deploys N2VC execution
        environments for each VNF/VDU listed in the operation's healVnfData,
        and finally waits for all pending tasks before reporting the result.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # remembered so they can be restored if the heal fails
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # RO healing runs in parallel; it is awaited together with the
            # N2VC tasks in the finally block via _wait_for_tasks
            step = "Sending heal order to VIM"
            task_ro = asyncio.ensure_future(
                self.heal_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    db_nslcmop=db_nslcmop,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            tasks_dict_info[task_ro] = "Healing at VIM"

            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # no explicit vdu list: build one healing entry per
                        # existing vdur of this vnf
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf[
                            "additionalParams"
                        ].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): if no vdur matched, target_instance is
                            # None and the .get below raises AttributeError —
                            # confirm vdu_name/vdu_index always match a vdur here
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the RO and N2VC tasks launched above; _wait_for_tasks
            # returns an error description if any of them failed
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore pre-heal statuses, then mark failed parts
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7837
7838 async def heal_RO(
7839 self,
7840 logging_text,
7841 nsr_id,
7842 db_nslcmop,
7843 stage,
7844 ):
7845 """
7846 Heal at RO
7847 :param logging_text: preffix text to use at logging
7848 :param nsr_id: nsr identity
7849 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7850 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7851 :return: None or exception
7852 """
7853
7854 def get_vim_account(vim_account_id):
7855 nonlocal db_vims
7856 if vim_account_id in db_vims:
7857 return db_vims[vim_account_id]
7858 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7859 db_vims[vim_account_id] = db_vim
7860 return db_vim
7861
7862 try:
7863 start_heal = time()
7864 ns_params = db_nslcmop.get("operationParams")
7865 if ns_params and ns_params.get("timeout_ns_heal"):
7866 timeout_ns_heal = ns_params["timeout_ns_heal"]
7867 else:
7868 timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
7869
7870 db_vims = {}
7871
7872 nslcmop_id = db_nslcmop["_id"]
7873 target = {
7874 "action_id": nslcmop_id,
7875 }
7876 self.logger.warning(
7877 "db_nslcmop={} and timeout_ns_heal={}".format(
7878 db_nslcmop, timeout_ns_heal
7879 )
7880 )
7881 target.update(db_nslcmop.get("operationParams", {}))
7882
7883 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7884 desc = await self.RO.recreate(nsr_id, target)
7885 self.logger.debug("RO return > {}".format(desc))
7886 action_id = desc["action_id"]
7887 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7888 await self._wait_ng_ro(
7889 nsr_id,
7890 action_id,
7891 nslcmop_id,
7892 start_heal,
7893 timeout_ns_heal,
7894 stage,
7895 operation="healing",
7896 )
7897
7898 # Updating NSR
7899 db_nsr_update = {
7900 "_admin.deployed.RO.operational-status": "running",
7901 "detailed-status": " ".join(stage),
7902 }
7903 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7904 self._write_op_status(nslcmop_id, stage)
7905 self.logger.debug(
7906 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7907 )
7908
7909 except Exception as e:
7910 stage[2] = "ERROR healing at VIM"
7911 # self.set_vnfr_at_error(db_vnfrs, str(e))
7912 self.logger.error(
7913 "Error healing at VIM {}".format(e),
7914 exc_info=not isinstance(
7915 e,
7916 (
7917 ROclient.ROClientException,
7918 LcmException,
7919 DbException,
7920 NgRoException,
7921 ),
7922 ),
7923 )
7924 raise
7925
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create/reuse the VCA entry in the NSR and launch a heal_N2VC task.

        One asyncio task is launched per execution environment found in
        ``descriptor_config`` (either its "execution-environment-list" or, for
        ns charms, a bare "juju" section).  Items that are neither juju nor
        helm are skipped.  Each launched task is registered in
        ``self.lcm_tasks`` and recorded in ``task_instantiation_info``.

        :param logging_text: prefix for all log messages of this operation
        :param db_nsr: NS record (``_admin.deployed.VCA`` is read and, when a
            new entry is created, appended to in memory and persisted)
        :param db_vnfr: VNF record of the element being healed (may be None
            for ns-level charms)
        :param descriptor_config: configuration section of the descriptor
            holding the execution-environment definitions
        :param task_instantiation_info: dict task -> human-readable name,
            updated in place
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Determine the list of execution environments to process.
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type/vca_name from the ee descriptor: juju charms
            # (proxy, native or k8s-proxy) or helm charts (v2 / v3).
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an already deployed VCA entry matching this element;
            # -1 so that a first entry gets index 0 when the list is empty
            # (the for/else below increments it when nothing matches).
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # point past the last existing entry
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory record consistent with what was persisted
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            # NOTE(review): task name reuses the "instantiate_N2VC-" prefix of
            # the deploy path even though this is the healing flow.
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8087
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal the execution environment of one VCA and optionally re-run Day-1.

        Runs as an asyncio task launched by ``_heal_n2vc``.  For native charms
        it waits for the VM to be up, registers the execution environment in
        the VCA and installs the configuration software.  For proxy charms /
        helm it retrieves the EE public ssh key (when required) and, after RO
        finishes the healing operation, injects it into the VM.  If the
        operation parameter "run-day1" is set, the initial config primitives
        are executed again.

        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param config_descriptor: configuration section of the descriptor
            (initial-config-primitive, config-access, ...)
        :param ee_config_descriptor: the single execution-environment item
            being healed
        :raises LcmException: wrapping any failure; the configurationStatus of
            this VCA is set to BROKEN before raising.
        """
        nsr_id = db_nsr["_id"]
        # dotted DB path prefix for all updates of this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            # VCA namespace: "<nsi>.<ns>[.<vnf>-<idx>[.<vdu>-<idx>]]"
            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            # native charms always use index 0; other types use the vdu
            # replica index
            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace down to VNF, VDU or KDU level
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                # SOL004-style package without pkg-dir: artifacts under Scripts/
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                # no key injection here (user/pub_key None): only wait for the
                # management IP of the healed VM
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive (if present) provides the initial
                # charm configuration values
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

            self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                for job in prometheus_jobs:
                    self.db.set_one(
                        "prometheus_jobs",
                        {"job_name": job["job_name"]},
                        job,
                        upsert=True,
                        fail_on_empty=False,
                    )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # known/expected exception types are not traced with stack;
            # anything else is logged with full traceback
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8497
8498 async def _wait_heal_ro(
8499 self,
8500 nsr_id,
8501 timeout=600,
8502 ):
8503 start_time = time()
8504 while time() <= start_time + timeout:
8505 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8506 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8507 "operational-status"
8508 ]
8509 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8510 if operational_status_ro != "healing":
8511 break
8512 await asyncio.sleep(15, loop=self.loop)
8513 else: # timeout_ns_deploy
8514 raise NgRoException("Timeout waiting ns to deploy")
8515
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        Delegates the whole operation to RO (``self.RO.vertical_scale``) and
        waits for it to complete, updating the operation status in the
        database and publishing a "verticalscaled" message on the "ns" kafka
        topic at the end.  Exceptions are absorbed and reflected in the
        operation state ("FAILED"/"COMPLETED"); nothing is raised to the
        caller.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical scale operation

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # RO target is built directly from the operation parameters
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until RO reports the action finished (or timeout)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep the full traceback as the failure detail
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # notify subscribers (e.g. NBI) about the operation result;
                # a kafka failure is logged but does not fail the operation
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")