Fix Bug 2187: Optional atomic flag in helm upgrades
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.nsr import (
38 get_deployed_kdu,
39 get_deployed_vca,
40 get_deployed_vca_list,
41 get_nsd,
42 )
43 from osm_lcm.data_utils.vca import (
44 DeployedComponent,
45 DeployedK8sResource,
46 DeployedVCA,
47 EELevel,
48 Relation,
49 EERelation,
50 safe_get_ee_relation,
51 )
52 from osm_lcm.ng_ro import NgRoClient, NgRoException
53 from osm_lcm.lcm_utils import (
54 LcmException,
55 LcmExceptionNoMgmtIP,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 )
63 from osm_lcm.data_utils.nsd import (
64 get_ns_configuration_relation_list,
65 get_vnf_profile,
66 get_vnf_profiles,
67 )
68 from osm_lcm.data_utils.vnfd import (
69 get_kdu,
70 get_kdu_services,
71 get_relation_list,
72 get_vdu_list,
73 get_vdu_profile,
74 get_ee_sorted_initial_config_primitive_list,
75 get_ee_sorted_terminate_config_primitive_list,
76 get_kdu_list,
77 get_virtual_link_profiles,
78 get_vdu,
79 get_configuration,
80 get_vdu_index,
81 get_scaling_aspect,
82 get_number_of_instances,
83 get_juju_ee_ref,
84 get_kdu_resource_profile,
85 find_software_version,
86 )
87 from osm_lcm.data_utils.list_utils import find_in_list
88 from osm_lcm.data_utils.vnfr import (
89 get_osm_params,
90 get_vdur_index,
91 get_kdur,
92 get_volumes_from_instantiation_params,
93 )
94 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
95 from osm_lcm.data_utils.database.vim_account import VimAccountDB
96 from n2vc.definitions import RelationEndpoint
97 from n2vc.k8s_helm_conn import K8sHelmConnector
98 from n2vc.k8s_helm3_conn import K8sHelm3Connector
99 from n2vc.k8s_juju_conn import K8sJujuConnector
100
101 from osm_common.dbbase import DbException
102 from osm_common.fsbase import FsException
103
104 from osm_lcm.data_utils.database.database import Database
105 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
106
107 from n2vc.n2vc_juju_conn import N2VCJujuConnector
108 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
109
110 from osm_lcm.lcm_helm_conn import LCMHelmConn
111 from osm_lcm.osm_config import OsmConfigBuilder
112 from osm_lcm.prometheus import parse_job
113
114 from copy import copy, deepcopy
115 from time import time
116 from uuid import uuid4
117
118 from random import randint
119
120 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
121
122
class NsLcm(LcmBase):
    """Network Service lifecycle manager.

    Orchestrates NS operations (instantiate, terminate, scale, heal, update,
    migrate, ...) through the RO client, the N2VC/juju connectors and the
    k8s (helm/juju) connectors created in __init__.
    """

    # class-level timeouts, in seconds
    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a deployed charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operating (start/stop/rebuild) vnfs
    timeout_verticalscale = 1800  # default global timeout for Vertical Scaling
    # sentinel result codes for sub-operation lookups (negative so they can
    # never collide with a valid list index)
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
143
def __init__(self, msg, lcm_tasks, config, loop):
    """
    Init, Connect to database, filesystem storage, and messaging
    :param msg: message-bus handler, forwarded to LcmBase
    :param lcm_tasks: task registry used by the LCM to track async operations
    :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
    :param loop: asyncio event loop shared by all connectors
    :return: None
    """
    super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

    self.db = Database().instance.db
    self.fs = Filesystem().instance.fs
    self.loop = loop
    self.lcm_tasks = lcm_tasks
    self.timeout = config["timeout"]
    self.ro_config = config["ro_config"]
    self.ng_ro = config["ro_config"].get("ng")
    # shallow copy so later additions to vca_config do not leak into the shared config dict
    self.vca_config = config["VCA"].copy()

    # create N2VC connector (juju-based charms); pushes status changes back via _on_update_n2vc_db
    self.n2vc = N2VCJujuConnector(
        log=self.logger,
        loop=self.loop,
        on_update_db=self._on_update_n2vc_db,
        fs=self.fs,
        db=self.db,
    )

    # connector for helm-based execution environments
    self.conn_helm_ee = LCMHelmConn(
        log=self.logger,
        loop=self.loop,
        vca_config=self.vca_config,
        on_update_db=self._on_update_n2vc_db,
    )

    # helm v2 connector; no DB-update callback registered (on_update_db=None)
    self.k8sclusterhelm2 = K8sHelmConnector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        helm_command=self.vca_config.get("helmpath"),
        log=self.logger,
        on_update_db=None,
        fs=self.fs,
        db=self.db,
    )

    # helm v3 connector; no DB-update callback registered (on_update_db=None)
    self.k8sclusterhelm3 = K8sHelm3Connector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        helm_command=self.vca_config.get("helm3path"),
        fs=self.fs,
        log=self.logger,
        db=self.db,
        on_update_db=None,
    )

    # juju-bundle connector; status changes pushed via _on_update_k8s_db
    self.k8sclusterjuju = K8sJujuConnector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        juju_command=self.vca_config.get("jujupath"),
        log=self.logger,
        loop=self.loop,
        on_update_db=self._on_update_k8s_db,
        fs=self.fs,
        db=self.db,
    )

    # kdu/descriptor type -> k8s connector; note plain "chart" maps to helm v3
    self.k8scluster_map = {
        "helm-chart": self.k8sclusterhelm2,
        "helm-chart-v3": self.k8sclusterhelm3,
        "chart": self.k8sclusterhelm3,
        "juju-bundle": self.k8sclusterjuju,
        "juju": self.k8sclusterjuju,
    }

    # charm/ee type -> execution-environment connector
    self.vca_map = {
        "lxc_proxy_charm": self.n2vc,
        "native_charm": self.n2vc,
        "k8s_proxy_charm": self.n2vc,
        "helm": self.conn_helm_ee,
        "helm-v3": self.conn_helm_ee,
    }

    # create RO client
    self.RO = NgRoClient(self.loop, **self.ro_config)

    # lcm operation type -> RO status-retrieval coroutine (healing uses recreate_status)
    self.op_status_map = {
        "instantiation": self.RO.status,
        "termination": self.RO.status,
        "migrate": self.RO.status,
        "healing": self.RO.recreate_status,
        "verticalscale": self.RO.status,
        "start_stop_rebuild": self.RO.status,
    }
232
@staticmethod
def increment_ip_mac(ip_mac, vm_index=1):
    """Return ip_mac with its last numeric group increased by vm_index.

    Supports dotted IPv4 (last octet incremented in decimal), and
    colon-separated MAC/IPv6 (last group incremented in hex, preserving
    its zero-padded width). Non-string inputs are returned unchanged;
    anything unparsable yields None.

    :param ip_mac: address string (IPv4, IPv6 or MAC), or any non-string
    :param vm_index: amount to add to the last group (default 1)
    :return: incremented address string, the original non-string value, or None
    """
    if not isinstance(ip_mac, str):
        return ip_mac
    try:
        # dotted notation first (IPv4): bump the last group in decimal
        sep_pos = ip_mac.rfind(".")
        if sep_pos > 0:
            head = ip_mac[: sep_pos + 1]
            tail = ip_mac[sep_pos + 1 :]
            return "{}{}".format(head, int(tail) + vm_index)
        # colon notation (MAC or IPv6): bump the last group in hex,
        # keeping the same zero-padded width (2 for MAC, 4 for IPv6)
        sep_pos = ip_mac.rfind(":")
        if sep_pos > 0:
            head = ip_mac[: sep_pos + 1]
            tail = ip_mac[sep_pos + 1 :]
            return "{}{:0{}x}".format(head, int(tail, 16) + vm_index, len(tail))
    except Exception:
        # unparsable numeric group -> fall through to None
        pass
    return None
254
def _on_update_ro_db(self, nsrs_id, ro_descriptor):
    """Persist the RO deployment descriptor into the nsr record.

    Best effort: any database error is logged as a warning and swallowed.

    :param nsrs_id: id of the nsr record to update
    :param ro_descriptor: deployment status descriptor reported by RO
    """
    try:
        # TODO filter RO descriptor fields...
        self.update_db_2("nsrs", nsrs_id, {"deploymentStatus": ro_descriptor})
    except Exception as e:
        self.logger.warn(
            "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
        )
272
async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
    """Callback fired by N2VC when juju data changes: refresh NS status in DB.

    Re-reads the nsr, queries the full juju status of the NS namespace,
    stores it under "vcaStatus", tries to reconcile the per-VCA
    configurationStatus, and may flip nsState between READY and DEGRADED.
    Best effort: unexpected errors are logged and swallowed.

    :param table: db table the change refers to (unused; nsr is re-read)
    :param filter: db filter; only "_id" is used to locate the nsr
    :param path: dotted path of the changed data; its last component is the VCA index
    :param updated_data: changed data (unused; the full status is re-queried)
    :param vca_id: optional VCA account id, passed through to n2vc
    """

    # remove last dot from path (if exists)
    if path.endswith("."):
        path = path[:-1]

    # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
    #                   .format(table, filter, path, updated_data))
    try:

        nsr_id = filter.get("_id")

        # read ns record from database
        nsr = self.db.get_one(table="nsrs", q_filter=filter)
        current_ns_status = nsr.get("nsState")

        # get vca status for NS
        status_dict = await self.n2vc.get_status(
            namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
        )

        # vcaStatus
        db_dict = dict()
        db_dict["vcaStatus"] = status_dict

        # update configurationStatus for this VCA
        try:
            # the tail of the path selects the entry in _admin.deployed.VCA
            vca_index = int(path[path.rfind(".") + 1 :])

            vca_list = deep_get(
                target_dict=nsr, key_list=("_admin", "deployed", "VCA")
            )
            vca_status = vca_list[vca_index].get("status")

            configuration_status_list = nsr.get("configurationStatus")
            config_status = configuration_status_list[vca_index].get("status")

            # NOTE(review): db_dict["configurationStatus"] is never initialized,
            # so both assignments below raise KeyError, which is swallowed by the
            # except clause — the configurationStatus reconciliation never reaches
            # the DB. TODO confirm and fix (e.g. use a dotted
            # "configurationStatus.{}.status" key instead).
            if config_status == "BROKEN" and vca_status != "failed":
                db_dict["configurationStatus"][vca_index] = "READY"
            elif config_status != "BROKEN" and vca_status == "failed":
                db_dict["configurationStatus"][vca_index] = "BROKEN"
        except Exception as e:
            # not update configurationStatus
            self.logger.debug("Error updating vca_index (ignore): {}".format(e))

        # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
        # if nsState = 'DEGRADED' check if all is OK
        is_degraded = False
        if current_ns_status in ("READY", "DEGRADED"):
            error_description = ""
            # check machines
            if status_dict.get("machines"):
                for machine_id in status_dict.get("machines"):
                    machine = status_dict.get("machines").get(machine_id)
                    # check machine agent-status
                    if machine.get("agent-status"):
                        s = machine.get("agent-status").get("status")
                        if s != "started":
                            is_degraded = True
                            error_description += (
                                "machine {} agent-status={} ; ".format(
                                    machine_id, s
                                )
                            )
                    # check machine instance status
                    if machine.get("instance-status"):
                        s = machine.get("instance-status").get("status")
                        if s != "running":
                            is_degraded = True
                            error_description += (
                                "machine {} instance-status={} ; ".format(
                                    machine_id, s
                                )
                            )
            # check applications
            if status_dict.get("applications"):
                for app_id in status_dict.get("applications"):
                    app = status_dict.get("applications").get(app_id)
                    # check application status
                    if app.get("status"):
                        s = app.get("status").get("status")
                        if s != "active":
                            is_degraded = True
                            error_description += (
                                "application {} status={} ; ".format(app_id, s)
                            )

            if error_description:
                db_dict["errorDescription"] = error_description
            # flip nsState only on an actual transition in either direction
            if current_ns_status == "READY" and is_degraded:
                db_dict["nsState"] = "DEGRADED"
            if current_ns_status == "DEGRADED" and not is_degraded:
                db_dict["nsState"] = "READY"

        # write to database
        self.update_db_2("nsrs", nsr_id, db_dict)

    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
374
async def _on_update_k8s_db(
    self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
):
    """
    Updating vca status in NSR record
    :param cluster_uuid: UUID of a k8s cluster
    :param kdu_instance: The unique name of the KDU instance
    :param filter: To get nsr_id
    :param vca_id: optional VCA account id, forwarded to the connector
    :param cluster_type: The cluster type (juju, k8s)
    :return: none
    """

    # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
    #                   .format(cluster_uuid, kdu_instance, filter))

    nsr_id = filter.get("_id")
    try:
        # ask the matching k8s connector for the full KDU status
        connector = self.k8scluster_map[cluster_type]
        vca_status = await connector.status_kdu(
            cluster_uuid=cluster_uuid,
            kdu_instance=kdu_instance,
            yaml_format=False,
            complete_status=True,
            vca_id=vca_id,
        )

        self.logger.debug(
            f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
        )

        # store the status under the nsr id and write it to database
        self.update_db_2("nsrs", nsr_id, {"vcaStatus": {nsr_id: vca_status}})
    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
414
@staticmethod
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
    """Render a cloud-init Jinja2 template with the instantiation parameters.

    :param cloud_init_text: template text (from cloud-init or cloud-init-file)
    :param additional_params: variables for the template (may be None)
    :param vnfd_id: vnfd id, used only for error reporting
    :param vdu_id: vdu id, used only for error reporting
    :return: rendered cloud-init text
    :raises LcmException: on undefined variables or template errors
    """
    # StrictUndefined makes missing variables fail loudly instead of
    # silently rendering empty strings
    jinja_env = Environment(
        undefined=StrictUndefined,
        autoescape=select_autoescape(default_for_string=True, default=True),
    )
    try:
        return jinja_env.from_string(cloud_init_text).render(additional_params or {})
    except UndefinedError as e:
        raise LcmException(
            "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
            "file, must be provided in the instantiation parameters inside the "
            "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
        )
    except (TemplateError, TemplateNotFound) as e:
        raise LcmException(
            "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                vnfd_id, vdu_id, e
            )
        )
436
def _get_vdu_cloud_init_content(self, vdu, vnfd):
    """Return the raw cloud-init text for a vdu, or None if it has none.

    Prefers "cloud-init-file" (read through self.fs from the package
    storage area) over the inline "cloud-init" attribute.

    :param vdu: vdu descriptor dict
    :param vnfd: vnfd descriptor dict (provides _admin.storage for file lookup)
    :return: cloud-init text, or None when the vdu defines neither source
    :raises LcmException: when the cloud-init file cannot be read
    """
    cloud_init_file = None
    try:
        if vdu.get("cloud-init-file"):
            storage = vnfd["_admin"]["storage"]
            # packages with a pkg-dir keep cloud-init under <pkg-dir>/cloud_init,
            # otherwise it lives under Scripts/cloud_init
            if storage["pkg-dir"]:
                cloud_init_file = "{}/{}/cloud_init/{}".format(
                    storage["folder"],
                    storage["pkg-dir"],
                    vdu["cloud-init-file"],
                )
            else:
                cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                    storage["folder"],
                    vdu["cloud-init-file"],
                )
            with self.fs.file_open(cloud_init_file, "r") as ci_file:
                return ci_file.read()
        if vdu.get("cloud-init"):
            return vdu["cloud-init"]
        return None
    except FsException as e:
        raise LcmException(
            "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
                vnfd["id"], vdu["id"], cloud_init_file, e
            )
        )
465
def _get_vdu_additional_params(self, db_vnfr, vdu_id):
    """Return the parsed additionalParams of the first vdur matching vdu_id.

    :param db_vnfr: vnfr content with a "vdur" list
    :param vdu_id: vdu-id-ref to look for
    :return: additionalParams with YAML strings parsed (empty-safe)
    """
    matching_vdur = {}
    for candidate in db_vnfr.get("vdur"):
        if candidate["vdu-id-ref"] == vdu_id:
            matching_vdur = candidate
            break
    return parse_yaml_strings(matching_vdur.get("additionalParams"))
472
def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
    """
    Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
    :param vnfd: input vnfd
    :param new_id: overrides vnf id if provided
    :param additionalParams: Instantiation params for VNFs provided
    :param nsrId: Id of the NSR
    :return: copy of vnfd
    """
    vnfd_RO = deepcopy(vnfd)
    # remove unused by RO configuration, monitoring, scaling and internal keys
    for unused_key in (
        "_id",
        "_admin",
        "monitoring-param",
        "scaling-group-descriptor",
        "kdu",
        "k8s-cluster",
    ):
        vnfd_RO.pop(unused_key, None)
    if new_id:
        vnfd_RO["id"] = new_id

    # cloud-init is rendered by LCM (Jinja2), so RO must not see those keys
    for vdu in get_iterable(vnfd_RO, "vdu"):
        vdu.pop("cloud-init-file", None)
        vdu.pop("cloud-init", None)
    return vnfd_RO
498
@staticmethod
def ip_profile_2_RO(ip_profile):
    """Translate an OSM IM ip-profile dict into the RO representation.

    Renames dns-server -> dns-address (flattening address dicts),
    normalizes ip-version values, and renames dhcp-params -> dhcp.
    The input dict is not modified.

    :param ip_profile: OSM IM ip-profile dict
    :return: new dict in RO format
    """
    RO_ip_profile = deepcopy(ip_profile)
    if "dns-server" in RO_ip_profile:
        dns = RO_ip_profile.pop("dns-server")
        if isinstance(dns, list):
            RO_ip_profile["dns-address"] = [entry["address"] for entry in dns]
        else:
            RO_ip_profile["dns-address"] = dns
    # RO expects the capitalized spellings
    version_map = {"ipv4": "IPv4", "ipv6": "IPv6"}
    current_version = RO_ip_profile.get("ip-version")
    if current_version in version_map:
        RO_ip_profile["ip-version"] = version_map[current_version]
    if "dhcp-params" in RO_ip_profile:
        RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
    return RO_ip_profile
516
def _get_ro_vim_id_for_vim_account(self, vim_account):
    """Return the RO id deployed for a vim account.

    :param vim_account: vim account id (db key)
    :return: RO vim id from _admin.deployed.RO
    :raises LcmException: if the vim account is not ENABLED
    """
    db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
    operational_state = db_vim["_admin"]["operationalState"]
    if operational_state != "ENABLED":
        raise LcmException(
            "VIM={} is not available. operationalState={}".format(
                vim_account, operational_state
            )
        )
    return db_vim["_admin"]["deployed"]["RO"]
527
def get_ro_wim_id_for_wim_account(self, wim_account):
    """Return the RO account id for a wim account.

    :param wim_account: wim account id (str) or an already-resolved value
    :return: RO-account id when given a str; otherwise wim_account unchanged
    :raises LcmException: if the wim account is not ENABLED
    """
    if not isinstance(wim_account, str):
        # non-string values are passed through untouched
        return wim_account
    db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
    operational_state = db_wim["_admin"]["operationalState"]
    if operational_state != "ENABLED":
        raise LcmException(
            "WIM={} is not available. operationalState={}".format(
                wim_account, operational_state
            )
        )
    return db_wim["_admin"]["deployed"]["RO-account"]
541
def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
    """Add or remove vdur records of a vnfr to reflect a scale operation.

    Scale-out clones the newest vdur of each vdu (or the saved
    "vdur-template" when scaling from 0). Scale-in either marks vdurs as
    DELETING (mark_delete=True) or pulls them from the database one by one;
    when going to 0 instances the last vdur is saved as the template.

    :param db_vnfr: vnfr content (dict); its "vdur" list is refreshed in place at the end
    :param vdu_create: dict vdu-id -> number of instances to add
    :param vdu_delete: dict vdu-id -> number of instances to remove
    :param mark_delete: if True only mark vdurs DELETING instead of pulling them
    :return: None
    :raises LcmException: when scaling out and neither a vdur nor a template exists
    """

    db_vdu_push_list = []
    template_vdur = []
    db_update = {"_admin.modified": time()}
    if vdu_create:
        for vdu_id, vdu_count in vdu_create.items():
            # use the newest existing vdur of this vdu as the clone source
            vdur = next(
                (
                    vdur
                    for vdur in reversed(db_vnfr["vdur"])
                    if vdur["vdu-id-ref"] == vdu_id
                ),
                None,
            )
            if not vdur:
                # Read the template saved in the db:
                self.logger.debug(
                    "No vdur in the database. Using the vdur-template to scale"
                )
                vdur_template = db_vnfr.get("vdur-template")
                if not vdur_template:
                    raise LcmException(
                        "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                            vdu_id
                        )
                    )
                vdur = vdur_template[0]
                # Delete a template from the database after using it
                self.db.set_one(
                    "vnfrs",
                    {"_id": db_vnfr["_id"]},
                    None,
                    pull={"vdur-template": {"_id": vdur["_id"]}},
                )
            for count in range(vdu_count):
                vdur_copy = deepcopy(vdur)
                # reset runtime state for the new instance
                vdur_copy["status"] = "BUILD"
                vdur_copy["status-detailed"] = None
                vdur_copy["ip-address"] = None
                vdur_copy["_id"] = str(uuid4())
                vdur_copy["count-index"] += count + 1
                vdur_copy["id"] = "{}-{}".format(
                    vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                )
                vdur_copy.pop("vim_info", None)
                for iface in vdur_copy["interfaces"]:
                    # fixed addresses are derived from the source vdur by
                    # incrementing; dynamic ones are left for the VIM to assign
                    if iface.get("fixed-ip"):
                        iface["ip-address"] = self.increment_ip_mac(
                            iface["ip-address"], count + 1
                        )
                    else:
                        iface.pop("ip-address", None)
                    if iface.get("fixed-mac"):
                        iface["mac-address"] = self.increment_ip_mac(
                            iface["mac-address"], count + 1
                        )
                    else:
                        iface.pop("mac-address", None)
                    if db_vnfr["vdur"]:
                        iface.pop(
                            "mgmt_vnf", None
                        )  # only first vdu can be managment of vnf
                db_vdu_push_list.append(vdur_copy)
                # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
    if vdu_delete:
        if len(db_vnfr["vdur"]) == 1:
            # The scale will move to 0 instances
            self.logger.debug(
                "Scaling to 0 !, creating the template with the last vdur"
            )
            template_vdur = [db_vnfr["vdur"][0]]
        for vdu_id, vdu_count in vdu_delete.items():
            if mark_delete:
                # mark the newest vdu_count instances of this vdu as DELETING
                indexes_to_delete = [
                    iv[0]
                    for iv in enumerate(db_vnfr["vdur"])
                    if iv[1]["vdu-id-ref"] == vdu_id
                ]
                db_update.update(
                    {
                        "vdur.{}.status".format(i): "DELETING"
                        for i in indexes_to_delete[-vdu_count:]
                    }
                )
            else:
                # it must be deleted one by one because common.db does not allow otherwise
                vdus_to_delete = [
                    v
                    for v in reversed(db_vnfr["vdur"])
                    if v["vdu-id-ref"] == vdu_id
                ]
                for vdu in vdus_to_delete[:vdu_count]:
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur": {"_id": vdu["_id"]}},
                    )
    db_push = {}
    if db_vdu_push_list:
        db_push["vdur"] = db_vdu_push_list
    if template_vdur:
        db_push["vdur-template"] = template_vdur
    if not db_push:
        db_push = None
    db_vnfr["vdur-template"] = template_vdur
    self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
    # modify passed dictionary db_vnfr
    db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
    db_vnfr["vdur"] = db_vnfr_["vdur"]
653
def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
    """
    Updates database nsr with the RO info for the created vld
    :param ns_update_nsr: dictionary to be filled with the updated info
    :param db_nsr: content of db_nsr. This is also modified
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """

    for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
        # locate the RO net whose osm id matches this vld
        net_RO = next(
            (
                net
                for net in get_iterable(nsr_desc_RO, "nets")
                if net.get("ns_net_osm_id") == vld["id"]
            ),
            None,
        )
        if net_RO is None:
            raise LcmException(
                "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
            )
        vld["vim-id"] = net_RO.get("vim_net_id")
        vld["name"] = net_RO.get("vim_name")
        vld["status"] = net_RO.get("status")
        vld["status-detailed"] = net_RO.get("error_msg")
        ns_update_nsr["vld.{}".format(vld_index)] = vld
677
def set_vnfr_at_error(self, db_vnfrs, error_text):
    """Mark every vnfr, and each of its vdurs that has no status yet, as ERROR.

    Best effort: database errors are logged and swallowed.

    :param db_vnfrs: dict member-vnf-index -> vnfr content (modified in place)
    :param error_text: optional detail stored as vdur status-detailed
    """
    try:
        for db_vnfr in db_vnfrs.values():
            vnfr_update = {"status": "ERROR"}
            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                # vdurs that already carry a status are left untouched
                if "status" in vdur:
                    continue
                vdur["status"] = "ERROR"
                vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                if error_text:
                    vdur["status-detailed"] = str(error_text)
                    vnfr_update[
                        "vdur.{}.status-detailed".format(vdu_index)
                    ] = "ERROR"
            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
    except DbException as e:
        self.logger.error("Cannot update vnf. {}".format(e))
694
def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
    """
    Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
    :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vnf_index, db_vnfr in db_vnfrs.items():
        for vnf_RO in nsr_desc_RO["vnfs"]:
            if vnf_RO["member_vnf_index"] != vnf_index:
                continue
            vnfr_update = {}
            # take the first address when RO reports several separated by ";"
            if vnf_RO.get("ip_address"):
                db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                    "ip_address"
                ].split(";")[0]
            elif not db_vnfr.get("ip-address"):
                if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                    raise LcmExceptionNoMgmtIP(
                        "ns member_vnf_index '{}' has no IP address".format(
                            vnf_index
                        )
                    )

            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                vdur_RO_count_index = 0
                # PDUs are not deployed by the VIM, so RO has no info for them
                if vdur.get("pdu-type"):
                    continue
                for vdur_RO in get_iterable(vnf_RO, "vms"):
                    if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                        continue
                    # select the (count-index)-th RO vm with this vdu id:
                    # the counter only advances over same-id, non-matching replicas
                    if vdur["count-index"] != vdur_RO_count_index:
                        vdur_RO_count_index += 1
                        continue
                    vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                    if vdur_RO.get("ip_address"):
                        vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                    else:
                        vdur["ip-address"] = None
                    vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                    vdur["name"] = vdur_RO.get("vim_name")
                    vdur["status"] = vdur_RO.get("status")
                    vdur["status-detailed"] = vdur_RO.get("error_msg")
                    # copy per-interface addresses, matched by internal name
                    for ifacer in get_iterable(vdur, "interfaces"):
                        for interface_RO in get_iterable(vdur_RO, "interfaces"):
                            if ifacer["name"] == interface_RO.get("internal_name"):
                                ifacer["ip-address"] = interface_RO.get(
                                    "ip_address"
                                )
                                ifacer["mac-address"] = interface_RO.get(
                                    "mac_address"
                                )
                                break
                        else:
                            # for/else: interface missing from RO info
                            raise LcmException(
                                "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                "from VIM info".format(
                                    vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                )
                            )
                    vnfr_update["vdur.{}".format(vdu_index)] = vdur
                    break
                else:
                    # for/else: no RO vm matched this vdur replica
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                        "VIM info".format(
                            vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                        )
                    )

            for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                for net_RO in get_iterable(nsr_desc_RO, "nets"):
                    if vld["id"] != net_RO.get("vnf_net_osm_id"):
                        continue
                    vld["vim-id"] = net_RO.get("vim_net_id")
                    vld["name"] = net_RO.get("vim_name")
                    vld["status"] = net_RO.get("status")
                    vld["status-detailed"] = net_RO.get("error_msg")
                    vnfr_update["vld.{}".format(vld_index)] = vld
                    break
                else:
                    # for/else: no RO net matched this internal vld
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]
                        )
                    )

            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
            break

        else:
            # for/else: RO descriptor has no entry for this member-vnf-index
            raise LcmException(
                "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                    vnf_index
                )
            )
791
def _get_ns_config_info(self, nsr_id):
    """
    Generates a mapping between vnf,vdu elements and the N2VC id
    :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
    :return: a dictionary with {osm-config-mapping: {}} where its element contains:
        "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
        "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
    """
    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
    mapping = {}
    for vca in db_nsr["_admin"]["deployed"]["VCA"]:
        # entries without a member-vnf-index are NS-level charms: skip them
        if not vca["member-vnf-index"]:
            continue
        if vca["vdu_id"]:
            key = "{}.{}.{}".format(
                vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
            )
        else:
            key = vca["member-vnf-index"]
        mapping[key] = vca["application"]
    return {"osm-config-mapping": mapping}
816
817 async def _instantiate_ng_ro(
818 self,
819 logging_text,
820 nsr_id,
821 nsd,
822 db_nsr,
823 db_nslcmop,
824 db_vnfrs,
825 db_vnfds,
826 n2vc_key_list,
827 stage,
828 start_deploy,
829 timeout_ns_deploy,
830 ):
831
832 db_vims = {}
833
834 def get_vim_account(vim_account_id):
835 nonlocal db_vims
836 if vim_account_id in db_vims:
837 return db_vims[vim_account_id]
838 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
839 db_vims[vim_account_id] = db_vim
840 return db_vim
841
842 # modify target_vld info with instantiation parameters
843 def parse_vld_instantiation_params(
844 target_vim, target_vld, vld_params, target_sdn
845 ):
846 if vld_params.get("ip-profile"):
847 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
848 "ip-profile"
849 ]
850 if vld_params.get("provider-network"):
851 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
852 "provider-network"
853 ]
854 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
855 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
856 "provider-network"
857 ]["sdn-ports"]
858 if vld_params.get("wimAccountId"):
859 target_wim = "wim:{}".format(vld_params["wimAccountId"])
860 target_vld["vim_info"][target_wim] = {}
861 for param in ("vim-network-name", "vim-network-id"):
862 if vld_params.get(param):
863 if isinstance(vld_params[param], dict):
864 for vim, vim_net in vld_params[param].items():
865 other_target_vim = "vim:" + vim
866 populate_dict(
867 target_vld["vim_info"],
868 (other_target_vim, param.replace("-", "_")),
869 vim_net,
870 )
871 else: # isinstance str
872 target_vld["vim_info"][target_vim][
873 param.replace("-", "_")
874 ] = vld_params[param]
875 if vld_params.get("common_id"):
876 target_vld["common_id"] = vld_params.get("common_id")
877
878 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
879 def update_ns_vld_target(target, ns_params):
880 for vnf_params in ns_params.get("vnf", ()):
881 if vnf_params.get("vimAccountId"):
882 target_vnf = next(
883 (
884 vnfr
885 for vnfr in db_vnfrs.values()
886 if vnf_params["member-vnf-index"]
887 == vnfr["member-vnf-index-ref"]
888 ),
889 None,
890 )
891 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
892 for a_index, a_vld in enumerate(target["ns"]["vld"]):
893 target_vld = find_in_list(
894 get_iterable(vdur, "interfaces"),
895 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
896 )
897
898 vld_params = find_in_list(
899 get_iterable(ns_params, "vld"),
900 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
901 )
902 if target_vld:
903
904 if vnf_params.get("vimAccountId") not in a_vld.get(
905 "vim_info", {}
906 ):
907 target_vim_network_list = [
908 v for _, v in a_vld.get("vim_info").items()
909 ]
910 target_vim_network_name = next(
911 (
912 item.get("vim_network_name", "")
913 for item in target_vim_network_list
914 ),
915 "",
916 )
917
918 target["ns"]["vld"][a_index].get("vim_info").update(
919 {
920 "vim:{}".format(vnf_params["vimAccountId"]): {
921 "vim_network_name": target_vim_network_name,
922 }
923 }
924 )
925
926 if vld_params:
927 for param in ("vim-network-name", "vim-network-id"):
928 if vld_params.get(param) and isinstance(
929 vld_params[param], dict
930 ):
931 for vim, vim_net in vld_params[
932 param
933 ].items():
934 other_target_vim = "vim:" + vim
935 populate_dict(
936 target["ns"]["vld"][a_index].get(
937 "vim_info"
938 ),
939 (
940 other_target_vim,
941 param.replace("-", "_"),
942 ),
943 vim_net,
944 )
945
946 nslcmop_id = db_nslcmop["_id"]
947 target = {
948 "name": db_nsr["name"],
949 "ns": {"vld": []},
950 "vnf": [],
951 "image": deepcopy(db_nsr["image"]),
952 "flavor": deepcopy(db_nsr["flavor"]),
953 "action_id": nslcmop_id,
954 "cloud_init_content": {},
955 }
956 for image in target["image"]:
957 image["vim_info"] = {}
958 for flavor in target["flavor"]:
959 flavor["vim_info"] = {}
960 if db_nsr.get("affinity-or-anti-affinity-group"):
961 target["affinity-or-anti-affinity-group"] = deepcopy(
962 db_nsr["affinity-or-anti-affinity-group"]
963 )
964 for affinity_or_anti_affinity_group in target[
965 "affinity-or-anti-affinity-group"
966 ]:
967 affinity_or_anti_affinity_group["vim_info"] = {}
968
969 if db_nslcmop.get("lcmOperationType") != "instantiate":
970 # get parameters of instantiation:
971 db_nslcmop_instantiate = self.db.get_list(
972 "nslcmops",
973 {
974 "nsInstanceId": db_nslcmop["nsInstanceId"],
975 "lcmOperationType": "instantiate",
976 },
977 )[-1]
978 ns_params = db_nslcmop_instantiate.get("operationParams")
979 else:
980 ns_params = db_nslcmop.get("operationParams")
981 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
982 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
983
984 cp2target = {}
985 for vld_index, vld in enumerate(db_nsr.get("vld")):
986 target_vim = "vim:{}".format(ns_params["vimAccountId"])
987 target_vld = {
988 "id": vld["id"],
989 "name": vld["name"],
990 "mgmt-network": vld.get("mgmt-network", False),
991 "type": vld.get("type"),
992 "vim_info": {
993 target_vim: {
994 "vim_network_name": vld.get("vim-network-name"),
995 "vim_account_id": ns_params["vimAccountId"],
996 }
997 },
998 }
999 # check if this network needs SDN assist
1000 if vld.get("pci-interfaces"):
1001 db_vim = get_vim_account(ns_params["vimAccountId"])
1002 sdnc_id = db_vim["config"].get("sdn-controller")
1003 if sdnc_id:
1004 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1005 target_sdn = "sdn:{}".format(sdnc_id)
1006 target_vld["vim_info"][target_sdn] = {
1007 "sdn": True,
1008 "target_vim": target_vim,
1009 "vlds": [sdn_vld],
1010 "type": vld.get("type"),
1011 }
1012
1013 nsd_vnf_profiles = get_vnf_profiles(nsd)
1014 for nsd_vnf_profile in nsd_vnf_profiles:
1015 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1016 if cp["virtual-link-profile-id"] == vld["id"]:
1017 cp2target[
1018 "member_vnf:{}.{}".format(
1019 cp["constituent-cpd-id"][0][
1020 "constituent-base-element-id"
1021 ],
1022 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1023 )
1024 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1025
1026 # check at nsd descriptor, if there is an ip-profile
1027 vld_params = {}
1028 nsd_vlp = find_in_list(
1029 get_virtual_link_profiles(nsd),
1030 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1031 == vld["id"],
1032 )
1033 if (
1034 nsd_vlp
1035 and nsd_vlp.get("virtual-link-protocol-data")
1036 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1037 ):
1038 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1039 "l3-protocol-data"
1040 ]
1041 ip_profile_dest_data = {}
1042 if "ip-version" in ip_profile_source_data:
1043 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1044 "ip-version"
1045 ]
1046 if "cidr" in ip_profile_source_data:
1047 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1048 "cidr"
1049 ]
1050 if "gateway-ip" in ip_profile_source_data:
1051 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1052 "gateway-ip"
1053 ]
1054 if "dhcp-enabled" in ip_profile_source_data:
1055 ip_profile_dest_data["dhcp-params"] = {
1056 "enabled": ip_profile_source_data["dhcp-enabled"]
1057 }
1058 vld_params["ip-profile"] = ip_profile_dest_data
1059
1060 # update vld_params with instantiation params
1061 vld_instantiation_params = find_in_list(
1062 get_iterable(ns_params, "vld"),
1063 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1064 )
1065 if vld_instantiation_params:
1066 vld_params.update(vld_instantiation_params)
1067 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1068 target["ns"]["vld"].append(target_vld)
1069 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1070 update_ns_vld_target(target, ns_params)
1071
1072 for vnfr in db_vnfrs.values():
1073 vnfd = find_in_list(
1074 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1075 )
1076 vnf_params = find_in_list(
1077 get_iterable(ns_params, "vnf"),
1078 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1079 )
1080 target_vnf = deepcopy(vnfr)
1081 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1082 for vld in target_vnf.get("vld", ()):
1083 # check if connected to a ns.vld, to fill target'
1084 vnf_cp = find_in_list(
1085 vnfd.get("int-virtual-link-desc", ()),
1086 lambda cpd: cpd.get("id") == vld["id"],
1087 )
1088 if vnf_cp:
1089 ns_cp = "member_vnf:{}.{}".format(
1090 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1091 )
1092 if cp2target.get(ns_cp):
1093 vld["target"] = cp2target[ns_cp]
1094
1095 vld["vim_info"] = {
1096 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1097 }
1098 # check if this network needs SDN assist
1099 target_sdn = None
1100 if vld.get("pci-interfaces"):
1101 db_vim = get_vim_account(vnfr["vim-account-id"])
1102 sdnc_id = db_vim["config"].get("sdn-controller")
1103 if sdnc_id:
1104 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1105 target_sdn = "sdn:{}".format(sdnc_id)
1106 vld["vim_info"][target_sdn] = {
1107 "sdn": True,
1108 "target_vim": target_vim,
1109 "vlds": [sdn_vld],
1110 "type": vld.get("type"),
1111 }
1112
1113 # check at vnfd descriptor, if there is an ip-profile
1114 vld_params = {}
1115 vnfd_vlp = find_in_list(
1116 get_virtual_link_profiles(vnfd),
1117 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1118 )
1119 if (
1120 vnfd_vlp
1121 and vnfd_vlp.get("virtual-link-protocol-data")
1122 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1123 ):
1124 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1125 "l3-protocol-data"
1126 ]
1127 ip_profile_dest_data = {}
1128 if "ip-version" in ip_profile_source_data:
1129 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1130 "ip-version"
1131 ]
1132 if "cidr" in ip_profile_source_data:
1133 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1134 "cidr"
1135 ]
1136 if "gateway-ip" in ip_profile_source_data:
1137 ip_profile_dest_data[
1138 "gateway-address"
1139 ] = ip_profile_source_data["gateway-ip"]
1140 if "dhcp-enabled" in ip_profile_source_data:
1141 ip_profile_dest_data["dhcp-params"] = {
1142 "enabled": ip_profile_source_data["dhcp-enabled"]
1143 }
1144
1145 vld_params["ip-profile"] = ip_profile_dest_data
1146 # update vld_params with instantiation params
1147 if vnf_params:
1148 vld_instantiation_params = find_in_list(
1149 get_iterable(vnf_params, "internal-vld"),
1150 lambda i_vld: i_vld["name"] == vld["id"],
1151 )
1152 if vld_instantiation_params:
1153 vld_params.update(vld_instantiation_params)
1154 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1155
1156 vdur_list = []
1157 for vdur in target_vnf.get("vdur", ()):
1158 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1159 continue # This vdu must not be created
1160 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1161
1162 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1163
1164 if ssh_keys_all:
1165 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1166 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1167 if (
1168 vdu_configuration
1169 and vdu_configuration.get("config-access")
1170 and vdu_configuration.get("config-access").get("ssh-access")
1171 ):
1172 vdur["ssh-keys"] = ssh_keys_all
1173 vdur["ssh-access-required"] = vdu_configuration[
1174 "config-access"
1175 ]["ssh-access"]["required"]
1176 elif (
1177 vnf_configuration
1178 and vnf_configuration.get("config-access")
1179 and vnf_configuration.get("config-access").get("ssh-access")
1180 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1181 ):
1182 vdur["ssh-keys"] = ssh_keys_all
1183 vdur["ssh-access-required"] = vnf_configuration[
1184 "config-access"
1185 ]["ssh-access"]["required"]
1186 elif ssh_keys_instantiation and find_in_list(
1187 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1188 ):
1189 vdur["ssh-keys"] = ssh_keys_instantiation
1190
1191 self.logger.debug("NS > vdur > {}".format(vdur))
1192
1193 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1194 # cloud-init
1195 if vdud.get("cloud-init-file"):
1196 vdur["cloud-init"] = "{}:file:{}".format(
1197 vnfd["_id"], vdud.get("cloud-init-file")
1198 )
1199 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1200 if vdur["cloud-init"] not in target["cloud_init_content"]:
1201 base_folder = vnfd["_admin"]["storage"]
1202 if base_folder["pkg-dir"]:
1203 cloud_init_file = "{}/{}/cloud_init/{}".format(
1204 base_folder["folder"],
1205 base_folder["pkg-dir"],
1206 vdud.get("cloud-init-file"),
1207 )
1208 else:
1209 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1210 base_folder["folder"],
1211 vdud.get("cloud-init-file"),
1212 )
1213 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1214 target["cloud_init_content"][
1215 vdur["cloud-init"]
1216 ] = ci_file.read()
1217 elif vdud.get("cloud-init"):
1218 vdur["cloud-init"] = "{}:vdu:{}".format(
1219 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1220 )
1221 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1222 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1223 "cloud-init"
1224 ]
1225 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1226 deploy_params_vdu = self._format_additional_params(
1227 vdur.get("additionalParams") or {}
1228 )
1229 deploy_params_vdu["OSM"] = get_osm_params(
1230 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1231 )
1232 vdur["additionalParams"] = deploy_params_vdu
1233
1234 # flavor
1235 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1236 if target_vim not in ns_flavor["vim_info"]:
1237 ns_flavor["vim_info"][target_vim] = {}
1238
1239 # deal with images
1240 # in case alternative images are provided we must check if they should be applied
1241 # for the vim_type, modify the vim_type taking into account
1242 ns_image_id = int(vdur["ns-image-id"])
1243 if vdur.get("alt-image-ids"):
1244 db_vim = get_vim_account(vnfr["vim-account-id"])
1245 vim_type = db_vim["vim_type"]
1246 for alt_image_id in vdur.get("alt-image-ids"):
1247 ns_alt_image = target["image"][int(alt_image_id)]
1248 if vim_type == ns_alt_image.get("vim-type"):
1249 # must use alternative image
1250 self.logger.debug(
1251 "use alternative image id: {}".format(alt_image_id)
1252 )
1253 ns_image_id = alt_image_id
1254 vdur["ns-image-id"] = ns_image_id
1255 break
1256 ns_image = target["image"][int(ns_image_id)]
1257 if target_vim not in ns_image["vim_info"]:
1258 ns_image["vim_info"][target_vim] = {}
1259
1260 # Affinity groups
1261 if vdur.get("affinity-or-anti-affinity-group-id"):
1262 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1263 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1264 if target_vim not in ns_ags["vim_info"]:
1265 ns_ags["vim_info"][target_vim] = {}
1266
1267 vdur["vim_info"] = {target_vim: {}}
1268 # instantiation parameters
1269 if vnf_params:
1270 vdu_instantiation_params = find_in_list(
1271 get_iterable(vnf_params, "vdu"),
1272 lambda i_vdu: i_vdu["id"] == vdud["id"],
1273 )
1274 if vdu_instantiation_params:
1275 # Parse the vdu_volumes from the instantiation params
1276 vdu_volumes = get_volumes_from_instantiation_params(
1277 vdu_instantiation_params, vdud
1278 )
1279 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1280 vdur_list.append(vdur)
1281 target_vnf["vdur"] = vdur_list
1282 target["vnf"].append(target_vnf)
1283
1284 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1285 desc = await self.RO.deploy(nsr_id, target)
1286 self.logger.debug("RO return > {}".format(desc))
1287 action_id = desc["action_id"]
1288 await self._wait_ng_ro(
1289 nsr_id,
1290 action_id,
1291 nslcmop_id,
1292 start_deploy,
1293 timeout_ns_deploy,
1294 stage,
1295 operation="instantiation",
1296 )
1297
1298 # Updating NSR
1299 db_nsr_update = {
1300 "_admin.deployed.RO.operational-status": "running",
1301 "detailed-status": " ".join(stage),
1302 }
1303 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1304 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1305 self._write_op_status(nslcmop_id, stage)
1306 self.logger.debug(
1307 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1308 )
1309 return
1310
    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
        operation=None,
    ):
        """
        Poll NG-RO every 15 seconds until the given action reaches a final state.

        :param nsr_id: NS record identifier
        :param action_id: RO action identifier returned by a previous deploy call
        :param nslcmop_id: operation id; when given together with stage, progress
            is persisted to the "nsrs" collection and the operation status
        :param start_time: epoch seconds when waiting started; defaults to now
        :param timeout: maximum seconds to wait before raising NgRoException
        :param stage: 3-item status list; stage[2] is overwritten with VIM detail
        :param operation: key into self.op_status_map selecting the RO status query
            (e.g. "instantiation" or "termination")
        :raises NgRoException: when RO reports FAILED or the timeout expires
        """
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            desc_status = await self.op_status_map[operation](nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                assert False, "ROclient.check_ns_status returns unknown {}".format(
                    desc_status["status"]
                )
            # persist progress only when the detailed status actually changed
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            await asyncio.sleep(15, loop=self.loop)
        else:  # while loop exhausted without break -> timeout_ns_deploy
            raise NgRoException("Timeout waiting ns to deploy")
1348
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminate the NS at NG-RO: deploy an empty target (which makes RO remove
        every deployed resource), wait until done and finally delete the NS at RO.

        :param logging_text: prefix used for logging
        :param nsr_deployed: deployed info of the NS record (not referenced in
            this body; kept for interface compatibility)
        :param nsr_id: NS record identifier
        :param nslcmop_id: current operation identifier (also used as action_id
            of the empty deployment)
        :param stage: 3-item status list; stage[2] is overwritten with progress
        :raises LcmException: when deletion at RO fails (a 404 from RO is treated
            as already deleted and is not an error)
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target tells NG-RO to remove all resources of this NS
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                # already gone at RO: record as deleted, not a failure
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        # persist the final stage regardless of success or failure
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1421
1422 async def instantiate_RO(
1423 self,
1424 logging_text,
1425 nsr_id,
1426 nsd,
1427 db_nsr,
1428 db_nslcmop,
1429 db_vnfrs,
1430 db_vnfds,
1431 n2vc_key_list,
1432 stage,
1433 ):
1434 """
1435 Instantiate at RO
1436 :param logging_text: preffix text to use at logging
1437 :param nsr_id: nsr identity
1438 :param nsd: database content of ns descriptor
1439 :param db_nsr: database content of ns record
1440 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1441 :param db_vnfrs:
1442 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1443 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1444 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1445 :return: None or exception
1446 """
1447 try:
1448 start_deploy = time()
1449 ns_params = db_nslcmop.get("operationParams")
1450 if ns_params and ns_params.get("timeout_ns_deploy"):
1451 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1452 else:
1453 timeout_ns_deploy = self.timeout.get(
1454 "ns_deploy", self.timeout_ns_deploy
1455 )
1456
1457 # Check for and optionally request placement optimization. Database will be updated if placement activated
1458 stage[2] = "Waiting for Placement."
1459 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1460 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1461 for vnfr in db_vnfrs.values():
1462 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1463 break
1464 else:
1465 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1466
1467 return await self._instantiate_ng_ro(
1468 logging_text,
1469 nsr_id,
1470 nsd,
1471 db_nsr,
1472 db_nslcmop,
1473 db_vnfrs,
1474 db_vnfds,
1475 n2vc_key_list,
1476 stage,
1477 start_deploy,
1478 timeout_ns_deploy,
1479 )
1480 except Exception as e:
1481 stage[2] = "ERROR deploying at VIM"
1482 self.set_vnfr_at_error(db_vnfrs, str(e))
1483 self.logger.error(
1484 "Error deploying at VIM {}".format(e),
1485 exc_info=not isinstance(
1486 e,
1487 (
1488 ROclient.ROClientException,
1489 LcmException,
1490 DbException,
1491 NgRoException,
1492 ),
1493 ),
1494 )
1495 raise
1496
1497 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1498 """
1499 Wait for kdu to be up, get ip address
1500 :param logging_text: prefix use for logging
1501 :param nsr_id:
1502 :param vnfr_id:
1503 :param kdu_name:
1504 :return: IP address, K8s services
1505 """
1506
1507 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1508 nb_tries = 0
1509
1510 while nb_tries < 360:
1511 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1512 kdur = next(
1513 (
1514 x
1515 for x in get_iterable(db_vnfr, "kdur")
1516 if x.get("kdu-name") == kdu_name
1517 ),
1518 None,
1519 )
1520 if not kdur:
1521 raise LcmException(
1522 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1523 )
1524 if kdur.get("status"):
1525 if kdur["status"] in ("READY", "ENABLED"):
1526 return kdur.get("ip-address"), kdur.get("services")
1527 else:
1528 raise LcmException(
1529 "target KDU={} is in error state".format(kdu_name)
1530 )
1531
1532 await asyncio.sleep(10, loop=self.loop)
1533 nb_tries += 1
1534 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1535
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target VDU id; None selects the VNF's management VDU
        :param vdu_index: count-index of the target VDU (used with vdu_id)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: VNF/VM in error state, target not found, or too
            many retries either waiting for the VM or injecting the key
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # counts key-injection retries (legacy RO path)
        target_vdu_id = None
        ro_retries = 0  # counts loop iterations (10 s apart)

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # the target vdu is the one holding the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered up; otherwise either the legacy "status"
                # field or the NG-RO vim_status must be ACTIVE
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # keys cannot be injected into physical deployment units
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: key injection is a deploy action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(
                            nsr_id, action_id, timeout=600, operation="instantiation"
                        )
                        break
                    else:
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                            if not ro_nsr_id:
                                continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                            else:
                                raise ROclient.ROClientException(
                                    "error injecting key: {}".format(
                                        result.get("description")
                                    )
                                )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # legacy RO may transiently fail; retry up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the IP address alone is the result
                break

        return ip_address
1714
1715 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1716 """
1717 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1718 """
1719 my_vca = vca_deployed_list[vca_index]
1720 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1721 # vdu or kdu: no dependencies
1722 return
1723 timeout = 300
1724 while timeout >= 0:
1725 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1726 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1727 configuration_status_list = db_nsr["configurationStatus"]
1728 for index, vca_deployed in enumerate(configuration_status_list):
1729 if index == vca_index:
1730 # myself
1731 continue
1732 if not my_vca.get("member-vnf-index") or (
1733 vca_deployed.get("member-vnf-index")
1734 == my_vca.get("member-vnf-index")
1735 ):
1736 internal_status = configuration_status_list[index].get("status")
1737 if internal_status == "READY":
1738 continue
1739 elif internal_status == "BROKEN":
1740 raise LcmException(
1741 "Configuration aborted because dependent charm/s has failed"
1742 )
1743 else:
1744 break
1745 else:
1746 # no dependencies, return
1747 return
1748 await asyncio.sleep(10)
1749 timeout -= 1
1750
1751 raise LcmException("Configuration aborted because dependent charm/s timeout")
1752
1753 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1754 vca_id = None
1755 if db_vnfr:
1756 vca_id = deep_get(db_vnfr, ("vca-id",))
1757 elif db_nsr:
1758 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1759 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1760 return vca_id
1761
1762 async def instantiate_N2VC(
1763 self,
1764 logging_text,
1765 vca_index,
1766 nsi_id,
1767 db_nsr,
1768 db_vnfr,
1769 vdu_id,
1770 kdu_name,
1771 vdu_index,
1772 config_descriptor,
1773 deploy_params,
1774 base_folder,
1775 nslcmop_id,
1776 stage,
1777 vca_type,
1778 vca_name,
1779 ee_config_descriptor,
1780 ):
1781 nsr_id = db_nsr["_id"]
1782 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1783 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1784 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1785 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1786 db_dict = {
1787 "collection": "nsrs",
1788 "filter": {"_id": nsr_id},
1789 "path": db_update_entry,
1790 }
1791 step = ""
1792 try:
1793
1794 element_type = "NS"
1795 element_under_configuration = nsr_id
1796
1797 vnfr_id = None
1798 if db_vnfr:
1799 vnfr_id = db_vnfr["_id"]
1800 osm_config["osm"]["vnf_id"] = vnfr_id
1801
1802 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1803
1804 if vca_type == "native_charm":
1805 index_number = 0
1806 else:
1807 index_number = vdu_index or 0
1808
1809 if vnfr_id:
1810 element_type = "VNF"
1811 element_under_configuration = vnfr_id
1812 namespace += ".{}-{}".format(vnfr_id, index_number)
1813 if vdu_id:
1814 namespace += ".{}-{}".format(vdu_id, index_number)
1815 element_type = "VDU"
1816 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1817 osm_config["osm"]["vdu_id"] = vdu_id
1818 elif kdu_name:
1819 namespace += ".{}".format(kdu_name)
1820 element_type = "KDU"
1821 element_under_configuration = kdu_name
1822 osm_config["osm"]["kdu_name"] = kdu_name
1823
1824 # Get artifact path
1825 if base_folder["pkg-dir"]:
1826 artifact_path = "{}/{}/{}/{}".format(
1827 base_folder["folder"],
1828 base_folder["pkg-dir"],
1829 "charms"
1830 if vca_type
1831 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1832 else "helm-charts",
1833 vca_name,
1834 )
1835 else:
1836 artifact_path = "{}/Scripts/{}/{}/".format(
1837 base_folder["folder"],
1838 "charms"
1839 if vca_type
1840 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1841 else "helm-charts",
1842 vca_name,
1843 )
1844
1845 self.logger.debug("Artifact path > {}".format(artifact_path))
1846
1847 # get initial_config_primitive_list that applies to this element
1848 initial_config_primitive_list = config_descriptor.get(
1849 "initial-config-primitive"
1850 )
1851
1852 self.logger.debug(
1853 "Initial config primitive list > {}".format(
1854 initial_config_primitive_list
1855 )
1856 )
1857
1858 # add config if not present for NS charm
1859 ee_descriptor_id = ee_config_descriptor.get("id")
1860 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1861 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1862 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1863 )
1864
1865 self.logger.debug(
1866 "Initial config primitive list #2 > {}".format(
1867 initial_config_primitive_list
1868 )
1869 )
1870 # n2vc_redesign STEP 3.1
1871 # find old ee_id if exists
1872 ee_id = vca_deployed.get("ee_id")
1873
1874 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1875 # create or register execution environment in VCA
1876 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1877
1878 self._write_configuration_status(
1879 nsr_id=nsr_id,
1880 vca_index=vca_index,
1881 status="CREATING",
1882 element_under_configuration=element_under_configuration,
1883 element_type=element_type,
1884 )
1885
1886 step = "create execution environment"
1887 self.logger.debug(logging_text + step)
1888
1889 ee_id = None
1890 credentials = None
1891 if vca_type == "k8s_proxy_charm":
1892 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1893 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1894 namespace=namespace,
1895 artifact_path=artifact_path,
1896 db_dict=db_dict,
1897 vca_id=vca_id,
1898 )
1899 elif vca_type == "helm" or vca_type == "helm-v3":
1900 ee_id, credentials = await self.vca_map[
1901 vca_type
1902 ].create_execution_environment(
1903 namespace=namespace,
1904 reuse_ee_id=ee_id,
1905 db_dict=db_dict,
1906 config=osm_config,
1907 artifact_path=artifact_path,
1908 chart_model=vca_name,
1909 vca_type=vca_type,
1910 )
1911 else:
1912 ee_id, credentials = await self.vca_map[
1913 vca_type
1914 ].create_execution_environment(
1915 namespace=namespace,
1916 reuse_ee_id=ee_id,
1917 db_dict=db_dict,
1918 vca_id=vca_id,
1919 )
1920
1921 elif vca_type == "native_charm":
1922 step = "Waiting to VM being up and getting IP address"
1923 self.logger.debug(logging_text + step)
1924 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1925 logging_text,
1926 nsr_id,
1927 vnfr_id,
1928 vdu_id,
1929 vdu_index,
1930 user=None,
1931 pub_key=None,
1932 )
1933 credentials = {"hostname": rw_mgmt_ip}
1934 # get username
1935 username = deep_get(
1936 config_descriptor, ("config-access", "ssh-access", "default-user")
1937 )
1938 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1939 # merged. Meanwhile let's get username from initial-config-primitive
1940 if not username and initial_config_primitive_list:
1941 for config_primitive in initial_config_primitive_list:
1942 for param in config_primitive.get("parameter", ()):
1943 if param["name"] == "ssh-username":
1944 username = param["value"]
1945 break
1946 if not username:
1947 raise LcmException(
1948 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1949 "'config-access.ssh-access.default-user'"
1950 )
1951 credentials["username"] = username
1952 # n2vc_redesign STEP 3.2
1953
1954 self._write_configuration_status(
1955 nsr_id=nsr_id,
1956 vca_index=vca_index,
1957 status="REGISTERING",
1958 element_under_configuration=element_under_configuration,
1959 element_type=element_type,
1960 )
1961
1962 step = "register execution environment {}".format(credentials)
1963 self.logger.debug(logging_text + step)
1964 ee_id = await self.vca_map[vca_type].register_execution_environment(
1965 credentials=credentials,
1966 namespace=namespace,
1967 db_dict=db_dict,
1968 vca_id=vca_id,
1969 )
1970
1971 # for compatibility with MON/POL modules, the need model and application name at database
1972 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1973 ee_id_parts = ee_id.split(".")
1974 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1975 if len(ee_id_parts) >= 2:
1976 model_name = ee_id_parts[0]
1977 application_name = ee_id_parts[1]
1978 db_nsr_update[db_update_entry + "model"] = model_name
1979 db_nsr_update[db_update_entry + "application"] = application_name
1980
1981 # n2vc_redesign STEP 3.3
1982 step = "Install configuration Software"
1983
1984 self._write_configuration_status(
1985 nsr_id=nsr_id,
1986 vca_index=vca_index,
1987 status="INSTALLING SW",
1988 element_under_configuration=element_under_configuration,
1989 element_type=element_type,
1990 other_update=db_nsr_update,
1991 )
1992
1993 # TODO check if already done
1994 self.logger.debug(logging_text + step)
1995 config = None
1996 if vca_type == "native_charm":
1997 config_primitive = next(
1998 (p for p in initial_config_primitive_list if p["name"] == "config"),
1999 None,
2000 )
2001 if config_primitive:
2002 config = self._map_primitive_params(
2003 config_primitive, {}, deploy_params
2004 )
2005 num_units = 1
2006 if vca_type == "lxc_proxy_charm":
2007 if element_type == "NS":
2008 num_units = db_nsr.get("config-units") or 1
2009 elif element_type == "VNF":
2010 num_units = db_vnfr.get("config-units") or 1
2011 elif element_type == "VDU":
2012 for v in db_vnfr["vdur"]:
2013 if vdu_id == v["vdu-id-ref"]:
2014 num_units = v.get("config-units") or 1
2015 break
2016 if vca_type != "k8s_proxy_charm":
2017 await self.vca_map[vca_type].install_configuration_sw(
2018 ee_id=ee_id,
2019 artifact_path=artifact_path,
2020 db_dict=db_dict,
2021 config=config,
2022 num_units=num_units,
2023 vca_id=vca_id,
2024 vca_type=vca_type,
2025 )
2026
2027 # write in db flag of configuration_sw already installed
2028 self.update_db_2(
2029 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2030 )
2031
2032 # add relations for this VCA (wait for other peers related with this VCA)
2033 await self._add_vca_relations(
2034 logging_text=logging_text,
2035 nsr_id=nsr_id,
2036 vca_type=vca_type,
2037 vca_index=vca_index,
2038 )
2039
2040 # if SSH access is required, then get execution environment SSH public
2041 # if native charm we have waited already to VM be UP
2042 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2043 pub_key = None
2044 user = None
2045 # self.logger.debug("get ssh key block")
2046 if deep_get(
2047 config_descriptor, ("config-access", "ssh-access", "required")
2048 ):
2049 # self.logger.debug("ssh key needed")
2050 # Needed to inject a ssh key
2051 user = deep_get(
2052 config_descriptor,
2053 ("config-access", "ssh-access", "default-user"),
2054 )
2055 step = "Install configuration Software, getting public ssh key"
2056 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2057 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2058 )
2059
2060 step = "Insert public key into VM user={} ssh_key={}".format(
2061 user, pub_key
2062 )
2063 else:
2064 # self.logger.debug("no need to get ssh key")
2065 step = "Waiting to VM being up and getting IP address"
2066 self.logger.debug(logging_text + step)
2067
2068 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2069 rw_mgmt_ip = None
2070
2071 # n2vc_redesign STEP 5.1
2072 # wait for RO (ip-address) Insert pub_key into VM
2073 if vnfr_id:
2074 if kdu_name:
2075 rw_mgmt_ip, services = await self.wait_kdu_up(
2076 logging_text, nsr_id, vnfr_id, kdu_name
2077 )
2078 vnfd = self.db.get_one(
2079 "vnfds_revisions",
2080 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2081 )
2082 kdu = get_kdu(vnfd, kdu_name)
2083 kdu_services = [
2084 service["name"] for service in get_kdu_services(kdu)
2085 ]
2086 exposed_services = []
2087 for service in services:
2088 if any(s in service["name"] for s in kdu_services):
2089 exposed_services.append(service)
2090 await self.vca_map[vca_type].exec_primitive(
2091 ee_id=ee_id,
2092 primitive_name="config",
2093 params_dict={
2094 "osm-config": json.dumps(
2095 OsmConfigBuilder(
2096 k8s={"services": exposed_services}
2097 ).build()
2098 )
2099 },
2100 vca_id=vca_id,
2101 )
2102
2103 # This verification is needed in order to avoid trying to add a public key
2104 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2105 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2106 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2107 # or it is a KNF)
2108 elif db_vnfr.get("vdur"):
2109 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2110 logging_text,
2111 nsr_id,
2112 vnfr_id,
2113 vdu_id,
2114 vdu_index,
2115 user=user,
2116 pub_key=pub_key,
2117 )
2118
2119 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2120
2121 # store rw_mgmt_ip in deploy params for later replacement
2122 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2123
2124 # n2vc_redesign STEP 6 Execute initial config primitive
2125 step = "execute initial config primitive"
2126
2127 # wait for dependent primitives execution (NS -> VNF -> VDU)
2128 if initial_config_primitive_list:
2129 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2130
2131 # stage, in function of element type: vdu, kdu, vnf or ns
2132 my_vca = vca_deployed_list[vca_index]
2133 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2134 # VDU or KDU
2135 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2136 elif my_vca.get("member-vnf-index"):
2137 # VNF
2138 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2139 else:
2140 # NS
2141 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2142
2143 self._write_configuration_status(
2144 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2145 )
2146
2147 self._write_op_status(op_id=nslcmop_id, stage=stage)
2148
2149 check_if_terminated_needed = True
2150 for initial_config_primitive in initial_config_primitive_list:
2151 # adding information on the vca_deployed if it is a NS execution environment
2152 if not vca_deployed["member-vnf-index"]:
2153 deploy_params["ns_config_info"] = json.dumps(
2154 self._get_ns_config_info(nsr_id)
2155 )
2156 # TODO check if already done
2157 primitive_params_ = self._map_primitive_params(
2158 initial_config_primitive, {}, deploy_params
2159 )
2160
2161 step = "execute primitive '{}' params '{}'".format(
2162 initial_config_primitive["name"], primitive_params_
2163 )
2164 self.logger.debug(logging_text + step)
2165 await self.vca_map[vca_type].exec_primitive(
2166 ee_id=ee_id,
2167 primitive_name=initial_config_primitive["name"],
2168 params_dict=primitive_params_,
2169 db_dict=db_dict,
2170 vca_id=vca_id,
2171 vca_type=vca_type,
2172 )
2173 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2174 if check_if_terminated_needed:
2175 if config_descriptor.get("terminate-config-primitive"):
2176 self.update_db_2(
2177 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2178 )
2179 check_if_terminated_needed = False
2180
2181 # TODO register in database that primitive is done
2182
2183 # STEP 7 Configure metrics
2184 if vca_type == "helm" or vca_type == "helm-v3":
2185 # TODO: review for those cases where the helm chart is a reference and
2186 # is not part of the NF package
2187 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2188 ee_id=ee_id,
2189 artifact_path=artifact_path,
2190 ee_config_descriptor=ee_config_descriptor,
2191 vnfr_id=vnfr_id,
2192 nsr_id=nsr_id,
2193 target_ip=rw_mgmt_ip,
2194 )
2195 if prometheus_jobs:
2196 self.update_db_2(
2197 "nsrs",
2198 nsr_id,
2199 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2200 )
2201
2202 for job in prometheus_jobs:
2203 self.db.set_one(
2204 "prometheus_jobs",
2205 {"job_name": job["job_name"]},
2206 job,
2207 upsert=True,
2208 fail_on_empty=False,
2209 )
2210
2211 step = "instantiated at VCA"
2212 self.logger.debug(logging_text + step)
2213
2214 self._write_configuration_status(
2215 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2216 )
2217
2218 except Exception as e: # TODO not use Exception but N2VC exception
2219 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2220 if not isinstance(
2221 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2222 ):
2223 self.logger.error(
2224 "Exception while {} : {}".format(step, e), exc_info=True
2225 )
2226 self._write_configuration_status(
2227 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2228 )
2229 raise LcmException("{} {}".format(step, e)) from e
2230
2231 def _write_ns_status(
2232 self,
2233 nsr_id: str,
2234 ns_state: str,
2235 current_operation: str,
2236 current_operation_id: str,
2237 error_description: str = None,
2238 error_detail: str = None,
2239 other_update: dict = None,
2240 ):
2241 """
2242 Update db_nsr fields.
2243 :param nsr_id:
2244 :param ns_state:
2245 :param current_operation:
2246 :param current_operation_id:
2247 :param error_description:
2248 :param error_detail:
2249 :param other_update: Other required changes at database if provided, will be cleared
2250 :return:
2251 """
2252 try:
2253 db_dict = other_update or {}
2254 db_dict[
2255 "_admin.nslcmop"
2256 ] = current_operation_id # for backward compatibility
2257 db_dict["_admin.current-operation"] = current_operation_id
2258 db_dict["_admin.operation-type"] = (
2259 current_operation if current_operation != "IDLE" else None
2260 )
2261 db_dict["currentOperation"] = current_operation
2262 db_dict["currentOperationID"] = current_operation_id
2263 db_dict["errorDescription"] = error_description
2264 db_dict["errorDetail"] = error_detail
2265
2266 if ns_state:
2267 db_dict["nsState"] = ns_state
2268 self.update_db_2("nsrs", nsr_id, db_dict)
2269 except DbException as e:
2270 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2271
2272 def _write_op_status(
2273 self,
2274 op_id: str,
2275 stage: list = None,
2276 error_message: str = None,
2277 queuePosition: int = 0,
2278 operation_state: str = None,
2279 other_update: dict = None,
2280 ):
2281 try:
2282 db_dict = other_update or {}
2283 db_dict["queuePosition"] = queuePosition
2284 if isinstance(stage, list):
2285 db_dict["stage"] = stage[0]
2286 db_dict["detailed-status"] = " ".join(stage)
2287 elif stage is not None:
2288 db_dict["stage"] = str(stage)
2289
2290 if error_message is not None:
2291 db_dict["errorMessage"] = error_message
2292 if operation_state is not None:
2293 db_dict["operationState"] = operation_state
2294 db_dict["statusEnteredTime"] = time()
2295 self.update_db_2("nslcmops", op_id, db_dict)
2296 except DbException as e:
2297 self.logger.warn(
2298 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2299 )
2300
2301 def _write_all_config_status(self, db_nsr: dict, status: str):
2302 try:
2303 nsr_id = db_nsr["_id"]
2304 # configurationStatus
2305 config_status = db_nsr.get("configurationStatus")
2306 if config_status:
2307 db_nsr_update = {
2308 "configurationStatus.{}.status".format(index): status
2309 for index, v in enumerate(config_status)
2310 if v
2311 }
2312 # update status
2313 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2314
2315 except DbException as e:
2316 self.logger.warn(
2317 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2318 )
2319
2320 def _write_configuration_status(
2321 self,
2322 nsr_id: str,
2323 vca_index: int,
2324 status: str = None,
2325 element_under_configuration: str = None,
2326 element_type: str = None,
2327 other_update: dict = None,
2328 ):
2329
2330 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2331 # .format(vca_index, status))
2332
2333 try:
2334 db_path = "configurationStatus.{}.".format(vca_index)
2335 db_dict = other_update or {}
2336 if status:
2337 db_dict[db_path + "status"] = status
2338 if element_under_configuration:
2339 db_dict[
2340 db_path + "elementUnderConfiguration"
2341 ] = element_under_configuration
2342 if element_type:
2343 db_dict[db_path + "elementType"] = element_type
2344 self.update_db_2("nsrs", nsr_id, db_dict)
2345 except DbException as e:
2346 self.logger.warn(
2347 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2348 status, nsr_id, vca_index, e
2349 )
2350 )
2351
2352 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2353 """
2354 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2355 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2356 Database is used because the result can be obtained from a different LCM worker in case of HA.
2357 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2358 :param db_nslcmop: database content of nslcmop
2359 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2360 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2361 computed 'vim-account-id'
2362 """
2363 modified = False
2364 nslcmop_id = db_nslcmop["_id"]
2365 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2366 if placement_engine == "PLA":
2367 self.logger.debug(
2368 logging_text + "Invoke and wait for placement optimization"
2369 )
2370 await self.msg.aiowrite(
2371 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2372 )
2373 db_poll_interval = 5
2374 wait = db_poll_interval * 10
2375 pla_result = None
2376 while not pla_result and wait >= 0:
2377 await asyncio.sleep(db_poll_interval)
2378 wait -= db_poll_interval
2379 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2380 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2381
2382 if not pla_result:
2383 raise LcmException(
2384 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2385 )
2386
2387 for pla_vnf in pla_result["vnf"]:
2388 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2389 if not pla_vnf.get("vimAccountId") or not vnfr:
2390 continue
2391 modified = True
2392 self.db.set_one(
2393 "vnfrs",
2394 {"_id": vnfr["_id"]},
2395 {"vim-account-id": pla_vnf["vimAccountId"]},
2396 )
2397 # Modifies db_vnfrs
2398 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2399 return modified
2400
2401 def update_nsrs_with_pla_result(self, params):
2402 try:
2403 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2404 self.update_db_2(
2405 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2406 )
2407 except Exception as e:
2408 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2409
    async def instantiate(self, nsr_id, nslcmop_id):
        """Deploy a Network Service: KDUs at K8s clusters, VMs/networks at the
        VIM (through RO) and execution environments (charms) at VCA, then run
        Day-1 configuration. Progress and the final result are written to the
        "nsrs" and "nslcmops" database records.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None; outcome is reported through the database records
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do in this one
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            # per-operation timeout overrides the configured/default one
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        # kdur additionalParams are also JSON-encoded at db
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                if vnfd_id not in db_vnfds:
                    # NOTE(review): db_vnfds holds vnfd dicts, so this membership
                    # test against the id never matches and each vnfd may be read
                    # once per vnfr using it — functionally harmless, confirm intent
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # VIM deployment runs concurrently with the VCA deployments below;
            # completion is awaited via tasks_dict_info in the finally block
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level execution environment (if the VNFD declares one)
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        # NOTE(review): reuses (and below mutates) the VNF-level
                        # deploy_params dict instead of a copy — confirm intended
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level execution environments
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep the full traceback for the operation record
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                # timeout_ns_deploy is bound before any task is registered, so
                # it is always defined whenever tasks_dict_info is non-empty
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                # NOTE(review): rebinds the outer "exc"; harmless because "exc"
                # is not read again after this point
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify NBI/subscribers about the operation result
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2895
2896 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2897 if vnfd_id not in cached_vnfds:
2898 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2899 return cached_vnfds[vnfd_id]
2900
2901 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2902 if vnf_profile_id not in cached_vnfrs:
2903 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2904 "vnfrs",
2905 {
2906 "member-vnf-index-ref": vnf_profile_id,
2907 "nsr-id-ref": nsr_id,
2908 },
2909 )
2910 return cached_vnfrs[vnf_profile_id]
2911
2912 def _is_deployed_vca_in_relation(
2913 self, vca: DeployedVCA, relation: Relation
2914 ) -> bool:
2915 found = False
2916 for endpoint in (relation.provider, relation.requirer):
2917 if endpoint["kdu-resource-profile-id"]:
2918 continue
2919 found = (
2920 vca.vnf_profile_id == endpoint.vnf_profile_id
2921 and vca.vdu_profile_id == endpoint.vdu_profile_id
2922 and vca.execution_environment_ref == endpoint.execution_environment_ref
2923 )
2924 if found:
2925 break
2926 return found
2927
2928 def _update_ee_relation_data_with_implicit_data(
2929 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2930 ):
2931 ee_relation_data = safe_get_ee_relation(
2932 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2933 )
2934 ee_relation_level = EELevel.get_level(ee_relation_data)
2935 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2936 "execution-environment-ref"
2937 ]:
2938 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2939 vnfd_id = vnf_profile["vnfd-id"]
2940 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2941 entity_id = (
2942 vnfd_id
2943 if ee_relation_level == EELevel.VNF
2944 else ee_relation_data["vdu-profile-id"]
2945 )
2946 ee = get_juju_ee_ref(db_vnfd, entity_id)
2947 if not ee:
2948 raise Exception(
2949 f"not execution environments found for ee_relation {ee_relation_data}"
2950 )
2951 ee_relation_data["execution-environment-ref"] = ee["id"]
2952 return ee_relation_data
2953
2954 def _get_ns_relations(
2955 self,
2956 nsr_id: str,
2957 nsd: Dict[str, Any],
2958 vca: DeployedVCA,
2959 cached_vnfds: Dict[str, Any],
2960 ) -> List[Relation]:
2961 relations = []
2962 db_ns_relations = get_ns_configuration_relation_list(nsd)
2963 for r in db_ns_relations:
2964 provider_dict = None
2965 requirer_dict = None
2966 if all(key in r for key in ("provider", "requirer")):
2967 provider_dict = r["provider"]
2968 requirer_dict = r["requirer"]
2969 elif "entities" in r:
2970 provider_id = r["entities"][0]["id"]
2971 provider_dict = {
2972 "nsr-id": nsr_id,
2973 "endpoint": r["entities"][0]["endpoint"],
2974 }
2975 if provider_id != nsd["id"]:
2976 provider_dict["vnf-profile-id"] = provider_id
2977 requirer_id = r["entities"][1]["id"]
2978 requirer_dict = {
2979 "nsr-id": nsr_id,
2980 "endpoint": r["entities"][1]["endpoint"],
2981 }
2982 if requirer_id != nsd["id"]:
2983 requirer_dict["vnf-profile-id"] = requirer_id
2984 else:
2985 raise Exception(
2986 "provider/requirer or entities must be included in the relation."
2987 )
2988 relation_provider = self._update_ee_relation_data_with_implicit_data(
2989 nsr_id, nsd, provider_dict, cached_vnfds
2990 )
2991 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2992 nsr_id, nsd, requirer_dict, cached_vnfds
2993 )
2994 provider = EERelation(relation_provider)
2995 requirer = EERelation(relation_requirer)
2996 relation = Relation(r["name"], provider, requirer)
2997 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2998 if vca_in_relation:
2999 relations.append(relation)
3000 return relations
3001
3002 def _get_vnf_relations(
3003 self,
3004 nsr_id: str,
3005 nsd: Dict[str, Any],
3006 vca: DeployedVCA,
3007 cached_vnfds: Dict[str, Any],
3008 ) -> List[Relation]:
3009 relations = []
3010 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3011 vnf_profile_id = vnf_profile["id"]
3012 vnfd_id = vnf_profile["vnfd-id"]
3013 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3014 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3015 for r in db_vnf_relations:
3016 provider_dict = None
3017 requirer_dict = None
3018 if all(key in r for key in ("provider", "requirer")):
3019 provider_dict = r["provider"]
3020 requirer_dict = r["requirer"]
3021 elif "entities" in r:
3022 provider_id = r["entities"][0]["id"]
3023 provider_dict = {
3024 "nsr-id": nsr_id,
3025 "vnf-profile-id": vnf_profile_id,
3026 "endpoint": r["entities"][0]["endpoint"],
3027 }
3028 if provider_id != vnfd_id:
3029 provider_dict["vdu-profile-id"] = provider_id
3030 requirer_id = r["entities"][1]["id"]
3031 requirer_dict = {
3032 "nsr-id": nsr_id,
3033 "vnf-profile-id": vnf_profile_id,
3034 "endpoint": r["entities"][1]["endpoint"],
3035 }
3036 if requirer_id != vnfd_id:
3037 requirer_dict["vdu-profile-id"] = requirer_id
3038 else:
3039 raise Exception(
3040 "provider/requirer or entities must be included in the relation."
3041 )
3042 relation_provider = self._update_ee_relation_data_with_implicit_data(
3043 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3044 )
3045 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3046 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3047 )
3048 provider = EERelation(relation_provider)
3049 requirer = EERelation(relation_requirer)
3050 relation = Relation(r["name"], provider, requirer)
3051 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3052 if vca_in_relation:
3053 relations.append(relation)
3054 return relations
3055
3056 def _get_kdu_resource_data(
3057 self,
3058 ee_relation: EERelation,
3059 db_nsr: Dict[str, Any],
3060 cached_vnfds: Dict[str, Any],
3061 ) -> DeployedK8sResource:
3062 nsd = get_nsd(db_nsr)
3063 vnf_profiles = get_vnf_profiles(nsd)
3064 vnfd_id = find_in_list(
3065 vnf_profiles,
3066 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3067 )["vnfd-id"]
3068 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3069 kdu_resource_profile = get_kdu_resource_profile(
3070 db_vnfd, ee_relation.kdu_resource_profile_id
3071 )
3072 kdu_name = kdu_resource_profile["kdu-name"]
3073 deployed_kdu, _ = get_deployed_kdu(
3074 db_nsr.get("_admin", ()).get("deployed", ()),
3075 kdu_name,
3076 ee_relation.vnf_profile_id,
3077 )
3078 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3079 return deployed_kdu
3080
3081 def _get_deployed_component(
3082 self,
3083 ee_relation: EERelation,
3084 db_nsr: Dict[str, Any],
3085 cached_vnfds: Dict[str, Any],
3086 ) -> DeployedComponent:
3087 nsr_id = db_nsr["_id"]
3088 deployed_component = None
3089 ee_level = EELevel.get_level(ee_relation)
3090 if ee_level == EELevel.NS:
3091 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3092 if vca:
3093 deployed_component = DeployedVCA(nsr_id, vca)
3094 elif ee_level == EELevel.VNF:
3095 vca = get_deployed_vca(
3096 db_nsr,
3097 {
3098 "vdu_id": None,
3099 "member-vnf-index": ee_relation.vnf_profile_id,
3100 "ee_descriptor_id": ee_relation.execution_environment_ref,
3101 },
3102 )
3103 if vca:
3104 deployed_component = DeployedVCA(nsr_id, vca)
3105 elif ee_level == EELevel.VDU:
3106 vca = get_deployed_vca(
3107 db_nsr,
3108 {
3109 "vdu_id": ee_relation.vdu_profile_id,
3110 "member-vnf-index": ee_relation.vnf_profile_id,
3111 "ee_descriptor_id": ee_relation.execution_environment_ref,
3112 },
3113 )
3114 if vca:
3115 deployed_component = DeployedVCA(nsr_id, vca)
3116 elif ee_level == EELevel.KDU:
3117 kdu_resource_data = self._get_kdu_resource_data(
3118 ee_relation, db_nsr, cached_vnfds
3119 )
3120 if kdu_resource_data:
3121 deployed_component = DeployedK8sResource(kdu_resource_data)
3122 return deployed_component
3123
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Create one relation between two deployed components, if both are ready.

        :param relation: provider/requirer pair to be related
        :param vca_type: key into self.vca_map selecting the VCA connector
        :param db_nsr: nsr record read from the database
        :param cached_vnfds: cache of vnfd records, filled on demand
        :param cached_vnfrs: cache of vnfr records, filled on demand
        :return: True when the relation was added (caller drops it from its
            pending list); False when either peer is not yet deployed or its
            configuration software is not installed
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        # both peers must exist and have their config software installed
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # vnfr is only needed for VNF/VDU endpoints; NS-level has none
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            # delegate the actual relation creation to the VCA connector
            await self.vca_map[vca_type].add_relation(
                provider=provider_relation_endpoint,
                requirer=requirer_relation_endpoint,
            )
            # remove entry from relations list
            return True
        return False
3181
3182 async def _add_vca_relations(
3183 self,
3184 logging_text,
3185 nsr_id,
3186 vca_type: str,
3187 vca_index: int,
3188 timeout: int = 3600,
3189 ) -> bool:
3190
3191 # steps:
3192 # 1. find all relations for this VCA
3193 # 2. wait for other peers related
3194 # 3. add relations
3195
3196 try:
3197 # STEP 1: find all relations for this VCA
3198
3199 # read nsr record
3200 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3201 nsd = get_nsd(db_nsr)
3202
3203 # this VCA data
3204 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3205 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3206
3207 cached_vnfds = {}
3208 cached_vnfrs = {}
3209 relations = []
3210 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3211 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3212
3213 # if no relations, terminate
3214 if not relations:
3215 self.logger.debug(logging_text + " No relations")
3216 return True
3217
3218 self.logger.debug(logging_text + " adding relations {}".format(relations))
3219
3220 # add all relations
3221 start = time()
3222 while True:
3223 # check timeout
3224 now = time()
3225 if now - start >= timeout:
3226 self.logger.error(logging_text + " : timeout adding relations")
3227 return False
3228
3229 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3230 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3231
3232 # for each relation, find the VCA's related
3233 for relation in relations.copy():
3234 added = await self._add_relation(
3235 relation,
3236 vca_type,
3237 db_nsr,
3238 cached_vnfds,
3239 cached_vnfrs,
3240 )
3241 if added:
3242 relations.remove(relation)
3243
3244 if not relations:
3245 self.logger.debug("Relations added")
3246 break
3247 await asyncio.sleep(5.0)
3248
3249 return True
3250
3251 except Exception as e:
3252 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3253 return False
3254
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its K8s cluster and update nsr/vnfr records.

        :param nsr_id: internal id of the ns instance
        :param nsr_db_path: path of this KDU entry inside the nsr record
            (e.g. _admin.deployed.K8s.<index>)
        :param vnfr_data: vnfr record of the owning VNF
        :param kdu_index: index of this KDU within the vnfr "kdur" list
        :param kdud: kdu descriptor taken from the vnfd
        :param vnfd: vnfd record
        :param k8s_instance_info: cluster type/uuid, kdu model/name, namespace,
            optional kdu-deployment-name
        :param k8params: instantiation parameters for the chart/bundle
        :param timeout: seconds granted to the install and to each primitive
        :param vca_id: id of the VCA (used by juju-based clusters), if any
        :return: the kdu instance name used for the deployment
        :raises: re-raises any install error after recording it in the db
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # honor an explicit deployment name; otherwise let the cluster
            # connector generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            # NOTE(review): atomic is hard-coded True for installs; Bug 2187
            # made the flag optional for helm upgrades — confirm whether
            # installs should honor a user-provided "atomic" parameter too
            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                # only services flagged as mgmt-service in the descriptor
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for-else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial config primitives, but only when no juju EE handles them
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # primitives must run in ascending "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3452
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch one _install_kdu task per KDU found in the vnfr records.

        :param logging_text: prefix for log messages
        :param nsr_id: internal id of the ns instance
        :param nslcmop_id: id of the nslcmop being processed
        :param db_vnfrs: vnfr records indexed by member-vnf-index
        :param db_vnfds: list of vnfd records
        :param task_instantiation_info: dict filled here with task -> description
        :raises LcmException: on descriptor/cluster errors
        """
        # Launch kdus if present in the descriptor

        # cache of already-resolved cluster internal ids, per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and cache) the internal id of a k8s cluster for the
            # given connector type, initializing helm-v3 on demand.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    # select the connector type from the kdur content
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize  repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # launch the install as a background task and register it
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever K8s entries were prepared, even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3724
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment found in
        descriptor_config, reusing or creating the matching VCA entry in
        db_nsr._admin.deployed.VCA.

        :param descriptor_config: configuration section of the descriptor,
            containing either an execution-environment-list or a "juju" entry
        :param task_instantiation_info: dict filled here with task -> description
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # derive the VCA type from the ee_item content
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an already-created VCA entry matching this target
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3886
3887 @staticmethod
3888 def _create_nslcmop(nsr_id, operation, params):
3889 """
3890 Creates a ns-lcm-opp content to be stored at database.
3891 :param nsr_id: internal id of the instance
3892 :param operation: instantiate, terminate, scale, action, ...
3893 :param params: user parameters for the operation
3894 :return: dictionary following SOL005 format
3895 """
3896 # Raise exception if invalid arguments
3897 if not (nsr_id and operation and params):
3898 raise LcmException(
3899 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3900 )
3901 now = time()
3902 _id = str(uuid4())
3903 nslcmop = {
3904 "id": _id,
3905 "_id": _id,
3906 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3907 "operationState": "PROCESSING",
3908 "statusEnteredTime": now,
3909 "nsInstanceId": nsr_id,
3910 "lcmOperationType": operation,
3911 "startTime": now,
3912 "isAutomaticInvocation": False,
3913 "operationParams": params,
3914 "isCancelPending": False,
3915 "links": {
3916 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3917 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3918 },
3919 }
3920 return nslcmop
3921
3922 def _format_additional_params(self, params):
3923 params = params or {}
3924 for key, value in params.items():
3925 if str(value).startswith("!!yaml "):
3926 params[key] = yaml.safe_load(value[7:])
3927 return params
3928
3929 def _get_terminate_primitive_params(self, seq, vnf_index):
3930 primitive = seq.get("name")
3931 primitive_params = {}
3932 params = {
3933 "member_vnf_index": vnf_index,
3934 "primitive": primitive,
3935 "primitive_params": primitive_params,
3936 }
3937 desc_params = {}
3938 return self._map_primitive_params(seq, params, desc_params)
3939
3940 # sub-operations
3941
3942 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3943 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3944 if op.get("operationState") == "COMPLETED":
3945 # b. Skip sub-operation
3946 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3947 return self.SUBOPERATION_STATUS_SKIP
3948 else:
3949 # c. retry executing sub-operation
3950 # The sub-operation exists, and operationState != 'COMPLETED'
3951 # Update operationState = 'PROCESSING' to indicate a retry.
3952 operationState = "PROCESSING"
3953 detailed_status = "In progress"
3954 self._update_suboperation_status(
3955 db_nslcmop, op_index, operationState, detailed_status
3956 )
3957 # Return the sub-operation index
3958 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3959 # with arguments extracted from the sub-operation
3960 return op_index
3961
3962 # Find a sub-operation where all keys in a matching dictionary must match
3963 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3964 def _find_suboperation(self, db_nslcmop, match):
3965 if db_nslcmop and match:
3966 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3967 for i, op in enumerate(op_list):
3968 if all(op.get(k) == match[k] for k in match):
3969 return i
3970 return self.SUBOPERATION_STATUS_NOT_FOUND
3971
3972 # Update status for a sub-operation given its index
3973 def _update_suboperation_status(
3974 self, db_nslcmop, op_index, operationState, detailed_status
3975 ):
3976 # Update DB for HA tasks
3977 q_filter = {"_id": db_nslcmop["_id"]}
3978 update_dict = {
3979 "_admin.operations.{}.operationState".format(op_index): operationState,
3980 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3981 }
3982 self.db.set_one(
3983 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3984 )
3985
3986 # Add sub-operation, return the index of the added sub-operation
3987 # Optionally, set operationState, detailed-status, and operationType
3988 # Status and type are currently set for 'scale' sub-operations:
3989 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3990 # 'detailed-status' : status message
3991 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3992 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3993 def _add_suboperation(
3994 self,
3995 db_nslcmop,
3996 vnf_index,
3997 vdu_id,
3998 vdu_count_index,
3999 vdu_name,
4000 primitive,
4001 mapped_primitive_params,
4002 operationState=None,
4003 detailed_status=None,
4004 operationType=None,
4005 RO_nsr_id=None,
4006 RO_scaling_info=None,
4007 ):
4008 if not db_nslcmop:
4009 return self.SUBOPERATION_STATUS_NOT_FOUND
4010 # Get the "_admin.operations" list, if it exists
4011 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4012 op_list = db_nslcmop_admin.get("operations")
4013 # Create or append to the "_admin.operations" list
4014 new_op = {
4015 "member_vnf_index": vnf_index,
4016 "vdu_id": vdu_id,
4017 "vdu_count_index": vdu_count_index,
4018 "primitive": primitive,
4019 "primitive_params": mapped_primitive_params,
4020 }
4021 if operationState:
4022 new_op["operationState"] = operationState
4023 if detailed_status:
4024 new_op["detailed-status"] = detailed_status
4025 if operationType:
4026 new_op["lcmOperationType"] = operationType
4027 if RO_nsr_id:
4028 new_op["RO_nsr_id"] = RO_nsr_id
4029 if RO_scaling_info:
4030 new_op["RO_scaling_info"] = RO_scaling_info
4031 if not op_list:
4032 # No existing operations, create key 'operations' with current operation as first list element
4033 db_nslcmop_admin.update({"operations": [new_op]})
4034 op_list = db_nslcmop_admin.get("operations")
4035 else:
4036 # Existing operations, append operation to list
4037 op_list.append(new_op)
4038
4039 db_nslcmop_update = {"_admin.operations": op_list}
4040 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4041 op_index = len(op_list) - 1
4042 return op_index
4043
4044 # Helper methods for scale() sub-operations
4045
4046 # pre-scale/post-scale:
4047 # Check for 3 different cases:
4048 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4049 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4050 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4051 def _check_or_add_scale_suboperation(
4052 self,
4053 db_nslcmop,
4054 vnf_index,
4055 vnf_config_primitive,
4056 primitive_params,
4057 operationType,
4058 RO_nsr_id=None,
4059 RO_scaling_info=None,
4060 ):
4061 # Find this sub-operation
4062 if RO_nsr_id and RO_scaling_info:
4063 operationType = "SCALE-RO"
4064 match = {
4065 "member_vnf_index": vnf_index,
4066 "RO_nsr_id": RO_nsr_id,
4067 "RO_scaling_info": RO_scaling_info,
4068 }
4069 else:
4070 match = {
4071 "member_vnf_index": vnf_index,
4072 "primitive": vnf_config_primitive,
4073 "primitive_params": primitive_params,
4074 "lcmOperationType": operationType,
4075 }
4076 op_index = self._find_suboperation(db_nslcmop, match)
4077 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4078 # a. New sub-operation
4079 # The sub-operation does not exist, add it.
4080 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4081 # The following parameters are set to None for all kind of scaling:
4082 vdu_id = None
4083 vdu_count_index = None
4084 vdu_name = None
4085 if RO_nsr_id and RO_scaling_info:
4086 vnf_config_primitive = None
4087 primitive_params = None
4088 else:
4089 RO_nsr_id = None
4090 RO_scaling_info = None
4091 # Initial status for sub-operation
4092 operationState = "PROCESSING"
4093 detailed_status = "In progress"
4094 # Add sub-operation for pre/post-scaling (zero or more operations)
4095 self._add_suboperation(
4096 db_nslcmop,
4097 vnf_index,
4098 vdu_id,
4099 vdu_count_index,
4100 vdu_name,
4101 vnf_config_primitive,
4102 primitive_params,
4103 operationState,
4104 detailed_status,
4105 operationType,
4106 RO_nsr_id,
4107 RO_scaling_info,
4108 )
4109 return self.SUBOPERATION_STATUS_NEW
4110 else:
4111 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4112 # or op_index (operationState != 'COMPLETED')
4113 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4114
4115 # Function to return execution_environment id
4116
4117 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4118 # TODO vdu_index_count
4119 for vca in vca_deployed_list:
4120 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4121 return vca["ee_id"]
4122
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment
        (only when destroy_ee=True).
        :param logging_text: prefix for every log line of this task
        :param db_nslcmop: nslcmop database record driving this operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not
            completed or has not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (Juju controller) identifier, passed through to N2VC calls
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # Default kept for backward compatibility with records that predate "type"
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # Only run primitives when this VCA was flagged as needing termination
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # All terminate primitives ran: mark this VCA as no longer needing termination
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4228
4229 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4230 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4231 namespace = "." + db_nsr["_id"]
4232 try:
4233 await self.n2vc.delete_namespace(
4234 namespace=namespace,
4235 total_timeout=self.timeout_charm_delete,
4236 vca_id=vca_id,
4237 )
4238 except N2VCNotFound: # already deleted. Skip
4239 pass
4240 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4241
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO (classic, non-NG flow).

        Proceeds in phases: (1) delete the ns from the VIM and poll the delete
        action until it finishes, (2) delete the nsd from RO, (3) delete each
        vnfd from RO. Errors are accumulated in failed_detail; phases 2 and 3
        are skipped once any failure has been recorded.

        :param logging_text: prefix for every log line of this task
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: id of the nsrs database record
        :param nslcmop_id: id of the nslcmops database record
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the
            concatenated content of the list
        :return: None
        :raises LcmException: when any deletion step failed (joined failed_detail)
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            # Phase 1a: request ns deletion from the VIM
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # Phase 1b: poll RO until the delete action completes (or times out).
            # ro_delete_action may also come from a previous, interrupted attempt.
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # Deletion finished: clear the pending action and stop polling
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # Only write to the database when the status text changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    # NOTE(review): the 'loop' parameter of asyncio.sleep is
                    # deprecated since Python 3.8 and removed in 3.10
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                # 404 means the ns is already gone from RO: treat as success
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Phase 2: delete nsd from RO (only if nothing failed so far)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Phase 3: delete each vnfd from RO (only if nothing failed so far)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        # Final status: write the outcome and raise if anything failed
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4441
4442 async def terminate(self, nsr_id, nslcmop_id):
4443 # Try to lock HA task here
4444 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4445 if not task_is_locked_by_me:
4446 return
4447
4448 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
4449 self.logger.debug(logging_text + "Enter")
4450 timeout_ns_terminate = self.timeout_ns_terminate
4451 db_nsr = None
4452 db_nslcmop = None
4453 operation_params = None
4454 exc = None
4455 error_list = [] # annotates all failed error messages
4456 db_nslcmop_update = {}
4457 autoremove = False # autoremove after terminated
4458 tasks_dict_info = {}
4459 db_nsr_update = {}
4460 stage = [
4461 "Stage 1/3: Preparing task.",
4462 "Waiting for previous operations to terminate.",
4463 "",
4464 ]
4465 # ^ contains [stage, step, VIM-status]
4466 try:
4467 # wait for any previous tasks in process
4468 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4469
4470 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
4471 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4472 operation_params = db_nslcmop.get("operationParams") or {}
4473 if operation_params.get("timeout_ns_terminate"):
4474 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
4475 stage[1] = "Getting nsr={} from db.".format(nsr_id)
4476 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4477
4478 db_nsr_update["operational-status"] = "terminating"
4479 db_nsr_update["config-status"] = "terminating"
4480 self._write_ns_status(
4481 nsr_id=nsr_id,
4482 ns_state="TERMINATING",
4483 current_operation="TERMINATING",
4484 current_operation_id=nslcmop_id,
4485 other_update=db_nsr_update,
4486 )
4487 self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
4488 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
4489 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
4490 return
4491
4492 stage[1] = "Getting vnf descriptors from db."
4493 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
4494 db_vnfrs_dict = {
4495 db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
4496 }
4497 db_vnfds_from_id = {}
4498 db_vnfds_from_member_index = {}
4499 # Loop over VNFRs
4500 for vnfr in db_vnfrs_list:
4501 vnfd_id = vnfr["vnfd-id"]
4502 if vnfd_id not in db_vnfds_from_id:
4503 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4504 db_vnfds_from_id[vnfd_id] = vnfd
4505 db_vnfds_from_member_index[
4506 vnfr["member-vnf-index-ref"]
4507 ] = db_vnfds_from_id[vnfd_id]
4508
4509 # Destroy individual execution environments when there are terminating primitives.
4510 # Rest of EE will be deleted at once
4511 # TODO - check before calling _destroy_N2VC
4512 # if not operation_params.get("skip_terminate_primitives"):#
4513 # or not vca.get("needed_terminate"):
4514 stage[0] = "Stage 2/3 execute terminating primitives."
4515 self.logger.debug(logging_text + stage[0])
4516 stage[1] = "Looking execution environment that needs terminate."
4517 self.logger.debug(logging_text + stage[1])
4518
4519 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
4520 config_descriptor = None
4521 vca_member_vnf_index = vca.get("member-vnf-index")
4522 vca_id = self.get_vca_id(
4523 db_vnfrs_dict.get(vca_member_vnf_index)
4524 if vca_member_vnf_index
4525 else None,
4526 db_nsr,
4527 )
4528 if not vca or not vca.get("ee_id"):
4529 continue
4530 if not vca.get("member-vnf-index"):
4531 # ns
4532 config_descriptor = db_nsr.get("ns-configuration")
4533 elif vca.get("vdu_id"):
4534 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4535 config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
4536 elif vca.get("kdu_name"):
4537 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4538 config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
4539 else:
4540 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4541 config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
4542 vca_type = vca.get("type")
4543 exec_terminate_primitives = not operation_params.get(
4544 "skip_terminate_primitives"
4545 ) and vca.get("needed_terminate")
4546 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4547 # pending native charms
4548 destroy_ee = (
4549 True if vca_type in ("helm", "helm-v3", "native_charm") else False
4550 )
4551 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4552 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4553 task = asyncio.ensure_future(
4554 self.destroy_N2VC(
4555 logging_text,
4556 db_nslcmop,
4557 vca,
4558 config_descriptor,
4559 vca_index,
4560 destroy_ee,
4561 exec_terminate_primitives,
4562 vca_id=vca_id,
4563 )
4564 )
4565 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
4566
4567 # wait for pending tasks of terminate primitives
4568 if tasks_dict_info:
4569 self.logger.debug(
4570 logging_text
4571 + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
4572 )
4573 error_list = await self._wait_for_tasks(
4574 logging_text,
4575 tasks_dict_info,
4576 min(self.timeout_charm_delete, timeout_ns_terminate),
4577 stage,
4578 nslcmop_id,
4579 )
4580 tasks_dict_info.clear()
4581 if error_list:
4582 return # raise LcmException("; ".join(error_list))
4583
4584 # remove All execution environments at once
4585 stage[0] = "Stage 3/3 delete all."
4586
4587 if nsr_deployed.get("VCA"):
4588 stage[1] = "Deleting all execution environments."
4589 self.logger.debug(logging_text + stage[1])
4590 vca_id = self.get_vca_id({}, db_nsr)
4591 task_delete_ee = asyncio.ensure_future(
4592 asyncio.wait_for(
4593 self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
4594 timeout=self.timeout_charm_delete,
4595 )
4596 )
4597 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4598 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
4599
4600 # Delete from k8scluster
4601 stage[1] = "Deleting KDUs."
4602 self.logger.debug(logging_text + stage[1])
4603 # print(nsr_deployed)
4604 for kdu in get_iterable(nsr_deployed, "K8s"):
4605 if not kdu or not kdu.get("kdu-instance"):
4606 continue
4607 kdu_instance = kdu.get("kdu-instance")
4608 if kdu.get("k8scluster-type") in self.k8scluster_map:
4609 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4610 vca_id = self.get_vca_id({}, db_nsr)
4611 task_delete_kdu_instance = asyncio.ensure_future(
4612 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
4613 cluster_uuid=kdu.get("k8scluster-uuid"),
4614 kdu_instance=kdu_instance,
4615 vca_id=vca_id,
4616 namespace=kdu.get("namespace"),
4617 )
4618 )
4619 else:
4620 self.logger.error(
4621 logging_text
4622 + "Unknown k8s deployment type {}".format(
4623 kdu.get("k8scluster-type")
4624 )
4625 )
4626 continue
4627 tasks_dict_info[
4628 task_delete_kdu_instance
4629 ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
4630
4631 # remove from RO
4632 stage[1] = "Deleting ns from VIM."
4633 if self.ng_ro:
4634 task_delete_ro = asyncio.ensure_future(
4635 self._terminate_ng_ro(
4636 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4637 )
4638 )
4639 else:
4640 task_delete_ro = asyncio.ensure_future(
4641 self._terminate_RO(
4642 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4643 )
4644 )
4645 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
4646
4647 # rest of staff will be done at finally
4648
4649 except (
4650 ROclient.ROClientException,
4651 DbException,
4652 LcmException,
4653 N2VCException,
4654 ) as e:
4655 self.logger.error(logging_text + "Exit Exception {}".format(e))
4656 exc = e
4657 except asyncio.CancelledError:
4658 self.logger.error(
4659 logging_text + "Cancelled Exception while '{}'".format(stage[1])
4660 )
4661 exc = "Operation was cancelled"
4662 except Exception as e:
4663 exc = traceback.format_exc()
4664 self.logger.critical(
4665 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
4666 exc_info=True,
4667 )
4668 finally:
4669 if exc:
4670 error_list.append(str(exc))
4671 try:
4672 # wait for pending tasks
4673 if tasks_dict_info:
4674 stage[1] = "Waiting for terminate pending tasks."
4675 self.logger.debug(logging_text + stage[1])
4676 error_list += await self._wait_for_tasks(
4677 logging_text,
4678 tasks_dict_info,
4679 timeout_ns_terminate,
4680 stage,
4681 nslcmop_id,
4682 )
4683 stage[1] = stage[2] = ""
4684 except asyncio.CancelledError:
4685 error_list.append("Cancelled")
4686 # TODO cancell all tasks
4687 except Exception as exc:
4688 error_list.append(str(exc))
4689 # update status at database
4690 if error_list:
4691 error_detail = "; ".join(error_list)
4692 # self.logger.error(logging_text + error_detail)
4693 error_description_nslcmop = "{} Detail: {}".format(
4694 stage[0], error_detail
4695 )
4696 error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
4697 nslcmop_id, stage[0]
4698 )
4699
4700 db_nsr_update["operational-status"] = "failed"
4701 db_nsr_update["detailed-status"] = (
4702 error_description_nsr + " Detail: " + error_detail
4703 )
4704 db_nslcmop_update["detailed-status"] = error_detail
4705 nslcmop_operation_state = "FAILED"
4706 ns_state = "BROKEN"
4707 else:
4708 error_detail = None
4709 error_description_nsr = error_description_nslcmop = None
4710 ns_state = "NOT_INSTANTIATED"
4711 db_nsr_update["operational-status"] = "terminated"
4712 db_nsr_update["detailed-status"] = "Done"
4713 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
4714 db_nslcmop_update["detailed-status"] = "Done"
4715 nslcmop_operation_state = "COMPLETED"
4716
4717 if db_nsr:
4718 self._write_ns_status(
4719 nsr_id=nsr_id,
4720 ns_state=ns_state,
4721 current_operation="IDLE",
4722 current_operation_id=None,
4723 error_description=error_description_nsr,
4724 error_detail=error_detail,
4725 other_update=db_nsr_update,
4726 )
4727 self._write_op_status(
4728 op_id=nslcmop_id,
4729 stage="",
4730 error_message=error_description_nslcmop,
4731 operation_state=nslcmop_operation_state,
4732 other_update=db_nslcmop_update,
4733 )
4734 if ns_state == "NOT_INSTANTIATED":
4735 try:
4736 self.db.set_list(
4737 "vnfrs",
4738 {"nsr-id-ref": nsr_id},
4739 {"_admin.nsState": "NOT_INSTANTIATED"},
4740 )
4741 except DbException as e:
4742 self.logger.warn(
4743 logging_text
4744 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4745 nsr_id, e
4746 )
4747 )
4748 if operation_params:
4749 autoremove = operation_params.get("autoremove", False)
4750 if nslcmop_operation_state:
4751 try:
4752 await self.msg.aiowrite(
4753 "ns",
4754 "terminated",
4755 {
4756 "nsr_id": nsr_id,
4757 "nslcmop_id": nslcmop_id,
4758 "operationState": nslcmop_operation_state,
4759 "autoremove": autoremove,
4760 },
4761 loop=self.loop,
4762 )
4763 except Exception as e:
4764 self.logger.error(
4765 logging_text + "kafka_write notification Exception {}".format(e)
4766 )
4767
4768 self.logger.debug(logging_text + "Exit")
4769 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4770
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, collecting a description of each failure.

        Progress ("done/total") is written to stage[1] and to the nslcmop record
        after every completion batch; on timeout the remaining tasks are reported
        as errors and the wait is abandoned (pending tasks are NOT cancelled here).

        :param logging_text: prefix for every log line of this wait
        :param created_tasks_info: dict mapping each task to its human description
        :param timeout: overall wait budget in seconds, measured from entry
        :param stage: [stage, step, VIM-status] list; index 1 is updated here
        :param nslcmop_id: id of the nslcmops record to update with progress
        :param nsr_id: when given, errors are also written to this nsrs record
        :return: list of error-detail strings (empty when every task succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # Remaining budget; may go negative, in which case wait returns at once
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # Every still-pending task is reported as timed out
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # Known/expected exception types are logged without traceback;
                    # anything else gets its full traceback logged
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4847
4848 @staticmethod
4849 def _map_primitive_params(primitive_desc, params, instantiation_params):
4850 """
4851 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4852 The default-value is used. If it is between < > it look for a value at instantiation_params
4853 :param primitive_desc: portion of VNFD/NSD that describes primitive
4854 :param params: Params provided by user
4855 :param instantiation_params: Instantiation params provided by user
4856 :return: a dictionary with the calculated params
4857 """
4858 calculated_params = {}
4859 for parameter in primitive_desc.get("parameter", ()):
4860 param_name = parameter["name"]
4861 if param_name in params:
4862 calculated_params[param_name] = params[param_name]
4863 elif "default-value" in parameter or "value" in parameter:
4864 if "value" in parameter:
4865 calculated_params[param_name] = parameter["value"]
4866 else:
4867 calculated_params[param_name] = parameter["default-value"]
4868 if (
4869 isinstance(calculated_params[param_name], str)
4870 and calculated_params[param_name].startswith("<")
4871 and calculated_params[param_name].endswith(">")
4872 ):
4873 if calculated_params[param_name][1:-1] in instantiation_params:
4874 calculated_params[param_name] = instantiation_params[
4875 calculated_params[param_name][1:-1]
4876 ]
4877 else:
4878 raise LcmException(
4879 "Parameter {} needed to execute primitive {} not provided".format(
4880 calculated_params[param_name], primitive_desc["name"]
4881 )
4882 )
4883 else:
4884 raise LcmException(
4885 "Parameter {} needed to execute primitive {} not provided".format(
4886 param_name, primitive_desc["name"]
4887 )
4888 )
4889
4890 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4891 calculated_params[param_name] = yaml.safe_dump(
4892 calculated_params[param_name], default_flow_style=True, width=256
4893 )
4894 elif isinstance(calculated_params[param_name], str) and calculated_params[
4895 param_name
4896 ].startswith("!!yaml "):
4897 calculated_params[param_name] = calculated_params[param_name][7:]
4898 if parameter.get("data-type") == "INTEGER":
4899 try:
4900 calculated_params[param_name] = int(calculated_params[param_name])
4901 except ValueError: # error converting string to int
4902 raise LcmException(
4903 "Parameter {} of primitive {} must be integer".format(
4904 param_name, primitive_desc["name"]
4905 )
4906 )
4907 elif parameter.get("data-type") == "BOOLEAN":
4908 calculated_params[param_name] = not (
4909 (str(calculated_params[param_name])).lower() == "false"
4910 )
4911
4912 # add always ns_config_info if primitive name is config
4913 if primitive_desc["name"] == "config":
4914 if "ns_config_info" in instantiation_params:
4915 calculated_params["ns_config_info"] = instantiation_params[
4916 "ns_config_info"
4917 ]
4918 return calculated_params
4919
4920 def _look_for_deployed_vca(
4921 self,
4922 deployed_vca,
4923 member_vnf_index,
4924 vdu_id,
4925 vdu_count_index,
4926 kdu_name=None,
4927 ee_descriptor_id=None,
4928 ):
4929 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4930 for vca in deployed_vca:
4931 if not vca:
4932 continue
4933 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4934 continue
4935 if (
4936 vdu_count_index is not None
4937 and vdu_count_index != vca["vdu_count_index"]
4938 ):
4939 continue
4940 if kdu_name and kdu_name != vca["kdu_name"]:
4941 continue
4942 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4943 continue
4944 break
4945 else:
4946 # vca_deployed not found
4947 raise LcmException(
4948 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4949 " is not deployed".format(
4950 member_vnf_index,
4951 vdu_id,
4952 vdu_count_index,
4953 kdu_name,
4954 ee_descriptor_id,
4955 )
4956 )
4957 # get ee_id
4958 ee_id = vca.get("ee_id")
4959 vca_type = vca.get(
4960 "type", "lxc_proxy_charm"
4961 ) # default value for backward compatibility - proxy charm
4962 if not ee_id:
4963 raise LcmException(
4964 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4965 "execution environment".format(
4966 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4967 )
4968 )
4969 return ee_id, vca_type
4970
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive on an execution environment, retrying on failure.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive (action) name; "config" gets its params wrapped
        :param primitive_params: parameters passed to the primitive
        :param retries: number of additional attempts after a failed one
        :param retries_interval: seconds to wait between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout_primitive
        :param vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
        :param db_dict: db path info forwarded to the connector for status updates
        :param vca_id: id of the VCA that executes the primitive
        :return: tuple (operation state, output or error detail)
        """
        try:
            if primitive == "config":
                # the "config" primitive expects its parameters nested under "params"
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"  # backward compatibility

            # retries counts remaining attempts; the loop always runs at least once
            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        # retries exhausted: report the failure to the caller
                        if isinstance(e, asyncio.TimeoutError):
                            e = N2VCException(
                                message="Timed out waiting for action to complete"
                            )
                        return "FAILED", getattr(e, "message", repr(e))

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
5031
5032 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5033 """
5034 Updating the vca_status with latest juju information in nsrs record
5035 :param: nsr_id: Id of the nsr
5036 :param: nslcmop_id: Id of the nslcmop
5037 :return: None
5038 """
5039
5040 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5041 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5042 vca_id = self.get_vca_id({}, db_nsr)
5043 if db_nsr["_admin"]["deployed"]["K8s"]:
5044 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5045 cluster_uuid, kdu_instance, cluster_type = (
5046 k8s["k8scluster-uuid"],
5047 k8s["kdu-instance"],
5048 k8s["k8scluster-type"],
5049 )
5050 await self._on_update_k8s_db(
5051 cluster_uuid=cluster_uuid,
5052 kdu_instance=kdu_instance,
5053 filter={"_id": nsr_id},
5054 vca_id=vca_id,
5055 cluster_type=cluster_type,
5056 )
5057 else:
5058 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5059 table, filter = "nsrs", {"_id": nsr_id}
5060 path = "_admin.deployed.VCA.{}.".format(vca_index)
5061 await self._on_update_n2vc_db(table, filter, path, {})
5062
5063 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5064 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5065
5066 async def action(self, nsr_id, nslcmop_id):
5067 # Try to lock HA task here
5068 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5069 if not task_is_locked_by_me:
5070 return
5071
5072 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
5073 self.logger.debug(logging_text + "Enter")
5074 # get all needed from database
5075 db_nsr = None
5076 db_nslcmop = None
5077 db_nsr_update = {}
5078 db_nslcmop_update = {}
5079 nslcmop_operation_state = None
5080 error_description_nslcmop = None
5081 exc = None
5082 try:
5083 # wait for any previous tasks in process
5084 step = "Waiting for previous operations to terminate"
5085 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5086
5087 self._write_ns_status(
5088 nsr_id=nsr_id,
5089 ns_state=None,
5090 current_operation="RUNNING ACTION",
5091 current_operation_id=nslcmop_id,
5092 )
5093
5094 step = "Getting information from database"
5095 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5096 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5097 if db_nslcmop["operationParams"].get("primitive_params"):
5098 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
5099 db_nslcmop["operationParams"]["primitive_params"]
5100 )
5101
5102 nsr_deployed = db_nsr["_admin"].get("deployed")
5103 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
5104 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
5105 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
5106 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
5107 primitive = db_nslcmop["operationParams"]["primitive"]
5108 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
5109 timeout_ns_action = db_nslcmop["operationParams"].get(
5110 "timeout_ns_action", self.timeout_primitive
5111 )
5112
5113 if vnf_index:
5114 step = "Getting vnfr from database"
5115 db_vnfr = self.db.get_one(
5116 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5117 )
5118 if db_vnfr.get("kdur"):
5119 kdur_list = []
5120 for kdur in db_vnfr["kdur"]:
5121 if kdur.get("additionalParams"):
5122 kdur["additionalParams"] = json.loads(
5123 kdur["additionalParams"]
5124 )
5125 kdur_list.append(kdur)
5126 db_vnfr["kdur"] = kdur_list
5127 step = "Getting vnfd from database"
5128 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5129
5130 # Sync filesystem before running a primitive
5131 self.fs.sync(db_vnfr["vnfd-id"])
5132 else:
5133 step = "Getting nsd from database"
5134 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
5135
5136 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5137 # for backward compatibility
5138 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5139 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5140 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5141 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5142
5143 # look for primitive
5144 config_primitive_desc = descriptor_configuration = None
5145 if vdu_id:
5146 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
5147 elif kdu_name:
5148 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
5149 elif vnf_index:
5150 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
5151 else:
5152 descriptor_configuration = db_nsd.get("ns-configuration")
5153
5154 if descriptor_configuration and descriptor_configuration.get(
5155 "config-primitive"
5156 ):
5157 for config_primitive in descriptor_configuration["config-primitive"]:
5158 if config_primitive["name"] == primitive:
5159 config_primitive_desc = config_primitive
5160 break
5161
5162 if not config_primitive_desc:
5163 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
5164 raise LcmException(
5165 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5166 primitive
5167 )
5168 )
5169 primitive_name = primitive
5170 ee_descriptor_id = None
5171 else:
5172 primitive_name = config_primitive_desc.get(
5173 "execution-environment-primitive", primitive
5174 )
5175 ee_descriptor_id = config_primitive_desc.get(
5176 "execution-environment-ref"
5177 )
5178
5179 if vnf_index:
5180 if vdu_id:
5181 vdur = next(
5182 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
5183 )
5184 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
5185 elif kdu_name:
5186 kdur = next(
5187 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5188 )
5189 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
5190 else:
5191 desc_params = parse_yaml_strings(
5192 db_vnfr.get("additionalParamsForVnf")
5193 )
5194 else:
5195 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
5196 if kdu_name and get_configuration(db_vnfd, kdu_name):
5197 kdu_configuration = get_configuration(db_vnfd, kdu_name)
5198 actions = set()
5199 for primitive in kdu_configuration.get("initial-config-primitive", []):
5200 actions.add(primitive["name"])
5201 for primitive in kdu_configuration.get("config-primitive", []):
5202 actions.add(primitive["name"])
5203 kdu = find_in_list(
5204 nsr_deployed["K8s"],
5205 lambda kdu: kdu_name == kdu["kdu-name"]
5206 and kdu["member-vnf-index"] == vnf_index,
5207 )
5208 kdu_action = (
5209 True
5210 if primitive_name in actions
5211 and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
5212 else False
5213 )
5214
5215 # TODO check if ns is in a proper status
5216 if kdu_name and (
5217 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5218 ):
5219 # kdur and desc_params already set from before
5220 if primitive_params:
5221 desc_params.update(primitive_params)
5222 # TODO Check if we will need something at vnf level
5223 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5224 if (
5225 kdu_name == kdu["kdu-name"]
5226 and kdu["member-vnf-index"] == vnf_index
5227 ):
5228 break
5229 else:
5230 raise LcmException(
5231 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5232 )
5233
5234 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5235 msg = "unknown k8scluster-type '{}'".format(
5236 kdu.get("k8scluster-type")
5237 )
5238 raise LcmException(msg)
5239
5240 db_dict = {
5241 "collection": "nsrs",
5242 "filter": {"_id": nsr_id},
5243 "path": "_admin.deployed.K8s.{}".format(index),
5244 }
5245 self.logger.debug(
5246 logging_text
5247 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5248 )
5249 step = "Executing kdu {}".format(primitive_name)
5250 if primitive_name == "upgrade":
5251 if desc_params.get("kdu_model"):
5252 kdu_model = desc_params.get("kdu_model")
5253 del desc_params["kdu_model"]
5254 else:
5255 kdu_model = kdu.get("kdu-model")
5256 parts = kdu_model.split(sep=":")
5257 if len(parts) == 2:
5258 kdu_model = parts[0]
5259 if desc_params.get("kdu_atomic_upgrade"):
5260 atomic_upgrade = desc_params.get("kdu_atomic_upgrade").lower() in ("yes", "true", "1")
5261 del desc_params["kdu_atomic_upgrade"]
5262 else:
5263 atomic_upgrade = True
5264
5265 detailed_status = await asyncio.wait_for(
5266 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5267 cluster_uuid=kdu.get("k8scluster-uuid"),
5268 kdu_instance=kdu.get("kdu-instance"),
5269 atomic=atomic_upgrade,
5270 kdu_model=kdu_model,
5271 params=desc_params,
5272 db_dict=db_dict,
5273 timeout=timeout_ns_action,
5274 ),
5275 timeout=timeout_ns_action + 10,
5276 )
5277 self.logger.debug(
5278 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5279 )
5280 elif primitive_name == "rollback":
5281 detailed_status = await asyncio.wait_for(
5282 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5283 cluster_uuid=kdu.get("k8scluster-uuid"),
5284 kdu_instance=kdu.get("kdu-instance"),
5285 db_dict=db_dict,
5286 ),
5287 timeout=timeout_ns_action,
5288 )
5289 elif primitive_name == "status":
5290 detailed_status = await asyncio.wait_for(
5291 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5292 cluster_uuid=kdu.get("k8scluster-uuid"),
5293 kdu_instance=kdu.get("kdu-instance"),
5294 vca_id=vca_id,
5295 ),
5296 timeout=timeout_ns_action,
5297 )
5298 else:
5299 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5300 kdu["kdu-name"], nsr_id
5301 )
5302 params = self._map_primitive_params(
5303 config_primitive_desc, primitive_params, desc_params
5304 )
5305
5306 detailed_status = await asyncio.wait_for(
5307 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5308 cluster_uuid=kdu.get("k8scluster-uuid"),
5309 kdu_instance=kdu_instance,
5310 primitive_name=primitive_name,
5311 params=params,
5312 db_dict=db_dict,
5313 timeout=timeout_ns_action,
5314 vca_id=vca_id,
5315 ),
5316 timeout=timeout_ns_action,
5317 )
5318
5319 if detailed_status:
5320 nslcmop_operation_state = "COMPLETED"
5321 else:
5322 detailed_status = ""
5323 nslcmop_operation_state = "FAILED"
5324 else:
5325 ee_id, vca_type = self._look_for_deployed_vca(
5326 nsr_deployed["VCA"],
5327 member_vnf_index=vnf_index,
5328 vdu_id=vdu_id,
5329 vdu_count_index=vdu_count_index,
5330 ee_descriptor_id=ee_descriptor_id,
5331 )
5332 for vca_index, vca_deployed in enumerate(
5333 db_nsr["_admin"]["deployed"]["VCA"]
5334 ):
5335 if vca_deployed.get("member-vnf-index") == vnf_index:
5336 db_dict = {
5337 "collection": "nsrs",
5338 "filter": {"_id": nsr_id},
5339 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5340 }
5341 break
5342 (
5343 nslcmop_operation_state,
5344 detailed_status,
5345 ) = await self._ns_execute_primitive(
5346 ee_id,
5347 primitive=primitive_name,
5348 primitive_params=self._map_primitive_params(
5349 config_primitive_desc, primitive_params, desc_params
5350 ),
5351 timeout=timeout_ns_action,
5352 vca_type=vca_type,
5353 db_dict=db_dict,
5354 vca_id=vca_id,
5355 )
5356
5357 db_nslcmop_update["detailed-status"] = detailed_status
5358 error_description_nslcmop = (
5359 detailed_status if nslcmop_operation_state == "FAILED" else ""
5360 )
5361 self.logger.debug(
5362 logging_text
5363 + "Done with result {} {}".format(
5364 nslcmop_operation_state, detailed_status
5365 )
5366 )
5367 return # database update is called inside finally
5368
5369 except (DbException, LcmException, N2VCException, K8sException) as e:
5370 self.logger.error(logging_text + "Exit Exception {}".format(e))
5371 exc = e
5372 except asyncio.CancelledError:
5373 self.logger.error(
5374 logging_text + "Cancelled Exception while '{}'".format(step)
5375 )
5376 exc = "Operation was cancelled"
5377 except asyncio.TimeoutError:
5378 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5379 exc = "Timeout"
5380 except Exception as e:
5381 exc = traceback.format_exc()
5382 self.logger.critical(
5383 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5384 exc_info=True,
5385 )
5386 finally:
5387 if exc:
5388 db_nslcmop_update[
5389 "detailed-status"
5390 ] = (
5391 detailed_status
5392 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5393 nslcmop_operation_state = "FAILED"
5394 if db_nsr:
5395 self._write_ns_status(
5396 nsr_id=nsr_id,
5397 ns_state=db_nsr[
5398 "nsState"
5399 ], # TODO check if degraded. For the moment use previous status
5400 current_operation="IDLE",
5401 current_operation_id=None,
5402 # error_description=error_description_nsr,
5403 # error_detail=error_detail,
5404 other_update=db_nsr_update,
5405 )
5406
5407 self._write_op_status(
5408 op_id=nslcmop_id,
5409 stage="",
5410 error_message=error_description_nslcmop,
5411 operation_state=nslcmop_operation_state,
5412 other_update=db_nslcmop_update,
5413 )
5414
5415 if nslcmop_operation_state:
5416 try:
5417 await self.msg.aiowrite(
5418 "ns",
5419 "actioned",
5420 {
5421 "nsr_id": nsr_id,
5422 "nslcmop_id": nslcmop_id,
5423 "operationState": nslcmop_operation_state,
5424 },
5425 loop=self.loop,
5426 )
5427 except Exception as e:
5428 self.logger.error(
5429 logging_text + "kafka_write notification Exception {}".format(e)
5430 )
5431 self.logger.debug(logging_text + "Exit")
5432 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5433 return nslcmop_operation_state, detailed_status
5434
5435 async def terminate_vdus(
5436 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5437 ):
5438 """This method terminates VDUs
5439
5440 Args:
5441 db_vnfr: VNF instance record
5442 member_vnf_index: VNF index to identify the VDUs to be removed
5443 db_nsr: NS instance record
5444 update_db_nslcmops: Nslcmop update record
5445 """
5446 vca_scaling_info = []
5447 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5448 scaling_info["scaling_direction"] = "IN"
5449 scaling_info["vdu-delete"] = {}
5450 scaling_info["kdu-delete"] = {}
5451 db_vdur = db_vnfr.get("vdur")
5452 vdur_list = copy(db_vdur)
5453 count_index = 0
5454 for index, vdu in enumerate(vdur_list):
5455 vca_scaling_info.append(
5456 {
5457 "osm_vdu_id": vdu["vdu-id-ref"],
5458 "member-vnf-index": member_vnf_index,
5459 "type": "delete",
5460 "vdu_index": count_index,
5461 }
5462 )
5463 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5464 scaling_info["vdu"].append(
5465 {
5466 "name": vdu.get("name") or vdu.get("vdu-name"),
5467 "vdu_id": vdu["vdu-id-ref"],
5468 "interface": [],
5469 }
5470 )
5471 for interface in vdu["interfaces"]:
5472 scaling_info["vdu"][index]["interface"].append(
5473 {
5474 "name": interface["name"],
5475 "ip_address": interface["ip-address"],
5476 "mac_address": interface.get("mac-address"),
5477 }
5478 )
5479 self.logger.info("NS update scaling info{}".format(scaling_info))
5480 stage[2] = "Terminating VDUs"
5481 if scaling_info.get("vdu-delete"):
5482 # scale_process = "RO"
5483 if self.ro_config.get("ng"):
5484 await self._scale_ng_ro(
5485 logging_text,
5486 db_nsr,
5487 update_db_nslcmops,
5488 db_vnfr,
5489 scaling_info,
5490 stage,
5491 )
5492
    async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
        """This method is to Remove VNF instances from NS.

        Terminates the VNF's VDUs through RO, detaches the vnfr from the NS
        constituent list and deletes the vnfr record. Refuses to remove the
        last remaining VNF of the NS.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id of update
            vnf_instance_id: id of the VNF instance to be removed

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            db_nsr_update = {}
            logging_text = "Task ns={} update ".format(nsr_id)
            # count existing vnfrs: at least one VNF must remain in the NS
            check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
            self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
            if check_vnfr_count > 1:
                stage = ["", "", ""]
                step = "Getting nslcmop from database"
                self.logger.debug(
                    step + " after having waited for previous tasks to be completed"
                )
                # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
                member_vnf_index = db_vnfr["member-vnf-index-ref"]
                """ db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """

                update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                # release RO resources of every VDU of this VNF first
                await self.terminate_vdus(
                    db_vnfr,
                    member_vnf_index,
                    db_nsr,
                    update_db_nslcmops,
                    stage,
                    logging_text,
                )

                # detach the vnfr from the NS constituent list (in-place removal)
                constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
                constituent_vnfr.remove(db_vnfr.get("_id"))
                db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
                    "constituent-vnfr-ref"
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
                # NOTE(review): update_db_2 is invoked twice with identical
                # arguments; the second call looks redundant — confirm.
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                return "COMPLETED", "Done"
            else:
                step = "Terminate VNF Failed with"
                raise LcmException(
                    "{} Cannot terminate the last VNF in this NS.".format(
                        vnf_instance_id
                    )
                )
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error removing VNF {}".format(e))
            return "FAILED", "Error removing VNF {}".format(e)
5553
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the current VDUs, rebuilds the vnfr (connection points and
        vdur) from the latest descriptor revision, and instantiates the new
        resources through RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the descriptor ext-cpd list
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is precomputed by the caller in the operation params
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                # NOTE(review): cloud_init_list is rebuilt each iteration and
                # never read afterwards — looks like dead code; confirm.
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5679
5680 async def _ns_charm_upgrade(
5681 self,
5682 ee_id,
5683 charm_id,
5684 charm_type,
5685 path,
5686 timeout: float = None,
5687 ) -> (str, str):
5688 """This method upgrade charms in VNF instances
5689
5690 Args:
5691 ee_id: Execution environment id
5692 path: Local path to the charm
5693 charm_id: charm-id
5694 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5695 timeout: (Float) Timeout for the ns update operation
5696
5697 Returns:
5698 result: (str, str) COMPLETED/FAILED, details
5699 """
5700 try:
5701 charm_type = charm_type or "lxc_proxy_charm"
5702 output = await self.vca_map[charm_type].upgrade_charm(
5703 ee_id=ee_id,
5704 path=path,
5705 charm_id=charm_id,
5706 charm_type=charm_type,
5707 timeout=timeout or self.timeout_ns_update,
5708 )
5709
5710 if output:
5711 return "COMPLETED", output
5712
5713 except (LcmException, asyncio.CancelledError):
5714 raise
5715
5716 except Exception as e:
5717
5718 self.logger.debug("Error upgrading charm {}".format(path))
5719
5720 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5721
5722 async def update(self, nsr_id, nslcmop_id):
5723 """Update NS according to different update types
5724
5725 This method performs upgrade of VNF instances then updates the revision
5726 number in VNF record
5727
5728 Args:
5729 nsr_id: Network service will be updated
5730 nslcmop_id: ns lcm operation id
5731
5732 Returns:
5733 It may raise DbException, LcmException, N2VCException, K8sException
5734
5735 """
5736 # Try to lock HA task here
5737 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5738 if not task_is_locked_by_me:
5739 return
5740
5741 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5742 self.logger.debug(logging_text + "Enter")
5743
5744 # Set the required variables to be filled up later
5745 db_nsr = None
5746 db_nslcmop_update = {}
5747 vnfr_update = {}
5748 nslcmop_operation_state = None
5749 db_nsr_update = {}
5750 error_description_nslcmop = ""
5751 exc = None
5752 change_type = "updated"
5753 detailed_status = ""
5754
5755 try:
5756 # wait for any previous tasks in process
5757 step = "Waiting for previous operations to terminate"
5758 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5759 self._write_ns_status(
5760 nsr_id=nsr_id,
5761 ns_state=None,
5762 current_operation="UPDATING",
5763 current_operation_id=nslcmop_id,
5764 )
5765
5766 step = "Getting nslcmop from database"
5767 db_nslcmop = self.db.get_one(
5768 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5769 )
5770 update_type = db_nslcmop["operationParams"]["updateType"]
5771
5772 step = "Getting nsr from database"
5773 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5774 old_operational_status = db_nsr["operational-status"]
5775 db_nsr_update["operational-status"] = "updating"
5776 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5777 nsr_deployed = db_nsr["_admin"].get("deployed")
5778
5779 if update_type == "CHANGE_VNFPKG":
5780
5781 # Get the input parameters given through update request
5782 vnf_instance_id = db_nslcmop["operationParams"][
5783 "changeVnfPackageData"
5784 ].get("vnfInstanceId")
5785
5786 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5787 "vnfdId"
5788 )
5789 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5790
5791 step = "Getting vnfr from database"
5792 db_vnfr = self.db.get_one(
5793 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5794 )
5795
5796 step = "Getting vnfds from database"
5797 # Latest VNFD
5798 latest_vnfd = self.db.get_one(
5799 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5800 )
5801 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5802
5803 # Current VNFD
5804 current_vnf_revision = db_vnfr.get("revision", 1)
5805 current_vnfd = self.db.get_one(
5806 "vnfds_revisions",
5807 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5808 fail_on_empty=False,
5809 )
5810 # Charm artifact paths will be filled up later
5811 (
5812 current_charm_artifact_path,
5813 target_charm_artifact_path,
5814 charm_artifact_paths,
5815 ) = ([], [], [])
5816
5817 step = "Checking if revision has changed in VNFD"
5818 if current_vnf_revision != latest_vnfd_revision:
5819
5820 change_type = "policy_updated"
5821
5822 # There is new revision of VNFD, update operation is required
5823 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5824 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5825
5826 step = "Removing the VNFD packages if they exist in the local path"
5827 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5828 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5829
5830 step = "Get the VNFD packages from FSMongo"
5831 self.fs.sync(from_path=latest_vnfd_path)
5832 self.fs.sync(from_path=current_vnfd_path)
5833
5834 step = (
5835 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5836 )
5837 base_folder = latest_vnfd["_admin"]["storage"]
5838
5839 for charm_index, charm_deployed in enumerate(
5840 get_iterable(nsr_deployed, "VCA")
5841 ):
5842 vnf_index = db_vnfr.get("member-vnf-index-ref")
5843
5844 # Getting charm-id and charm-type
5845 if charm_deployed.get("member-vnf-index") == vnf_index:
5846 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5847 charm_type = charm_deployed.get("type")
5848
5849 # Getting ee-id
5850 ee_id = charm_deployed.get("ee_id")
5851
5852 step = "Getting descriptor config"
5853 descriptor_config = get_configuration(
5854 current_vnfd, current_vnfd["id"]
5855 )
5856
5857 if "execution-environment-list" in descriptor_config:
5858 ee_list = descriptor_config.get(
5859 "execution-environment-list", []
5860 )
5861 else:
5862 ee_list = []
5863
5864 # There could be several charm used in the same VNF
5865 for ee_item in ee_list:
5866 if ee_item.get("juju"):
5867
5868 step = "Getting charm name"
5869 charm_name = ee_item["juju"].get("charm")
5870
5871 step = "Setting Charm artifact paths"
5872 current_charm_artifact_path.append(
5873 get_charm_artifact_path(
5874 base_folder,
5875 charm_name,
5876 charm_type,
5877 current_vnf_revision,
5878 )
5879 )
5880 target_charm_artifact_path.append(
5881 get_charm_artifact_path(
5882 base_folder,
5883 charm_name,
5884 charm_type,
5885 latest_vnfd_revision,
5886 )
5887 )
5888
5889 charm_artifact_paths = zip(
5890 current_charm_artifact_path, target_charm_artifact_path
5891 )
5892
5893 step = "Checking if software version has changed in VNFD"
5894 if find_software_version(current_vnfd) != find_software_version(
5895 latest_vnfd
5896 ):
5897
5898 step = "Checking if existing VNF has charm"
5899 for current_charm_path, target_charm_path in list(
5900 charm_artifact_paths
5901 ):
5902 if current_charm_path:
5903 raise LcmException(
5904 "Software version change is not supported as VNF instance {} has charm.".format(
5905 vnf_instance_id
5906 )
5907 )
5908
5909 # There is no change in the charm package, then redeploy the VNF
5910 # based on new descriptor
5911 step = "Redeploying VNF"
5912 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5913 (result, detailed_status) = await self._ns_redeploy_vnf(
5914 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5915 )
5916 if result == "FAILED":
5917 nslcmop_operation_state = result
5918 error_description_nslcmop = detailed_status
5919 db_nslcmop_update["detailed-status"] = detailed_status
5920 self.logger.debug(
5921 logging_text
5922 + " step {} Done with result {} {}".format(
5923 step, nslcmop_operation_state, detailed_status
5924 )
5925 )
5926
5927 else:
5928 step = "Checking if any charm package has changed or not"
5929 for current_charm_path, target_charm_path in list(
5930 charm_artifact_paths
5931 ):
5932 if (
5933 current_charm_path
5934 and target_charm_path
5935 and self.check_charm_hash_changed(
5936 current_charm_path, target_charm_path
5937 )
5938 ):
5939
5940 step = "Checking whether VNF uses juju bundle"
5941 if check_juju_bundle_existence(current_vnfd):
5942
5943 raise LcmException(
5944 "Charm upgrade is not supported for the instance which"
5945 " uses juju-bundle: {}".format(
5946 check_juju_bundle_existence(current_vnfd)
5947 )
5948 )
5949
5950 step = "Upgrading Charm"
5951 (
5952 result,
5953 detailed_status,
5954 ) = await self._ns_charm_upgrade(
5955 ee_id=ee_id,
5956 charm_id=charm_id,
5957 charm_type=charm_type,
5958 path=self.fs.path + target_charm_path,
5959 timeout=timeout_seconds,
5960 )
5961
5962 if result == "FAILED":
5963 nslcmop_operation_state = result
5964 error_description_nslcmop = detailed_status
5965
5966 db_nslcmop_update["detailed-status"] = detailed_status
5967 self.logger.debug(
5968 logging_text
5969 + " step {} Done with result {} {}".format(
5970 step, nslcmop_operation_state, detailed_status
5971 )
5972 )
5973
5974 step = "Updating policies"
5975 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5976 result = "COMPLETED"
5977 detailed_status = "Done"
5978 db_nslcmop_update["detailed-status"] = "Done"
5979
5980 # If nslcmop_operation_state is None, so any operation is not failed.
5981 if not nslcmop_operation_state:
5982 nslcmop_operation_state = "COMPLETED"
5983
5984 # If update CHANGE_VNFPKG nslcmop_operation is successful
5985 # vnf revision need to be updated
5986 vnfr_update["revision"] = latest_vnfd_revision
5987 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5988
5989 self.logger.debug(
5990 logging_text
5991 + " task Done with result {} {}".format(
5992 nslcmop_operation_state, detailed_status
5993 )
5994 )
5995 elif update_type == "REMOVE_VNF":
5996 # This part is included in https://osm.etsi.org/gerrit/11876
5997 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5998 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5999 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6000 step = "Removing VNF"
6001 (result, detailed_status) = await self.remove_vnf(
6002 nsr_id, nslcmop_id, vnf_instance_id
6003 )
6004 if result == "FAILED":
6005 nslcmop_operation_state = result
6006 error_description_nslcmop = detailed_status
6007 db_nslcmop_update["detailed-status"] = detailed_status
6008 change_type = "vnf_terminated"
6009 if not nslcmop_operation_state:
6010 nslcmop_operation_state = "COMPLETED"
6011 self.logger.debug(
6012 logging_text
6013 + " task Done with result {} {}".format(
6014 nslcmop_operation_state, detailed_status
6015 )
6016 )
6017
6018 elif update_type == "OPERATE_VNF":
6019 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6020 "vnfInstanceId"
6021 ]
6022 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6023 "changeStateTo"
6024 ]
6025 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6026 "additionalParam"
6027 ]
6028 (result, detailed_status) = await self.rebuild_start_stop(
6029 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6030 )
6031 if result == "FAILED":
6032 nslcmop_operation_state = result
6033 error_description_nslcmop = detailed_status
6034 db_nslcmop_update["detailed-status"] = detailed_status
6035 if not nslcmop_operation_state:
6036 nslcmop_operation_state = "COMPLETED"
6037 self.logger.debug(
6038 logging_text
6039 + " task Done with result {} {}".format(
6040 nslcmop_operation_state, detailed_status
6041 )
6042 )
6043
6044 # If nslcmop_operation_state is None, so any operation is not failed.
6045 # All operations are executed in overall.
6046 if not nslcmop_operation_state:
6047 nslcmop_operation_state = "COMPLETED"
6048 db_nsr_update["operational-status"] = old_operational_status
6049
6050 except (DbException, LcmException, N2VCException, K8sException) as e:
6051 self.logger.error(logging_text + "Exit Exception {}".format(e))
6052 exc = e
6053 except asyncio.CancelledError:
6054 self.logger.error(
6055 logging_text + "Cancelled Exception while '{}'".format(step)
6056 )
6057 exc = "Operation was cancelled"
6058 except asyncio.TimeoutError:
6059 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6060 exc = "Timeout"
6061 except Exception as e:
6062 exc = traceback.format_exc()
6063 self.logger.critical(
6064 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6065 exc_info=True,
6066 )
6067 finally:
6068 if exc:
6069 db_nslcmop_update[
6070 "detailed-status"
6071 ] = (
6072 detailed_status
6073 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6074 nslcmop_operation_state = "FAILED"
6075 db_nsr_update["operational-status"] = old_operational_status
6076 if db_nsr:
6077 self._write_ns_status(
6078 nsr_id=nsr_id,
6079 ns_state=db_nsr["nsState"],
6080 current_operation="IDLE",
6081 current_operation_id=None,
6082 other_update=db_nsr_update,
6083 )
6084
6085 self._write_op_status(
6086 op_id=nslcmop_id,
6087 stage="",
6088 error_message=error_description_nslcmop,
6089 operation_state=nslcmop_operation_state,
6090 other_update=db_nslcmop_update,
6091 )
6092
6093 if nslcmop_operation_state:
6094 try:
6095 msg = {
6096 "nsr_id": nsr_id,
6097 "nslcmop_id": nslcmop_id,
6098 "operationState": nslcmop_operation_state,
6099 }
6100 if change_type in ("vnf_terminated", "policy_updated"):
6101 msg.update({"vnf_member_index": member_vnf_index})
6102 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6103 except Exception as e:
6104 self.logger.error(
6105 logging_text + "kafka_write notification Exception {}".format(e)
6106 )
6107 self.logger.debug(logging_text + "Exit")
6108 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6109 return nslcmop_operation_state, detailed_status
6110
6111 async def scale(self, nsr_id, nslcmop_id):
6112 # Try to lock HA task here
6113 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6114 if not task_is_locked_by_me:
6115 return
6116
6117 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6118 stage = ["", "", ""]
6119 tasks_dict_info = {}
6120 # ^ stage, step, VIM progress
6121 self.logger.debug(logging_text + "Enter")
6122 # get all needed from database
6123 db_nsr = None
6124 db_nslcmop_update = {}
6125 db_nsr_update = {}
6126 exc = None
6127 # in case of error, indicates what part of scale was failed to put nsr at error status
6128 scale_process = None
6129 old_operational_status = ""
6130 old_config_status = ""
6131 nsi_id = None
6132 try:
6133 # wait for any previous tasks in process
6134 step = "Waiting for previous operations to terminate"
6135 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6136 self._write_ns_status(
6137 nsr_id=nsr_id,
6138 ns_state=None,
6139 current_operation="SCALING",
6140 current_operation_id=nslcmop_id,
6141 )
6142
6143 step = "Getting nslcmop from database"
6144 self.logger.debug(
6145 step + " after having waited for previous tasks to be completed"
6146 )
6147 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6148
6149 step = "Getting nsr from database"
6150 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6151 old_operational_status = db_nsr["operational-status"]
6152 old_config_status = db_nsr["config-status"]
6153
6154 step = "Parsing scaling parameters"
6155 db_nsr_update["operational-status"] = "scaling"
6156 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6157 nsr_deployed = db_nsr["_admin"].get("deployed")
6158
6159 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6160 "scaleByStepData"
6161 ]["member-vnf-index"]
6162 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6163 "scaleByStepData"
6164 ]["scaling-group-descriptor"]
6165 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6166 # for backward compatibility
6167 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6168 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6169 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6170 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6171
6172 step = "Getting vnfr from database"
6173 db_vnfr = self.db.get_one(
6174 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6175 )
6176
6177 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6178
6179 step = "Getting vnfd from database"
6180 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6181
6182 base_folder = db_vnfd["_admin"]["storage"]
6183
6184 step = "Getting scaling-group-descriptor"
6185 scaling_descriptor = find_in_list(
6186 get_scaling_aspect(db_vnfd),
6187 lambda scale_desc: scale_desc["name"] == scaling_group,
6188 )
6189 if not scaling_descriptor:
6190 raise LcmException(
6191 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6192 "at vnfd:scaling-group-descriptor".format(scaling_group)
6193 )
6194
6195 step = "Sending scale order to VIM"
6196 # TODO check if ns is in a proper status
6197 nb_scale_op = 0
6198 if not db_nsr["_admin"].get("scaling-group"):
6199 self.update_db_2(
6200 "nsrs",
6201 nsr_id,
6202 {
6203 "_admin.scaling-group": [
6204 {"name": scaling_group, "nb-scale-op": 0}
6205 ]
6206 },
6207 )
6208 admin_scale_index = 0
6209 else:
6210 for admin_scale_index, admin_scale_info in enumerate(
6211 db_nsr["_admin"]["scaling-group"]
6212 ):
6213 if admin_scale_info["name"] == scaling_group:
6214 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6215 break
6216 else: # not found, set index one plus last element and add new entry with the name
6217 admin_scale_index += 1
6218 db_nsr_update[
6219 "_admin.scaling-group.{}.name".format(admin_scale_index)
6220 ] = scaling_group
6221
6222 vca_scaling_info = []
6223 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6224 if scaling_type == "SCALE_OUT":
6225 if "aspect-delta-details" not in scaling_descriptor:
6226 raise LcmException(
6227 "Aspect delta details not fount in scaling descriptor {}".format(
6228 scaling_descriptor["name"]
6229 )
6230 )
6231 # count if max-instance-count is reached
6232 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6233
6234 scaling_info["scaling_direction"] = "OUT"
6235 scaling_info["vdu-create"] = {}
6236 scaling_info["kdu-create"] = {}
6237 for delta in deltas:
6238 for vdu_delta in delta.get("vdu-delta", {}):
6239 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6240 # vdu_index also provides the number of instance of the targeted vdu
6241 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6242 cloud_init_text = self._get_vdu_cloud_init_content(
6243 vdud, db_vnfd
6244 )
6245 if cloud_init_text:
6246 additional_params = (
6247 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6248 or {}
6249 )
6250 cloud_init_list = []
6251
6252 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6253 max_instance_count = 10
6254 if vdu_profile and "max-number-of-instances" in vdu_profile:
6255 max_instance_count = vdu_profile.get(
6256 "max-number-of-instances", 10
6257 )
6258
6259 default_instance_num = get_number_of_instances(
6260 db_vnfd, vdud["id"]
6261 )
6262 instances_number = vdu_delta.get("number-of-instances", 1)
6263 nb_scale_op += instances_number
6264
6265 new_instance_count = nb_scale_op + default_instance_num
6266 # Control if new count is over max and vdu count is less than max.
6267 # Then assign new instance count
6268 if new_instance_count > max_instance_count > vdu_count:
6269 instances_number = new_instance_count - max_instance_count
6270 else:
6271 instances_number = instances_number
6272
6273 if new_instance_count > max_instance_count:
6274 raise LcmException(
6275 "reached the limit of {} (max-instance-count) "
6276 "scaling-out operations for the "
6277 "scaling-group-descriptor '{}'".format(
6278 nb_scale_op, scaling_group
6279 )
6280 )
6281 for x in range(vdu_delta.get("number-of-instances", 1)):
6282 if cloud_init_text:
6283 # TODO Information of its own ip is not available because db_vnfr is not updated.
6284 additional_params["OSM"] = get_osm_params(
6285 db_vnfr, vdu_delta["id"], vdu_index + x
6286 )
6287 cloud_init_list.append(
6288 self._parse_cloud_init(
6289 cloud_init_text,
6290 additional_params,
6291 db_vnfd["id"],
6292 vdud["id"],
6293 )
6294 )
6295 vca_scaling_info.append(
6296 {
6297 "osm_vdu_id": vdu_delta["id"],
6298 "member-vnf-index": vnf_index,
6299 "type": "create",
6300 "vdu_index": vdu_index + x,
6301 }
6302 )
6303 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6304 for kdu_delta in delta.get("kdu-resource-delta", {}):
6305 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6306 kdu_name = kdu_profile["kdu-name"]
6307 resource_name = kdu_profile.get("resource-name", "")
6308
6309 # Might have different kdus in the same delta
6310 # Should have list for each kdu
6311 if not scaling_info["kdu-create"].get(kdu_name, None):
6312 scaling_info["kdu-create"][kdu_name] = []
6313
6314 kdur = get_kdur(db_vnfr, kdu_name)
6315 if kdur.get("helm-chart"):
6316 k8s_cluster_type = "helm-chart-v3"
6317 self.logger.debug("kdur: {}".format(kdur))
6318 if (
6319 kdur.get("helm-version")
6320 and kdur.get("helm-version") == "v2"
6321 ):
6322 k8s_cluster_type = "helm-chart"
6323 elif kdur.get("juju-bundle"):
6324 k8s_cluster_type = "juju-bundle"
6325 else:
6326 raise LcmException(
6327 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6328 "juju-bundle. Maybe an old NBI version is running".format(
6329 db_vnfr["member-vnf-index-ref"], kdu_name
6330 )
6331 )
6332
6333 max_instance_count = 10
6334 if kdu_profile and "max-number-of-instances" in kdu_profile:
6335 max_instance_count = kdu_profile.get(
6336 "max-number-of-instances", 10
6337 )
6338
6339 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6340 deployed_kdu, _ = get_deployed_kdu(
6341 nsr_deployed, kdu_name, vnf_index
6342 )
6343 if deployed_kdu is None:
6344 raise LcmException(
6345 "KDU '{}' for vnf '{}' not deployed".format(
6346 kdu_name, vnf_index
6347 )
6348 )
6349 kdu_instance = deployed_kdu.get("kdu-instance")
6350 instance_num = await self.k8scluster_map[
6351 k8s_cluster_type
6352 ].get_scale_count(
6353 resource_name,
6354 kdu_instance,
6355 vca_id=vca_id,
6356 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6357 kdu_model=deployed_kdu.get("kdu-model"),
6358 )
6359 kdu_replica_count = instance_num + kdu_delta.get(
6360 "number-of-instances", 1
6361 )
6362
6363 # Control if new count is over max and instance_num is less than max.
6364 # Then assign max instance number to kdu replica count
6365 if kdu_replica_count > max_instance_count > instance_num:
6366 kdu_replica_count = max_instance_count
6367 if kdu_replica_count > max_instance_count:
6368 raise LcmException(
6369 "reached the limit of {} (max-instance-count) "
6370 "scaling-out operations for the "
6371 "scaling-group-descriptor '{}'".format(
6372 instance_num, scaling_group
6373 )
6374 )
6375
6376 for x in range(kdu_delta.get("number-of-instances", 1)):
6377 vca_scaling_info.append(
6378 {
6379 "osm_kdu_id": kdu_name,
6380 "member-vnf-index": vnf_index,
6381 "type": "create",
6382 "kdu_index": instance_num + x - 1,
6383 }
6384 )
6385 scaling_info["kdu-create"][kdu_name].append(
6386 {
6387 "member-vnf-index": vnf_index,
6388 "type": "create",
6389 "k8s-cluster-type": k8s_cluster_type,
6390 "resource-name": resource_name,
6391 "scale": kdu_replica_count,
6392 }
6393 )
6394 elif scaling_type == "SCALE_IN":
6395 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6396
6397 scaling_info["scaling_direction"] = "IN"
6398 scaling_info["vdu-delete"] = {}
6399 scaling_info["kdu-delete"] = {}
6400
6401 for delta in deltas:
6402 for vdu_delta in delta.get("vdu-delta", {}):
6403 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6404 min_instance_count = 0
6405 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6406 if vdu_profile and "min-number-of-instances" in vdu_profile:
6407 min_instance_count = vdu_profile["min-number-of-instances"]
6408
6409 default_instance_num = get_number_of_instances(
6410 db_vnfd, vdu_delta["id"]
6411 )
6412 instance_num = vdu_delta.get("number-of-instances", 1)
6413 nb_scale_op -= instance_num
6414
6415 new_instance_count = nb_scale_op + default_instance_num
6416
6417 if new_instance_count < min_instance_count < vdu_count:
6418 instances_number = min_instance_count - new_instance_count
6419 else:
6420 instances_number = instance_num
6421
6422 if new_instance_count < min_instance_count:
6423 raise LcmException(
6424 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6425 "scaling-group-descriptor '{}'".format(
6426 nb_scale_op, scaling_group
6427 )
6428 )
6429 for x in range(vdu_delta.get("number-of-instances", 1)):
6430 vca_scaling_info.append(
6431 {
6432 "osm_vdu_id": vdu_delta["id"],
6433 "member-vnf-index": vnf_index,
6434 "type": "delete",
6435 "vdu_index": vdu_index - 1 - x,
6436 }
6437 )
6438 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6439 for kdu_delta in delta.get("kdu-resource-delta", {}):
6440 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6441 kdu_name = kdu_profile["kdu-name"]
6442 resource_name = kdu_profile.get("resource-name", "")
6443
6444 if not scaling_info["kdu-delete"].get(kdu_name, None):
6445 scaling_info["kdu-delete"][kdu_name] = []
6446
6447 kdur = get_kdur(db_vnfr, kdu_name)
6448 if kdur.get("helm-chart"):
6449 k8s_cluster_type = "helm-chart-v3"
6450 self.logger.debug("kdur: {}".format(kdur))
6451 if (
6452 kdur.get("helm-version")
6453 and kdur.get("helm-version") == "v2"
6454 ):
6455 k8s_cluster_type = "helm-chart"
6456 elif kdur.get("juju-bundle"):
6457 k8s_cluster_type = "juju-bundle"
6458 else:
6459 raise LcmException(
6460 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6461 "juju-bundle. Maybe an old NBI version is running".format(
6462 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6463 )
6464 )
6465
6466 min_instance_count = 0
6467 if kdu_profile and "min-number-of-instances" in kdu_profile:
6468 min_instance_count = kdu_profile["min-number-of-instances"]
6469
6470 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6471 deployed_kdu, _ = get_deployed_kdu(
6472 nsr_deployed, kdu_name, vnf_index
6473 )
6474 if deployed_kdu is None:
6475 raise LcmException(
6476 "KDU '{}' for vnf '{}' not deployed".format(
6477 kdu_name, vnf_index
6478 )
6479 )
6480 kdu_instance = deployed_kdu.get("kdu-instance")
6481 instance_num = await self.k8scluster_map[
6482 k8s_cluster_type
6483 ].get_scale_count(
6484 resource_name,
6485 kdu_instance,
6486 vca_id=vca_id,
6487 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6488 kdu_model=deployed_kdu.get("kdu-model"),
6489 )
6490 kdu_replica_count = instance_num - kdu_delta.get(
6491 "number-of-instances", 1
6492 )
6493
6494 if kdu_replica_count < min_instance_count < instance_num:
6495 kdu_replica_count = min_instance_count
6496 if kdu_replica_count < min_instance_count:
6497 raise LcmException(
6498 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6499 "scaling-group-descriptor '{}'".format(
6500 instance_num, scaling_group
6501 )
6502 )
6503
6504 for x in range(kdu_delta.get("number-of-instances", 1)):
6505 vca_scaling_info.append(
6506 {
6507 "osm_kdu_id": kdu_name,
6508 "member-vnf-index": vnf_index,
6509 "type": "delete",
6510 "kdu_index": instance_num - x - 1,
6511 }
6512 )
6513 scaling_info["kdu-delete"][kdu_name].append(
6514 {
6515 "member-vnf-index": vnf_index,
6516 "type": "delete",
6517 "k8s-cluster-type": k8s_cluster_type,
6518 "resource-name": resource_name,
6519 "scale": kdu_replica_count,
6520 }
6521 )
6522
6523 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6524 vdu_delete = copy(scaling_info.get("vdu-delete"))
6525 if scaling_info["scaling_direction"] == "IN":
6526 for vdur in reversed(db_vnfr["vdur"]):
6527 if vdu_delete.get(vdur["vdu-id-ref"]):
6528 vdu_delete[vdur["vdu-id-ref"]] -= 1
6529 scaling_info["vdu"].append(
6530 {
6531 "name": vdur.get("name") or vdur.get("vdu-name"),
6532 "vdu_id": vdur["vdu-id-ref"],
6533 "interface": [],
6534 }
6535 )
6536 for interface in vdur["interfaces"]:
6537 scaling_info["vdu"][-1]["interface"].append(
6538 {
6539 "name": interface["name"],
6540 "ip_address": interface["ip-address"],
6541 "mac_address": interface.get("mac-address"),
6542 }
6543 )
6544 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6545
6546 # PRE-SCALE BEGIN
6547 step = "Executing pre-scale vnf-config-primitive"
6548 if scaling_descriptor.get("scaling-config-action"):
6549 for scaling_config_action in scaling_descriptor[
6550 "scaling-config-action"
6551 ]:
6552 if (
6553 scaling_config_action.get("trigger") == "pre-scale-in"
6554 and scaling_type == "SCALE_IN"
6555 ) or (
6556 scaling_config_action.get("trigger") == "pre-scale-out"
6557 and scaling_type == "SCALE_OUT"
6558 ):
6559 vnf_config_primitive = scaling_config_action[
6560 "vnf-config-primitive-name-ref"
6561 ]
6562 step = db_nslcmop_update[
6563 "detailed-status"
6564 ] = "executing pre-scale scaling-config-action '{}'".format(
6565 vnf_config_primitive
6566 )
6567
6568 # look for primitive
6569 for config_primitive in (
6570 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6571 ).get("config-primitive", ()):
6572 if config_primitive["name"] == vnf_config_primitive:
6573 break
6574 else:
6575 raise LcmException(
6576 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6577 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6578 "primitive".format(scaling_group, vnf_config_primitive)
6579 )
6580
6581 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6582 if db_vnfr.get("additionalParamsForVnf"):
6583 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6584
6585 scale_process = "VCA"
6586 db_nsr_update["config-status"] = "configuring pre-scaling"
6587 primitive_params = self._map_primitive_params(
6588 config_primitive, {}, vnfr_params
6589 )
6590
6591 # Pre-scale retry check: Check if this sub-operation has been executed before
6592 op_index = self._check_or_add_scale_suboperation(
6593 db_nslcmop,
6594 vnf_index,
6595 vnf_config_primitive,
6596 primitive_params,
6597 "PRE-SCALE",
6598 )
6599 if op_index == self.SUBOPERATION_STATUS_SKIP:
6600 # Skip sub-operation
6601 result = "COMPLETED"
6602 result_detail = "Done"
6603 self.logger.debug(
6604 logging_text
6605 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6606 vnf_config_primitive, result, result_detail
6607 )
6608 )
6609 else:
6610 if op_index == self.SUBOPERATION_STATUS_NEW:
6611 # New sub-operation: Get index of this sub-operation
6612 op_index = (
6613 len(db_nslcmop.get("_admin", {}).get("operations"))
6614 - 1
6615 )
6616 self.logger.debug(
6617 logging_text
6618 + "vnf_config_primitive={} New sub-operation".format(
6619 vnf_config_primitive
6620 )
6621 )
6622 else:
6623 # retry: Get registered params for this existing sub-operation
6624 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6625 op_index
6626 ]
6627 vnf_index = op.get("member_vnf_index")
6628 vnf_config_primitive = op.get("primitive")
6629 primitive_params = op.get("primitive_params")
6630 self.logger.debug(
6631 logging_text
6632 + "vnf_config_primitive={} Sub-operation retry".format(
6633 vnf_config_primitive
6634 )
6635 )
6636 # Execute the primitive, either with new (first-time) or registered (reintent) args
6637 ee_descriptor_id = config_primitive.get(
6638 "execution-environment-ref"
6639 )
6640 primitive_name = config_primitive.get(
6641 "execution-environment-primitive", vnf_config_primitive
6642 )
6643 ee_id, vca_type = self._look_for_deployed_vca(
6644 nsr_deployed["VCA"],
6645 member_vnf_index=vnf_index,
6646 vdu_id=None,
6647 vdu_count_index=None,
6648 ee_descriptor_id=ee_descriptor_id,
6649 )
6650 result, result_detail = await self._ns_execute_primitive(
6651 ee_id,
6652 primitive_name,
6653 primitive_params,
6654 vca_type=vca_type,
6655 vca_id=vca_id,
6656 )
6657 self.logger.debug(
6658 logging_text
6659 + "vnf_config_primitive={} Done with result {} {}".format(
6660 vnf_config_primitive, result, result_detail
6661 )
6662 )
6663 # Update operationState = COMPLETED | FAILED
6664 self._update_suboperation_status(
6665 db_nslcmop, op_index, result, result_detail
6666 )
6667
6668 if result == "FAILED":
6669 raise LcmException(result_detail)
6670 db_nsr_update["config-status"] = old_config_status
6671 scale_process = None
6672 # PRE-SCALE END
6673
6674 db_nsr_update[
6675 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6676 ] = nb_scale_op
6677 db_nsr_update[
6678 "_admin.scaling-group.{}.time".format(admin_scale_index)
6679 ] = time()
6680
6681 # SCALE-IN VCA - BEGIN
6682 if vca_scaling_info:
6683 step = db_nslcmop_update[
6684 "detailed-status"
6685 ] = "Deleting the execution environments"
6686 scale_process = "VCA"
6687 for vca_info in vca_scaling_info:
6688 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6689 member_vnf_index = str(vca_info["member-vnf-index"])
6690 self.logger.debug(
6691 logging_text + "vdu info: {}".format(vca_info)
6692 )
6693 if vca_info.get("osm_vdu_id"):
6694 vdu_id = vca_info["osm_vdu_id"]
6695 vdu_index = int(vca_info["vdu_index"])
6696 stage[
6697 1
6698 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6699 member_vnf_index, vdu_id, vdu_index
6700 )
6701 stage[2] = step = "Scaling in VCA"
6702 self._write_op_status(op_id=nslcmop_id, stage=stage)
6703 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6704 config_update = db_nsr["configurationStatus"]
6705 for vca_index, vca in enumerate(vca_update):
6706 if (
6707 (vca or vca.get("ee_id"))
6708 and vca["member-vnf-index"] == member_vnf_index
6709 and vca["vdu_count_index"] == vdu_index
6710 ):
6711 if vca.get("vdu_id"):
6712 config_descriptor = get_configuration(
6713 db_vnfd, vca.get("vdu_id")
6714 )
6715 elif vca.get("kdu_name"):
6716 config_descriptor = get_configuration(
6717 db_vnfd, vca.get("kdu_name")
6718 )
6719 else:
6720 config_descriptor = get_configuration(
6721 db_vnfd, db_vnfd["id"]
6722 )
6723 operation_params = (
6724 db_nslcmop.get("operationParams") or {}
6725 )
6726 exec_terminate_primitives = not operation_params.get(
6727 "skip_terminate_primitives"
6728 ) and vca.get("needed_terminate")
6729 task = asyncio.ensure_future(
6730 asyncio.wait_for(
6731 self.destroy_N2VC(
6732 logging_text,
6733 db_nslcmop,
6734 vca,
6735 config_descriptor,
6736 vca_index,
6737 destroy_ee=True,
6738 exec_primitives=exec_terminate_primitives,
6739 scaling_in=True,
6740 vca_id=vca_id,
6741 ),
6742 timeout=self.timeout_charm_delete,
6743 )
6744 )
6745 tasks_dict_info[task] = "Terminating VCA {}".format(
6746 vca.get("ee_id")
6747 )
6748 del vca_update[vca_index]
6749 del config_update[vca_index]
6750 # wait for pending tasks of terminate primitives
6751 if tasks_dict_info:
6752 self.logger.debug(
6753 logging_text
6754 + "Waiting for tasks {}".format(
6755 list(tasks_dict_info.keys())
6756 )
6757 )
6758 error_list = await self._wait_for_tasks(
6759 logging_text,
6760 tasks_dict_info,
6761 min(
6762 self.timeout_charm_delete, self.timeout_ns_terminate
6763 ),
6764 stage,
6765 nslcmop_id,
6766 )
6767 tasks_dict_info.clear()
6768 if error_list:
6769 raise LcmException("; ".join(error_list))
6770
6771 db_vca_and_config_update = {
6772 "_admin.deployed.VCA": vca_update,
6773 "configurationStatus": config_update,
6774 }
6775 self.update_db_2(
6776 "nsrs", db_nsr["_id"], db_vca_and_config_update
6777 )
6778 scale_process = None
6779 # SCALE-IN VCA - END
6780
6781 # SCALE RO - BEGIN
6782 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6783 scale_process = "RO"
6784 if self.ro_config.get("ng"):
6785 await self._scale_ng_ro(
6786 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6787 )
6788 scaling_info.pop("vdu-create", None)
6789 scaling_info.pop("vdu-delete", None)
6790
6791 scale_process = None
6792 # SCALE RO - END
6793
6794 # SCALE KDU - BEGIN
6795 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6796 scale_process = "KDU"
6797 await self._scale_kdu(
6798 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6799 )
6800 scaling_info.pop("kdu-create", None)
6801 scaling_info.pop("kdu-delete", None)
6802
6803 scale_process = None
6804 # SCALE KDU - END
6805
6806 if db_nsr_update:
6807 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6808
6809 # SCALE-UP VCA - BEGIN
6810 if vca_scaling_info:
6811 step = db_nslcmop_update[
6812 "detailed-status"
6813 ] = "Creating new execution environments"
6814 scale_process = "VCA"
6815 for vca_info in vca_scaling_info:
6816 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6817 member_vnf_index = str(vca_info["member-vnf-index"])
6818 self.logger.debug(
6819 logging_text + "vdu info: {}".format(vca_info)
6820 )
6821 vnfd_id = db_vnfr["vnfd-ref"]
6822 if vca_info.get("osm_vdu_id"):
6823 vdu_index = int(vca_info["vdu_index"])
6824 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6825 if db_vnfr.get("additionalParamsForVnf"):
6826 deploy_params.update(
6827 parse_yaml_strings(
6828 db_vnfr["additionalParamsForVnf"].copy()
6829 )
6830 )
6831 descriptor_config = get_configuration(
6832 db_vnfd, db_vnfd["id"]
6833 )
6834 if descriptor_config:
6835 vdu_id = None
6836 vdu_name = None
6837 kdu_name = None
6838 self._deploy_n2vc(
6839 logging_text=logging_text
6840 + "member_vnf_index={} ".format(member_vnf_index),
6841 db_nsr=db_nsr,
6842 db_vnfr=db_vnfr,
6843 nslcmop_id=nslcmop_id,
6844 nsr_id=nsr_id,
6845 nsi_id=nsi_id,
6846 vnfd_id=vnfd_id,
6847 vdu_id=vdu_id,
6848 kdu_name=kdu_name,
6849 member_vnf_index=member_vnf_index,
6850 vdu_index=vdu_index,
6851 vdu_name=vdu_name,
6852 deploy_params=deploy_params,
6853 descriptor_config=descriptor_config,
6854 base_folder=base_folder,
6855 task_instantiation_info=tasks_dict_info,
6856 stage=stage,
6857 )
6858 vdu_id = vca_info["osm_vdu_id"]
6859 vdur = find_in_list(
6860 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6861 )
6862 descriptor_config = get_configuration(db_vnfd, vdu_id)
6863 if vdur.get("additionalParams"):
6864 deploy_params_vdu = parse_yaml_strings(
6865 vdur["additionalParams"]
6866 )
6867 else:
6868 deploy_params_vdu = deploy_params
6869 deploy_params_vdu["OSM"] = get_osm_params(
6870 db_vnfr, vdu_id, vdu_count_index=vdu_index
6871 )
6872 if descriptor_config:
6873 vdu_name = None
6874 kdu_name = None
6875 stage[
6876 1
6877 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6878 member_vnf_index, vdu_id, vdu_index
6879 )
6880 stage[2] = step = "Scaling out VCA"
6881 self._write_op_status(op_id=nslcmop_id, stage=stage)
6882 self._deploy_n2vc(
6883 logging_text=logging_text
6884 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6885 member_vnf_index, vdu_id, vdu_index
6886 ),
6887 db_nsr=db_nsr,
6888 db_vnfr=db_vnfr,
6889 nslcmop_id=nslcmop_id,
6890 nsr_id=nsr_id,
6891 nsi_id=nsi_id,
6892 vnfd_id=vnfd_id,
6893 vdu_id=vdu_id,
6894 kdu_name=kdu_name,
6895 member_vnf_index=member_vnf_index,
6896 vdu_index=vdu_index,
6897 vdu_name=vdu_name,
6898 deploy_params=deploy_params_vdu,
6899 descriptor_config=descriptor_config,
6900 base_folder=base_folder,
6901 task_instantiation_info=tasks_dict_info,
6902 stage=stage,
6903 )
6904 # SCALE-UP VCA - END
6905 scale_process = None
6906
6907 # POST-SCALE BEGIN
6908 # execute primitive service POST-SCALING
6909 step = "Executing post-scale vnf-config-primitive"
6910 if scaling_descriptor.get("scaling-config-action"):
6911 for scaling_config_action in scaling_descriptor[
6912 "scaling-config-action"
6913 ]:
6914 if (
6915 scaling_config_action.get("trigger") == "post-scale-in"
6916 and scaling_type == "SCALE_IN"
6917 ) or (
6918 scaling_config_action.get("trigger") == "post-scale-out"
6919 and scaling_type == "SCALE_OUT"
6920 ):
6921 vnf_config_primitive = scaling_config_action[
6922 "vnf-config-primitive-name-ref"
6923 ]
6924 step = db_nslcmop_update[
6925 "detailed-status"
6926 ] = "executing post-scale scaling-config-action '{}'".format(
6927 vnf_config_primitive
6928 )
6929
6930 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6931 if db_vnfr.get("additionalParamsForVnf"):
6932 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6933
6934 # look for primitive
6935 for config_primitive in (
6936 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6937 ).get("config-primitive", ()):
6938 if config_primitive["name"] == vnf_config_primitive:
6939 break
6940 else:
6941 raise LcmException(
6942 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6943 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6944 "config-primitive".format(
6945 scaling_group, vnf_config_primitive
6946 )
6947 )
6948 scale_process = "VCA"
6949 db_nsr_update["config-status"] = "configuring post-scaling"
6950 primitive_params = self._map_primitive_params(
6951 config_primitive, {}, vnfr_params
6952 )
6953
6954 # Post-scale retry check: Check if this sub-operation has been executed before
6955 op_index = self._check_or_add_scale_suboperation(
6956 db_nslcmop,
6957 vnf_index,
6958 vnf_config_primitive,
6959 primitive_params,
6960 "POST-SCALE",
6961 )
6962 if op_index == self.SUBOPERATION_STATUS_SKIP:
6963 # Skip sub-operation
6964 result = "COMPLETED"
6965 result_detail = "Done"
6966 self.logger.debug(
6967 logging_text
6968 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6969 vnf_config_primitive, result, result_detail
6970 )
6971 )
6972 else:
6973 if op_index == self.SUBOPERATION_STATUS_NEW:
6974 # New sub-operation: Get index of this sub-operation
6975 op_index = (
6976 len(db_nslcmop.get("_admin", {}).get("operations"))
6977 - 1
6978 )
6979 self.logger.debug(
6980 logging_text
6981 + "vnf_config_primitive={} New sub-operation".format(
6982 vnf_config_primitive
6983 )
6984 )
6985 else:
6986 # retry: Get registered params for this existing sub-operation
6987 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6988 op_index
6989 ]
6990 vnf_index = op.get("member_vnf_index")
6991 vnf_config_primitive = op.get("primitive")
6992 primitive_params = op.get("primitive_params")
6993 self.logger.debug(
6994 logging_text
6995 + "vnf_config_primitive={} Sub-operation retry".format(
6996 vnf_config_primitive
6997 )
6998 )
6999 # Execute the primitive, either with new (first-time) or registered (reintent) args
7000 ee_descriptor_id = config_primitive.get(
7001 "execution-environment-ref"
7002 )
7003 primitive_name = config_primitive.get(
7004 "execution-environment-primitive", vnf_config_primitive
7005 )
7006 ee_id, vca_type = self._look_for_deployed_vca(
7007 nsr_deployed["VCA"],
7008 member_vnf_index=vnf_index,
7009 vdu_id=None,
7010 vdu_count_index=None,
7011 ee_descriptor_id=ee_descriptor_id,
7012 )
7013 result, result_detail = await self._ns_execute_primitive(
7014 ee_id,
7015 primitive_name,
7016 primitive_params,
7017 vca_type=vca_type,
7018 vca_id=vca_id,
7019 )
7020 self.logger.debug(
7021 logging_text
7022 + "vnf_config_primitive={} Done with result {} {}".format(
7023 vnf_config_primitive, result, result_detail
7024 )
7025 )
7026 # Update operationState = COMPLETED | FAILED
7027 self._update_suboperation_status(
7028 db_nslcmop, op_index, result, result_detail
7029 )
7030
7031 if result == "FAILED":
7032 raise LcmException(result_detail)
7033 db_nsr_update["config-status"] = old_config_status
7034 scale_process = None
7035 # POST-SCALE END
7036
7037 db_nsr_update[
7038 "detailed-status"
7039 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7040 db_nsr_update["operational-status"] = (
7041 "running"
7042 if old_operational_status == "failed"
7043 else old_operational_status
7044 )
7045 db_nsr_update["config-status"] = old_config_status
7046 return
7047 except (
7048 ROclient.ROClientException,
7049 DbException,
7050 LcmException,
7051 NgRoException,
7052 ) as e:
7053 self.logger.error(logging_text + "Exit Exception {}".format(e))
7054 exc = e
7055 except asyncio.CancelledError:
7056 self.logger.error(
7057 logging_text + "Cancelled Exception while '{}'".format(step)
7058 )
7059 exc = "Operation was cancelled"
7060 except Exception as e:
7061 exc = traceback.format_exc()
7062 self.logger.critical(
7063 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7064 exc_info=True,
7065 )
7066 finally:
7067 self._write_ns_status(
7068 nsr_id=nsr_id,
7069 ns_state=None,
7070 current_operation="IDLE",
7071 current_operation_id=None,
7072 )
7073 if tasks_dict_info:
7074 stage[1] = "Waiting for instantiate pending tasks."
7075 self.logger.debug(logging_text + stage[1])
7076 exc = await self._wait_for_tasks(
7077 logging_text,
7078 tasks_dict_info,
7079 self.timeout_ns_deploy,
7080 stage,
7081 nslcmop_id,
7082 nsr_id=nsr_id,
7083 )
7084 if exc:
7085 db_nslcmop_update[
7086 "detailed-status"
7087 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7088 nslcmop_operation_state = "FAILED"
7089 if db_nsr:
7090 db_nsr_update["operational-status"] = old_operational_status
7091 db_nsr_update["config-status"] = old_config_status
7092 db_nsr_update["detailed-status"] = ""
7093 if scale_process:
7094 if "VCA" in scale_process:
7095 db_nsr_update["config-status"] = "failed"
7096 if "RO" in scale_process:
7097 db_nsr_update["operational-status"] = "failed"
7098 db_nsr_update[
7099 "detailed-status"
7100 ] = "FAILED scaling nslcmop={} {}: {}".format(
7101 nslcmop_id, step, exc
7102 )
7103 else:
7104 error_description_nslcmop = None
7105 nslcmop_operation_state = "COMPLETED"
7106 db_nslcmop_update["detailed-status"] = "Done"
7107
7108 self._write_op_status(
7109 op_id=nslcmop_id,
7110 stage="",
7111 error_message=error_description_nslcmop,
7112 operation_state=nslcmop_operation_state,
7113 other_update=db_nslcmop_update,
7114 )
7115 if db_nsr:
7116 self._write_ns_status(
7117 nsr_id=nsr_id,
7118 ns_state=None,
7119 current_operation="IDLE",
7120 current_operation_id=None,
7121 other_update=db_nsr_update,
7122 )
7123
7124 if nslcmop_operation_state:
7125 try:
7126 msg = {
7127 "nsr_id": nsr_id,
7128 "nslcmop_id": nslcmop_id,
7129 "operationState": nslcmop_operation_state,
7130 }
7131 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7132 except Exception as e:
7133 self.logger.error(
7134 logging_text + "kafka_write notification Exception {}".format(e)
7135 )
7136 self.logger.debug(logging_text + "Exit")
7137 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7138
7139 async def _scale_kdu(
7140 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7141 ):
7142 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7143 for kdu_name in _scaling_info:
7144 for kdu_scaling_info in _scaling_info[kdu_name]:
7145 deployed_kdu, index = get_deployed_kdu(
7146 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7147 )
7148 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7149 kdu_instance = deployed_kdu["kdu-instance"]
7150 kdu_model = deployed_kdu.get("kdu-model")
7151 scale = int(kdu_scaling_info["scale"])
7152 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7153
7154 db_dict = {
7155 "collection": "nsrs",
7156 "filter": {"_id": nsr_id},
7157 "path": "_admin.deployed.K8s.{}".format(index),
7158 }
7159
7160 step = "scaling application {}".format(
7161 kdu_scaling_info["resource-name"]
7162 )
7163 self.logger.debug(logging_text + step)
7164
7165 if kdu_scaling_info["type"] == "delete":
7166 kdu_config = get_configuration(db_vnfd, kdu_name)
7167 if (
7168 kdu_config
7169 and kdu_config.get("terminate-config-primitive")
7170 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7171 ):
7172 terminate_config_primitive_list = kdu_config.get(
7173 "terminate-config-primitive"
7174 )
7175 terminate_config_primitive_list.sort(
7176 key=lambda val: int(val["seq"])
7177 )
7178
7179 for (
7180 terminate_config_primitive
7181 ) in terminate_config_primitive_list:
7182 primitive_params_ = self._map_primitive_params(
7183 terminate_config_primitive, {}, {}
7184 )
7185 step = "execute terminate config primitive"
7186 self.logger.debug(logging_text + step)
7187 await asyncio.wait_for(
7188 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7189 cluster_uuid=cluster_uuid,
7190 kdu_instance=kdu_instance,
7191 primitive_name=terminate_config_primitive["name"],
7192 params=primitive_params_,
7193 db_dict=db_dict,
7194 vca_id=vca_id,
7195 ),
7196 timeout=600,
7197 )
7198
7199 await asyncio.wait_for(
7200 self.k8scluster_map[k8s_cluster_type].scale(
7201 kdu_instance,
7202 scale,
7203 kdu_scaling_info["resource-name"],
7204 vca_id=vca_id,
7205 cluster_uuid=cluster_uuid,
7206 kdu_model=kdu_model,
7207 atomic=True,
7208 db_dict=db_dict,
7209 ),
7210 timeout=self.timeout_vca_on_error,
7211 )
7212
7213 if kdu_scaling_info["type"] == "create":
7214 kdu_config = get_configuration(db_vnfd, kdu_name)
7215 if (
7216 kdu_config
7217 and kdu_config.get("initial-config-primitive")
7218 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7219 ):
7220 initial_config_primitive_list = kdu_config.get(
7221 "initial-config-primitive"
7222 )
7223 initial_config_primitive_list.sort(
7224 key=lambda val: int(val["seq"])
7225 )
7226
7227 for initial_config_primitive in initial_config_primitive_list:
7228 primitive_params_ = self._map_primitive_params(
7229 initial_config_primitive, {}, {}
7230 )
7231 step = "execute initial config primitive"
7232 self.logger.debug(logging_text + step)
7233 await asyncio.wait_for(
7234 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7235 cluster_uuid=cluster_uuid,
7236 kdu_instance=kdu_instance,
7237 primitive_name=initial_config_primitive["name"],
7238 params=primitive_params_,
7239 db_dict=db_dict,
7240 vca_id=vca_id,
7241 ),
7242 timeout=600,
7243 )
7244
7245 async def _scale_ng_ro(
7246 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7247 ):
7248 nsr_id = db_nslcmop["nsInstanceId"]
7249 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7250 db_vnfrs = {}
7251
7252 # read from db: vnfd's for every vnf
7253 db_vnfds = []
7254
7255 # for each vnf in ns, read vnfd
7256 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7257 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7258 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7259 # if we haven't this vnfd, read it from db
7260 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7261 # read from db
7262 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7263 db_vnfds.append(vnfd)
7264 n2vc_key = self.n2vc.get_public_key()
7265 n2vc_key_list = [n2vc_key]
7266 self.scale_vnfr(
7267 db_vnfr,
7268 vdu_scaling_info.get("vdu-create"),
7269 vdu_scaling_info.get("vdu-delete"),
7270 mark_delete=True,
7271 )
7272 # db_vnfr has been updated, update db_vnfrs to use it
7273 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7274 await self._instantiate_ng_ro(
7275 logging_text,
7276 nsr_id,
7277 db_nsd,
7278 db_nsr,
7279 db_nslcmop,
7280 db_vnfrs,
7281 db_vnfds,
7282 n2vc_key_list,
7283 stage=stage,
7284 start_deploy=time(),
7285 timeout_ns_deploy=self.timeout_ns_deploy,
7286 )
7287 if vdu_scaling_info.get("vdu-delete"):
7288 self.scale_vnfr(
7289 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7290 )
7291
7292 async def extract_prometheus_scrape_jobs(
7293 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7294 ):
7295 # look if exist a file called 'prometheus*.j2' and
7296 artifact_content = self.fs.dir_ls(artifact_path)
7297 job_file = next(
7298 (
7299 f
7300 for f in artifact_content
7301 if f.startswith("prometheus") and f.endswith(".j2")
7302 ),
7303 None,
7304 )
7305 if not job_file:
7306 return
7307 with self.fs.file_open((artifact_path, job_file), "r") as f:
7308 job_data = f.read()
7309
7310 # TODO get_service
7311 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7312 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7313 host_port = "80"
7314 vnfr_id = vnfr_id.replace("-", "")
7315 variables = {
7316 "JOB_NAME": vnfr_id,
7317 "TARGET_IP": target_ip,
7318 "EXPORTER_POD_IP": host_name,
7319 "EXPORTER_POD_PORT": host_port,
7320 }
7321 job_list = parse_job(job_data, variables)
7322 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7323 for job in job_list:
7324 if (
7325 not isinstance(job.get("job_name"), str)
7326 or vnfr_id not in job["job_name"]
7327 ):
7328 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7329 job["nsr_id"] = nsr_id
7330 job["vnfr_id"] = vnfr_id
7331 return job_list
7332
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """
        Start, stop or rebuild a single VDU of a VNF through RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation id, used for logging and status updates
        :param vnf_id: vnfr _id of the VNF that owns the target VDU
        :param additional_param: dict with "vdu_id" and "count-index" selecting
            the target vdur
        :param operation_type: RO verb to execute (also written as the nsr
            operational-status while the operation runs)
        :return: ("COMPLETED", "Done") on success, or
            ("FAILED", <error text>) on any error
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            # locate the target vdur by vdu-id-ref and count-index
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first key of vim_info identifies the target VIM
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO finishes the requested operation
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # only reached through an except branch, where exc was set
        return "FAILED", "Error in operate VNF {}".format(exc)
7419
7420 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7421 """
7422 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7423
7424 :param: vim_account_id: VIM Account ID
7425
7426 :return: (cloud_name, cloud_credential)
7427 """
7428 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7429 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7430
7431 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7432 """
7433 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7434
7435 :param: vim_account_id: VIM Account ID
7436
7437 :return: (cloud_name, cloud_credential)
7438 """
7439 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7440 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7441
7442 async def migrate(self, nsr_id, nslcmop_id):
7443 """
7444 Migrate VNFs and VDUs instances in a NS
7445
7446 :param: nsr_id: NS Instance ID
7447 :param: nslcmop_id: nslcmop ID of migrate
7448
7449 """
7450 # Try to lock HA task here
7451 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7452 if not task_is_locked_by_me:
7453 return
7454 logging_text = "Task ns={} migrate ".format(nsr_id)
7455 self.logger.debug(logging_text + "Enter")
7456 # get all needed from database
7457 db_nslcmop = None
7458 db_nslcmop_update = {}
7459 nslcmop_operation_state = None
7460 db_nsr_update = {}
7461 target = {}
7462 exc = None
7463 # in case of error, indicates what part of scale was failed to put nsr at error status
7464 start_deploy = time()
7465
7466 try:
7467 # wait for any previous tasks in process
7468 step = "Waiting for previous operations to terminate"
7469 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7470
7471 self._write_ns_status(
7472 nsr_id=nsr_id,
7473 ns_state=None,
7474 current_operation="MIGRATING",
7475 current_operation_id=nslcmop_id,
7476 )
7477 step = "Getting nslcmop from database"
7478 self.logger.debug(
7479 step + " after having waited for previous tasks to be completed"
7480 )
7481 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7482 migrate_params = db_nslcmop.get("operationParams")
7483
7484 target = {}
7485 target.update(migrate_params)
7486 desc = await self.RO.migrate(nsr_id, target)
7487 self.logger.debug("RO return > {}".format(desc))
7488 action_id = desc["action_id"]
7489 await self._wait_ng_ro(
7490 nsr_id,
7491 action_id,
7492 nslcmop_id,
7493 start_deploy,
7494 self.timeout_migrate,
7495 operation="migrate",
7496 )
7497 except (ROclient.ROClientException, DbException, LcmException) as e:
7498 self.logger.error("Exit Exception {}".format(e))
7499 exc = e
7500 except asyncio.CancelledError:
7501 self.logger.error("Cancelled Exception while '{}'".format(step))
7502 exc = "Operation was cancelled"
7503 except Exception as e:
7504 exc = traceback.format_exc()
7505 self.logger.critical(
7506 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7507 )
7508 finally:
7509 self._write_ns_status(
7510 nsr_id=nsr_id,
7511 ns_state=None,
7512 current_operation="IDLE",
7513 current_operation_id=None,
7514 )
7515 if exc:
7516 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7517 nslcmop_operation_state = "FAILED"
7518 else:
7519 nslcmop_operation_state = "COMPLETED"
7520 db_nslcmop_update["detailed-status"] = "Done"
7521 db_nsr_update["detailed-status"] = "Done"
7522
7523 self._write_op_status(
7524 op_id=nslcmop_id,
7525 stage="",
7526 error_message="",
7527 operation_state=nslcmop_operation_state,
7528 other_update=db_nslcmop_update,
7529 )
7530 if nslcmop_operation_state:
7531 try:
7532 msg = {
7533 "nsr_id": nsr_id,
7534 "nslcmop_id": nslcmop_id,
7535 "operationState": nslcmop_operation_state,
7536 }
7537 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7538 except Exception as e:
7539 self.logger.error(
7540 logging_text + "kafka_write notification Exception {}".format(e)
7541 )
7542 self.logger.debug(logging_text + "Exit")
7543 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7544
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Launches the RO healing as a parallel task and, for each VNF/VDU listed
        in the operation's healVnfData, re-deploys the N2VC execution
        environments. All pending tasks are awaited in the finally block.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        # pre-heal nsr states, restored on failure
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # keep the previous states so they can be restored if healing fails
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # launch RO healing as a parallel task; it is awaited in the finally
            step = "Sending heal order to VIM"
            task_ro = asyncio.ensure_future(
                self.heal_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    db_nslcmop=db_nslcmop,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            tasks_dict_info[task_ro] = "Healing at VIM"

            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # New code to build the dictionary: no explicit vdu list
                        # was given, so every existing vdur becomes a heal target
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf[
                            "additionalParams"
                        ].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): target_instance may still be None if no
                            # vdur matched; the .get() below would then raise —
                            # confirm the target vdu always exists in db_vnfr["vdur"]
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the RO task and any launched N2VC tasks before concluding
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the pre-heal states and record the failure detail
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            # NOTE(review): ns status is only reset to IDLE when db_nsr was read;
            # on a very early failure current_operation stays HEALING — confirm intended
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7835
7836 async def heal_RO(
7837 self,
7838 logging_text,
7839 nsr_id,
7840 db_nslcmop,
7841 stage,
7842 ):
7843 """
7844 Heal at RO
7845 :param logging_text: preffix text to use at logging
7846 :param nsr_id: nsr identity
7847 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7848 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7849 :return: None or exception
7850 """
7851
7852 def get_vim_account(vim_account_id):
7853 nonlocal db_vims
7854 if vim_account_id in db_vims:
7855 return db_vims[vim_account_id]
7856 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7857 db_vims[vim_account_id] = db_vim
7858 return db_vim
7859
7860 try:
7861 start_heal = time()
7862 ns_params = db_nslcmop.get("operationParams")
7863 if ns_params and ns_params.get("timeout_ns_heal"):
7864 timeout_ns_heal = ns_params["timeout_ns_heal"]
7865 else:
7866 timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
7867
7868 db_vims = {}
7869
7870 nslcmop_id = db_nslcmop["_id"]
7871 target = {
7872 "action_id": nslcmop_id,
7873 }
7874 self.logger.warning(
7875 "db_nslcmop={} and timeout_ns_heal={}".format(
7876 db_nslcmop, timeout_ns_heal
7877 )
7878 )
7879 target.update(db_nslcmop.get("operationParams", {}))
7880
7881 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7882 desc = await self.RO.recreate(nsr_id, target)
7883 self.logger.debug("RO return > {}".format(desc))
7884 action_id = desc["action_id"]
7885 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7886 await self._wait_ng_ro(
7887 nsr_id,
7888 action_id,
7889 nslcmop_id,
7890 start_heal,
7891 timeout_ns_heal,
7892 stage,
7893 operation="healing",
7894 )
7895
7896 # Updating NSR
7897 db_nsr_update = {
7898 "_admin.deployed.RO.operational-status": "running",
7899 "detailed-status": " ".join(stage),
7900 }
7901 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7902 self._write_op_status(nslcmop_id, stage)
7903 self.logger.debug(
7904 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7905 )
7906
7907 except Exception as e:
7908 stage[2] = "ERROR healing at VIM"
7909 # self.set_vnfr_at_error(db_vnfrs, str(e))
7910 self.logger.error(
7911 "Error healing at VIM {}".format(e),
7912 exc_info=not isinstance(
7913 e,
7914 (
7915 ROclient.ROClientException,
7916 LcmException,
7917 DbException,
7918 NgRoException,
7919 ),
7920 ),
7921 )
7922 raise
7923
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Schedule one heal_N2VC task per execution environment of an element.

        For every execution environment (juju charm or helm chart) declared in
        ``descriptor_config``, locate — or create — its entry in
        ``db_nsr._admin.deployed.VCA`` and launch an asyncio task running
        :meth:`heal_N2VC`.  Each task is registered in ``self.lcm_tasks`` and
        recorded in ``task_instantiation_info`` so the caller can await it.

        :param db_nsr: NS record; its _admin.deployed.VCA list may be extended
        :param descriptor_config: configuration section of the NS/VNF/VDU/KDU
            descriptor holding the execution-environment-list (or a bare
            ns-level "juju" entry)
        :param task_instantiation_info: dict mapping created asyncio tasks to
            human-readable task names (mutated in place)
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Build the list of execution environments to process, depending on the
        # descriptor flavour: explicit EE list, ns-level juju charm, or nothing.
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type/vca_name from the EE descriptor; unsupported
            # EE kinds (e.g. plain scripts) are skipped.
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                # helm-version "v2" selects the legacy helm client; any other
                # value (or absence) defaults to helm 3
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # for/else: the loop breaks (keeping vca_index) when an existing
            # deployed-VCA entry matches this element+EE; otherwise the else
            # branch appends a brand-new entry at vca_index+1.
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory copy in sync with what was just persisted
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8085
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal one VCA execution environment after the VIM-level heal.

        Mirrors instantiate_N2VC for the healing flow: for native charms it
        waits for the (re-created) VM, re-registers the execution environment
        and re-installs the configuration software; for proxy charms / helm it
        optionally re-injects the SSH key, waits for RO to finish healing, and
        then — only when the operation parameter "run-day1" is set — re-runs
        the initial (day-1) config primitives.

        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param config_descriptor: descriptor section with config primitives
            and config-access information
        :param ee_config_descriptor: execution-environment item being healed
        :raises LcmException: wrapping any failure, after marking the
            configurationStatus entry as BROKEN
        """
        nsr_id = db_nsr["_id"]
        # Prefix for dotted-path updates of this VCA entry in the nsr record
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace down to VNF, then VDU or KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                # Re-registering replaces the old ee_id with a fresh one
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # "config" primitive parameters (if declared) are passed to
                # the charm at install time
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # Unexpected exception classes get a full traceback; known ones
            # are already descriptive enough
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8495
8496 async def _wait_heal_ro(
8497 self,
8498 nsr_id,
8499 timeout=600,
8500 ):
8501 start_time = time()
8502 while time() <= start_time + timeout:
8503 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8504 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8505 "operational-status"
8506 ]
8507 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8508 if operational_status_ro != "healing":
8509 break
8510 await asyncio.sleep(15, loop=self.loop)
8511 else: # timeout_ns_deploy
8512 raise NgRoException("Timeout waiting ns to deploy")
8513
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        Forwards the operationParams of the nslcmop to RO as the scaling
        target, waits for the RO action to complete, and records the final
        operation state (COMPLETED/FAILED) plus a kafka notification.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical scale operation

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # The RO target is the operation parameters as provided by NBI
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # Block until RO reports the verticalscale action finished
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # Always restore NS state, persist the operation result and
            # notify listeners, even on failure/cancellation
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # best-effort kafka notification; failures are only logged
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")