blob: 46403489f61f51cff066269a18403be7ff26c160 [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
aticigdffa6212022-04-12 15:27:53 +030020import shutil
David Garcia444bf962021-11-11 16:35:26 +010021from typing import Any, Dict, List
tierno59d22d22018-09-25 18:10:19 +020022import yaml
23import logging
24import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020025import traceback
David Garciad4816682019-12-09 14:57:43 +010026import json
garciadeblas5697b8b2021-03-24 09:17:02 +010027from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33)
tierno59d22d22018-09-25 18:10:19 +020034
tierno77677d92019-08-22 13:46:35 +000035from osm_lcm import ROclient
David Garciab4ebcd02021-10-28 02:00:43 +020036from osm_lcm.data_utils.nsr import (
37 get_deployed_kdu,
38 get_deployed_vca,
39 get_deployed_vca_list,
40 get_nsd,
41)
42from osm_lcm.data_utils.vca import (
43 DeployedComponent,
44 DeployedK8sResource,
45 DeployedVCA,
46 EELevel,
47 Relation,
48 EERelation,
49 safe_get_ee_relation,
50)
tierno69f0d382020-05-07 13:08:09 +000051from osm_lcm.ng_ro import NgRoClient, NgRoException
garciadeblas5697b8b2021-03-24 09:17:02 +010052from osm_lcm.lcm_utils import (
53 LcmException,
54 LcmExceptionNoMgmtIP,
55 LcmBase,
56 deep_get,
57 get_iterable,
58 populate_dict,
aticigdffa6212022-04-12 15:27:53 +030059 check_juju_bundle_existence,
60 get_charm_artifact_path,
garciadeblas5697b8b2021-03-24 09:17:02 +010061)
David Garciab4ebcd02021-10-28 02:00:43 +020062from osm_lcm.data_utils.nsd import (
63 get_ns_configuration_relation_list,
64 get_vnf_profile,
65 get_vnf_profiles,
66)
garciadeblas5697b8b2021-03-24 09:17:02 +010067from osm_lcm.data_utils.vnfd import (
David Garcia78b6e6d2022-04-29 05:50:46 +020068 get_kdu,
69 get_kdu_services,
David Garciab4ebcd02021-10-28 02:00:43 +020070 get_relation_list,
garciadeblas5697b8b2021-03-24 09:17:02 +010071 get_vdu_list,
72 get_vdu_profile,
73 get_ee_sorted_initial_config_primitive_list,
74 get_ee_sorted_terminate_config_primitive_list,
75 get_kdu_list,
76 get_virtual_link_profiles,
77 get_vdu,
78 get_configuration,
79 get_vdu_index,
80 get_scaling_aspect,
81 get_number_of_instances,
82 get_juju_ee_ref,
David Garciab4ebcd02021-10-28 02:00:43 +020083 get_kdu_resource_profile,
aticigdffa6212022-04-12 15:27:53 +030084 find_software_version,
garciadeblas5697b8b2021-03-24 09:17:02 +010085)
bravof922c4172020-11-24 21:21:43 -030086from osm_lcm.data_utils.list_utils import find_in_list
aticig349aa462022-05-19 12:29:35 +030087from osm_lcm.data_utils.vnfr import (
88 get_osm_params,
89 get_vdur_index,
90 get_kdur,
91 get_volumes_from_instantiation_params,
92)
bravof922c4172020-11-24 21:21:43 -030093from osm_lcm.data_utils.dict_utils import parse_yaml_strings
94from osm_lcm.data_utils.database.vim_account import VimAccountDB
David Garciab4ebcd02021-10-28 02:00:43 +020095from n2vc.definitions import RelationEndpoint
calvinosanch9f9c6f22019-11-04 13:37:39 +010096from n2vc.k8s_helm_conn import K8sHelmConnector
lloretgalleg18ebc3a2020-10-22 09:54:51 +000097from n2vc.k8s_helm3_conn import K8sHelm3Connector
Adam Israelbaacc302019-12-01 12:41:39 -050098from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020099
tierno27246d82018-09-27 15:59:09 +0200100from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +0200101from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +0200102
bravof922c4172020-11-24 21:21:43 -0300103from osm_lcm.data_utils.database.database import Database
104from osm_lcm.data_utils.filesystem.filesystem import Filesystem
105
quilesj7e13aeb2019-10-08 13:34:55 +0200106from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +0000107from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +0200108
tierno588547c2020-07-01 15:30:20 +0000109from osm_lcm.lcm_helm_conn import LCMHelmConn
David Garcia78b6e6d2022-04-29 05:50:46 +0200110from osm_lcm.osm_config import OsmConfigBuilder
bravof73bac502021-05-11 07:38:47 -0400111from osm_lcm.prometheus import parse_job
tierno588547c2020-07-01 15:30:20 +0000112
tierno27246d82018-09-27 15:59:09 +0200113from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +0200114from time import time
tierno27246d82018-09-27 15:59:09 +0200115from uuid import uuid4
lloretgalleg7c121132020-07-08 07:53:22 +0000116
tiernob996d942020-07-03 14:52:28 +0000117from random import randint
tierno59d22d22018-09-25 18:10:19 +0200118
tierno69f0d382020-05-07 13:08:09 +0000119__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
tierno59d22d22018-09-25 18:10:19 +0200120
121
122class NsLcm(LcmBase):
garciadeblas5697b8b2021-03-24 09:17:02 +0100123 timeout_vca_on_error = (
124 5 * 60
125 ) # Time for charm from first time at blocked,error status to mark as failed
126 timeout_ns_deploy = 2 * 3600 # default global timeout for deployment a ns
127 timeout_ns_terminate = 1800 # default global timeout for un deployment a ns
garciadeblas07f4e4c2022-06-09 09:42:58 +0200128 timeout_ns_heal = 1800 # default global timeout for un deployment a ns
garciadeblasf9b04952019-04-09 18:53:58 +0200129 timeout_charm_delete = 10 * 60
David Garciaf6919842020-05-21 16:41:07 +0200130 timeout_primitive = 30 * 60 # timeout for primitive execution
aticigdffa6212022-04-12 15:27:53 +0300131 timeout_ns_update = 30 * 60 # timeout for ns update
garciadeblas5697b8b2021-03-24 09:17:02 +0100132 timeout_progress_primitive = (
133 10 * 60
134 ) # timeout for some progress in a primitive execution
elumalai80bcf1c2022-04-28 18:05:01 +0530135 timeout_migrate = 1800 # default global timeout for migrating vnfs
k4.rahulb827de92022-05-02 16:35:02 +0000136 timeout_operate = 1800 # default global timeout for migrating vnfs
govindarajul4ff4b512022-05-02 20:02:41 +0530137 timeout_verticalscale = 1800 # default global timeout for Vertical Sclaing
kuuseac3a8882019-10-03 10:48:06 +0200138 SUBOPERATION_STATUS_NOT_FOUND = -1
139 SUBOPERATION_STATUS_NEW = -2
140 SUBOPERATION_STATUS_SKIP = -3
tiernoa2143262020-03-27 16:20:40 +0000141 task_name_deploy_vca = "Deploying VCA"
kuuseac3a8882019-10-03 10:48:06 +0200142
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus client, forwarded to LcmBase
        :param lcm_tasks: task registry shared with the LCM main loop
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # DB and filesystem handles come from process-wide singletons
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local mutations do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # Helm-based execution environment connector (shares the n2vc DB callback)
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # Helm v2 connector for KDU deployment (no DB-update callback)
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # Helm v3 connector for KDU deployment (no DB-update callback)
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # Juju-bundle connector; status changes are pushed back via _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # dispatch table: KDU model type (as found in descriptors) -> k8s connector
        # NOTE(review): "helm-chart" maps to the helm2 connector but "chart" maps
        # to helm3 — confirm this asymmetry is intended
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # dispatch table: VCA (execution environment) type -> connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # dispatch table: LCM operation name -> RO status-poll coroutine
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
231
tierno2357f4e2020-10-19 16:38:59 +0000232 @staticmethod
233 def increment_ip_mac(ip_mac, vm_index=1):
234 if not isinstance(ip_mac, str):
235 return ip_mac
236 try:
237 # try with ipv4 look for last dot
238 i = ip_mac.rfind(".")
239 if i > 0:
240 i += 1
241 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
242 # try with ipv6 or mac look for last colon. Operate in hex
243 i = ip_mac.rfind(":")
244 if i > 0:
245 i += 1
246 # format in hex, len can be 2 for mac or 4 for ipv6
garciadeblas5697b8b2021-03-24 09:17:02 +0100247 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
248 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
249 )
tierno2357f4e2020-10-19 16:38:59 +0000250 except Exception:
251 pass
252 return None
253
quilesj3655ae02019-12-12 16:08:35 +0000254 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200255
quilesj3655ae02019-12-12 16:08:35 +0000256 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
257
258 try:
259 # TODO filter RO descriptor fields...
260
261 # write to database
262 db_dict = dict()
263 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
garciadeblas5697b8b2021-03-24 09:17:02 +0100264 db_dict["deploymentStatus"] = ro_descriptor
quilesj3655ae02019-12-12 16:08:35 +0000265 self.update_db_2("nsrs", nsrs_id, db_dict)
266
267 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100268 self.logger.warn(
269 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
270 )
quilesj3655ae02019-12-12 16:08:35 +0000271
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback invoked by N2VC when juju data changes; refresh NS status in DB.

        Reads the nsrs record, asks N2VC for the current VCA status of the NS,
        derives per-VCA configurationStatus transitions and an overall
        READY/DEGRADED nsState, and writes the result back to the nsrs record.
        Errors (other than cancellation/timeout) are logged and swallowed.

        :param table: source table of the change (unused here beyond logging)
        :param filter: DB filter; only its '_id' (the nsr id) is used
        :param path: dotted path of the changed field; its last segment is
            expected to be the VCA index
        :param updated_data: changed data (unused here beyond logging)
        :param vca_id: optional VCA id passed through to n2vc.get_status
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # VCA index is the last dotted segment of the changed path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # reconcile stored config status with live VCA status:
                # BROKEN recovers to READY unless juju reports 'failed',
                # and a live 'failed' marks a non-BROKEN entry as BROKEN
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeouts must propagate to the caller's task machinery
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200373
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record

        Queries the matching k8s connector (per self.k8scluster_map) for the
        KDU's full status and stores it under vcaStatus[nsr_id] in the nsrs
        record. Errors other than cancellation/timeout are logged and swallowed.

        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id; only its '_id' is read
        :param vca_id: optional VCA id forwarded to status_kdu
        :param cluster_type: The cluster type (juju, k8s); key into k8scluster_map
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #                   .format(cluster_uuid, kdu_instance, filter))

        # NOTE(review): filter defaults to None but is dereferenced here —
        # callers are presumably always passing it; confirm
        nsr_id = filter.get("_id")
        try:
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeouts must propagate
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
ksaikiranr656b6dd2021-02-19 10:25:18 +0530413
tierno72ef84f2020-10-06 08:22:07 +0000414 @staticmethod
415 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
416 try:
Luisccdc2162022-07-01 14:35:49 +0000417 env = Environment(undefined=StrictUndefined, autoescape=True)
tierno72ef84f2020-10-06 08:22:07 +0000418 template = env.from_string(cloud_init_text)
419 return template.render(additional_params or {})
420 except UndefinedError as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100421 raise LcmException(
422 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
423 "file, must be provided in the instantiation parameters inside the "
424 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
425 )
tierno72ef84f2020-10-06 08:22:07 +0000426 except (TemplateError, TemplateNotFound) as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100427 raise LcmException(
428 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
429 vnfd_id, vdu_id, e
430 )
431 )
tierno72ef84f2020-10-06 08:22:07 +0000432
bravof922c4172020-11-24 21:21:43 -0300433 def _get_vdu_cloud_init_content(self, vdu, vnfd):
434 cloud_init_content = cloud_init_file = None
tierno72ef84f2020-10-06 08:22:07 +0000435 try:
tierno72ef84f2020-10-06 08:22:07 +0000436 if vdu.get("cloud-init-file"):
437 base_folder = vnfd["_admin"]["storage"]
bravof486707f2021-11-08 17:18:50 -0300438 if base_folder["pkg-dir"]:
439 cloud_init_file = "{}/{}/cloud_init/{}".format(
440 base_folder["folder"],
441 base_folder["pkg-dir"],
442 vdu["cloud-init-file"],
443 )
444 else:
445 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
446 base_folder["folder"],
447 vdu["cloud-init-file"],
448 )
tierno72ef84f2020-10-06 08:22:07 +0000449 with self.fs.file_open(cloud_init_file, "r") as ci_file:
450 cloud_init_content = ci_file.read()
451 elif vdu.get("cloud-init"):
452 cloud_init_content = vdu["cloud-init"]
453
454 return cloud_init_content
455 except FsException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100456 raise LcmException(
457 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
458 vnfd["id"], vdu["id"], cloud_init_file, e
459 )
460 )
tierno72ef84f2020-10-06 08:22:07 +0000461
tierno72ef84f2020-10-06 08:22:07 +0000462 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
garciadeblas5697b8b2021-03-24 09:17:02 +0100463 vdur = next(
aticig349aa462022-05-19 12:29:35 +0300464 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
garciadeblas5697b8b2021-03-24 09:17:02 +0100465 )
tierno72ef84f2020-10-06 08:22:07 +0000466 additional_params = vdur.get("additionalParams")
bravof922c4172020-11-24 21:21:43 -0300467 return parse_yaml_strings(additional_params)
tierno72ef84f2020-10-06 08:22:07 +0000468
gcalvino35be9152018-12-20 09:33:12 +0100469 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
tierno59d22d22018-09-25 18:10:19 +0200470 """
471 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
472 :param vnfd: input vnfd
473 :param new_id: overrides vnf id if provided
tierno8a518872018-12-21 13:42:14 +0000474 :param additionalParams: Instantiation params for VNFs provided
gcalvino35be9152018-12-20 09:33:12 +0100475 :param nsrId: Id of the NSR
tierno59d22d22018-09-25 18:10:19 +0200476 :return: copy of vnfd
477 """
tierno72ef84f2020-10-06 08:22:07 +0000478 vnfd_RO = deepcopy(vnfd)
479 # remove unused by RO configuration, monitoring, scaling and internal keys
480 vnfd_RO.pop("_id", None)
481 vnfd_RO.pop("_admin", None)
tierno72ef84f2020-10-06 08:22:07 +0000482 vnfd_RO.pop("monitoring-param", None)
483 vnfd_RO.pop("scaling-group-descriptor", None)
484 vnfd_RO.pop("kdu", None)
485 vnfd_RO.pop("k8s-cluster", None)
486 if new_id:
487 vnfd_RO["id"] = new_id
tierno8a518872018-12-21 13:42:14 +0000488
tierno72ef84f2020-10-06 08:22:07 +0000489 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
490 for vdu in get_iterable(vnfd_RO, "vdu"):
491 vdu.pop("cloud-init-file", None)
492 vdu.pop("cloud-init", None)
493 return vnfd_RO
tierno59d22d22018-09-25 18:10:19 +0200494
tierno2357f4e2020-10-19 16:38:59 +0000495 @staticmethod
496 def ip_profile_2_RO(ip_profile):
497 RO_ip_profile = deepcopy(ip_profile)
498 if "dns-server" in RO_ip_profile:
499 if isinstance(RO_ip_profile["dns-server"], list):
500 RO_ip_profile["dns-address"] = []
501 for ds in RO_ip_profile.pop("dns-server"):
garciadeblas5697b8b2021-03-24 09:17:02 +0100502 RO_ip_profile["dns-address"].append(ds["address"])
tierno2357f4e2020-10-19 16:38:59 +0000503 else:
504 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
505 if RO_ip_profile.get("ip-version") == "ipv4":
506 RO_ip_profile["ip-version"] = "IPv4"
507 if RO_ip_profile.get("ip-version") == "ipv6":
508 RO_ip_profile["ip-version"] = "IPv6"
509 if "dhcp-params" in RO_ip_profile:
510 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
511 return RO_ip_profile
512
bravof922c4172020-11-24 21:21:43 -0300513 def _get_ro_vim_id_for_vim_account(self, vim_account):
514 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
515 if db_vim["_admin"]["operationalState"] != "ENABLED":
garciadeblas5697b8b2021-03-24 09:17:02 +0100516 raise LcmException(
517 "VIM={} is not available. operationalState={}".format(
518 vim_account, db_vim["_admin"]["operationalState"]
519 )
520 )
bravof922c4172020-11-24 21:21:43 -0300521 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
522 return RO_vim_id
tierno59d22d22018-09-25 18:10:19 +0200523
bravof922c4172020-11-24 21:21:43 -0300524 def get_ro_wim_id_for_wim_account(self, wim_account):
525 if isinstance(wim_account, str):
526 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
527 if db_wim["_admin"]["operationalState"] != "ENABLED":
garciadeblas5697b8b2021-03-24 09:17:02 +0100528 raise LcmException(
529 "WIM={} is not available. operationalState={}".format(
530 wim_account, db_wim["_admin"]["operationalState"]
531 )
532 )
bravof922c4172020-11-24 21:21:43 -0300533 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
534 return RO_wim_id
535 else:
536 return wim_account
tierno59d22d22018-09-25 18:10:19 +0200537
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scale-out/scale-in to the vnfr's vdur list, in DB and in memory.

        Scale-out clones the newest existing vdur of each vdu id (or, when
        scaling from 0, the saved vdur-template) adjusting ids, count-index and
        fixed ip/mac addresses. Scale-in either marks vdurs as DELETING
        (mark_delete=True) or pulls them from the DB one by one; scaling the
        last vdur to 0 saves it as vdur-template for a later scale-out.
        The passed db_vnfr dict is refreshed from the DB at the end.

        :param db_vnfr: vnfr record (modified in place at the end)
        :param vdu_create: dict vdu-id-ref -> number of instances to add
        :param vdu_delete: dict vdu-id-ref -> number of instances to remove
        :param mark_delete: if True, only mark status DELETING instead of pulling
        :return: None
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone from the newest vdur of this vdu id (hence reversed)
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    # new instances continue the source vdur's count-index
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per instance;
                        # dynamic ones are cleared so the VIM reassigns them
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    # mark the newest vdu_count instances as DELETING
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            # set_one expects None rather than an empty push dict
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
tierno27246d82018-09-27 15:59:09 +0200649
tiernof578e552018-11-08 19:07:20 +0100650 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
651 """
652 Updates database nsr with the RO info for the created vld
653 :param ns_update_nsr: dictionary to be filled with the updated info
654 :param db_nsr: content of db_nsr. This is also modified
655 :param nsr_desc_RO: nsr descriptor from RO
656 :return: Nothing, LcmException is raised on errors
657 """
658
659 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
660 for net_RO in get_iterable(nsr_desc_RO, "nets"):
661 if vld["id"] != net_RO.get("ns_net_osm_id"):
662 continue
663 vld["vim-id"] = net_RO.get("vim_net_id")
664 vld["name"] = net_RO.get("vim_name")
665 vld["status"] = net_RO.get("status")
666 vld["status-detailed"] = net_RO.get("error_msg")
667 ns_update_nsr["vld.{}".format(vld_index)] = vld
668 break
669 else:
garciadeblas5697b8b2021-03-24 09:17:02 +0100670 raise LcmException(
671 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
672 )
tiernof578e552018-11-08 19:07:20 +0100673
tiernoe876f672020-02-13 14:34:48 +0000674 def set_vnfr_at_error(self, db_vnfrs, error_text):
675 try:
676 for db_vnfr in db_vnfrs.values():
677 vnfr_update = {"status": "ERROR"}
678 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
679 if "status" not in vdur:
680 vdur["status"] = "ERROR"
681 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
682 if error_text:
683 vdur["status-detailed"] = str(error_text)
garciadeblas5697b8b2021-03-24 09:17:02 +0100684 vnfr_update[
685 "vdur.{}.status-detailed".format(vdu_index)
686 ] = "ERROR"
tiernoe876f672020-02-13 14:34:48 +0000687 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
688 except DbException as e:
689 self.logger.error("Cannot update vnf. {}".format(e))
690
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        # NOTE: the nested loops below rely on Python's for/else: the "else"
        # suite runs only when the loop ends WITHOUT a break, i.e. when no
        # matching RO element was found, and then an exception is raised.
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';'; keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    # counts RO VMs already seen with this vdu_osm_id, so that the
                    # vdur with count-index N is paired with the N-th RO replica
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by RO; nothing to update here
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            # same ';'-separated convention as the vnf-level address
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy interface addresses, matched by RO internal_name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
tierno59d22d22018-09-25 18:10:19 +0200787
tierno5ee02052019-12-05 19:55:02 +0000788 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000789 """
790 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000791 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000792 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
793 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
794 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
795 """
tierno5ee02052019-12-05 19:55:02 +0000796 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
797 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000798 mapping = {}
799 ns_config_info = {"osm-config-mapping": mapping}
800 for vca in vca_deployed_list:
801 if not vca["member-vnf-index"]:
802 continue
803 if not vca["vdu_id"]:
804 mapping[vca["member-vnf-index"]] = vca["application"]
805 else:
garciadeblas5697b8b2021-03-24 09:17:02 +0100806 mapping[
807 "{}.{}.{}".format(
808 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
809 )
810 ] = vca["application"]
tiernoc3f2a822019-11-05 13:45:04 +0000811 return ns_config_info
812
    async def _instantiate_ng_ro(
        self,
        logging_text,
        nsr_id,
        nsd,
        db_nsr,
        db_nslcmop,
        db_vnfrs,
        db_vnfds,
        n2vc_key_list,
        stage,
        start_deploy,
        timeout_ns_deploy,
    ):
        """
        Build the NG-RO "target" deployment descriptor from the NS/VNF database
        records plus the instantiation parameters, send it to NG-RO and wait
        until the deployment finishes.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nsd: NS descriptor content
        :param db_nsr: NS record content (read; vld/image/flavor are deep-copied)
        :param db_nslcmop: current ns lcm operation record
        :param db_vnfrs: dict member-vnf-index -> vnfr content
        :param db_vnfds: list of vnfd contents
        :param n2vc_key_list: ssh public keys to inject in VDUs (added to the
            instantiation ssh_keys)
        :param stage: progress list, index 2 is updated while waiting for RO
        :param start_deploy: start time handed to _wait_ng_ro
        :param timeout_ns_deploy: max seconds to wait for RO
        :return: None. NgRoException (among others) is raised on failure.
        """

        # cache of vim_account records, so each one is read from the db only once
        db_vims = {}

        def get_vim_account(vim_account_id):
            # return the vim_account record, reading the database at most once per id
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        # modify target_vld info with instantiation parameters
        def parse_vld_instantiation_params(
            target_vim, target_vld, vld_params, target_sdn
        ):
            if vld_params.get("ip-profile"):
                target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
                    "ip-profile"
                ]
            if vld_params.get("provider-network"):
                target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
                    "provider-network"
                ]
                if "sdn-ports" in vld_params["provider-network"] and target_sdn:
                    target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                        "provider-network"
                    ]["sdn-ports"]
            if vld_params.get("wimAccountId"):
                target_wim = "wim:{}".format(vld_params["wimAccountId"])
                target_vld["vim_info"][target_wim] = {}
            for param in ("vim-network-name", "vim-network-id"):
                if vld_params.get(param):
                    if isinstance(vld_params[param], dict):
                        # dict form: one network name/id per vim account
                        for vim, vim_net in vld_params[param].items():
                            other_target_vim = "vim:" + vim
                            populate_dict(
                                target_vld["vim_info"],
                                (other_target_vim, param.replace("-", "_")),
                                vim_net,
                            )
                    else:  # isinstance str
                        target_vld["vim_info"][target_vim][
                            param.replace("-", "_")
                        ] = vld_params[param]
            if vld_params.get("common_id"):
                target_vld["common_id"] = vld_params.get("common_id")

        # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
        def update_ns_vld_target(target, ns_params):
            for vnf_params in ns_params.get("vnf", ()):
                if vnf_params.get("vimAccountId"):
                    target_vnf = next(
                        (
                            vnfr
                            for vnfr in db_vnfrs.values()
                            if vnf_params["member-vnf-index"]
                            == vnfr["member-vnf-index-ref"]
                        ),
                        None,
                    )
                    vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
                    for a_index, a_vld in enumerate(target["ns"]["vld"]):
                        target_vld = find_in_list(
                            get_iterable(vdur, "interfaces"),
                            lambda iface: iface.get("ns-vld-id") == a_vld["name"],
                        )

                        vld_params = find_in_list(
                            get_iterable(ns_params, "vld"),
                            lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
                        )
                        if target_vld:

                            if vnf_params.get("vimAccountId") not in a_vld.get(
                                "vim_info", {}
                            ):
                                # reuse the vim_network_name of an existing vim_info
                                # entry (first one found, "" if none declares it)
                                target_vim_network_list = [
                                    v for _, v in a_vld.get("vim_info").items()
                                ]
                                target_vim_network_name = next(
                                    (
                                        item.get("vim_network_name", "")
                                        for item in target_vim_network_list
                                    ),
                                    "",
                                )

                                target["ns"]["vld"][a_index].get("vim_info").update(
                                    {
                                        "vim:{}".format(vnf_params["vimAccountId"]): {
                                            "vim_network_name": target_vim_network_name,
                                        }
                                    }
                                )

                                if vld_params:
                                    for param in ("vim-network-name", "vim-network-id"):
                                        if vld_params.get(param) and isinstance(
                                            vld_params[param], dict
                                        ):
                                            for vim, vim_net in vld_params[
                                                param
                                            ].items():
                                                other_target_vim = "vim:" + vim
                                                populate_dict(
                                                    target["ns"]["vld"][a_index].get(
                                                        "vim_info"
                                                    ),
                                                    (
                                                        other_target_vim,
                                                        param.replace("-", "_"),
                                                    ),
                                                    vim_net,
                                                )

        nslcmop_id = db_nslcmop["_id"]
        # skeleton of the RO target; vld/vnf lists and the per-vim info of
        # image/flavor entries are filled below
        target = {
            "name": db_nsr["name"],
            "ns": {"vld": []},
            "vnf": [],
            "image": deepcopy(db_nsr["image"]),
            "flavor": deepcopy(db_nsr["flavor"]),
            "action_id": nslcmop_id,
            "cloud_init_content": {},
        }
        for image in target["image"]:
            image["vim_info"] = {}
        for flavor in target["flavor"]:
            flavor["vim_info"] = {}
        if db_nsr.get("affinity-or-anti-affinity-group"):
            target["affinity-or-anti-affinity-group"] = deepcopy(
                db_nsr["affinity-or-anti-affinity-group"]
            )
            for affinity_or_anti_affinity_group in target[
                "affinity-or-anti-affinity-group"
            ]:
                affinity_or_anti_affinity_group["vim_info"] = {}

        if db_nslcmop.get("lcmOperationType") != "instantiate":
            # get parameters of instantiation:
            # for other operation types reuse the params of the last
            # "instantiate" operation of this ns instance
            db_nslcmop_instantiate = self.db.get_list(
                "nslcmops",
                {
                    "nsInstanceId": db_nslcmop["nsInstanceId"],
                    "lcmOperationType": "instantiate",
                },
            )[-1]
            ns_params = db_nslcmop_instantiate.get("operationParams")
        else:
            ns_params = db_nslcmop.get("operationParams")
        ssh_keys_instantiation = ns_params.get("ssh_keys") or []
        ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])

        # cp2target maps "member_vnf:<index>.<cpd-id>" -> "nsrs:<id>:vld.<index>"
        cp2target = {}
        for vld_index, vld in enumerate(db_nsr.get("vld")):
            target_vim = "vim:{}".format(ns_params["vimAccountId"])
            target_vld = {
                "id": vld["id"],
                "name": vld["name"],
                "mgmt-network": vld.get("mgmt-network", False),
                "type": vld.get("type"),
                "vim_info": {
                    target_vim: {
                        "vim_network_name": vld.get("vim-network-name"),
                        "vim_account_id": ns_params["vimAccountId"],
                    }
                },
            }
            # check if this network needs SDN assist
            if vld.get("pci-interfaces"):
                db_vim = get_vim_account(ns_params["vimAccountId"])
                sdnc_id = db_vim["config"].get("sdn-controller")
                if sdnc_id:
                    sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
                    target_sdn = "sdn:{}".format(sdnc_id)
                    target_vld["vim_info"][target_sdn] = {
                        "sdn": True,
                        "target_vim": target_vim,
                        "vlds": [sdn_vld],
                        "type": vld.get("type"),
                    }

            nsd_vnf_profiles = get_vnf_profiles(nsd)
            for nsd_vnf_profile in nsd_vnf_profiles:
                for cp in nsd_vnf_profile["virtual-link-connectivity"]:
                    if cp["virtual-link-profile-id"] == vld["id"]:
                        cp2target[
                            "member_vnf:{}.{}".format(
                                cp["constituent-cpd-id"][0][
                                    "constituent-base-element-id"
                                ],
                                cp["constituent-cpd-id"][0]["constituent-cpd-id"],
                            )
                        ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)

            # check at nsd descriptor, if there is an ip-profile
            vld_params = {}
            nsd_vlp = find_in_list(
                get_virtual_link_profiles(nsd),
                lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
                == vld["id"],
            )
            if (
                nsd_vlp
                and nsd_vlp.get("virtual-link-protocol-data")
                and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
            ):
                # translate IM l3-protocol-data keys to the RO ip-profile keys
                ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
                    "l3-protocol-data"
                ]
                ip_profile_dest_data = {}
                if "ip-version" in ip_profile_source_data:
                    ip_profile_dest_data["ip-version"] = ip_profile_source_data[
                        "ip-version"
                    ]
                if "cidr" in ip_profile_source_data:
                    ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
                        "cidr"
                    ]
                if "gateway-ip" in ip_profile_source_data:
                    ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
                        "gateway-ip"
                    ]
                if "dhcp-enabled" in ip_profile_source_data:
                    ip_profile_dest_data["dhcp-params"] = {
                        "enabled": ip_profile_source_data["dhcp-enabled"]
                    }
                vld_params["ip-profile"] = ip_profile_dest_data

            # update vld_params with instantiation params
            vld_instantiation_params = find_in_list(
                get_iterable(ns_params, "vld"),
                lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
            )
            if vld_instantiation_params:
                vld_params.update(vld_instantiation_params)
            parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
            target["ns"]["vld"].append(target_vld)
        # Update the target ns_vld if vnf vim_account is overriden by instantiation params
        update_ns_vld_target(target, ns_params)

        for vnfr in db_vnfrs.values():
            vnfd = find_in_list(
                db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
            )
            vnf_params = find_in_list(
                get_iterable(ns_params, "vnf"),
                lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
            )
            target_vnf = deepcopy(vnfr)
            target_vim = "vim:{}".format(vnfr["vim-account-id"])
            for vld in target_vnf.get("vld", ()):
                # check if connected to a ns.vld, to fill target'
                vnf_cp = find_in_list(
                    vnfd.get("int-virtual-link-desc", ()),
                    lambda cpd: cpd.get("id") == vld["id"],
                )
                if vnf_cp:
                    ns_cp = "member_vnf:{}.{}".format(
                        vnfr["member-vnf-index-ref"], vnf_cp["id"]
                    )
                    if cp2target.get(ns_cp):
                        vld["target"] = cp2target[ns_cp]

                vld["vim_info"] = {
                    target_vim: {"vim_network_name": vld.get("vim-network-name")}
                }
                # check if this network needs SDN assist
                target_sdn = None
                if vld.get("pci-interfaces"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    sdnc_id = db_vim["config"].get("sdn-controller")
                    if sdnc_id:
                        sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
                        target_sdn = "sdn:{}".format(sdnc_id)
                        vld["vim_info"][target_sdn] = {
                            "sdn": True,
                            "target_vim": target_vim,
                            "vlds": [sdn_vld],
                            "type": vld.get("type"),
                        }

                # check at vnfd descriptor, if there is an ip-profile
                vld_params = {}
                vnfd_vlp = find_in_list(
                    get_virtual_link_profiles(vnfd),
                    lambda a_link_profile: a_link_profile["id"] == vld["id"],
                )
                if (
                    vnfd_vlp
                    and vnfd_vlp.get("virtual-link-protocol-data")
                    and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
                ):
                    # same IM -> RO ip-profile translation as the ns-level vlds
                    ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
                        "l3-protocol-data"
                    ]
                    ip_profile_dest_data = {}
                    if "ip-version" in ip_profile_source_data:
                        ip_profile_dest_data["ip-version"] = ip_profile_source_data[
                            "ip-version"
                        ]
                    if "cidr" in ip_profile_source_data:
                        ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
                            "cidr"
                        ]
                    if "gateway-ip" in ip_profile_source_data:
                        ip_profile_dest_data[
                            "gateway-address"
                        ] = ip_profile_source_data["gateway-ip"]
                    if "dhcp-enabled" in ip_profile_source_data:
                        ip_profile_dest_data["dhcp-params"] = {
                            "enabled": ip_profile_source_data["dhcp-enabled"]
                        }

                    vld_params["ip-profile"] = ip_profile_dest_data
                # update vld_params with instantiation params
                if vnf_params:
                    vld_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "internal-vld"),
                        lambda i_vld: i_vld["name"] == vld["id"],
                    )
                    if vld_instantiation_params:
                        vld_params.update(vld_instantiation_params)
                parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)

            vdur_list = []
            for vdur in target_vnf.get("vdur", ()):
                if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
                    continue  # This vdu must not be created
                vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}

                self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))

                if ssh_keys_all:
                    # decide which keys to inject: vdu-level config wins over
                    # vnf-level config; bare instantiation keys go to mgmt ifaces
                    vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
                    vnf_configuration = get_configuration(vnfd, vnfd["id"])
                    if (
                        vdu_configuration
                        and vdu_configuration.get("config-access")
                        and vdu_configuration.get("config-access").get("ssh-access")
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vdu_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif (
                        vnf_configuration
                        and vnf_configuration.get("config-access")
                        and vnf_configuration.get("config-access").get("ssh-access")
                        and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vnf_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif ssh_keys_instantiation and find_in_list(
                        vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
                    ):
                        vdur["ssh-keys"] = ssh_keys_instantiation

                self.logger.debug("NS > vdur > {}".format(vdur))

                vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
                # cloud-init
                if vdud.get("cloud-init-file"):
                    vdur["cloud-init"] = "{}:file:{}".format(
                        vnfd["_id"], vdud.get("cloud-init-file")
                    )
                    # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
                    if vdur["cloud-init"] not in target["cloud_init_content"]:
                        base_folder = vnfd["_admin"]["storage"]
                        if base_folder["pkg-dir"]:
                            cloud_init_file = "{}/{}/cloud_init/{}".format(
                                base_folder["folder"],
                                base_folder["pkg-dir"],
                                vdud.get("cloud-init-file"),
                            )
                        else:
                            cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                                base_folder["folder"],
                                vdud.get("cloud-init-file"),
                            )
                        with self.fs.file_open(cloud_init_file, "r") as ci_file:
                            target["cloud_init_content"][
                                vdur["cloud-init"]
                            ] = ci_file.read()
                elif vdud.get("cloud-init"):
                    vdur["cloud-init"] = "{}:vdu:{}".format(
                        vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
                    )
                    # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
                    target["cloud_init_content"][vdur["cloud-init"]] = vdud[
                        "cloud-init"
                    ]
                vdur["additionalParams"] = vdur.get("additionalParams") or {}
                deploy_params_vdu = self._format_additional_params(
                    vdur.get("additionalParams") or {}
                )
                deploy_params_vdu["OSM"] = get_osm_params(
                    vnfr, vdur["vdu-id-ref"], vdur["count-index"]
                )
                vdur["additionalParams"] = deploy_params_vdu

                # flavor
                ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
                if target_vim not in ns_flavor["vim_info"]:
                    ns_flavor["vim_info"][target_vim] = {}

                # deal with images
                # in case alternative images are provided we must check if they should be applied
                # for the vim_type, modify the vim_type taking into account
                ns_image_id = int(vdur["ns-image-id"])
                if vdur.get("alt-image-ids"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    vim_type = db_vim["vim_type"]
                    for alt_image_id in vdur.get("alt-image-ids"):
                        ns_alt_image = target["image"][int(alt_image_id)]
                        if vim_type == ns_alt_image.get("vim-type"):
                            # must use alternative image
                            self.logger.debug(
                                "use alternative image id: {}".format(alt_image_id)
                            )
                            ns_image_id = alt_image_id
                            vdur["ns-image-id"] = ns_image_id
                            break
                ns_image = target["image"][int(ns_image_id)]
                if target_vim not in ns_image["vim_info"]:
                    ns_image["vim_info"][target_vim] = {}

                # Affinity groups
                if vdur.get("affinity-or-anti-affinity-group-id"):
                    for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
                        ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
                        if target_vim not in ns_ags["vim_info"]:
                            ns_ags["vim_info"][target_vim] = {}

                # NOTE(review): this replaces the vim_info set at the top of this
                # loop iteration ({"vim_account_id": ...}) — confirm intended
                vdur["vim_info"] = {target_vim: {}}
                # instantiation parameters
                if vnf_params:
                    vdu_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "vdu"),
                        lambda i_vdu: i_vdu["id"] == vdud["id"],
                    )
                    if vdu_instantiation_params:
                        # Parse the vdu_volumes from the instantiation params
                        vdu_volumes = get_volumes_from_instantiation_params(
                            vdu_instantiation_params, vdud
                        )
                        vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
                vdur_list.append(vdur)
            target_vnf["vdur"] = vdur_list
            target["vnf"].append(target_vnf)

        self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
        desc = await self.RO.deploy(nsr_id, target)
        self.logger.debug("RO return > {}".format(desc))
        action_id = desc["action_id"]
        await self._wait_ng_ro(
            nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage,
            operation="instantiation"
        )

        # Updating NSR
        db_nsr_update = {
            "_admin.deployed.RO.operational-status": "running",
            "detailed-status": " ".join(stage),
        }
        # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)
        self.logger.debug(
            logging_text + "ns deployed at RO. RO_id={}".format(action_id)
        )
        return
1301
garciadeblas5697b8b2021-03-24 09:17:02 +01001302 async def _wait_ng_ro(
1303 self,
1304 nsr_id,
1305 action_id,
1306 nslcmop_id=None,
1307 start_time=None,
1308 timeout=600,
1309 stage=None,
garciadeblas07f4e4c2022-06-09 09:42:58 +02001310 operation=None,
garciadeblas5697b8b2021-03-24 09:17:02 +01001311 ):
tierno69f0d382020-05-07 13:08:09 +00001312 detailed_status_old = None
1313 db_nsr_update = {}
tierno2357f4e2020-10-19 16:38:59 +00001314 start_time = start_time or time()
tierno69f0d382020-05-07 13:08:09 +00001315 while time() <= start_time + timeout:
garciadeblas07f4e4c2022-06-09 09:42:58 +02001316 desc_status = await self.op_status_map[operation](nsr_id, action_id)
bravof922c4172020-11-24 21:21:43 -03001317 self.logger.debug("Wait NG RO > {}".format(desc_status))
tierno69f0d382020-05-07 13:08:09 +00001318 if desc_status["status"] == "FAILED":
1319 raise NgRoException(desc_status["details"])
1320 elif desc_status["status"] == "BUILD":
tierno2357f4e2020-10-19 16:38:59 +00001321 if stage:
1322 stage[2] = "VIM: ({})".format(desc_status["details"])
tierno69f0d382020-05-07 13:08:09 +00001323 elif desc_status["status"] == "DONE":
tierno2357f4e2020-10-19 16:38:59 +00001324 if stage:
1325 stage[2] = "Deployed at VIM"
tierno69f0d382020-05-07 13:08:09 +00001326 break
1327 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001328 assert False, "ROclient.check_ns_status returns unknown {}".format(
1329 desc_status["status"]
1330 )
tierno2357f4e2020-10-19 16:38:59 +00001331 if stage and nslcmop_id and stage[2] != detailed_status_old:
tierno69f0d382020-05-07 13:08:09 +00001332 detailed_status_old = stage[2]
1333 db_nsr_update["detailed-status"] = " ".join(stage)
1334 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1335 self._write_op_status(nslcmop_id, stage)
bravof922c4172020-11-24 21:21:43 -03001336 await asyncio.sleep(15, loop=self.loop)
tierno69f0d382020-05-07 13:08:09 +00001337 else: # timeout_ns_deploy
1338 raise NgRoException("Timeout waiting ns to deploy")
1339
garciadeblas5697b8b2021-03-24 09:17:02 +01001340 async def _terminate_ng_ro(
1341 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1342 ):
tierno69f0d382020-05-07 13:08:09 +00001343 db_nsr_update = {}
1344 failed_detail = []
1345 action_id = None
1346 start_deploy = time()
1347 try:
1348 target = {
1349 "ns": {"vld": []},
1350 "vnf": [],
1351 "image": [],
1352 "flavor": [],
garciadeblas5697b8b2021-03-24 09:17:02 +01001353 "action_id": nslcmop_id,
tierno69f0d382020-05-07 13:08:09 +00001354 }
1355 desc = await self.RO.deploy(nsr_id, target)
1356 action_id = desc["action_id"]
1357 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1358 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
garciadeblas5697b8b2021-03-24 09:17:02 +01001359 self.logger.debug(
1360 logging_text
1361 + "ns terminate action at RO. action_id={}".format(action_id)
1362 )
tierno69f0d382020-05-07 13:08:09 +00001363
1364 # wait until done
1365 delete_timeout = 20 * 60 # 20 minutes
garciadeblas5697b8b2021-03-24 09:17:02 +01001366 await self._wait_ng_ro(
garciadeblas07f4e4c2022-06-09 09:42:58 +02001367 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage,
1368 operation="termination"
garciadeblas5697b8b2021-03-24 09:17:02 +01001369 )
tierno69f0d382020-05-07 13:08:09 +00001370
1371 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1372 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1373 # delete all nsr
1374 await self.RO.delete(nsr_id)
1375 except Exception as e:
1376 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1377 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1378 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1379 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
garciadeblas5697b8b2021-03-24 09:17:02 +01001380 self.logger.debug(
1381 logging_text + "RO_action_id={} already deleted".format(action_id)
1382 )
tierno69f0d382020-05-07 13:08:09 +00001383 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1384 failed_detail.append("delete conflict: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01001385 self.logger.debug(
1386 logging_text
1387 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1388 )
tierno69f0d382020-05-07 13:08:09 +00001389 else:
1390 failed_detail.append("delete error: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01001391 self.logger.error(
1392 logging_text
1393 + "RO_action_id={} delete error: {}".format(action_id, e)
1394 )
tierno69f0d382020-05-07 13:08:09 +00001395
1396 if failed_detail:
1397 stage[2] = "Error deleting from VIM"
1398 else:
1399 stage[2] = "Deleted from VIM"
1400 db_nsr_update["detailed-status"] = " ".join(stage)
1401 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1402 self._write_op_status(nslcmop_id, stage)
1403
1404 if failed_detail:
1405 raise LcmException("; ".join(failed_detail))
1406 return
1407
garciadeblas5697b8b2021-03-24 09:17:02 +01001408 async def instantiate_RO(
1409 self,
1410 logging_text,
1411 nsr_id,
1412 nsd,
1413 db_nsr,
1414 db_nslcmop,
1415 db_vnfrs,
1416 db_vnfds,
1417 n2vc_key_list,
1418 stage,
1419 ):
tiernoe95ed362020-04-23 08:24:57 +00001420 """
1421 Instantiate at RO
1422 :param logging_text: preffix text to use at logging
1423 :param nsr_id: nsr identity
1424 :param nsd: database content of ns descriptor
1425 :param db_nsr: database content of ns record
1426 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1427 :param db_vnfrs:
bravof922c4172020-11-24 21:21:43 -03001428 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
tiernoe95ed362020-04-23 08:24:57 +00001429 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1430 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1431 :return: None or exception
1432 """
tiernoe876f672020-02-13 14:34:48 +00001433 try:
tiernoe876f672020-02-13 14:34:48 +00001434 start_deploy = time()
1435 ns_params = db_nslcmop.get("operationParams")
1436 if ns_params and ns_params.get("timeout_ns_deploy"):
1437 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1438 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001439 timeout_ns_deploy = self.timeout.get(
1440 "ns_deploy", self.timeout_ns_deploy
1441 )
quilesj7e13aeb2019-10-08 13:34:55 +02001442
tiernoe876f672020-02-13 14:34:48 +00001443 # Check for and optionally request placement optimization. Database will be updated if placement activated
1444 stage[2] = "Waiting for Placement."
tierno8790a3d2020-04-23 22:49:52 +00001445 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1446 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1447 for vnfr in db_vnfrs.values():
1448 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1449 break
1450 else:
1451 ns_params["vimAccountId"] == vnfr["vim-account-id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001452
garciadeblas5697b8b2021-03-24 09:17:02 +01001453 return await self._instantiate_ng_ro(
1454 logging_text,
1455 nsr_id,
1456 nsd,
1457 db_nsr,
1458 db_nslcmop,
1459 db_vnfrs,
1460 db_vnfds,
1461 n2vc_key_list,
1462 stage,
1463 start_deploy,
1464 timeout_ns_deploy,
1465 )
tierno2357f4e2020-10-19 16:38:59 +00001466 except Exception as e:
tierno067e04a2020-03-31 12:53:13 +00001467 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +00001468 self.set_vnfr_at_error(db_vnfrs, str(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01001469 self.logger.error(
1470 "Error deploying at VIM {}".format(e),
1471 exc_info=not isinstance(
1472 e,
1473 (
1474 ROclient.ROClientException,
1475 LcmException,
1476 DbException,
1477 NgRoException,
1478 ),
1479 ),
1480 )
tiernoe876f672020-02-13 14:34:48 +00001481 raise
quilesj7e13aeb2019-10-08 13:34:55 +02001482
tierno7ecbc342020-09-21 14:05:39 +00001483 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1484 """
1485 Wait for kdu to be up, get ip address
1486 :param logging_text: prefix use for logging
1487 :param nsr_id:
1488 :param vnfr_id:
1489 :param kdu_name:
David Garcia78b6e6d2022-04-29 05:50:46 +02001490 :return: IP address, K8s services
tierno7ecbc342020-09-21 14:05:39 +00001491 """
1492
1493 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1494 nb_tries = 0
1495
1496 while nb_tries < 360:
1497 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
garciadeblas5697b8b2021-03-24 09:17:02 +01001498 kdur = next(
1499 (
1500 x
1501 for x in get_iterable(db_vnfr, "kdur")
1502 if x.get("kdu-name") == kdu_name
1503 ),
1504 None,
1505 )
tierno7ecbc342020-09-21 14:05:39 +00001506 if not kdur:
garciadeblas5697b8b2021-03-24 09:17:02 +01001507 raise LcmException(
1508 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1509 )
tierno7ecbc342020-09-21 14:05:39 +00001510 if kdur.get("status"):
1511 if kdur["status"] in ("READY", "ENABLED"):
David Garcia78b6e6d2022-04-29 05:50:46 +02001512 return kdur.get("ip-address"), kdur.get("services")
tierno7ecbc342020-09-21 14:05:39 +00001513 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001514 raise LcmException(
1515 "target KDU={} is in error state".format(kdu_name)
1516 )
tierno7ecbc342020-09-21 14:05:39 +00001517
1518 await asyncio.sleep(10, loop=self.loop)
1519 nb_tries += 1
1520 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1521
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip addres at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id:
        :param vdu_index:
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address

        Polls the vnfr record every 10 s (up to 360 tries, ~1 hour) until the
        target VM reports ACTIVE and has an ip-address; then, if pub_key and
        user are given, asks RO to inject the ssh key (new-generation RO via a
        deploy action, classic RO via create_action, with up to 20 retries).
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # counts only classic-RO key-injection retries
        target_vdu_id = None  # set once the target VM is ACTIVE with an IP
        ro_retries = 0  # counts polling iterations (overall timeout)

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # locate the vdur that owns the VNF management ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered up by definition; otherwise require ACTIVE
                # from either classic RO ("status") or NG-RO ("vim_status")
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # NOTE(review): "ssh-ky" typo is in the original log message
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: key injection is a deploy action on the vdur
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600, operation="instantiation")
                        break
                    else:
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        # classic-RO path: dedicated add_public_key action
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # RO may not be ready yet: retry up to 20 times (~200 s)
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: VM is up with an IP, we are done
                break

        return ip_address
1698
tierno5ee02052019-12-05 19:55:02 +00001699 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1700 """
1701 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1702 """
1703 my_vca = vca_deployed_list[vca_index]
1704 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
quilesj3655ae02019-12-12 16:08:35 +00001705 # vdu or kdu: no dependencies
tierno5ee02052019-12-05 19:55:02 +00001706 return
1707 timeout = 300
1708 while timeout >= 0:
quilesj3655ae02019-12-12 16:08:35 +00001709 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1710 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1711 configuration_status_list = db_nsr["configurationStatus"]
1712 for index, vca_deployed in enumerate(configuration_status_list):
tierno5ee02052019-12-05 19:55:02 +00001713 if index == vca_index:
quilesj3655ae02019-12-12 16:08:35 +00001714 # myself
tierno5ee02052019-12-05 19:55:02 +00001715 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01001716 if not my_vca.get("member-vnf-index") or (
1717 vca_deployed.get("member-vnf-index")
1718 == my_vca.get("member-vnf-index")
1719 ):
quilesj3655ae02019-12-12 16:08:35 +00001720 internal_status = configuration_status_list[index].get("status")
garciadeblas5697b8b2021-03-24 09:17:02 +01001721 if internal_status == "READY":
quilesj3655ae02019-12-12 16:08:35 +00001722 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01001723 elif internal_status == "BROKEN":
1724 raise LcmException(
1725 "Configuration aborted because dependent charm/s has failed"
1726 )
quilesj3655ae02019-12-12 16:08:35 +00001727 else:
1728 break
tierno5ee02052019-12-05 19:55:02 +00001729 else:
quilesj3655ae02019-12-12 16:08:35 +00001730 # no dependencies, return
tierno5ee02052019-12-05 19:55:02 +00001731 return
1732 await asyncio.sleep(10)
1733 timeout -= 1
tierno5ee02052019-12-05 19:55:02 +00001734
1735 raise LcmException("Configuration aborted because dependent charm/s timeout")
1736
David Garciac1fe90a2021-03-31 19:12:02 +02001737 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
David Garcia5506c182021-10-21 17:03:48 +02001738 vca_id = None
1739 if db_vnfr:
1740 vca_id = deep_get(db_vnfr, ("vca-id",))
1741 elif db_nsr:
1742 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1743 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1744 return vca_id
David Garciac1fe90a2021-03-31 19:12:02 +02001745
garciadeblas5697b8b2021-03-24 09:17:02 +01001746 async def instantiate_N2VC(
1747 self,
1748 logging_text,
1749 vca_index,
1750 nsi_id,
1751 db_nsr,
1752 db_vnfr,
1753 vdu_id,
1754 kdu_name,
1755 vdu_index,
1756 config_descriptor,
1757 deploy_params,
1758 base_folder,
1759 nslcmop_id,
1760 stage,
1761 vca_type,
1762 vca_name,
1763 ee_config_descriptor,
1764 ):
tiernod8323042019-08-09 11:32:23 +00001765 nsr_id = db_nsr["_id"]
1766 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
tiernoda6fb102019-11-23 00:36:52 +00001767 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernod8323042019-08-09 11:32:23 +00001768 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
tiernob996d942020-07-03 14:52:28 +00001769 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
quilesj7e13aeb2019-10-08 13:34:55 +02001770 db_dict = {
garciadeblas5697b8b2021-03-24 09:17:02 +01001771 "collection": "nsrs",
1772 "filter": {"_id": nsr_id},
1773 "path": db_update_entry,
quilesj7e13aeb2019-10-08 13:34:55 +02001774 }
tiernod8323042019-08-09 11:32:23 +00001775 step = ""
1776 try:
quilesj3655ae02019-12-12 16:08:35 +00001777
garciadeblas5697b8b2021-03-24 09:17:02 +01001778 element_type = "NS"
quilesj3655ae02019-12-12 16:08:35 +00001779 element_under_configuration = nsr_id
1780
tiernod8323042019-08-09 11:32:23 +00001781 vnfr_id = None
1782 if db_vnfr:
1783 vnfr_id = db_vnfr["_id"]
tiernob996d942020-07-03 14:52:28 +00001784 osm_config["osm"]["vnf_id"] = vnfr_id
tiernod8323042019-08-09 11:32:23 +00001785
garciadeblas5697b8b2021-03-24 09:17:02 +01001786 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001787
aktas98488ed2021-07-29 17:42:49 +03001788 if vca_type == "native_charm":
1789 index_number = 0
1790 else:
1791 index_number = vdu_index or 0
1792
tiernod8323042019-08-09 11:32:23 +00001793 if vnfr_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01001794 element_type = "VNF"
quilesj3655ae02019-12-12 16:08:35 +00001795 element_under_configuration = vnfr_id
aktas98488ed2021-07-29 17:42:49 +03001796 namespace += ".{}-{}".format(vnfr_id, index_number)
tiernod8323042019-08-09 11:32:23 +00001797 if vdu_id:
aktas98488ed2021-07-29 17:42:49 +03001798 namespace += ".{}-{}".format(vdu_id, index_number)
garciadeblas5697b8b2021-03-24 09:17:02 +01001799 element_type = "VDU"
aktas98488ed2021-07-29 17:42:49 +03001800 element_under_configuration = "{}-{}".format(vdu_id, index_number)
tiernob996d942020-07-03 14:52:28 +00001801 osm_config["osm"]["vdu_id"] = vdu_id
tierno51183952020-04-03 15:48:18 +00001802 elif kdu_name:
aktas98488ed2021-07-29 17:42:49 +03001803 namespace += ".{}".format(kdu_name)
garciadeblas5697b8b2021-03-24 09:17:02 +01001804 element_type = "KDU"
tierno51183952020-04-03 15:48:18 +00001805 element_under_configuration = kdu_name
tiernob996d942020-07-03 14:52:28 +00001806 osm_config["osm"]["kdu_name"] = kdu_name
tiernod8323042019-08-09 11:32:23 +00001807
1808 # Get artifact path
bravof486707f2021-11-08 17:18:50 -03001809 if base_folder["pkg-dir"]:
1810 artifact_path = "{}/{}/{}/{}".format(
1811 base_folder["folder"],
1812 base_folder["pkg-dir"],
1813 "charms"
aticig15db6142022-01-24 12:51:26 +03001814 if vca_type
1815 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
bravof486707f2021-11-08 17:18:50 -03001816 else "helm-charts",
1817 vca_name,
1818 )
1819 else:
1820 artifact_path = "{}/Scripts/{}/{}/".format(
1821 base_folder["folder"],
1822 "charms"
aticig15db6142022-01-24 12:51:26 +03001823 if vca_type
1824 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
bravof486707f2021-11-08 17:18:50 -03001825 else "helm-charts",
1826 vca_name,
1827 )
bravof922c4172020-11-24 21:21:43 -03001828
1829 self.logger.debug("Artifact path > {}".format(artifact_path))
1830
tiernoa278b842020-07-08 15:33:55 +00001831 # get initial_config_primitive_list that applies to this element
garciadeblas5697b8b2021-03-24 09:17:02 +01001832 initial_config_primitive_list = config_descriptor.get(
1833 "initial-config-primitive"
1834 )
tiernoa278b842020-07-08 15:33:55 +00001835
garciadeblas5697b8b2021-03-24 09:17:02 +01001836 self.logger.debug(
1837 "Initial config primitive list > {}".format(
1838 initial_config_primitive_list
1839 )
1840 )
bravof922c4172020-11-24 21:21:43 -03001841
tiernoa278b842020-07-08 15:33:55 +00001842 # add config if not present for NS charm
1843 ee_descriptor_id = ee_config_descriptor.get("id")
bravof922c4172020-11-24 21:21:43 -03001844 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
garciadeblas5697b8b2021-03-24 09:17:02 +01001845 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1846 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1847 )
tiernod8323042019-08-09 11:32:23 +00001848
garciadeblas5697b8b2021-03-24 09:17:02 +01001849 self.logger.debug(
1850 "Initial config primitive list #2 > {}".format(
1851 initial_config_primitive_list
1852 )
1853 )
tierno588547c2020-07-01 15:30:20 +00001854 # n2vc_redesign STEP 3.1
tierno588547c2020-07-01 15:30:20 +00001855 # find old ee_id if exists
1856 ee_id = vca_deployed.get("ee_id")
tiernod8323042019-08-09 11:32:23 +00001857
David Garciac1fe90a2021-03-31 19:12:02 +02001858 vca_id = self.get_vca_id(db_vnfr, db_nsr)
tierno588547c2020-07-01 15:30:20 +00001859 # create or register execution environment in VCA
lloretgalleg18ebc3a2020-10-22 09:54:51 +00001860 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
quilesj7e13aeb2019-10-08 13:34:55 +02001861
tierno588547c2020-07-01 15:30:20 +00001862 self._write_configuration_status(
1863 nsr_id=nsr_id,
1864 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01001865 status="CREATING",
tierno588547c2020-07-01 15:30:20 +00001866 element_under_configuration=element_under_configuration,
garciadeblas5697b8b2021-03-24 09:17:02 +01001867 element_type=element_type,
tierno588547c2020-07-01 15:30:20 +00001868 )
tiernod8323042019-08-09 11:32:23 +00001869
tierno588547c2020-07-01 15:30:20 +00001870 step = "create execution environment"
garciadeblas5697b8b2021-03-24 09:17:02 +01001871 self.logger.debug(logging_text + step)
David Garciaaae391f2020-11-09 11:12:54 +01001872
1873 ee_id = None
1874 credentials = None
1875 if vca_type == "k8s_proxy_charm":
1876 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
garciadeblas5697b8b2021-03-24 09:17:02 +01001877 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
David Garciaaae391f2020-11-09 11:12:54 +01001878 namespace=namespace,
1879 artifact_path=artifact_path,
1880 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02001881 vca_id=vca_id,
David Garciaaae391f2020-11-09 11:12:54 +01001882 )
garciadeblas5697b8b2021-03-24 09:17:02 +01001883 elif vca_type == "helm" or vca_type == "helm-v3":
1884 ee_id, credentials = await self.vca_map[
1885 vca_type
1886 ].create_execution_environment(
bravof922c4172020-11-24 21:21:43 -03001887 namespace=namespace,
1888 reuse_ee_id=ee_id,
1889 db_dict=db_dict,
lloretgalleg18cb3cb2020-12-10 14:21:10 +00001890 config=osm_config,
1891 artifact_path=artifact_path,
garciadeblas5697b8b2021-03-24 09:17:02 +01001892 vca_type=vca_type,
bravof922c4172020-11-24 21:21:43 -03001893 )
garciadeblas5697b8b2021-03-24 09:17:02 +01001894 else:
1895 ee_id, credentials = await self.vca_map[
1896 vca_type
1897 ].create_execution_environment(
David Garciaaae391f2020-11-09 11:12:54 +01001898 namespace=namespace,
1899 reuse_ee_id=ee_id,
1900 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02001901 vca_id=vca_id,
David Garciaaae391f2020-11-09 11:12:54 +01001902 )
quilesj3655ae02019-12-12 16:08:35 +00001903
tierno588547c2020-07-01 15:30:20 +00001904 elif vca_type == "native_charm":
1905 step = "Waiting to VM being up and getting IP address"
1906 self.logger.debug(logging_text + step)
garciadeblas5697b8b2021-03-24 09:17:02 +01001907 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1908 logging_text,
1909 nsr_id,
1910 vnfr_id,
1911 vdu_id,
1912 vdu_index,
1913 user=None,
1914 pub_key=None,
1915 )
tierno588547c2020-07-01 15:30:20 +00001916 credentials = {"hostname": rw_mgmt_ip}
1917 # get username
garciadeblas5697b8b2021-03-24 09:17:02 +01001918 username = deep_get(
1919 config_descriptor, ("config-access", "ssh-access", "default-user")
1920 )
tierno588547c2020-07-01 15:30:20 +00001921 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1922 # merged. Meanwhile let's get username from initial-config-primitive
tiernoa278b842020-07-08 15:33:55 +00001923 if not username and initial_config_primitive_list:
1924 for config_primitive in initial_config_primitive_list:
tierno588547c2020-07-01 15:30:20 +00001925 for param in config_primitive.get("parameter", ()):
1926 if param["name"] == "ssh-username":
1927 username = param["value"]
1928 break
1929 if not username:
garciadeblas5697b8b2021-03-24 09:17:02 +01001930 raise LcmException(
1931 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1932 "'config-access.ssh-access.default-user'"
1933 )
tierno588547c2020-07-01 15:30:20 +00001934 credentials["username"] = username
1935 # n2vc_redesign STEP 3.2
quilesj3655ae02019-12-12 16:08:35 +00001936
tierno588547c2020-07-01 15:30:20 +00001937 self._write_configuration_status(
1938 nsr_id=nsr_id,
1939 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01001940 status="REGISTERING",
tierno588547c2020-07-01 15:30:20 +00001941 element_under_configuration=element_under_configuration,
garciadeblas5697b8b2021-03-24 09:17:02 +01001942 element_type=element_type,
tierno588547c2020-07-01 15:30:20 +00001943 )
quilesj3655ae02019-12-12 16:08:35 +00001944
tierno588547c2020-07-01 15:30:20 +00001945 step = "register execution environment {}".format(credentials)
1946 self.logger.debug(logging_text + step)
1947 ee_id = await self.vca_map[vca_type].register_execution_environment(
David Garciaaae391f2020-11-09 11:12:54 +01001948 credentials=credentials,
1949 namespace=namespace,
1950 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02001951 vca_id=vca_id,
David Garciaaae391f2020-11-09 11:12:54 +01001952 )
tierno3bedc9b2019-11-27 15:46:57 +00001953
tierno588547c2020-07-01 15:30:20 +00001954 # for compatibility with MON/POL modules, the need model and application name at database
1955 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
garciadeblas5697b8b2021-03-24 09:17:02 +01001956 ee_id_parts = ee_id.split(".")
tierno588547c2020-07-01 15:30:20 +00001957 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1958 if len(ee_id_parts) >= 2:
1959 model_name = ee_id_parts[0]
1960 application_name = ee_id_parts[1]
1961 db_nsr_update[db_update_entry + "model"] = model_name
1962 db_nsr_update[db_update_entry + "application"] = application_name
tiernod8323042019-08-09 11:32:23 +00001963
1964 # n2vc_redesign STEP 3.3
tiernod8323042019-08-09 11:32:23 +00001965 step = "Install configuration Software"
quilesj3655ae02019-12-12 16:08:35 +00001966
tiernoc231a872020-01-21 08:49:05 +00001967 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001968 nsr_id=nsr_id,
1969 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01001970 status="INSTALLING SW",
quilesj3655ae02019-12-12 16:08:35 +00001971 element_under_configuration=element_under_configuration,
tierno51183952020-04-03 15:48:18 +00001972 element_type=element_type,
garciadeblas5697b8b2021-03-24 09:17:02 +01001973 other_update=db_nsr_update,
quilesj3655ae02019-12-12 16:08:35 +00001974 )
1975
tierno3bedc9b2019-11-27 15:46:57 +00001976 # TODO check if already done
quilesj7e13aeb2019-10-08 13:34:55 +02001977 self.logger.debug(logging_text + step)
David Garcia18a63322020-04-01 16:14:59 +02001978 config = None
tierno588547c2020-07-01 15:30:20 +00001979 if vca_type == "native_charm":
garciadeblas5697b8b2021-03-24 09:17:02 +01001980 config_primitive = next(
1981 (p for p in initial_config_primitive_list if p["name"] == "config"),
1982 None,
1983 )
tiernoa278b842020-07-08 15:33:55 +00001984 if config_primitive:
1985 config = self._map_primitive_params(
garciadeblas5697b8b2021-03-24 09:17:02 +01001986 config_primitive, {}, deploy_params
tiernoa278b842020-07-08 15:33:55 +00001987 )
tierno588547c2020-07-01 15:30:20 +00001988 num_units = 1
1989 if vca_type == "lxc_proxy_charm":
1990 if element_type == "NS":
1991 num_units = db_nsr.get("config-units") or 1
1992 elif element_type == "VNF":
1993 num_units = db_vnfr.get("config-units") or 1
1994 elif element_type == "VDU":
1995 for v in db_vnfr["vdur"]:
1996 if vdu_id == v["vdu-id-ref"]:
1997 num_units = v.get("config-units") or 1
1998 break
David Garciaaae391f2020-11-09 11:12:54 +01001999 if vca_type != "k8s_proxy_charm":
2000 await self.vca_map[vca_type].install_configuration_sw(
2001 ee_id=ee_id,
2002 artifact_path=artifact_path,
2003 db_dict=db_dict,
2004 config=config,
2005 num_units=num_units,
David Garciac1fe90a2021-03-31 19:12:02 +02002006 vca_id=vca_id,
aktas98488ed2021-07-29 17:42:49 +03002007 vca_type=vca_type,
David Garciaaae391f2020-11-09 11:12:54 +01002008 )
quilesj7e13aeb2019-10-08 13:34:55 +02002009
quilesj63f90042020-01-17 09:53:55 +00002010 # write in db flag of configuration_sw already installed
garciadeblas5697b8b2021-03-24 09:17:02 +01002011 self.update_db_2(
2012 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2013 )
quilesj63f90042020-01-17 09:53:55 +00002014
2015 # add relations for this VCA (wait for other peers related with this VCA)
garciadeblas5697b8b2021-03-24 09:17:02 +01002016 await self._add_vca_relations(
2017 logging_text=logging_text,
2018 nsr_id=nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01002019 vca_type=vca_type,
David Garciab4ebcd02021-10-28 02:00:43 +02002020 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01002021 )
quilesj63f90042020-01-17 09:53:55 +00002022
quilesj7e13aeb2019-10-08 13:34:55 +02002023 # if SSH access is required, then get execution environment SSH public
David Garciaa27e20a2020-07-10 13:12:44 +02002024 # if native charm we have waited already to VM be UP
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002025 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
tierno3bedc9b2019-11-27 15:46:57 +00002026 pub_key = None
2027 user = None
tierno588547c2020-07-01 15:30:20 +00002028 # self.logger.debug("get ssh key block")
garciadeblas5697b8b2021-03-24 09:17:02 +01002029 if deep_get(
2030 config_descriptor, ("config-access", "ssh-access", "required")
2031 ):
tierno588547c2020-07-01 15:30:20 +00002032 # self.logger.debug("ssh key needed")
tierno3bedc9b2019-11-27 15:46:57 +00002033 # Needed to inject a ssh key
garciadeblas5697b8b2021-03-24 09:17:02 +01002034 user = deep_get(
2035 config_descriptor,
2036 ("config-access", "ssh-access", "default-user"),
2037 )
tierno3bedc9b2019-11-27 15:46:57 +00002038 step = "Install configuration Software, getting public ssh key"
David Garciac1fe90a2021-03-31 19:12:02 +02002039 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
garciadeblas5697b8b2021-03-24 09:17:02 +01002040 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
David Garciac1fe90a2021-03-31 19:12:02 +02002041 )
quilesj7e13aeb2019-10-08 13:34:55 +02002042
garciadeblas5697b8b2021-03-24 09:17:02 +01002043 step = "Insert public key into VM user={} ssh_key={}".format(
2044 user, pub_key
2045 )
tierno3bedc9b2019-11-27 15:46:57 +00002046 else:
tierno588547c2020-07-01 15:30:20 +00002047 # self.logger.debug("no need to get ssh key")
tierno3bedc9b2019-11-27 15:46:57 +00002048 step = "Waiting to VM being up and getting IP address"
2049 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02002050
Pedro Escaleira1e9c3e32022-05-30 15:37:01 +01002051 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2052 rw_mgmt_ip = None
2053
tierno3bedc9b2019-11-27 15:46:57 +00002054 # n2vc_redesign STEP 5.1
2055 # wait for RO (ip-address) Insert pub_key into VM
tierno5ee02052019-12-05 19:55:02 +00002056 if vnfr_id:
tierno7ecbc342020-09-21 14:05:39 +00002057 if kdu_name:
David Garcia78b6e6d2022-04-29 05:50:46 +02002058 rw_mgmt_ip, services = await self.wait_kdu_up(
garciadeblas5697b8b2021-03-24 09:17:02 +01002059 logging_text, nsr_id, vnfr_id, kdu_name
2060 )
David Garcia78b6e6d2022-04-29 05:50:46 +02002061 vnfd = self.db.get_one(
2062 "vnfds_revisions",
2063 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2064 )
2065 kdu = get_kdu(vnfd, kdu_name)
2066 kdu_services = [
2067 service["name"] for service in get_kdu_services(kdu)
2068 ]
2069 exposed_services = []
2070 for service in services:
2071 if any(s in service["name"] for s in kdu_services):
2072 exposed_services.append(service)
2073 await self.vca_map[vca_type].exec_primitive(
2074 ee_id=ee_id,
2075 primitive_name="config",
2076 params_dict={
2077 "osm-config": json.dumps(
2078 OsmConfigBuilder(
2079 k8s={"services": exposed_services}
2080 ).build()
2081 )
2082 },
2083 vca_id=vca_id,
2084 )
Pedro Escaleira1e9c3e32022-05-30 15:37:01 +01002085
2086 # This verification is needed in order to avoid trying to add a public key
2087 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2088 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2089 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2090 # or it is a KNF)
2091 elif db_vnfr.get('vdur'):
garciadeblas5697b8b2021-03-24 09:17:02 +01002092 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2093 logging_text,
2094 nsr_id,
2095 vnfr_id,
2096 vdu_id,
2097 vdu_index,
2098 user=user,
2099 pub_key=pub_key,
2100 )
David Garcia78b6e6d2022-04-29 05:50:46 +02002101
garciadeblas5697b8b2021-03-24 09:17:02 +01002102 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
quilesj7e13aeb2019-10-08 13:34:55 +02002103
tiernoa5088192019-11-26 16:12:53 +00002104 # store rw_mgmt_ip in deploy params for later replacement
quilesj7e13aeb2019-10-08 13:34:55 +02002105 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
tiernod8323042019-08-09 11:32:23 +00002106
2107 # n2vc_redesign STEP 6 Execute initial config primitive
garciadeblas5697b8b2021-03-24 09:17:02 +01002108 step = "execute initial config primitive"
quilesj3655ae02019-12-12 16:08:35 +00002109
2110 # wait for dependent primitives execution (NS -> VNF -> VDU)
tierno5ee02052019-12-05 19:55:02 +00002111 if initial_config_primitive_list:
2112 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
quilesj3655ae02019-12-12 16:08:35 +00002113
2114 # stage, in function of element type: vdu, kdu, vnf or ns
2115 my_vca = vca_deployed_list[vca_index]
2116 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2117 # VDU or KDU
garciadeblas5697b8b2021-03-24 09:17:02 +01002118 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
quilesj3655ae02019-12-12 16:08:35 +00002119 elif my_vca.get("member-vnf-index"):
2120 # VNF
garciadeblas5697b8b2021-03-24 09:17:02 +01002121 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
quilesj3655ae02019-12-12 16:08:35 +00002122 else:
2123 # NS
garciadeblas5697b8b2021-03-24 09:17:02 +01002124 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
quilesj3655ae02019-12-12 16:08:35 +00002125
tiernoc231a872020-01-21 08:49:05 +00002126 self._write_configuration_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01002127 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
quilesj3655ae02019-12-12 16:08:35 +00002128 )
2129
garciadeblas5697b8b2021-03-24 09:17:02 +01002130 self._write_op_status(op_id=nslcmop_id, stage=stage)
quilesj3655ae02019-12-12 16:08:35 +00002131
tiernoe876f672020-02-13 14:34:48 +00002132 check_if_terminated_needed = True
tiernod8323042019-08-09 11:32:23 +00002133 for initial_config_primitive in initial_config_primitive_list:
tiernoda6fb102019-11-23 00:36:52 +00002134 # adding information on the vca_deployed if it is a NS execution environment
2135 if not vca_deployed["member-vnf-index"]:
garciadeblas5697b8b2021-03-24 09:17:02 +01002136 deploy_params["ns_config_info"] = json.dumps(
2137 self._get_ns_config_info(nsr_id)
2138 )
tiernod8323042019-08-09 11:32:23 +00002139 # TODO check if already done
garciadeblas5697b8b2021-03-24 09:17:02 +01002140 primitive_params_ = self._map_primitive_params(
2141 initial_config_primitive, {}, deploy_params
2142 )
tierno3bedc9b2019-11-27 15:46:57 +00002143
garciadeblas5697b8b2021-03-24 09:17:02 +01002144 step = "execute primitive '{}' params '{}'".format(
2145 initial_config_primitive["name"], primitive_params_
2146 )
tiernod8323042019-08-09 11:32:23 +00002147 self.logger.debug(logging_text + step)
tierno588547c2020-07-01 15:30:20 +00002148 await self.vca_map[vca_type].exec_primitive(
quilesj7e13aeb2019-10-08 13:34:55 +02002149 ee_id=ee_id,
2150 primitive_name=initial_config_primitive["name"],
2151 params_dict=primitive_params_,
David Garciac1fe90a2021-03-31 19:12:02 +02002152 db_dict=db_dict,
2153 vca_id=vca_id,
aktas98488ed2021-07-29 17:42:49 +03002154 vca_type=vca_type,
quilesj7e13aeb2019-10-08 13:34:55 +02002155 )
tiernoe876f672020-02-13 14:34:48 +00002156 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2157 if check_if_terminated_needed:
garciadeblas5697b8b2021-03-24 09:17:02 +01002158 if config_descriptor.get("terminate-config-primitive"):
2159 self.update_db_2(
2160 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2161 )
tiernoe876f672020-02-13 14:34:48 +00002162 check_if_terminated_needed = False
quilesj3655ae02019-12-12 16:08:35 +00002163
tiernod8323042019-08-09 11:32:23 +00002164 # TODO register in database that primitive is done
quilesj7e13aeb2019-10-08 13:34:55 +02002165
tiernob996d942020-07-03 14:52:28 +00002166 # STEP 7 Configure metrics
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002167 if vca_type == "helm" or vca_type == "helm-v3":
bravof73bac502021-05-11 07:38:47 -04002168 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
tiernob996d942020-07-03 14:52:28 +00002169 ee_id=ee_id,
2170 artifact_path=artifact_path,
2171 ee_config_descriptor=ee_config_descriptor,
2172 vnfr_id=vnfr_id,
2173 nsr_id=nsr_id,
2174 target_ip=rw_mgmt_ip,
2175 )
2176 if prometheus_jobs:
garciadeblas5697b8b2021-03-24 09:17:02 +01002177 self.update_db_2(
2178 "nsrs",
2179 nsr_id,
2180 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2181 )
tiernob996d942020-07-03 14:52:28 +00002182
bravof73bac502021-05-11 07:38:47 -04002183 for job in prometheus_jobs:
2184 self.db.set_one(
2185 "prometheus_jobs",
aticig15db6142022-01-24 12:51:26 +03002186 {"job_name": job["job_name"]},
bravof73bac502021-05-11 07:38:47 -04002187 job,
2188 upsert=True,
aticig15db6142022-01-24 12:51:26 +03002189 fail_on_empty=False,
bravof73bac502021-05-11 07:38:47 -04002190 )
2191
quilesj7e13aeb2019-10-08 13:34:55 +02002192 step = "instantiated at VCA"
2193 self.logger.debug(logging_text + step)
2194
tiernoc231a872020-01-21 08:49:05 +00002195 self._write_configuration_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01002196 nsr_id=nsr_id, vca_index=vca_index, status="READY"
quilesj3655ae02019-12-12 16:08:35 +00002197 )
2198
tiernod8323042019-08-09 11:32:23 +00002199 except Exception as e: # TODO not use Exception but N2VC exception
quilesj3655ae02019-12-12 16:08:35 +00002200 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
garciadeblas5697b8b2021-03-24 09:17:02 +01002201 if not isinstance(
2202 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2203 ):
2204 self.logger.error(
2205 "Exception while {} : {}".format(step, e), exc_info=True
2206 )
tiernoc231a872020-01-21 08:49:05 +00002207 self._write_configuration_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01002208 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
quilesj3655ae02019-12-12 16:08:35 +00002209 )
tiernoe876f672020-02-13 14:34:48 +00002210 raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00002211
garciadeblas5697b8b2021-03-24 09:17:02 +01002212 def _write_ns_status(
2213 self,
2214 nsr_id: str,
2215 ns_state: str,
2216 current_operation: str,
2217 current_operation_id: str,
2218 error_description: str = None,
2219 error_detail: str = None,
2220 other_update: dict = None,
2221 ):
tiernoe876f672020-02-13 14:34:48 +00002222 """
2223 Update db_nsr fields.
2224 :param nsr_id:
2225 :param ns_state:
2226 :param current_operation:
2227 :param current_operation_id:
2228 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00002229 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00002230 :param other_update: Other required changes at database if provided, will be cleared
2231 :return:
2232 """
quilesj4cda56b2019-12-05 10:02:20 +00002233 try:
tiernoe876f672020-02-13 14:34:48 +00002234 db_dict = other_update or {}
garciadeblas5697b8b2021-03-24 09:17:02 +01002235 db_dict[
2236 "_admin.nslcmop"
2237 ] = current_operation_id # for backward compatibility
tiernoe876f672020-02-13 14:34:48 +00002238 db_dict["_admin.current-operation"] = current_operation_id
garciadeblas5697b8b2021-03-24 09:17:02 +01002239 db_dict["_admin.operation-type"] = (
2240 current_operation if current_operation != "IDLE" else None
2241 )
quilesj4cda56b2019-12-05 10:02:20 +00002242 db_dict["currentOperation"] = current_operation
2243 db_dict["currentOperationID"] = current_operation_id
2244 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00002245 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00002246
2247 if ns_state:
2248 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00002249 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00002250 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002251 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
quilesj3655ae02019-12-12 16:08:35 +00002252
garciadeblas5697b8b2021-03-24 09:17:02 +01002253 def _write_op_status(
2254 self,
2255 op_id: str,
2256 stage: list = None,
2257 error_message: str = None,
2258 queuePosition: int = 0,
2259 operation_state: str = None,
2260 other_update: dict = None,
2261 ):
quilesj3655ae02019-12-12 16:08:35 +00002262 try:
tiernoe876f672020-02-13 14:34:48 +00002263 db_dict = other_update or {}
garciadeblas5697b8b2021-03-24 09:17:02 +01002264 db_dict["queuePosition"] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00002265 if isinstance(stage, list):
garciadeblas5697b8b2021-03-24 09:17:02 +01002266 db_dict["stage"] = stage[0]
2267 db_dict["detailed-status"] = " ".join(stage)
tiernoe876f672020-02-13 14:34:48 +00002268 elif stage is not None:
garciadeblas5697b8b2021-03-24 09:17:02 +01002269 db_dict["stage"] = str(stage)
tiernoe876f672020-02-13 14:34:48 +00002270
2271 if error_message is not None:
garciadeblas5697b8b2021-03-24 09:17:02 +01002272 db_dict["errorMessage"] = error_message
tiernoe876f672020-02-13 14:34:48 +00002273 if operation_state is not None:
garciadeblas5697b8b2021-03-24 09:17:02 +01002274 db_dict["operationState"] = operation_state
tiernoe876f672020-02-13 14:34:48 +00002275 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00002276 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00002277 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002278 self.logger.warn(
2279 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2280 )
quilesj3655ae02019-12-12 16:08:35 +00002281
tierno51183952020-04-03 15:48:18 +00002282 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00002283 try:
tierno51183952020-04-03 15:48:18 +00002284 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00002285 # configurationStatus
garciadeblas5697b8b2021-03-24 09:17:02 +01002286 config_status = db_nsr.get("configurationStatus")
quilesj3655ae02019-12-12 16:08:35 +00002287 if config_status:
garciadeblas5697b8b2021-03-24 09:17:02 +01002288 db_nsr_update = {
2289 "configurationStatus.{}.status".format(index): status
2290 for index, v in enumerate(config_status)
2291 if v
2292 }
quilesj3655ae02019-12-12 16:08:35 +00002293 # update status
tierno51183952020-04-03 15:48:18 +00002294 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00002295
tiernoe876f672020-02-13 14:34:48 +00002296 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002297 self.logger.warn(
2298 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2299 )
quilesj3655ae02019-12-12 16:08:35 +00002300
garciadeblas5697b8b2021-03-24 09:17:02 +01002301 def _write_configuration_status(
2302 self,
2303 nsr_id: str,
2304 vca_index: int,
2305 status: str = None,
2306 element_under_configuration: str = None,
2307 element_type: str = None,
2308 other_update: dict = None,
2309 ):
quilesj3655ae02019-12-12 16:08:35 +00002310
2311 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2312 # .format(vca_index, status))
2313
2314 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01002315 db_path = "configurationStatus.{}.".format(vca_index)
tierno51183952020-04-03 15:48:18 +00002316 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00002317 if status:
garciadeblas5697b8b2021-03-24 09:17:02 +01002318 db_dict[db_path + "status"] = status
quilesj3655ae02019-12-12 16:08:35 +00002319 if element_under_configuration:
garciadeblas5697b8b2021-03-24 09:17:02 +01002320 db_dict[
2321 db_path + "elementUnderConfiguration"
2322 ] = element_under_configuration
quilesj3655ae02019-12-12 16:08:35 +00002323 if element_type:
garciadeblas5697b8b2021-03-24 09:17:02 +01002324 db_dict[db_path + "elementType"] = element_type
quilesj3655ae02019-12-12 16:08:35 +00002325 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00002326 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002327 self.logger.warn(
2328 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2329 status, nsr_id, vca_index, e
2330 )
2331 )
quilesj4cda56b2019-12-05 10:02:20 +00002332
tierno38089af2020-04-16 07:56:58 +00002333 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2334 """
2335 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2336 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2337 Database is used because the result can be obtained from a different LCM worker in case of HA.
2338 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2339 :param db_nslcmop: database content of nslcmop
2340 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
tierno8790a3d2020-04-23 22:49:52 +00002341 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2342 computed 'vim-account-id'
tierno38089af2020-04-16 07:56:58 +00002343 """
tierno8790a3d2020-04-23 22:49:52 +00002344 modified = False
garciadeblas5697b8b2021-03-24 09:17:02 +01002345 nslcmop_id = db_nslcmop["_id"]
2346 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
magnussonle9198bb2020-01-21 13:00:51 +01002347 if placement_engine == "PLA":
garciadeblas5697b8b2021-03-24 09:17:02 +01002348 self.logger.debug(
2349 logging_text + "Invoke and wait for placement optimization"
2350 )
2351 await self.msg.aiowrite(
2352 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2353 )
magnussonle9198bb2020-01-21 13:00:51 +01002354 db_poll_interval = 5
tierno38089af2020-04-16 07:56:58 +00002355 wait = db_poll_interval * 10
magnussonle9198bb2020-01-21 13:00:51 +01002356 pla_result = None
2357 while not pla_result and wait >= 0:
2358 await asyncio.sleep(db_poll_interval)
2359 wait -= db_poll_interval
tierno38089af2020-04-16 07:56:58 +00002360 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
garciadeblas5697b8b2021-03-24 09:17:02 +01002361 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
magnussonle9198bb2020-01-21 13:00:51 +01002362
2363 if not pla_result:
garciadeblas5697b8b2021-03-24 09:17:02 +01002364 raise LcmException(
2365 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2366 )
magnussonle9198bb2020-01-21 13:00:51 +01002367
garciadeblas5697b8b2021-03-24 09:17:02 +01002368 for pla_vnf in pla_result["vnf"]:
2369 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2370 if not pla_vnf.get("vimAccountId") or not vnfr:
magnussonle9198bb2020-01-21 13:00:51 +01002371 continue
tierno8790a3d2020-04-23 22:49:52 +00002372 modified = True
garciadeblas5697b8b2021-03-24 09:17:02 +01002373 self.db.set_one(
2374 "vnfrs",
2375 {"_id": vnfr["_id"]},
2376 {"vim-account-id": pla_vnf["vimAccountId"]},
2377 )
tierno38089af2020-04-16 07:56:58 +00002378 # Modifies db_vnfrs
garciadeblas5697b8b2021-03-24 09:17:02 +01002379 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
tierno8790a3d2020-04-23 22:49:52 +00002380 return modified
magnussonle9198bb2020-01-21 13:00:51 +01002381
2382 def update_nsrs_with_pla_result(self, params):
2383 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01002384 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2385 self.update_db_2(
2386 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2387 )
magnussonle9198bb2020-01-21 13:00:51 +01002388 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002389 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
magnussonle9198bb2020-01-21 13:00:51 +01002390
tierno59d22d22018-09-25 18:10:19 +02002391 async def instantiate(self, nsr_id, nslcmop_id):
quilesj7e13aeb2019-10-08 13:34:55 +02002392 """
2393
2394 :param nsr_id: ns instance to deploy
2395 :param nslcmop_id: operation to run
2396 :return:
2397 """
kuused124bfe2019-06-18 12:09:24 +02002398
2399 # Try to lock HA task here
garciadeblas5697b8b2021-03-24 09:17:02 +01002400 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02002401 if not task_is_locked_by_me:
garciadeblas5697b8b2021-03-24 09:17:02 +01002402 self.logger.debug(
2403 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2404 )
kuused124bfe2019-06-18 12:09:24 +02002405 return
2406
tierno59d22d22018-09-25 18:10:19 +02002407 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2408 self.logger.debug(logging_text + "Enter")
quilesj7e13aeb2019-10-08 13:34:55 +02002409
tierno59d22d22018-09-25 18:10:19 +02002410 # get all needed from database
quilesj7e13aeb2019-10-08 13:34:55 +02002411
2412 # database nsrs record
tierno59d22d22018-09-25 18:10:19 +02002413 db_nsr = None
quilesj7e13aeb2019-10-08 13:34:55 +02002414
2415 # database nslcmops record
tierno59d22d22018-09-25 18:10:19 +02002416 db_nslcmop = None
quilesj7e13aeb2019-10-08 13:34:55 +02002417
2418 # update operation on nsrs
tiernoe876f672020-02-13 14:34:48 +00002419 db_nsr_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02002420 # update operation on nslcmops
tierno59d22d22018-09-25 18:10:19 +02002421 db_nslcmop_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02002422
tierno59d22d22018-09-25 18:10:19 +02002423 nslcmop_operation_state = None
garciadeblas5697b8b2021-03-24 09:17:02 +01002424 db_vnfrs = {} # vnf's info indexed by member-index
quilesj7e13aeb2019-10-08 13:34:55 +02002425 # n2vc_info = {}
tiernoe876f672020-02-13 14:34:48 +00002426 tasks_dict_info = {} # from task to info text
tierno59d22d22018-09-25 18:10:19 +02002427 exc = None
tiernoe876f672020-02-13 14:34:48 +00002428 error_list = []
garciadeblas5697b8b2021-03-24 09:17:02 +01002429 stage = [
2430 "Stage 1/5: preparation of the environment.",
2431 "Waiting for previous operations to terminate.",
2432 "",
2433 ]
tiernoe876f672020-02-13 14:34:48 +00002434 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02002435 try:
kuused124bfe2019-06-18 12:09:24 +02002436 # wait for any previous tasks in process
garciadeblas5697b8b2021-03-24 09:17:02 +01002437 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02002438
quilesj7e13aeb2019-10-08 13:34:55 +02002439 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
tiernob5203912020-08-11 11:20:13 +00002440 stage[1] = "Reading from database."
quilesj4cda56b2019-12-05 10:02:20 +00002441 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
tiernoe876f672020-02-13 14:34:48 +00002442 db_nsr_update["detailed-status"] = "creating"
2443 db_nsr_update["operational-status"] = "init"
quilesj4cda56b2019-12-05 10:02:20 +00002444 self._write_ns_status(
2445 nsr_id=nsr_id,
2446 ns_state="BUILDING",
2447 current_operation="INSTANTIATING",
tiernoe876f672020-02-13 14:34:48 +00002448 current_operation_id=nslcmop_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01002449 other_update=db_nsr_update,
tiernoe876f672020-02-13 14:34:48 +00002450 )
garciadeblas5697b8b2021-03-24 09:17:02 +01002451 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
quilesj4cda56b2019-12-05 10:02:20 +00002452
quilesj7e13aeb2019-10-08 13:34:55 +02002453 # read from db: operation
tiernob5203912020-08-11 11:20:13 +00002454 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
tierno59d22d22018-09-25 18:10:19 +02002455 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
Guillermo Calvino57c68152022-01-26 17:40:31 +01002456 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2457 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2458 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2459 )
tierno744303e2020-01-13 16:46:31 +00002460 ns_params = db_nslcmop.get("operationParams")
2461 if ns_params and ns_params.get("timeout_ns_deploy"):
2462 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2463 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01002464 timeout_ns_deploy = self.timeout.get(
2465 "ns_deploy", self.timeout_ns_deploy
2466 )
quilesj7e13aeb2019-10-08 13:34:55 +02002467
2468 # read from db: ns
tiernob5203912020-08-11 11:20:13 +00002469 stage[1] = "Getting nsr={} from db.".format(nsr_id)
garciadeblascd509f52021-11-23 10:04:12 +01002470 self.logger.debug(logging_text + stage[1])
tierno59d22d22018-09-25 18:10:19 +02002471 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernob5203912020-08-11 11:20:13 +00002472 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
garciadeblascd509f52021-11-23 10:04:12 +01002473 self.logger.debug(logging_text + stage[1])
tiernod732fb82020-05-21 13:18:23 +00002474 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
bravof021e70d2021-03-11 12:03:30 -03002475 self.fs.sync(db_nsr["nsd-id"])
tiernod732fb82020-05-21 13:18:23 +00002476 db_nsr["nsd"] = nsd
tiernod8323042019-08-09 11:32:23 +00002477 # nsr_name = db_nsr["name"] # TODO short-name??
tierno47e86b52018-10-10 14:05:55 +02002478
quilesj7e13aeb2019-10-08 13:34:55 +02002479 # read from db: vnf's of this ns
tiernob5203912020-08-11 11:20:13 +00002480 stage[1] = "Getting vnfrs from db."
tiernoe876f672020-02-13 14:34:48 +00002481 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02002482 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
tierno27246d82018-09-27 15:59:09 +02002483
quilesj7e13aeb2019-10-08 13:34:55 +02002484 # read from db: vnfd's for every vnf
garciadeblas5697b8b2021-03-24 09:17:02 +01002485 db_vnfds = [] # every vnfd data
quilesj7e13aeb2019-10-08 13:34:55 +02002486
2487 # for each vnf in ns, read vnfd
tierno27246d82018-09-27 15:59:09 +02002488 for vnfr in db_vnfrs_list:
Guillermo Calvino57c68152022-01-26 17:40:31 +01002489 if vnfr.get("kdur"):
2490 kdur_list = []
2491 for kdur in vnfr["kdur"]:
2492 if kdur.get("additionalParams"):
Pedro Escaleirab9a7c4d2022-03-31 00:08:05 +01002493 kdur["additionalParams"] = json.loads(
2494 kdur["additionalParams"]
2495 )
Guillermo Calvino57c68152022-01-26 17:40:31 +01002496 kdur_list.append(kdur)
2497 vnfr["kdur"] = kdur_list
2498
bravof922c4172020-11-24 21:21:43 -03002499 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2500 vnfd_id = vnfr["vnfd-id"]
2501 vnfd_ref = vnfr["vnfd-ref"]
bravof021e70d2021-03-11 12:03:30 -03002502 self.fs.sync(vnfd_id)
lloretgalleg6d488782020-07-22 10:13:46 +00002503
quilesj7e13aeb2019-10-08 13:34:55 +02002504 # if we haven't this vnfd, read it from db
tierno27246d82018-09-27 15:59:09 +02002505 if vnfd_id not in db_vnfds:
quilesj63f90042020-01-17 09:53:55 +00002506 # read from db
garciadeblas5697b8b2021-03-24 09:17:02 +01002507 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2508 vnfd_id, vnfd_ref
2509 )
tiernoe876f672020-02-13 14:34:48 +00002510 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02002511 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
tierno27246d82018-09-27 15:59:09 +02002512
quilesj7e13aeb2019-10-08 13:34:55 +02002513 # store vnfd
David Garciad41dbd62020-12-10 12:52:52 +01002514 db_vnfds.append(vnfd)
quilesj7e13aeb2019-10-08 13:34:55 +02002515
2516 # Get or generates the _admin.deployed.VCA list
tiernoe4f7e6c2018-11-27 14:55:30 +00002517 vca_deployed_list = None
2518 if db_nsr["_admin"].get("deployed"):
2519 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2520 if vca_deployed_list is None:
2521 vca_deployed_list = []
quilesj3655ae02019-12-12 16:08:35 +00002522 configuration_status_list = []
tiernoe4f7e6c2018-11-27 14:55:30 +00002523 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
quilesj3655ae02019-12-12 16:08:35 +00002524 db_nsr_update["configurationStatus"] = configuration_status_list
quilesj7e13aeb2019-10-08 13:34:55 +02002525 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00002526 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00002527 elif isinstance(vca_deployed_list, dict):
2528 # maintain backward compatibility. Change a dict to list at database
2529 vca_deployed_list = list(vca_deployed_list.values())
2530 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00002531 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00002532
garciadeblas5697b8b2021-03-24 09:17:02 +01002533 if not isinstance(
2534 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2535 ):
tiernoa009e552019-01-30 16:45:44 +00002536 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2537 db_nsr_update["_admin.deployed.RO.vnfd"] = []
tierno59d22d22018-09-25 18:10:19 +02002538
tiernobaa51102018-12-14 13:16:18 +00002539 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2540 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2541 self.update_db_2("nsrs", nsr_id, db_nsr_update)
garciadeblas5697b8b2021-03-24 09:17:02 +01002542 self.db.set_list(
2543 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2544 )
quilesj3655ae02019-12-12 16:08:35 +00002545
2546 # n2vc_redesign STEP 2 Deploy Network Scenario
garciadeblas5697b8b2021-03-24 09:17:02 +01002547 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2548 self._write_op_status(op_id=nslcmop_id, stage=stage)
quilesj3655ae02019-12-12 16:08:35 +00002549
tiernob5203912020-08-11 11:20:13 +00002550 stage[1] = "Deploying KDUs."
tiernoe876f672020-02-13 14:34:48 +00002551 # self.logger.debug(logging_text + "Before deploy_kdus")
calvinosanch9f9c6f22019-11-04 13:37:39 +01002552 # Call to deploy_kdus in case exists the "vdu:kdu" param
tiernoe876f672020-02-13 14:34:48 +00002553 await self.deploy_kdus(
2554 logging_text=logging_text,
2555 nsr_id=nsr_id,
2556 nslcmop_id=nslcmop_id,
2557 db_vnfrs=db_vnfrs,
2558 db_vnfds=db_vnfds,
2559 task_instantiation_info=tasks_dict_info,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002560 )
tiernoe876f672020-02-13 14:34:48 +00002561
2562 stage[1] = "Getting VCA public key."
tiernod8323042019-08-09 11:32:23 +00002563 # n2vc_redesign STEP 1 Get VCA public ssh-key
2564 # feature 1429. Add n2vc public key to needed VMs
tierno3bedc9b2019-11-27 15:46:57 +00002565 n2vc_key = self.n2vc.get_public_key()
tiernoa5088192019-11-26 16:12:53 +00002566 n2vc_key_list = [n2vc_key]
2567 if self.vca_config.get("public_key"):
2568 n2vc_key_list.append(self.vca_config["public_key"])
tierno98ad6ea2019-05-30 17:16:28 +00002569
tiernoe876f672020-02-13 14:34:48 +00002570 stage[1] = "Deploying NS at VIM."
tiernod8323042019-08-09 11:32:23 +00002571 task_ro = asyncio.ensure_future(
quilesj7e13aeb2019-10-08 13:34:55 +02002572 self.instantiate_RO(
2573 logging_text=logging_text,
2574 nsr_id=nsr_id,
2575 nsd=nsd,
2576 db_nsr=db_nsr,
2577 db_nslcmop=db_nslcmop,
2578 db_vnfrs=db_vnfrs,
bravof922c4172020-11-24 21:21:43 -03002579 db_vnfds=db_vnfds,
tiernoe876f672020-02-13 14:34:48 +00002580 n2vc_key_list=n2vc_key_list,
garciadeblas5697b8b2021-03-24 09:17:02 +01002581 stage=stage,
tierno98ad6ea2019-05-30 17:16:28 +00002582 )
tiernod8323042019-08-09 11:32:23 +00002583 )
2584 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
tiernoa2143262020-03-27 16:20:40 +00002585 tasks_dict_info[task_ro] = "Deploying at VIM"
tierno98ad6ea2019-05-30 17:16:28 +00002586
tiernod8323042019-08-09 11:32:23 +00002587 # n2vc_redesign STEP 3 to 6 Deploy N2VC
tiernoe876f672020-02-13 14:34:48 +00002588 stage[1] = "Deploying Execution Environments."
2589 self.logger.debug(logging_text + stage[1])
tierno98ad6ea2019-05-30 17:16:28 +00002590
tiernod8323042019-08-09 11:32:23 +00002591 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
bravof922c4172020-11-24 21:21:43 -03002592 for vnf_profile in get_vnf_profiles(nsd):
2593 vnfd_id = vnf_profile["vnfd-id"]
2594 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2595 member_vnf_index = str(vnf_profile["id"])
tiernod8323042019-08-09 11:32:23 +00002596 db_vnfr = db_vnfrs[member_vnf_index]
2597 base_folder = vnfd["_admin"]["storage"]
2598 vdu_id = None
2599 vdu_index = 0
tierno98ad6ea2019-05-30 17:16:28 +00002600 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002601 kdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002602
tierno8a518872018-12-21 13:42:14 +00002603 # Get additional parameters
bravof922c4172020-11-24 21:21:43 -03002604 deploy_params = {"OSM": get_osm_params(db_vnfr)}
tiernod8323042019-08-09 11:32:23 +00002605 if db_vnfr.get("additionalParamsForVnf"):
garciadeblas5697b8b2021-03-24 09:17:02 +01002606 deploy_params.update(
2607 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2608 )
tierno8a518872018-12-21 13:42:14 +00002609
bravofe5a31bc2021-02-17 19:09:12 -03002610 descriptor_config = get_configuration(vnfd, vnfd["id"])
tierno588547c2020-07-01 15:30:20 +00002611 if descriptor_config:
quilesj7e13aeb2019-10-08 13:34:55 +02002612 self._deploy_n2vc(
garciadeblas5697b8b2021-03-24 09:17:02 +01002613 logging_text=logging_text
2614 + "member_vnf_index={} ".format(member_vnf_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002615 db_nsr=db_nsr,
2616 db_vnfr=db_vnfr,
2617 nslcmop_id=nslcmop_id,
2618 nsr_id=nsr_id,
2619 nsi_id=nsi_id,
2620 vnfd_id=vnfd_id,
2621 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002622 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002623 member_vnf_index=member_vnf_index,
2624 vdu_index=vdu_index,
2625 vdu_name=vdu_name,
2626 deploy_params=deploy_params,
2627 descriptor_config=descriptor_config,
2628 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00002629 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002630 stage=stage,
quilesj7e13aeb2019-10-08 13:34:55 +02002631 )
tierno59d22d22018-09-25 18:10:19 +02002632
2633 # Deploy charms for each VDU that supports one.
bravof922c4172020-11-24 21:21:43 -03002634 for vdud in get_vdu_list(vnfd):
tiernod8323042019-08-09 11:32:23 +00002635 vdu_id = vdud["id"]
bravofe5a31bc2021-02-17 19:09:12 -03002636 descriptor_config = get_configuration(vnfd, vdu_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01002637 vdur = find_in_list(
2638 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2639 )
bravof922c4172020-11-24 21:21:43 -03002640
tierno626e0152019-11-29 14:16:16 +00002641 if vdur.get("additionalParams"):
bravof922c4172020-11-24 21:21:43 -03002642 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
tierno626e0152019-11-29 14:16:16 +00002643 else:
2644 deploy_params_vdu = deploy_params
garciadeblas5697b8b2021-03-24 09:17:02 +01002645 deploy_params_vdu["OSM"] = get_osm_params(
2646 db_vnfr, vdu_id, vdu_count_index=0
2647 )
endika76ba9232021-06-21 18:55:07 +02002648 vdud_count = get_number_of_instances(vnfd, vdu_id)
bravof922c4172020-11-24 21:21:43 -03002649
2650 self.logger.debug("VDUD > {}".format(vdud))
garciadeblas5697b8b2021-03-24 09:17:02 +01002651 self.logger.debug(
2652 "Descriptor config > {}".format(descriptor_config)
2653 )
tierno588547c2020-07-01 15:30:20 +00002654 if descriptor_config:
tiernod8323042019-08-09 11:32:23 +00002655 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002656 kdu_name = None
bravof922c4172020-11-24 21:21:43 -03002657 for vdu_index in range(vdud_count):
tiernod8323042019-08-09 11:32:23 +00002658 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
quilesj7e13aeb2019-10-08 13:34:55 +02002659 self._deploy_n2vc(
garciadeblas5697b8b2021-03-24 09:17:02 +01002660 logging_text=logging_text
2661 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2662 member_vnf_index, vdu_id, vdu_index
2663 ),
quilesj7e13aeb2019-10-08 13:34:55 +02002664 db_nsr=db_nsr,
2665 db_vnfr=db_vnfr,
2666 nslcmop_id=nslcmop_id,
2667 nsr_id=nsr_id,
2668 nsi_id=nsi_id,
2669 vnfd_id=vnfd_id,
2670 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002671 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002672 member_vnf_index=member_vnf_index,
2673 vdu_index=vdu_index,
2674 vdu_name=vdu_name,
tierno626e0152019-11-29 14:16:16 +00002675 deploy_params=deploy_params_vdu,
quilesj7e13aeb2019-10-08 13:34:55 +02002676 descriptor_config=descriptor_config,
2677 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002678 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002679 stage=stage,
quilesj7e13aeb2019-10-08 13:34:55 +02002680 )
bravof922c4172020-11-24 21:21:43 -03002681 for kdud in get_kdu_list(vnfd):
calvinosanch9f9c6f22019-11-04 13:37:39 +01002682 kdu_name = kdud["name"]
bravofe5a31bc2021-02-17 19:09:12 -03002683 descriptor_config = get_configuration(vnfd, kdu_name)
tierno588547c2020-07-01 15:30:20 +00002684 if descriptor_config:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002685 vdu_id = None
2686 vdu_index = 0
2687 vdu_name = None
garciadeblas5697b8b2021-03-24 09:17:02 +01002688 kdur = next(
2689 x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
2690 )
bravof922c4172020-11-24 21:21:43 -03002691 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
tierno72ef84f2020-10-06 08:22:07 +00002692 if kdur.get("additionalParams"):
Pedro Escaleirab9a7c4d2022-03-31 00:08:05 +01002693 deploy_params_kdu.update(
2694 parse_yaml_strings(kdur["additionalParams"].copy())
garciadeblas5697b8b2021-03-24 09:17:02 +01002695 )
tierno59d22d22018-09-25 18:10:19 +02002696
calvinosanch9f9c6f22019-11-04 13:37:39 +01002697 self._deploy_n2vc(
2698 logging_text=logging_text,
2699 db_nsr=db_nsr,
2700 db_vnfr=db_vnfr,
2701 nslcmop_id=nslcmop_id,
2702 nsr_id=nsr_id,
2703 nsi_id=nsi_id,
2704 vnfd_id=vnfd_id,
2705 vdu_id=vdu_id,
2706 kdu_name=kdu_name,
2707 member_vnf_index=member_vnf_index,
2708 vdu_index=vdu_index,
2709 vdu_name=vdu_name,
tierno72ef84f2020-10-06 08:22:07 +00002710 deploy_params=deploy_params_kdu,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002711 descriptor_config=descriptor_config,
2712 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002713 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002714 stage=stage,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002715 )
tierno59d22d22018-09-25 18:10:19 +02002716
tierno1b633412019-02-25 16:48:23 +00002717 # Check if this NS has a charm configuration
tiernod8323042019-08-09 11:32:23 +00002718 descriptor_config = nsd.get("ns-configuration")
2719 if descriptor_config and descriptor_config.get("juju"):
2720 vnfd_id = None
2721 db_vnfr = None
2722 member_vnf_index = None
2723 vdu_id = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002724 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002725 vdu_index = 0
2726 vdu_name = None
tierno1b633412019-02-25 16:48:23 +00002727
tiernod8323042019-08-09 11:32:23 +00002728 # Get additional parameters
David Garcia40603572020-12-10 20:10:53 +01002729 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
tiernod8323042019-08-09 11:32:23 +00002730 if db_nsr.get("additionalParamsForNs"):
garciadeblas5697b8b2021-03-24 09:17:02 +01002731 deploy_params.update(
2732 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2733 )
tiernod8323042019-08-09 11:32:23 +00002734 base_folder = nsd["_admin"]["storage"]
quilesj7e13aeb2019-10-08 13:34:55 +02002735 self._deploy_n2vc(
2736 logging_text=logging_text,
2737 db_nsr=db_nsr,
2738 db_vnfr=db_vnfr,
2739 nslcmop_id=nslcmop_id,
2740 nsr_id=nsr_id,
2741 nsi_id=nsi_id,
2742 vnfd_id=vnfd_id,
2743 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002744 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002745 member_vnf_index=member_vnf_index,
2746 vdu_index=vdu_index,
2747 vdu_name=vdu_name,
2748 deploy_params=deploy_params,
2749 descriptor_config=descriptor_config,
2750 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002751 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002752 stage=stage,
quilesj7e13aeb2019-10-08 13:34:55 +02002753 )
tierno1b633412019-02-25 16:48:23 +00002754
tiernoe876f672020-02-13 14:34:48 +00002755 # rest of staff will be done at finally
tierno1b633412019-02-25 16:48:23 +00002756
garciadeblas5697b8b2021-03-24 09:17:02 +01002757 except (
2758 ROclient.ROClientException,
2759 DbException,
2760 LcmException,
2761 N2VCException,
2762 ) as e:
2763 self.logger.error(
2764 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2765 )
tierno59d22d22018-09-25 18:10:19 +02002766 exc = e
2767 except asyncio.CancelledError:
garciadeblas5697b8b2021-03-24 09:17:02 +01002768 self.logger.error(
2769 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2770 )
tierno59d22d22018-09-25 18:10:19 +02002771 exc = "Operation was cancelled"
2772 except Exception as e:
2773 exc = traceback.format_exc()
garciadeblas5697b8b2021-03-24 09:17:02 +01002774 self.logger.critical(
2775 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2776 exc_info=True,
2777 )
tierno59d22d22018-09-25 18:10:19 +02002778 finally:
2779 if exc:
tiernoe876f672020-02-13 14:34:48 +00002780 error_list.append(str(exc))
tiernobaa51102018-12-14 13:16:18 +00002781 try:
tiernoe876f672020-02-13 14:34:48 +00002782 # wait for pending tasks
2783 if tasks_dict_info:
2784 stage[1] = "Waiting for instantiate pending tasks."
2785 self.logger.debug(logging_text + stage[1])
garciadeblas5697b8b2021-03-24 09:17:02 +01002786 error_list += await self._wait_for_tasks(
2787 logging_text,
2788 tasks_dict_info,
2789 timeout_ns_deploy,
2790 stage,
2791 nslcmop_id,
2792 nsr_id=nsr_id,
2793 )
tiernoe876f672020-02-13 14:34:48 +00002794 stage[1] = stage[2] = ""
2795 except asyncio.CancelledError:
2796 error_list.append("Cancelled")
2797 # TODO cancel all tasks
2798 except Exception as exc:
2799 error_list.append(str(exc))
quilesj4cda56b2019-12-05 10:02:20 +00002800
tiernoe876f672020-02-13 14:34:48 +00002801 # update operation-status
2802 db_nsr_update["operational-status"] = "running"
2803 # let's begin with VCA 'configured' status (later we can change it)
2804 db_nsr_update["config-status"] = "configured"
2805 for task, task_name in tasks_dict_info.items():
2806 if not task.done() or task.cancelled() or task.exception():
2807 if task_name.startswith(self.task_name_deploy_vca):
2808 # A N2VC task is pending
2809 db_nsr_update["config-status"] = "failed"
quilesj4cda56b2019-12-05 10:02:20 +00002810 else:
tiernoe876f672020-02-13 14:34:48 +00002811 # RO or KDU task is pending
2812 db_nsr_update["operational-status"] = "failed"
quilesj3655ae02019-12-12 16:08:35 +00002813
tiernoe876f672020-02-13 14:34:48 +00002814 # update status at database
2815 if error_list:
tiernoa2143262020-03-27 16:20:40 +00002816 error_detail = ". ".join(error_list)
tiernoe876f672020-02-13 14:34:48 +00002817 self.logger.error(logging_text + error_detail)
garciadeblas5697b8b2021-03-24 09:17:02 +01002818 error_description_nslcmop = "{} Detail: {}".format(
2819 stage[0], error_detail
2820 )
2821 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2822 nslcmop_id, stage[0]
2823 )
quilesj3655ae02019-12-12 16:08:35 +00002824
garciadeblas5697b8b2021-03-24 09:17:02 +01002825 db_nsr_update["detailed-status"] = (
2826 error_description_nsr + " Detail: " + error_detail
2827 )
tiernoe876f672020-02-13 14:34:48 +00002828 db_nslcmop_update["detailed-status"] = error_detail
2829 nslcmop_operation_state = "FAILED"
2830 ns_state = "BROKEN"
2831 else:
tiernoa2143262020-03-27 16:20:40 +00002832 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00002833 error_description_nsr = error_description_nslcmop = None
2834 ns_state = "READY"
2835 db_nsr_update["detailed-status"] = "Done"
2836 db_nslcmop_update["detailed-status"] = "Done"
2837 nslcmop_operation_state = "COMPLETED"
quilesj4cda56b2019-12-05 10:02:20 +00002838
tiernoe876f672020-02-13 14:34:48 +00002839 if db_nsr:
2840 self._write_ns_status(
2841 nsr_id=nsr_id,
2842 ns_state=ns_state,
2843 current_operation="IDLE",
2844 current_operation_id=None,
2845 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00002846 error_detail=error_detail,
garciadeblas5697b8b2021-03-24 09:17:02 +01002847 other_update=db_nsr_update,
tiernoe876f672020-02-13 14:34:48 +00002848 )
tiernoa17d4f42020-04-28 09:59:23 +00002849 self._write_op_status(
2850 op_id=nslcmop_id,
2851 stage="",
2852 error_message=error_description_nslcmop,
2853 operation_state=nslcmop_operation_state,
2854 other_update=db_nslcmop_update,
2855 )
quilesj3655ae02019-12-12 16:08:35 +00002856
tierno59d22d22018-09-25 18:10:19 +02002857 if nslcmop_operation_state:
2858 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01002859 await self.msg.aiowrite(
2860 "ns",
2861 "instantiated",
2862 {
2863 "nsr_id": nsr_id,
2864 "nslcmop_id": nslcmop_id,
2865 "operationState": nslcmop_operation_state,
2866 },
2867 loop=self.loop,
2868 )
tierno59d22d22018-09-25 18:10:19 +02002869 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002870 self.logger.error(
2871 logging_text + "kafka_write notification Exception {}".format(e)
2872 )
tierno59d22d22018-09-25 18:10:19 +02002873
2874 self.logger.debug(logging_text + "Exit")
2875 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2876
David Garciab4ebcd02021-10-28 02:00:43 +02002877 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2878 if vnfd_id not in cached_vnfds:
2879 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2880 return cached_vnfds[vnfd_id]
2881
2882 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2883 if vnf_profile_id not in cached_vnfrs:
2884 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2885 "vnfrs",
2886 {
2887 "member-vnf-index-ref": vnf_profile_id,
2888 "nsr-id-ref": nsr_id,
2889 },
2890 )
2891 return cached_vnfrs[vnf_profile_id]
2892
2893 def _is_deployed_vca_in_relation(
2894 self, vca: DeployedVCA, relation: Relation
2895 ) -> bool:
2896 found = False
2897 for endpoint in (relation.provider, relation.requirer):
2898 if endpoint["kdu-resource-profile-id"]:
2899 continue
2900 found = (
2901 vca.vnf_profile_id == endpoint.vnf_profile_id
2902 and vca.vdu_profile_id == endpoint.vdu_profile_id
2903 and vca.execution_environment_ref == endpoint.execution_environment_ref
2904 )
2905 if found:
2906 break
2907 return found
2908
2909 def _update_ee_relation_data_with_implicit_data(
2910 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2911 ):
2912 ee_relation_data = safe_get_ee_relation(
2913 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2914 )
2915 ee_relation_level = EELevel.get_level(ee_relation_data)
2916 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2917 "execution-environment-ref"
2918 ]:
2919 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2920 vnfd_id = vnf_profile["vnfd-id"]
2921 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2922 entity_id = (
2923 vnfd_id
2924 if ee_relation_level == EELevel.VNF
2925 else ee_relation_data["vdu-profile-id"]
2926 )
2927 ee = get_juju_ee_ref(db_vnfd, entity_id)
2928 if not ee:
2929 raise Exception(
2930 f"not execution environments found for ee_relation {ee_relation_data}"
2931 )
2932 ee_relation_data["execution-environment-ref"] = ee["id"]
2933 return ee_relation_data
2934
2935 def _get_ns_relations(
2936 self,
2937 nsr_id: str,
2938 nsd: Dict[str, Any],
2939 vca: DeployedVCA,
2940 cached_vnfds: Dict[str, Any],
David Garcia444bf962021-11-11 16:35:26 +01002941 ) -> List[Relation]:
David Garciab4ebcd02021-10-28 02:00:43 +02002942 relations = []
2943 db_ns_relations = get_ns_configuration_relation_list(nsd)
2944 for r in db_ns_relations:
David Garcia444bf962021-11-11 16:35:26 +01002945 provider_dict = None
2946 requirer_dict = None
2947 if all(key in r for key in ("provider", "requirer")):
2948 provider_dict = r["provider"]
2949 requirer_dict = r["requirer"]
2950 elif "entities" in r:
2951 provider_id = r["entities"][0]["id"]
2952 provider_dict = {
2953 "nsr-id": nsr_id,
2954 "endpoint": r["entities"][0]["endpoint"],
2955 }
2956 if provider_id != nsd["id"]:
2957 provider_dict["vnf-profile-id"] = provider_id
2958 requirer_id = r["entities"][1]["id"]
2959 requirer_dict = {
2960 "nsr-id": nsr_id,
2961 "endpoint": r["entities"][1]["endpoint"],
2962 }
2963 if requirer_id != nsd["id"]:
2964 requirer_dict["vnf-profile-id"] = requirer_id
2965 else:
aticig15db6142022-01-24 12:51:26 +03002966 raise Exception(
2967 "provider/requirer or entities must be included in the relation."
2968 )
David Garciab4ebcd02021-10-28 02:00:43 +02002969 relation_provider = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01002970 nsr_id, nsd, provider_dict, cached_vnfds
David Garciab4ebcd02021-10-28 02:00:43 +02002971 )
2972 relation_requirer = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01002973 nsr_id, nsd, requirer_dict, cached_vnfds
David Garciab4ebcd02021-10-28 02:00:43 +02002974 )
2975 provider = EERelation(relation_provider)
2976 requirer = EERelation(relation_requirer)
2977 relation = Relation(r["name"], provider, requirer)
2978 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2979 if vca_in_relation:
2980 relations.append(relation)
2981 return relations
2982
2983 def _get_vnf_relations(
2984 self,
2985 nsr_id: str,
2986 nsd: Dict[str, Any],
2987 vca: DeployedVCA,
2988 cached_vnfds: Dict[str, Any],
David Garcia444bf962021-11-11 16:35:26 +01002989 ) -> List[Relation]:
David Garciab4ebcd02021-10-28 02:00:43 +02002990 relations = []
2991 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2992 vnf_profile_id = vnf_profile["id"]
2993 vnfd_id = vnf_profile["vnfd-id"]
2994 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2995 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
2996 for r in db_vnf_relations:
David Garcia444bf962021-11-11 16:35:26 +01002997 provider_dict = None
2998 requirer_dict = None
2999 if all(key in r for key in ("provider", "requirer")):
3000 provider_dict = r["provider"]
3001 requirer_dict = r["requirer"]
3002 elif "entities" in r:
3003 provider_id = r["entities"][0]["id"]
3004 provider_dict = {
3005 "nsr-id": nsr_id,
3006 "vnf-profile-id": vnf_profile_id,
3007 "endpoint": r["entities"][0]["endpoint"],
3008 }
3009 if provider_id != vnfd_id:
3010 provider_dict["vdu-profile-id"] = provider_id
3011 requirer_id = r["entities"][1]["id"]
3012 requirer_dict = {
3013 "nsr-id": nsr_id,
3014 "vnf-profile-id": vnf_profile_id,
3015 "endpoint": r["entities"][1]["endpoint"],
3016 }
3017 if requirer_id != vnfd_id:
3018 requirer_dict["vdu-profile-id"] = requirer_id
3019 else:
aticig15db6142022-01-24 12:51:26 +03003020 raise Exception(
3021 "provider/requirer or entities must be included in the relation."
3022 )
David Garciab4ebcd02021-10-28 02:00:43 +02003023 relation_provider = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01003024 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
David Garciab4ebcd02021-10-28 02:00:43 +02003025 )
3026 relation_requirer = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01003027 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
David Garciab4ebcd02021-10-28 02:00:43 +02003028 )
3029 provider = EERelation(relation_provider)
3030 requirer = EERelation(relation_requirer)
3031 relation = Relation(r["name"], provider, requirer)
3032 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3033 if vca_in_relation:
3034 relations.append(relation)
3035 return relations
3036
3037 def _get_kdu_resource_data(
3038 self,
3039 ee_relation: EERelation,
3040 db_nsr: Dict[str, Any],
3041 cached_vnfds: Dict[str, Any],
3042 ) -> DeployedK8sResource:
3043 nsd = get_nsd(db_nsr)
3044 vnf_profiles = get_vnf_profiles(nsd)
3045 vnfd_id = find_in_list(
3046 vnf_profiles,
3047 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3048 )["vnfd-id"]
3049 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3050 kdu_resource_profile = get_kdu_resource_profile(
3051 db_vnfd, ee_relation.kdu_resource_profile_id
3052 )
3053 kdu_name = kdu_resource_profile["kdu-name"]
3054 deployed_kdu, _ = get_deployed_kdu(
3055 db_nsr.get("_admin", ()).get("deployed", ()),
3056 kdu_name,
3057 ee_relation.vnf_profile_id,
3058 )
3059 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3060 return deployed_kdu
3061
3062 def _get_deployed_component(
3063 self,
3064 ee_relation: EERelation,
3065 db_nsr: Dict[str, Any],
3066 cached_vnfds: Dict[str, Any],
3067 ) -> DeployedComponent:
3068 nsr_id = db_nsr["_id"]
3069 deployed_component = None
3070 ee_level = EELevel.get_level(ee_relation)
3071 if ee_level == EELevel.NS:
3072 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3073 if vca:
3074 deployed_component = DeployedVCA(nsr_id, vca)
3075 elif ee_level == EELevel.VNF:
3076 vca = get_deployed_vca(
3077 db_nsr,
3078 {
3079 "vdu_id": None,
3080 "member-vnf-index": ee_relation.vnf_profile_id,
3081 "ee_descriptor_id": ee_relation.execution_environment_ref,
3082 },
3083 )
3084 if vca:
3085 deployed_component = DeployedVCA(nsr_id, vca)
3086 elif ee_level == EELevel.VDU:
3087 vca = get_deployed_vca(
3088 db_nsr,
3089 {
3090 "vdu_id": ee_relation.vdu_profile_id,
3091 "member-vnf-index": ee_relation.vnf_profile_id,
3092 "ee_descriptor_id": ee_relation.execution_environment_ref,
3093 },
3094 )
3095 if vca:
3096 deployed_component = DeployedVCA(nsr_id, vca)
3097 elif ee_level == EELevel.KDU:
3098 kdu_resource_data = self._get_kdu_resource_data(
3099 ee_relation, db_nsr, cached_vnfds
3100 )
3101 if kdu_resource_data:
3102 deployed_component = DeployedK8sResource(kdu_resource_data)
3103 return deployed_component
3104
3105 async def _add_relation(
3106 self,
3107 relation: Relation,
3108 vca_type: str,
3109 db_nsr: Dict[str, Any],
3110 cached_vnfds: Dict[str, Any],
3111 cached_vnfrs: Dict[str, Any],
3112 ) -> bool:
3113 deployed_provider = self._get_deployed_component(
3114 relation.provider, db_nsr, cached_vnfds
3115 )
3116 deployed_requirer = self._get_deployed_component(
3117 relation.requirer, db_nsr, cached_vnfds
3118 )
3119 if (
3120 deployed_provider
3121 and deployed_requirer
3122 and deployed_provider.config_sw_installed
3123 and deployed_requirer.config_sw_installed
3124 ):
3125 provider_db_vnfr = (
3126 self._get_vnfr(
3127 relation.provider.nsr_id,
3128 relation.provider.vnf_profile_id,
3129 cached_vnfrs,
3130 )
3131 if relation.provider.vnf_profile_id
3132 else None
3133 )
3134 requirer_db_vnfr = (
3135 self._get_vnfr(
3136 relation.requirer.nsr_id,
3137 relation.requirer.vnf_profile_id,
3138 cached_vnfrs,
3139 )
3140 if relation.requirer.vnf_profile_id
3141 else None
3142 )
3143 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3144 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3145 provider_relation_endpoint = RelationEndpoint(
3146 deployed_provider.ee_id,
3147 provider_vca_id,
3148 relation.provider.endpoint,
3149 )
3150 requirer_relation_endpoint = RelationEndpoint(
3151 deployed_requirer.ee_id,
3152 requirer_vca_id,
3153 relation.requirer.endpoint,
3154 )
3155 await self.vca_map[vca_type].add_relation(
3156 provider=provider_relation_endpoint,
3157 requirer=requirer_relation_endpoint,
3158 )
3159 # remove entry from relations list
3160 return True
3161 return False
3162
David Garciac1fe90a2021-03-31 19:12:02 +02003163 async def _add_vca_relations(
3164 self,
3165 logging_text,
3166 nsr_id,
David Garciab4ebcd02021-10-28 02:00:43 +02003167 vca_type: str,
David Garciac1fe90a2021-03-31 19:12:02 +02003168 vca_index: int,
3169 timeout: int = 3600,
David Garciac1fe90a2021-03-31 19:12:02 +02003170 ) -> bool:
quilesj63f90042020-01-17 09:53:55 +00003171
3172 # steps:
3173 # 1. find all relations for this VCA
3174 # 2. wait for other peers related
3175 # 3. add relations
3176
3177 try:
quilesj63f90042020-01-17 09:53:55 +00003178 # STEP 1: find all relations for this VCA
3179
3180 # read nsr record
3181 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garciab4ebcd02021-10-28 02:00:43 +02003182 nsd = get_nsd(db_nsr)
quilesj63f90042020-01-17 09:53:55 +00003183
3184 # this VCA data
David Garciab4ebcd02021-10-28 02:00:43 +02003185 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3186 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
quilesj63f90042020-01-17 09:53:55 +00003187
David Garciab4ebcd02021-10-28 02:00:43 +02003188 cached_vnfds = {}
3189 cached_vnfrs = {}
3190 relations = []
3191 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3192 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
quilesj63f90042020-01-17 09:53:55 +00003193
3194 # if no relations, terminate
David Garciab4ebcd02021-10-28 02:00:43 +02003195 if not relations:
garciadeblas5697b8b2021-03-24 09:17:02 +01003196 self.logger.debug(logging_text + " No relations")
quilesj63f90042020-01-17 09:53:55 +00003197 return True
3198
David Garciab4ebcd02021-10-28 02:00:43 +02003199 self.logger.debug(logging_text + " adding relations {}".format(relations))
quilesj63f90042020-01-17 09:53:55 +00003200
3201 # add all relations
3202 start = time()
3203 while True:
3204 # check timeout
3205 now = time()
3206 if now - start >= timeout:
garciadeblas5697b8b2021-03-24 09:17:02 +01003207 self.logger.error(logging_text + " : timeout adding relations")
quilesj63f90042020-01-17 09:53:55 +00003208 return False
3209
David Garciab4ebcd02021-10-28 02:00:43 +02003210 # reload nsr from database (we need to update record: _admin.deployed.VCA)
quilesj63f90042020-01-17 09:53:55 +00003211 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3212
David Garciab4ebcd02021-10-28 02:00:43 +02003213 # for each relation, find the VCA's related
3214 for relation in relations.copy():
3215 added = await self._add_relation(
3216 relation,
3217 vca_type,
3218 db_nsr,
3219 cached_vnfds,
3220 cached_vnfrs,
3221 )
3222 if added:
3223 relations.remove(relation)
quilesj63f90042020-01-17 09:53:55 +00003224
David Garciab4ebcd02021-10-28 02:00:43 +02003225 if not relations:
garciadeblas5697b8b2021-03-24 09:17:02 +01003226 self.logger.debug("Relations added")
quilesj63f90042020-01-17 09:53:55 +00003227 break
David Garciab4ebcd02021-10-28 02:00:43 +02003228 await asyncio.sleep(5.0)
quilesj63f90042020-01-17 09:53:55 +00003229
3230 return True
3231
3232 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01003233 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
quilesj63f90042020-01-17 09:53:55 +00003234 return False
3235
garciadeblas5697b8b2021-03-24 09:17:02 +01003236 async def _install_kdu(
3237 self,
3238 nsr_id: str,
3239 nsr_db_path: str,
3240 vnfr_data: dict,
3241 kdu_index: int,
3242 kdud: dict,
3243 vnfd: dict,
3244 k8s_instance_info: dict,
3245 k8params: dict = None,
3246 timeout: int = 600,
3247 vca_id: str = None,
3248 ):
lloretgalleg7c121132020-07-08 07:53:22 +00003249
tiernob9018152020-04-16 14:18:24 +00003250 try:
lloretgalleg7c121132020-07-08 07:53:22 +00003251 k8sclustertype = k8s_instance_info["k8scluster-type"]
3252 # Instantiate kdu
garciadeblas5697b8b2021-03-24 09:17:02 +01003253 db_dict_install = {
3254 "collection": "nsrs",
3255 "filter": {"_id": nsr_id},
3256 "path": nsr_db_path,
3257 }
lloretgalleg7c121132020-07-08 07:53:22 +00003258
romeromonser4554a702021-05-28 12:00:08 +02003259 if k8s_instance_info.get("kdu-deployment-name"):
3260 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3261 else:
3262 kdu_instance = self.k8scluster_map[
3263 k8sclustertype
3264 ].generate_kdu_instance_name(
3265 db_dict=db_dict_install,
3266 kdu_model=k8s_instance_info["kdu-model"],
3267 kdu_name=k8s_instance_info["kdu-name"],
3268 )
Pedro Escaleirada21d262022-04-21 16:31:06 +01003269
3270 # Update the nsrs table with the kdu-instance value
garciadeblas5697b8b2021-03-24 09:17:02 +01003271 self.update_db_2(
Pedro Escaleirada21d262022-04-21 16:31:06 +01003272 item="nsrs",
3273 _id=nsr_id,
3274 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
garciadeblas5697b8b2021-03-24 09:17:02 +01003275 )
Pedro Escaleirada21d262022-04-21 16:31:06 +01003276
3277 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3278 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3279 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3280 # namespace, this first verification could be removed, and the next step would be done for any kind
3281 # of KNF.
3282 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3283 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3284 if k8sclustertype in ("juju", "juju-bundle"):
3285 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3286 # that the user passed a namespace which he wants its KDU to be deployed in)
3287 if (
3288 self.db.count(
3289 table="nsrs",
3290 q_filter={
3291 "_id": nsr_id,
3292 "_admin.projects_write": k8s_instance_info["namespace"],
3293 "_admin.projects_read": k8s_instance_info["namespace"],
3294 },
3295 )
3296 > 0
3297 ):
3298 self.logger.debug(
3299 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3300 )
3301 self.update_db_2(
3302 item="nsrs",
3303 _id=nsr_id,
3304 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3305 )
3306 k8s_instance_info["namespace"] = kdu_instance
3307
David Garciad64e2742021-02-25 20:19:18 +01003308 await self.k8scluster_map[k8sclustertype].install(
lloretgalleg7c121132020-07-08 07:53:22 +00003309 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3310 kdu_model=k8s_instance_info["kdu-model"],
3311 atomic=True,
3312 params=k8params,
3313 db_dict=db_dict_install,
3314 timeout=timeout,
3315 kdu_name=k8s_instance_info["kdu-name"],
David Garciad64e2742021-02-25 20:19:18 +01003316 namespace=k8s_instance_info["namespace"],
3317 kdu_instance=kdu_instance,
David Garciac1fe90a2021-03-31 19:12:02 +02003318 vca_id=vca_id,
David Garciad64e2742021-02-25 20:19:18 +01003319 )
lloretgalleg7c121132020-07-08 07:53:22 +00003320
3321 # Obtain services to obtain management service ip
3322 services = await self.k8scluster_map[k8sclustertype].get_services(
3323 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3324 kdu_instance=kdu_instance,
garciadeblas5697b8b2021-03-24 09:17:02 +01003325 namespace=k8s_instance_info["namespace"],
3326 )
lloretgalleg7c121132020-07-08 07:53:22 +00003327
3328 # Obtain management service info (if exists)
tierno7ecbc342020-09-21 14:05:39 +00003329 vnfr_update_dict = {}
bravof6ec62b72021-02-25 17:20:35 -03003330 kdu_config = get_configuration(vnfd, kdud["name"])
3331 if kdu_config:
3332 target_ee_list = kdu_config.get("execution-environment-list", [])
3333 else:
3334 target_ee_list = []
3335
lloretgalleg7c121132020-07-08 07:53:22 +00003336 if services:
tierno7ecbc342020-09-21 14:05:39 +00003337 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
garciadeblas5697b8b2021-03-24 09:17:02 +01003338 mgmt_services = [
3339 service
3340 for service in kdud.get("service", [])
3341 if service.get("mgmt-service")
3342 ]
lloretgalleg7c121132020-07-08 07:53:22 +00003343 for mgmt_service in mgmt_services:
3344 for service in services:
3345 if service["name"].startswith(mgmt_service["name"]):
3346 # Mgmt service found, Obtain service ip
3347 ip = service.get("external_ip", service.get("cluster_ip"))
3348 if isinstance(ip, list) and len(ip) == 1:
3349 ip = ip[0]
3350
garciadeblas5697b8b2021-03-24 09:17:02 +01003351 vnfr_update_dict[
3352 "kdur.{}.ip-address".format(kdu_index)
3353 ] = ip
lloretgalleg7c121132020-07-08 07:53:22 +00003354
3355 # Check if must update also mgmt ip at the vnf
garciadeblas5697b8b2021-03-24 09:17:02 +01003356 service_external_cp = mgmt_service.get(
3357 "external-connection-point-ref"
3358 )
lloretgalleg7c121132020-07-08 07:53:22 +00003359 if service_external_cp:
garciadeblas5697b8b2021-03-24 09:17:02 +01003360 if (
3361 deep_get(vnfd, ("mgmt-interface", "cp"))
3362 == service_external_cp
3363 ):
lloretgalleg7c121132020-07-08 07:53:22 +00003364 vnfr_update_dict["ip-address"] = ip
3365
bravof6ec62b72021-02-25 17:20:35 -03003366 if find_in_list(
3367 target_ee_list,
garciadeblas5697b8b2021-03-24 09:17:02 +01003368 lambda ee: ee.get(
3369 "external-connection-point-ref", ""
3370 )
3371 == service_external_cp,
bravof6ec62b72021-02-25 17:20:35 -03003372 ):
garciadeblas5697b8b2021-03-24 09:17:02 +01003373 vnfr_update_dict[
3374 "kdur.{}.ip-address".format(kdu_index)
3375 ] = ip
lloretgalleg7c121132020-07-08 07:53:22 +00003376 break
3377 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01003378 self.logger.warn(
3379 "Mgmt service name: {} not found".format(
3380 mgmt_service["name"]
3381 )
3382 )
lloretgalleg7c121132020-07-08 07:53:22 +00003383
tierno7ecbc342020-09-21 14:05:39 +00003384 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3385 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
lloretgalleg7c121132020-07-08 07:53:22 +00003386
bravof9a256db2021-02-22 18:02:07 -03003387 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
garciadeblas5697b8b2021-03-24 09:17:02 +01003388 if (
3389 kdu_config
3390 and kdu_config.get("initial-config-primitive")
3391 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3392 ):
3393 initial_config_primitive_list = kdu_config.get(
3394 "initial-config-primitive"
3395 )
Dominik Fleischmannc1975dd2020-08-19 12:17:51 +02003396 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3397
3398 for initial_config_primitive in initial_config_primitive_list:
garciadeblas5697b8b2021-03-24 09:17:02 +01003399 primitive_params_ = self._map_primitive_params(
3400 initial_config_primitive, {}, {}
3401 )
Dominik Fleischmannc1975dd2020-08-19 12:17:51 +02003402
3403 await asyncio.wait_for(
3404 self.k8scluster_map[k8sclustertype].exec_primitive(
3405 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3406 kdu_instance=kdu_instance,
3407 primitive_name=initial_config_primitive["name"],
garciadeblas5697b8b2021-03-24 09:17:02 +01003408 params=primitive_params_,
3409 db_dict=db_dict_install,
David Garciac1fe90a2021-03-31 19:12:02 +02003410 vca_id=vca_id,
3411 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01003412 timeout=timeout,
David Garciac1fe90a2021-03-31 19:12:02 +02003413 )
Dominik Fleischmannc1975dd2020-08-19 12:17:51 +02003414
tiernob9018152020-04-16 14:18:24 +00003415 except Exception as e:
lloretgalleg7c121132020-07-08 07:53:22 +00003416 # Prepare update db with error and raise exception
tiernob9018152020-04-16 14:18:24 +00003417 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01003418 self.update_db_2(
3419 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3420 )
3421 self.update_db_2(
3422 "vnfrs",
3423 vnfr_data.get("_id"),
3424 {"kdur.{}.status".format(kdu_index): "ERROR"},
3425 )
tiernob9018152020-04-16 14:18:24 +00003426 except Exception:
lloretgalleg7c121132020-07-08 07:53:22 +00003427 # ignore to keep original exception
tiernob9018152020-04-16 14:18:24 +00003428 pass
lloretgalleg7c121132020-07-08 07:53:22 +00003429 # reraise original error
3430 raise
3431
3432 return kdu_instance
tiernob9018152020-04-16 14:18:24 +00003433
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Deploy every KDU (helm chart or juju bundle) present in the VNF records.

        For each "kdur" entry of each vnfr this method resolves the target k8s
        cluster, synchronizes helm repos once per cluster, records the pending
        deployment at nsrs._admin.deployed.K8s.<index> and launches an
        asynchronous _install_kdu task registered in self.lcm_tasks.

        :param logging_text: prefix for all log messages of this operation
        :param nsr_id: NS record id; used to update the "nsrs" collection
        :param nslcmop_id: NS LCM operation id; used when registering tasks
        :param db_vnfrs: dict of VNF records, each possibly holding a "kdur" list
        :param db_vnfds: list of VNF descriptors, looked up by "_id"
        :param task_instantiation_info: dict updated with task -> description
        :raises LcmException: unknown kdu type, cluster not found/initialized,
            or any unexpected error (message includes the failing step)
        """
        # Launch kdus if present in the descriptor

        # cache of cluster-id -> internal uuid, separated per cluster type so
        # repeated kdur entries on the same cluster resolve it only once
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and memoize) the internal k8s cluster uuid for
            # `cluster_type`, initializing the cluster for helm-v3 when needed.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        # `step` is kept up to date so the except branch can report what failed
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            # clusters whose helm repos were already synchronized (per helm version)
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                # the model is packaged inside the descriptor:
                                # use its absolute path instead of the raw name
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # run the installation in the background and track the task
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00003705
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create or reuse the VCA entries for the execution environments of one
        element (ns, vnf, vdu or kdu) and launch one instantiate_N2VC task per
        execution environment found in `descriptor_config`.

        Existing entries at <nsrs>._admin.deployed.VCA are reused when they match
        member-vnf-index, vdu_id, kdu_name, vdu_count_index and ee_descriptor_id;
        otherwise a new entry is appended and persisted before launching the task.
        Each task is registered in self.lcm_tasks and described in
        `task_instantiation_info`.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # derive vca_type/vca_name from the kind of execution environment
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an existing VCA entry; the for/else appends a new one
            # when no match breaks out of the loop
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
tiernobaa51102018-12-14 13:16:18 +00003858
tiernoc9556972019-07-05 15:25:25 +00003859 @staticmethod
kuuse0ca67472019-05-13 15:59:27 +02003860 def _create_nslcmop(nsr_id, operation, params):
3861 """
3862 Creates a ns-lcm-opp content to be stored at database.
3863 :param nsr_id: internal id of the instance
3864 :param operation: instantiate, terminate, scale, action, ...
3865 :param params: user parameters for the operation
3866 :return: dictionary following SOL005 format
3867 """
3868 # Raise exception if invalid arguments
3869 if not (nsr_id and operation and params):
3870 raise LcmException(
garciadeblas5697b8b2021-03-24 09:17:02 +01003871 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3872 )
kuuse0ca67472019-05-13 15:59:27 +02003873 now = time()
3874 _id = str(uuid4())
3875 nslcmop = {
3876 "id": _id,
3877 "_id": _id,
3878 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3879 "operationState": "PROCESSING",
3880 "statusEnteredTime": now,
3881 "nsInstanceId": nsr_id,
3882 "lcmOperationType": operation,
3883 "startTime": now,
3884 "isAutomaticInvocation": False,
3885 "operationParams": params,
3886 "isCancelPending": False,
3887 "links": {
3888 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3889 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01003890 },
kuuse0ca67472019-05-13 15:59:27 +02003891 }
3892 return nslcmop
3893
calvinosanch9f9c6f22019-11-04 13:37:39 +01003894 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00003895 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01003896 for key, value in params.items():
3897 if str(value).startswith("!!yaml "):
3898 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01003899 return params
3900
kuuse8b998e42019-07-30 15:22:16 +02003901 def _get_terminate_primitive_params(self, seq, vnf_index):
garciadeblas5697b8b2021-03-24 09:17:02 +01003902 primitive = seq.get("name")
kuuse8b998e42019-07-30 15:22:16 +02003903 primitive_params = {}
3904 params = {
3905 "member_vnf_index": vnf_index,
3906 "primitive": primitive,
3907 "primitive_params": primitive_params,
3908 }
3909 desc_params = {}
3910 return self._map_primitive_params(seq, params, desc_params)
3911
kuuseac3a8882019-10-03 10:48:06 +02003912 # sub-operations
3913
tierno51183952020-04-03 15:48:18 +00003914 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
garciadeblas5697b8b2021-03-24 09:17:02 +01003915 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3916 if op.get("operationState") == "COMPLETED":
kuuseac3a8882019-10-03 10:48:06 +02003917 # b. Skip sub-operation
3918 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3919 return self.SUBOPERATION_STATUS_SKIP
3920 else:
tierno7c4e24c2020-05-13 08:41:35 +00003921 # c. retry executing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02003922 # The sub-operation exists, and operationState != 'COMPLETED'
tierno7c4e24c2020-05-13 08:41:35 +00003923 # Update operationState = 'PROCESSING' to indicate a retry.
garciadeblas5697b8b2021-03-24 09:17:02 +01003924 operationState = "PROCESSING"
3925 detailed_status = "In progress"
kuuseac3a8882019-10-03 10:48:06 +02003926 self._update_suboperation_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01003927 db_nslcmop, op_index, operationState, detailed_status
3928 )
kuuseac3a8882019-10-03 10:48:06 +02003929 # Return the sub-operation index
3930 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3931 # with arguments extracted from the sub-operation
3932 return op_index
3933
3934 # Find a sub-operation where all keys in a matching dictionary must match
3935 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3936 def _find_suboperation(self, db_nslcmop, match):
tierno7c4e24c2020-05-13 08:41:35 +00003937 if db_nslcmop and match:
garciadeblas5697b8b2021-03-24 09:17:02 +01003938 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
kuuseac3a8882019-10-03 10:48:06 +02003939 for i, op in enumerate(op_list):
3940 if all(op.get(k) == match[k] for k in match):
3941 return i
3942 return self.SUBOPERATION_STATUS_NOT_FOUND
3943
3944 # Update status for a sub-operation given its index
garciadeblas5697b8b2021-03-24 09:17:02 +01003945 def _update_suboperation_status(
3946 self, db_nslcmop, op_index, operationState, detailed_status
3947 ):
kuuseac3a8882019-10-03 10:48:06 +02003948 # Update DB for HA tasks
garciadeblas5697b8b2021-03-24 09:17:02 +01003949 q_filter = {"_id": db_nslcmop["_id"]}
3950 update_dict = {
3951 "_admin.operations.{}.operationState".format(op_index): operationState,
3952 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3953 }
3954 self.db.set_one(
3955 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3956 )
kuuseac3a8882019-10-03 10:48:06 +02003957
3958 # Add sub-operation, return the index of the added sub-operation
3959 # Optionally, set operationState, detailed-status, and operationType
3960 # Status and type are currently set for 'scale' sub-operations:
3961 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3962 # 'detailed-status' : status message
3963 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3964 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
garciadeblas5697b8b2021-03-24 09:17:02 +01003965 def _add_suboperation(
3966 self,
3967 db_nslcmop,
3968 vnf_index,
3969 vdu_id,
3970 vdu_count_index,
3971 vdu_name,
3972 primitive,
3973 mapped_primitive_params,
3974 operationState=None,
3975 detailed_status=None,
3976 operationType=None,
3977 RO_nsr_id=None,
3978 RO_scaling_info=None,
3979 ):
tiernoe876f672020-02-13 14:34:48 +00003980 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02003981 return self.SUBOPERATION_STATUS_NOT_FOUND
3982 # Get the "_admin.operations" list, if it exists
garciadeblas5697b8b2021-03-24 09:17:02 +01003983 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3984 op_list = db_nslcmop_admin.get("operations")
kuuseac3a8882019-10-03 10:48:06 +02003985 # Create or append to the "_admin.operations" list
garciadeblas5697b8b2021-03-24 09:17:02 +01003986 new_op = {
3987 "member_vnf_index": vnf_index,
3988 "vdu_id": vdu_id,
3989 "vdu_count_index": vdu_count_index,
3990 "primitive": primitive,
3991 "primitive_params": mapped_primitive_params,
3992 }
kuuseac3a8882019-10-03 10:48:06 +02003993 if operationState:
garciadeblas5697b8b2021-03-24 09:17:02 +01003994 new_op["operationState"] = operationState
kuuseac3a8882019-10-03 10:48:06 +02003995 if detailed_status:
garciadeblas5697b8b2021-03-24 09:17:02 +01003996 new_op["detailed-status"] = detailed_status
kuuseac3a8882019-10-03 10:48:06 +02003997 if operationType:
garciadeblas5697b8b2021-03-24 09:17:02 +01003998 new_op["lcmOperationType"] = operationType
kuuseac3a8882019-10-03 10:48:06 +02003999 if RO_nsr_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01004000 new_op["RO_nsr_id"] = RO_nsr_id
kuuseac3a8882019-10-03 10:48:06 +02004001 if RO_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01004002 new_op["RO_scaling_info"] = RO_scaling_info
kuuseac3a8882019-10-03 10:48:06 +02004003 if not op_list:
4004 # No existing operations, create key 'operations' with current operation as first list element
garciadeblas5697b8b2021-03-24 09:17:02 +01004005 db_nslcmop_admin.update({"operations": [new_op]})
4006 op_list = db_nslcmop_admin.get("operations")
kuuseac3a8882019-10-03 10:48:06 +02004007 else:
4008 # Existing operations, append operation to list
4009 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02004010
garciadeblas5697b8b2021-03-24 09:17:02 +01004011 db_nslcmop_update = {"_admin.operations": op_list}
4012 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
kuuseac3a8882019-10-03 10:48:06 +02004013 op_index = len(op_list) - 1
4014 return op_index
4015
4016 # Helper methods for scale() sub-operations
4017
4018 # pre-scale/post-scale:
4019 # Check for 3 different cases:
4020 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4021 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
tierno7c4e24c2020-05-13 08:41:35 +00004022 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
garciadeblas5697b8b2021-03-24 09:17:02 +01004023 def _check_or_add_scale_suboperation(
4024 self,
4025 db_nslcmop,
4026 vnf_index,
4027 vnf_config_primitive,
4028 primitive_params,
4029 operationType,
4030 RO_nsr_id=None,
4031 RO_scaling_info=None,
4032 ):
kuuseac3a8882019-10-03 10:48:06 +02004033 # Find this sub-operation
tierno7c4e24c2020-05-13 08:41:35 +00004034 if RO_nsr_id and RO_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01004035 operationType = "SCALE-RO"
kuuseac3a8882019-10-03 10:48:06 +02004036 match = {
garciadeblas5697b8b2021-03-24 09:17:02 +01004037 "member_vnf_index": vnf_index,
4038 "RO_nsr_id": RO_nsr_id,
4039 "RO_scaling_info": RO_scaling_info,
kuuseac3a8882019-10-03 10:48:06 +02004040 }
4041 else:
4042 match = {
garciadeblas5697b8b2021-03-24 09:17:02 +01004043 "member_vnf_index": vnf_index,
4044 "primitive": vnf_config_primitive,
4045 "primitive_params": primitive_params,
4046 "lcmOperationType": operationType,
kuuseac3a8882019-10-03 10:48:06 +02004047 }
4048 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00004049 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02004050 # a. New sub-operation
4051 # The sub-operation does not exist, add it.
4052 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4053 # The following parameters are set to None for all kind of scaling:
4054 vdu_id = None
4055 vdu_count_index = None
4056 vdu_name = None
tierno51183952020-04-03 15:48:18 +00004057 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02004058 vnf_config_primitive = None
4059 primitive_params = None
4060 else:
4061 RO_nsr_id = None
4062 RO_scaling_info = None
4063 # Initial status for sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01004064 operationState = "PROCESSING"
4065 detailed_status = "In progress"
kuuseac3a8882019-10-03 10:48:06 +02004066 # Add sub-operation for pre/post-scaling (zero or more operations)
garciadeblas5697b8b2021-03-24 09:17:02 +01004067 self._add_suboperation(
4068 db_nslcmop,
4069 vnf_index,
4070 vdu_id,
4071 vdu_count_index,
4072 vdu_name,
4073 vnf_config_primitive,
4074 primitive_params,
4075 operationState,
4076 detailed_status,
4077 operationType,
4078 RO_nsr_id,
4079 RO_scaling_info,
4080 )
kuuseac3a8882019-10-03 10:48:06 +02004081 return self.SUBOPERATION_STATUS_NEW
4082 else:
4083 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4084 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00004085 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02004086
preethika.pdf7d8e02019-12-10 13:10:48 +00004087 # Function to return execution_environment id
4088
4089 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00004090 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00004091 for vca in vca_deployed_list:
4092 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4093 return vca["ee_id"]
4094
David Garciac1fe90a2021-03-31 19:12:02 +02004095 async def destroy_N2VC(
4096 self,
4097 logging_text,
4098 db_nslcmop,
4099 vca_deployed,
4100 config_descriptor,
4101 vca_index,
4102 destroy_ee=True,
4103 exec_primitives=True,
4104 scaling_in=False,
4105 vca_id: str = None,
4106 ):
tiernoe876f672020-02-13 14:34:48 +00004107 """
4108 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
4109 :param logging_text:
4110 :param db_nslcmop:
4111 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
4112 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4113 :param vca_index: index in the database _admin.deployed.VCA
4114 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
tierno588547c2020-07-01 15:30:20 +00004115 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
4116 not executed properly
aktas13251562021-02-12 22:19:10 +03004117 :param scaling_in: True destroys the application, False destroys the model
tiernoe876f672020-02-13 14:34:48 +00004118 :return: None or exception
4119 """
tiernoe876f672020-02-13 14:34:48 +00004120
tierno588547c2020-07-01 15:30:20 +00004121 self.logger.debug(
garciadeblas5697b8b2021-03-24 09:17:02 +01004122 logging_text
4123 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
tierno588547c2020-07-01 15:30:20 +00004124 vca_index, vca_deployed, config_descriptor, destroy_ee
4125 )
4126 )
4127
4128 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
4129
4130 # execute terminate_primitives
4131 if exec_primitives:
bravof922c4172020-11-24 21:21:43 -03004132 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
garciadeblas5697b8b2021-03-24 09:17:02 +01004133 config_descriptor.get("terminate-config-primitive"),
4134 vca_deployed.get("ee_descriptor_id"),
4135 )
tierno588547c2020-07-01 15:30:20 +00004136 vdu_id = vca_deployed.get("vdu_id")
4137 vdu_count_index = vca_deployed.get("vdu_count_index")
4138 vdu_name = vca_deployed.get("vdu_name")
4139 vnf_index = vca_deployed.get("member-vnf-index")
4140 if terminate_primitives and vca_deployed.get("needed_terminate"):
tierno588547c2020-07-01 15:30:20 +00004141 for seq in terminate_primitives:
4142 # For each sequence in list, get primitive and call _ns_execute_primitive()
4143 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
garciadeblas5697b8b2021-03-24 09:17:02 +01004144 vnf_index, seq.get("name")
4145 )
tierno588547c2020-07-01 15:30:20 +00004146 self.logger.debug(logging_text + step)
4147 # Create the primitive for each sequence, i.e. "primitive": "touch"
garciadeblas5697b8b2021-03-24 09:17:02 +01004148 primitive = seq.get("name")
4149 mapped_primitive_params = self._get_terminate_primitive_params(
4150 seq, vnf_index
4151 )
tierno588547c2020-07-01 15:30:20 +00004152
4153 # Add sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01004154 self._add_suboperation(
4155 db_nslcmop,
4156 vnf_index,
4157 vdu_id,
4158 vdu_count_index,
4159 vdu_name,
4160 primitive,
4161 mapped_primitive_params,
4162 )
tierno588547c2020-07-01 15:30:20 +00004163 # Sub-operations: Call _ns_execute_primitive() instead of action()
4164 try:
David Garciac1fe90a2021-03-31 19:12:02 +02004165 result, result_detail = await self._ns_execute_primitive(
garciadeblas5697b8b2021-03-24 09:17:02 +01004166 vca_deployed["ee_id"],
4167 primitive,
David Garciac1fe90a2021-03-31 19:12:02 +02004168 mapped_primitive_params,
4169 vca_type=vca_type,
4170 vca_id=vca_id,
4171 )
tierno588547c2020-07-01 15:30:20 +00004172 except LcmException:
4173 # this happens when VCA is not deployed. In this case it is not needed to terminate
4174 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01004175 result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
tierno588547c2020-07-01 15:30:20 +00004176 if result not in result_ok:
garciadeblas5697b8b2021-03-24 09:17:02 +01004177 raise LcmException(
4178 "terminate_primitive {} for vnf_member_index={} fails with "
4179 "error {}".format(seq.get("name"), vnf_index, result_detail)
4180 )
tierno588547c2020-07-01 15:30:20 +00004181 # set that this VCA do not need terminated
garciadeblas5697b8b2021-03-24 09:17:02 +01004182 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
4183 vca_index
4184 )
4185 self.update_db_2(
4186 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
4187 )
tiernoe876f672020-02-13 14:34:48 +00004188
bravof73bac502021-05-11 07:38:47 -04004189 # Delete Prometheus Jobs if any
4190 # This uses NSR_ID, so it will destroy any jobs under this index
4191 self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
tiernob996d942020-07-03 14:52:28 +00004192
tiernoe876f672020-02-13 14:34:48 +00004193 if destroy_ee:
David Garciac1fe90a2021-03-31 19:12:02 +02004194 await self.vca_map[vca_type].delete_execution_environment(
4195 vca_deployed["ee_id"],
4196 scaling_in=scaling_in,
aktas98488ed2021-07-29 17:42:49 +03004197 vca_type=vca_type,
David Garciac1fe90a2021-03-31 19:12:02 +02004198 vca_id=vca_id,
4199 )
kuuse0ca67472019-05-13 15:59:27 +02004200
David Garciac1fe90a2021-03-31 19:12:02 +02004201 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
garciadeblas5697b8b2021-03-24 09:17:02 +01004202 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
tierno51183952020-04-03 15:48:18 +00004203 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00004204 try:
David Garciac1fe90a2021-03-31 19:12:02 +02004205 await self.n2vc.delete_namespace(
4206 namespace=namespace,
4207 total_timeout=self.timeout_charm_delete,
4208 vca_id=vca_id,
4209 )
tiernof59ad6c2020-04-08 12:50:52 +00004210 except N2VCNotFound: # already deleted. Skip
4211 pass
garciadeblas5697b8b2021-03-24 09:17:02 +01004212 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
quilesj3655ae02019-12-12 16:08:35 +00004213
garciadeblas5697b8b2021-03-24 09:17:02 +01004214 async def _terminate_RO(
4215 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4216 ):
tiernoe876f672020-02-13 14:34:48 +00004217 """
4218 Terminates a deployment from RO
4219 :param logging_text:
4220 :param nsr_deployed: db_nsr._admin.deployed
4221 :param nsr_id:
4222 :param nslcmop_id:
4223 :param stage: list of string with the content to write on db_nslcmop.detailed-status.
4224 this method will update only the index 2, but it will write on database the concatenated content of the list
4225 :return:
4226 """
4227 db_nsr_update = {}
4228 failed_detail = []
4229 ro_nsr_id = ro_delete_action = None
4230 if nsr_deployed and nsr_deployed.get("RO"):
4231 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
4232 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
4233 try:
4234 if ro_nsr_id:
4235 stage[2] = "Deleting ns from VIM."
4236 db_nsr_update["detailed-status"] = " ".join(stage)
4237 self._write_op_status(nslcmop_id, stage)
4238 self.logger.debug(logging_text + stage[2])
4239 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4240 self._write_op_status(nslcmop_id, stage)
4241 desc = await self.RO.delete("ns", ro_nsr_id)
4242 ro_delete_action = desc["action_id"]
garciadeblas5697b8b2021-03-24 09:17:02 +01004243 db_nsr_update[
4244 "_admin.deployed.RO.nsr_delete_action_id"
4245 ] = ro_delete_action
tiernoe876f672020-02-13 14:34:48 +00004246 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4247 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4248 if ro_delete_action:
4249 # wait until NS is deleted from VIM
4250 stage[2] = "Waiting ns deleted from VIM."
4251 detailed_status_old = None
garciadeblas5697b8b2021-03-24 09:17:02 +01004252 self.logger.debug(
4253 logging_text
4254 + stage[2]
4255 + " RO_id={} ro_delete_action={}".format(
4256 ro_nsr_id, ro_delete_action
4257 )
4258 )
tiernoe876f672020-02-13 14:34:48 +00004259 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4260 self._write_op_status(nslcmop_id, stage)
kuused124bfe2019-06-18 12:09:24 +02004261
tiernoe876f672020-02-13 14:34:48 +00004262 delete_timeout = 20 * 60 # 20 minutes
4263 while delete_timeout > 0:
4264 desc = await self.RO.show(
4265 "ns",
4266 item_id_name=ro_nsr_id,
4267 extra_item="action",
garciadeblas5697b8b2021-03-24 09:17:02 +01004268 extra_item_id=ro_delete_action,
4269 )
tiernoe876f672020-02-13 14:34:48 +00004270
4271 # deploymentStatus
4272 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4273
4274 ns_status, ns_status_info = self.RO.check_action_status(desc)
4275 if ns_status == "ERROR":
4276 raise ROclient.ROClientException(ns_status_info)
4277 elif ns_status == "BUILD":
4278 stage[2] = "Deleting from VIM {}".format(ns_status_info)
4279 elif ns_status == "ACTIVE":
4280 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
4281 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4282 break
4283 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004284 assert (
4285 False
4286 ), "ROclient.check_action_status returns unknown {}".format(
4287 ns_status
4288 )
tiernoe876f672020-02-13 14:34:48 +00004289 if stage[2] != detailed_status_old:
4290 detailed_status_old = stage[2]
4291 db_nsr_update["detailed-status"] = " ".join(stage)
4292 self._write_op_status(nslcmop_id, stage)
4293 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4294 await asyncio.sleep(5, loop=self.loop)
4295 delete_timeout -= 5
4296 else: # delete_timeout <= 0:
garciadeblas5697b8b2021-03-24 09:17:02 +01004297 raise ROclient.ROClientException(
4298 "Timeout waiting ns deleted from VIM"
4299 )
tiernoe876f672020-02-13 14:34:48 +00004300
4301 except Exception as e:
4302 self.update_db_2("nsrs", nsr_id, db_nsr_update)
garciadeblas5697b8b2021-03-24 09:17:02 +01004303 if (
4304 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4305 ): # not found
tiernoe876f672020-02-13 14:34:48 +00004306 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4307 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4308 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
garciadeblas5697b8b2021-03-24 09:17:02 +01004309 self.logger.debug(
4310 logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
4311 )
4312 elif (
4313 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4314 ): # conflict
tiernoa2143262020-03-27 16:20:40 +00004315 failed_detail.append("delete conflict: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01004316 self.logger.debug(
4317 logging_text
4318 + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
4319 )
tiernoe876f672020-02-13 14:34:48 +00004320 else:
tiernoa2143262020-03-27 16:20:40 +00004321 failed_detail.append("delete error: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01004322 self.logger.error(
4323 logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
4324 )
tiernoe876f672020-02-13 14:34:48 +00004325
4326 # Delete nsd
4327 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
4328 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
4329 try:
4330 stage[2] = "Deleting nsd from RO."
4331 db_nsr_update["detailed-status"] = " ".join(stage)
4332 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4333 self._write_op_status(nslcmop_id, stage)
4334 await self.RO.delete("nsd", ro_nsd_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01004335 self.logger.debug(
4336 logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
4337 )
tiernoe876f672020-02-13 14:34:48 +00004338 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
4339 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01004340 if (
4341 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4342 ): # not found
tiernoe876f672020-02-13 14:34:48 +00004343 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
garciadeblas5697b8b2021-03-24 09:17:02 +01004344 self.logger.debug(
4345 logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
4346 )
4347 elif (
4348 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4349 ): # conflict
4350 failed_detail.append(
4351 "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
4352 )
tiernoe876f672020-02-13 14:34:48 +00004353 self.logger.debug(logging_text + failed_detail[-1])
4354 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004355 failed_detail.append(
4356 "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
4357 )
tiernoe876f672020-02-13 14:34:48 +00004358 self.logger.error(logging_text + failed_detail[-1])
4359
4360 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
4361 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
4362 if not vnf_deployed or not vnf_deployed["id"]:
4363 continue
4364 try:
4365 ro_vnfd_id = vnf_deployed["id"]
garciadeblas5697b8b2021-03-24 09:17:02 +01004366 stage[
4367 2
4368 ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
4369 vnf_deployed["member-vnf-index"], ro_vnfd_id
4370 )
tiernoe876f672020-02-13 14:34:48 +00004371 db_nsr_update["detailed-status"] = " ".join(stage)
4372 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4373 self._write_op_status(nslcmop_id, stage)
4374 await self.RO.delete("vnfd", ro_vnfd_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01004375 self.logger.debug(
4376 logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
4377 )
tiernoe876f672020-02-13 14:34:48 +00004378 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
4379 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01004380 if (
4381 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4382 ): # not found
4383 db_nsr_update[
4384 "_admin.deployed.RO.vnfd.{}.id".format(index)
4385 ] = None
4386 self.logger.debug(
4387 logging_text
4388 + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
4389 )
4390 elif (
4391 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4392 ): # conflict
4393 failed_detail.append(
4394 "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
4395 )
tiernoe876f672020-02-13 14:34:48 +00004396 self.logger.debug(logging_text + failed_detail[-1])
4397 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004398 failed_detail.append(
4399 "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
4400 )
tiernoe876f672020-02-13 14:34:48 +00004401 self.logger.error(logging_text + failed_detail[-1])
4402
tiernoa2143262020-03-27 16:20:40 +00004403 if failed_detail:
4404 stage[2] = "Error deleting from VIM"
4405 else:
4406 stage[2] = "Deleted from VIM"
tiernoe876f672020-02-13 14:34:48 +00004407 db_nsr_update["detailed-status"] = " ".join(stage)
4408 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4409 self._write_op_status(nslcmop_id, stage)
4410
4411 if failed_detail:
tiernoa2143262020-03-27 16:20:40 +00004412 raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00004413
    async def terminate(self, nsr_id, nslcmop_id):
        """
        NS terminate task. Stage 1 prepares, stage 2 runs the per-VCA
        terminate primitives, stage 3 deletes execution environments, KDUs and
        the RO/VIM deployment; the finally block always persists the final
        operation state and publishes the "terminated" event on kafka.

        :param nsr_id: id of the nsr database record to terminate
        :param nslcmop_id: id of the nslcmop database record of this operation
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # Another LCM instance owns this operation; nothing to do here.
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        # Maps each asyncio task created below to a description used in errors.
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # The operation may override the default terminate timeout.
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # Deep copy: nsr_deployed is read while the DB record keeps changing.
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # Nothing was deployed; finally block still records completion.
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            # Cache each vnfd once by id, then index it per member-vnf-index.
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                # NOTE(review): vca.get() runs before the "if not vca" guard
                # below, so a None entry would raise here — confirm entries
                # are always dicts (possibly empty) in _admin.deployed.VCA.
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # Pick the configuration descriptor matching the VCA scope:
                # NS-level, VDU-level, KDU-level or VNF-level.
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # Abort stage 3; the finally block reports the failure.
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    # Unsupported connector type: log and skip this KDU.
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            # Unexpected error: keep the full traceback as the error detail.
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                # NOTE(review): this rebinds the outer "exc" name; harmless
                # here because "exc" was already folded into error_list above.
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                # Propagate the final state to every VNFR of this NS.
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                # Notify subscribers (e.g. NBI) of the operation result; the
                # "autoremove" flag asks the receiver to delete the NS record.
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4742
garciadeblas5697b8b2021-03-24 09:17:02 +01004743 async def _wait_for_tasks(
4744 self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4745 ):
tiernoe876f672020-02-13 14:34:48 +00004746 time_start = time()
tiernoa2143262020-03-27 16:20:40 +00004747 error_detail_list = []
tiernoe876f672020-02-13 14:34:48 +00004748 error_list = []
4749 pending_tasks = list(created_tasks_info.keys())
4750 num_tasks = len(pending_tasks)
4751 num_done = 0
4752 stage[1] = "{}/{}.".format(num_done, num_tasks)
4753 self._write_op_status(nslcmop_id, stage)
tiernoe876f672020-02-13 14:34:48 +00004754 while pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00004755 new_error = None
tiernoe876f672020-02-13 14:34:48 +00004756 _timeout = timeout + time_start - time()
garciadeblas5697b8b2021-03-24 09:17:02 +01004757 done, pending_tasks = await asyncio.wait(
4758 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4759 )
tiernoe876f672020-02-13 14:34:48 +00004760 num_done += len(done)
garciadeblas5697b8b2021-03-24 09:17:02 +01004761 if not done: # Timeout
tiernoe876f672020-02-13 14:34:48 +00004762 for task in pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00004763 new_error = created_tasks_info[task] + ": Timeout"
4764 error_detail_list.append(new_error)
4765 error_list.append(new_error)
tiernoe876f672020-02-13 14:34:48 +00004766 break
4767 for task in done:
4768 if task.cancelled():
tierno067e04a2020-03-31 12:53:13 +00004769 exc = "Cancelled"
tiernoe876f672020-02-13 14:34:48 +00004770 else:
4771 exc = task.exception()
tierno067e04a2020-03-31 12:53:13 +00004772 if exc:
4773 if isinstance(exc, asyncio.TimeoutError):
4774 exc = "Timeout"
4775 new_error = created_tasks_info[task] + ": {}".format(exc)
4776 error_list.append(created_tasks_info[task])
4777 error_detail_list.append(new_error)
garciadeblas5697b8b2021-03-24 09:17:02 +01004778 if isinstance(
4779 exc,
4780 (
4781 str,
4782 DbException,
4783 N2VCException,
4784 ROclient.ROClientException,
4785 LcmException,
4786 K8sException,
4787 NgRoException,
4788 ),
4789 ):
tierno067e04a2020-03-31 12:53:13 +00004790 self.logger.error(logging_text + new_error)
tiernoe876f672020-02-13 14:34:48 +00004791 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004792 exc_traceback = "".join(
4793 traceback.format_exception(None, exc, exc.__traceback__)
4794 )
4795 self.logger.error(
4796 logging_text
4797 + created_tasks_info[task]
4798 + " "
4799 + exc_traceback
4800 )
tierno067e04a2020-03-31 12:53:13 +00004801 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004802 self.logger.debug(
4803 logging_text + created_tasks_info[task] + ": Done"
4804 )
tiernoe876f672020-02-13 14:34:48 +00004805 stage[1] = "{}/{}.".format(num_done, num_tasks)
4806 if new_error:
tiernoa2143262020-03-27 16:20:40 +00004807 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
tiernoe876f672020-02-13 14:34:48 +00004808 if nsr_id: # update also nsr
garciadeblas5697b8b2021-03-24 09:17:02 +01004809 self.update_db_2(
4810 "nsrs",
4811 nsr_id,
4812 {
4813 "errorDescription": "Error at: " + ", ".join(error_list),
4814 "errorDetail": ". ".join(error_detail_list),
4815 },
4816 )
tiernoe876f672020-02-13 14:34:48 +00004817 self._write_op_status(nslcmop_id, stage)
tiernoa2143262020-03-27 16:20:40 +00004818 return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00004819
tiernoda1ff8c2020-10-22 14:12:46 +00004820 @staticmethod
4821 def _map_primitive_params(primitive_desc, params, instantiation_params):
tiernoda964822019-01-14 15:53:47 +00004822 """
4823 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4824 The default-value is used. If it is between < > it look for a value at instantiation_params
4825 :param primitive_desc: portion of VNFD/NSD that describes primitive
4826 :param params: Params provided by user
4827 :param instantiation_params: Instantiation params provided by user
4828 :return: a dictionary with the calculated params
4829 """
4830 calculated_params = {}
4831 for parameter in primitive_desc.get("parameter", ()):
4832 param_name = parameter["name"]
4833 if param_name in params:
4834 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00004835 elif "default-value" in parameter or "value" in parameter:
4836 if "value" in parameter:
4837 calculated_params[param_name] = parameter["value"]
4838 else:
4839 calculated_params[param_name] = parameter["default-value"]
garciadeblas5697b8b2021-03-24 09:17:02 +01004840 if (
4841 isinstance(calculated_params[param_name], str)
4842 and calculated_params[param_name].startswith("<")
4843 and calculated_params[param_name].endswith(">")
4844 ):
tierno98ad6ea2019-05-30 17:16:28 +00004845 if calculated_params[param_name][1:-1] in instantiation_params:
garciadeblas5697b8b2021-03-24 09:17:02 +01004846 calculated_params[param_name] = instantiation_params[
4847 calculated_params[param_name][1:-1]
4848 ]
tiernoda964822019-01-14 15:53:47 +00004849 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004850 raise LcmException(
4851 "Parameter {} needed to execute primitive {} not provided".format(
4852 calculated_params[param_name], primitive_desc["name"]
4853 )
4854 )
tiernoda964822019-01-14 15:53:47 +00004855 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004856 raise LcmException(
4857 "Parameter {} needed to execute primitive {} not provided".format(
4858 param_name, primitive_desc["name"]
4859 )
4860 )
tierno59d22d22018-09-25 18:10:19 +02004861
tiernoda964822019-01-14 15:53:47 +00004862 if isinstance(calculated_params[param_name], (dict, list, tuple)):
garciadeblas5697b8b2021-03-24 09:17:02 +01004863 calculated_params[param_name] = yaml.safe_dump(
4864 calculated_params[param_name], default_flow_style=True, width=256
4865 )
4866 elif isinstance(calculated_params[param_name], str) and calculated_params[
4867 param_name
4868 ].startswith("!!yaml "):
tiernoda964822019-01-14 15:53:47 +00004869 calculated_params[param_name] = calculated_params[param_name][7:]
tiernofa40e692020-10-14 14:59:36 +00004870 if parameter.get("data-type") == "INTEGER":
4871 try:
4872 calculated_params[param_name] = int(calculated_params[param_name])
4873 except ValueError: # error converting string to int
4874 raise LcmException(
garciadeblas5697b8b2021-03-24 09:17:02 +01004875 "Parameter {} of primitive {} must be integer".format(
4876 param_name, primitive_desc["name"]
4877 )
4878 )
tiernofa40e692020-10-14 14:59:36 +00004879 elif parameter.get("data-type") == "BOOLEAN":
garciadeblas5697b8b2021-03-24 09:17:02 +01004880 calculated_params[param_name] = not (
4881 (str(calculated_params[param_name])).lower() == "false"
4882 )
tiernoc3f2a822019-11-05 13:45:04 +00004883
4884 # add always ns_config_info if primitive name is config
4885 if primitive_desc["name"] == "config":
4886 if "ns_config_info" in instantiation_params:
garciadeblas5697b8b2021-03-24 09:17:02 +01004887 calculated_params["ns_config_info"] = instantiation_params[
4888 "ns_config_info"
4889 ]
tiernoda964822019-01-14 15:53:47 +00004890 return calculated_params
4891
garciadeblas5697b8b2021-03-24 09:17:02 +01004892 def _look_for_deployed_vca(
4893 self,
4894 deployed_vca,
4895 member_vnf_index,
4896 vdu_id,
4897 vdu_count_index,
4898 kdu_name=None,
4899 ee_descriptor_id=None,
4900 ):
tiernoe876f672020-02-13 14:34:48 +00004901 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4902 for vca in deployed_vca:
4903 if not vca:
4904 continue
4905 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4906 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01004907 if (
4908 vdu_count_index is not None
4909 and vdu_count_index != vca["vdu_count_index"]
4910 ):
tiernoe876f672020-02-13 14:34:48 +00004911 continue
4912 if kdu_name and kdu_name != vca["kdu_name"]:
4913 continue
tiernoa278b842020-07-08 15:33:55 +00004914 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4915 continue
tiernoe876f672020-02-13 14:34:48 +00004916 break
4917 else:
4918 # vca_deployed not found
garciadeblas5697b8b2021-03-24 09:17:02 +01004919 raise LcmException(
4920 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4921 " is not deployed".format(
4922 member_vnf_index,
4923 vdu_id,
4924 vdu_count_index,
4925 kdu_name,
4926 ee_descriptor_id,
4927 )
4928 )
tiernoe876f672020-02-13 14:34:48 +00004929 # get ee_id
4930 ee_id = vca.get("ee_id")
garciadeblas5697b8b2021-03-24 09:17:02 +01004931 vca_type = vca.get(
4932 "type", "lxc_proxy_charm"
4933 ) # default value for backward compatibility - proxy charm
tiernoe876f672020-02-13 14:34:48 +00004934 if not ee_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01004935 raise LcmException(
4936 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4937 "execution environment".format(
4938 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4939 )
4940 )
tierno588547c2020-07-01 15:30:20 +00004941 return ee_id, vca_type
tiernoe876f672020-02-13 14:34:48 +00004942
David Garciac1fe90a2021-03-31 19:12:02 +02004943 async def _ns_execute_primitive(
4944 self,
4945 ee_id,
4946 primitive,
4947 primitive_params,
4948 retries=0,
4949 retries_interval=30,
4950 timeout=None,
4951 vca_type=None,
4952 db_dict=None,
4953 vca_id: str = None,
4954 ) -> (str, str):
tiernoda964822019-01-14 15:53:47 +00004955 try:
tierno98ad6ea2019-05-30 17:16:28 +00004956 if primitive == "config":
4957 primitive_params = {"params": primitive_params}
tierno2fc7ce52019-06-11 22:50:01 +00004958
tierno588547c2020-07-01 15:30:20 +00004959 vca_type = vca_type or "lxc_proxy_charm"
4960
quilesj7e13aeb2019-10-08 13:34:55 +02004961 while retries >= 0:
4962 try:
tierno067e04a2020-03-31 12:53:13 +00004963 output = await asyncio.wait_for(
tierno588547c2020-07-01 15:30:20 +00004964 self.vca_map[vca_type].exec_primitive(
tierno067e04a2020-03-31 12:53:13 +00004965 ee_id=ee_id,
4966 primitive_name=primitive,
4967 params_dict=primitive_params,
4968 progress_timeout=self.timeout_progress_primitive,
tierno588547c2020-07-01 15:30:20 +00004969 total_timeout=self.timeout_primitive,
David Garciac1fe90a2021-03-31 19:12:02 +02004970 db_dict=db_dict,
4971 vca_id=vca_id,
aktas98488ed2021-07-29 17:42:49 +03004972 vca_type=vca_type,
David Garciac1fe90a2021-03-31 19:12:02 +02004973 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01004974 timeout=timeout or self.timeout_primitive,
4975 )
quilesj7e13aeb2019-10-08 13:34:55 +02004976 # execution was OK
4977 break
tierno067e04a2020-03-31 12:53:13 +00004978 except asyncio.CancelledError:
4979 raise
4980 except Exception as e: # asyncio.TimeoutError
4981 if isinstance(e, asyncio.TimeoutError):
4982 e = "Timeout"
quilesj7e13aeb2019-10-08 13:34:55 +02004983 retries -= 1
4984 if retries >= 0:
garciadeblas5697b8b2021-03-24 09:17:02 +01004985 self.logger.debug(
4986 "Error executing action {} on {} -> {}".format(
4987 primitive, ee_id, e
4988 )
4989 )
quilesj7e13aeb2019-10-08 13:34:55 +02004990 # wait and retry
4991 await asyncio.sleep(retries_interval, loop=self.loop)
tierno73d8bd02019-11-18 17:33:27 +00004992 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004993 return "FAILED", str(e)
quilesj7e13aeb2019-10-08 13:34:55 +02004994
garciadeblas5697b8b2021-03-24 09:17:02 +01004995 return "COMPLETED", output
quilesj7e13aeb2019-10-08 13:34:55 +02004996
tierno067e04a2020-03-31 12:53:13 +00004997 except (LcmException, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00004998 raise
quilesj7e13aeb2019-10-08 13:34:55 +02004999 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01005000 return "FAIL", "Error executing action {}: {}".format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02005001
ksaikiranr3fde2c72021-03-15 10:39:06 +05305002 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5003 """
5004 Updating the vca_status with latest juju information in nsrs record
5005 :param: nsr_id: Id of the nsr
5006 :param: nslcmop_id: Id of the nslcmop
5007 :return: None
5008 """
5009
5010 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5011 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garciac1fe90a2021-03-31 19:12:02 +02005012 vca_id = self.get_vca_id({}, db_nsr)
garciadeblas5697b8b2021-03-24 09:17:02 +01005013 if db_nsr["_admin"]["deployed"]["K8s"]:
Pedro Escaleira75b620d2022-04-01 01:49:22 +01005014 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5015 cluster_uuid, kdu_instance, cluster_type = (
5016 k8s["k8scluster-uuid"],
5017 k8s["kdu-instance"],
5018 k8s["k8scluster-type"],
5019 )
garciadeblas5697b8b2021-03-24 09:17:02 +01005020 await self._on_update_k8s_db(
Pedro Escaleira75b620d2022-04-01 01:49:22 +01005021 cluster_uuid=cluster_uuid,
5022 kdu_instance=kdu_instance,
5023 filter={"_id": nsr_id},
5024 vca_id=vca_id,
5025 cluster_type=cluster_type,
garciadeblas5697b8b2021-03-24 09:17:02 +01005026 )
ksaikiranr656b6dd2021-02-19 10:25:18 +05305027 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005028 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
ksaikiranr656b6dd2021-02-19 10:25:18 +05305029 table, filter = "nsrs", {"_id": nsr_id}
5030 path = "_admin.deployed.VCA.{}.".format(vca_index)
5031 await self._on_update_n2vc_db(table, filter, path, {})
ksaikiranr3fde2c72021-03-15 10:39:06 +05305032
5033 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5034 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5035
tierno59d22d22018-09-25 18:10:19 +02005036 async def action(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02005037 # Try to lock HA task here
garciadeblas5697b8b2021-03-24 09:17:02 +01005038 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02005039 if not task_is_locked_by_me:
5040 return
5041
tierno59d22d22018-09-25 18:10:19 +02005042 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
5043 self.logger.debug(logging_text + "Enter")
5044 # get all needed from database
5045 db_nsr = None
5046 db_nslcmop = None
tiernoe876f672020-02-13 14:34:48 +00005047 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02005048 db_nslcmop_update = {}
5049 nslcmop_operation_state = None
tierno067e04a2020-03-31 12:53:13 +00005050 error_description_nslcmop = None
tierno59d22d22018-09-25 18:10:19 +02005051 exc = None
5052 try:
kuused124bfe2019-06-18 12:09:24 +02005053 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00005054 step = "Waiting for previous operations to terminate"
garciadeblas5697b8b2021-03-24 09:17:02 +01005055 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02005056
quilesj4cda56b2019-12-05 10:02:20 +00005057 self._write_ns_status(
5058 nsr_id=nsr_id,
5059 ns_state=None,
5060 current_operation="RUNNING ACTION",
garciadeblas5697b8b2021-03-24 09:17:02 +01005061 current_operation_id=nslcmop_id,
quilesj4cda56b2019-12-05 10:02:20 +00005062 )
5063
tierno59d22d22018-09-25 18:10:19 +02005064 step = "Getting information from database"
5065 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5066 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
Guillermo Calvino57c68152022-01-26 17:40:31 +01005067 if db_nslcmop["operationParams"].get("primitive_params"):
5068 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
5069 db_nslcmop["operationParams"]["primitive_params"]
5070 )
tiernoda964822019-01-14 15:53:47 +00005071
tiernoe4f7e6c2018-11-27 14:55:30 +00005072 nsr_deployed = db_nsr["_admin"].get("deployed")
tierno1b633412019-02-25 16:48:23 +00005073 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tierno59d22d22018-09-25 18:10:19 +02005074 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
calvinosanch9f9c6f22019-11-04 13:37:39 +01005075 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
tiernoe4f7e6c2018-11-27 14:55:30 +00005076 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
tierno067e04a2020-03-31 12:53:13 +00005077 primitive = db_nslcmop["operationParams"]["primitive"]
5078 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
garciadeblas5697b8b2021-03-24 09:17:02 +01005079 timeout_ns_action = db_nslcmop["operationParams"].get(
5080 "timeout_ns_action", self.timeout_primitive
5081 )
tierno59d22d22018-09-25 18:10:19 +02005082
tierno1b633412019-02-25 16:48:23 +00005083 if vnf_index:
5084 step = "Getting vnfr from database"
garciadeblas5697b8b2021-03-24 09:17:02 +01005085 db_vnfr = self.db.get_one(
5086 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5087 )
Guillermo Calvino48aee4c2022-02-01 18:59:50 +01005088 if db_vnfr.get("kdur"):
5089 kdur_list = []
5090 for kdur in db_vnfr["kdur"]:
5091 if kdur.get("additionalParams"):
Pedro Escaleirab9a7c4d2022-03-31 00:08:05 +01005092 kdur["additionalParams"] = json.loads(
5093 kdur["additionalParams"]
5094 )
Guillermo Calvino48aee4c2022-02-01 18:59:50 +01005095 kdur_list.append(kdur)
5096 db_vnfr["kdur"] = kdur_list
tierno1b633412019-02-25 16:48:23 +00005097 step = "Getting vnfd from database"
5098 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
bravofa96dd9c2021-10-13 17:37:36 -03005099
5100 # Sync filesystem before running a primitive
5101 self.fs.sync(db_vnfr["vnfd-id"])
tierno1b633412019-02-25 16:48:23 +00005102 else:
tierno067e04a2020-03-31 12:53:13 +00005103 step = "Getting nsd from database"
5104 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
tiernoda964822019-01-14 15:53:47 +00005105
David Garciac1fe90a2021-03-31 19:12:02 +02005106 vca_id = self.get_vca_id(db_vnfr, db_nsr)
tierno82974b22018-11-27 21:55:36 +00005107 # for backward compatibility
5108 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5109 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5110 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5111 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5112
tiernoda964822019-01-14 15:53:47 +00005113 # look for primitive
tiernoa278b842020-07-08 15:33:55 +00005114 config_primitive_desc = descriptor_configuration = None
tiernoda964822019-01-14 15:53:47 +00005115 if vdu_id:
bravofe5a31bc2021-02-17 19:09:12 -03005116 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
calvinosanch9f9c6f22019-11-04 13:37:39 +01005117 elif kdu_name:
bravofe5a31bc2021-02-17 19:09:12 -03005118 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
tierno1b633412019-02-25 16:48:23 +00005119 elif vnf_index:
bravofe5a31bc2021-02-17 19:09:12 -03005120 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
tierno1b633412019-02-25 16:48:23 +00005121 else:
tiernoa278b842020-07-08 15:33:55 +00005122 descriptor_configuration = db_nsd.get("ns-configuration")
5123
garciadeblas5697b8b2021-03-24 09:17:02 +01005124 if descriptor_configuration and descriptor_configuration.get(
5125 "config-primitive"
5126 ):
tiernoa278b842020-07-08 15:33:55 +00005127 for config_primitive in descriptor_configuration["config-primitive"]:
tierno1b633412019-02-25 16:48:23 +00005128 if config_primitive["name"] == primitive:
5129 config_primitive_desc = config_primitive
5130 break
tiernoda964822019-01-14 15:53:47 +00005131
garciadeblas6bed6b32020-07-20 11:05:42 +00005132 if not config_primitive_desc:
5133 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
garciadeblas5697b8b2021-03-24 09:17:02 +01005134 raise LcmException(
5135 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5136 primitive
5137 )
5138 )
garciadeblas6bed6b32020-07-20 11:05:42 +00005139 primitive_name = primitive
5140 ee_descriptor_id = None
5141 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005142 primitive_name = config_primitive_desc.get(
5143 "execution-environment-primitive", primitive
5144 )
5145 ee_descriptor_id = config_primitive_desc.get(
5146 "execution-environment-ref"
5147 )
tierno1b633412019-02-25 16:48:23 +00005148
tierno1b633412019-02-25 16:48:23 +00005149 if vnf_index:
tierno626e0152019-11-29 14:16:16 +00005150 if vdu_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01005151 vdur = next(
5152 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
5153 )
bravof922c4172020-11-24 21:21:43 -03005154 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
tierno067e04a2020-03-31 12:53:13 +00005155 elif kdu_name:
garciadeblas5697b8b2021-03-24 09:17:02 +01005156 kdur = next(
5157 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5158 )
bravof922c4172020-11-24 21:21:43 -03005159 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
tierno067e04a2020-03-31 12:53:13 +00005160 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005161 desc_params = parse_yaml_strings(
5162 db_vnfr.get("additionalParamsForVnf")
5163 )
tierno1b633412019-02-25 16:48:23 +00005164 else:
bravof922c4172020-11-24 21:21:43 -03005165 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
bravofe5a31bc2021-02-17 19:09:12 -03005166 if kdu_name and get_configuration(db_vnfd, kdu_name):
5167 kdu_configuration = get_configuration(db_vnfd, kdu_name)
David Garciad41dbd62020-12-10 12:52:52 +01005168 actions = set()
David Garciaa1003662021-02-16 21:07:58 +01005169 for primitive in kdu_configuration.get("initial-config-primitive", []):
David Garciad41dbd62020-12-10 12:52:52 +01005170 actions.add(primitive["name"])
David Garciaa1003662021-02-16 21:07:58 +01005171 for primitive in kdu_configuration.get("config-primitive", []):
David Garciad41dbd62020-12-10 12:52:52 +01005172 actions.add(primitive["name"])
David Garciaae230232022-05-10 14:07:12 +02005173 kdu = find_in_list(
5174 nsr_deployed["K8s"],
5175 lambda kdu: kdu_name == kdu["kdu-name"]
5176 and kdu["member-vnf-index"] == vnf_index,
5177 )
5178 kdu_action = (
5179 True
5180 if primitive_name in actions
5181 and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
5182 else False
5183 )
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02005184
tiernoda964822019-01-14 15:53:47 +00005185 # TODO check if ns is in a proper status
garciadeblas5697b8b2021-03-24 09:17:02 +01005186 if kdu_name and (
5187 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5188 ):
tierno067e04a2020-03-31 12:53:13 +00005189 # kdur and desc_params already set from before
5190 if primitive_params:
5191 desc_params.update(primitive_params)
5192 # TODO Check if we will need something at vnf level
5193 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
garciadeblas5697b8b2021-03-24 09:17:02 +01005194 if (
5195 kdu_name == kdu["kdu-name"]
5196 and kdu["member-vnf-index"] == vnf_index
5197 ):
tierno067e04a2020-03-31 12:53:13 +00005198 break
5199 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005200 raise LcmException(
5201 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5202 )
quilesj7e13aeb2019-10-08 13:34:55 +02005203
tierno067e04a2020-03-31 12:53:13 +00005204 if kdu.get("k8scluster-type") not in self.k8scluster_map:
garciadeblas5697b8b2021-03-24 09:17:02 +01005205 msg = "unknown k8scluster-type '{}'".format(
5206 kdu.get("k8scluster-type")
5207 )
tierno067e04a2020-03-31 12:53:13 +00005208 raise LcmException(msg)
5209
garciadeblas5697b8b2021-03-24 09:17:02 +01005210 db_dict = {
5211 "collection": "nsrs",
5212 "filter": {"_id": nsr_id},
5213 "path": "_admin.deployed.K8s.{}".format(index),
5214 }
5215 self.logger.debug(
5216 logging_text
5217 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5218 )
tiernoa278b842020-07-08 15:33:55 +00005219 step = "Executing kdu {}".format(primitive_name)
5220 if primitive_name == "upgrade":
tierno067e04a2020-03-31 12:53:13 +00005221 if desc_params.get("kdu_model"):
5222 kdu_model = desc_params.get("kdu_model")
5223 del desc_params["kdu_model"]
5224 else:
5225 kdu_model = kdu.get("kdu-model")
5226 parts = kdu_model.split(sep=":")
5227 if len(parts) == 2:
5228 kdu_model = parts[0]
5229
5230 detailed_status = await asyncio.wait_for(
5231 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5232 cluster_uuid=kdu.get("k8scluster-uuid"),
5233 kdu_instance=kdu.get("kdu-instance"),
garciadeblas5697b8b2021-03-24 09:17:02 +01005234 atomic=True,
5235 kdu_model=kdu_model,
5236 params=desc_params,
5237 db_dict=db_dict,
5238 timeout=timeout_ns_action,
5239 ),
5240 timeout=timeout_ns_action + 10,
5241 )
5242 self.logger.debug(
5243 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5244 )
tiernoa278b842020-07-08 15:33:55 +00005245 elif primitive_name == "rollback":
tierno067e04a2020-03-31 12:53:13 +00005246 detailed_status = await asyncio.wait_for(
5247 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5248 cluster_uuid=kdu.get("k8scluster-uuid"),
5249 kdu_instance=kdu.get("kdu-instance"),
garciadeblas5697b8b2021-03-24 09:17:02 +01005250 db_dict=db_dict,
5251 ),
5252 timeout=timeout_ns_action,
5253 )
tiernoa278b842020-07-08 15:33:55 +00005254 elif primitive_name == "status":
tierno067e04a2020-03-31 12:53:13 +00005255 detailed_status = await asyncio.wait_for(
5256 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5257 cluster_uuid=kdu.get("k8scluster-uuid"),
David Garciac1fe90a2021-03-31 19:12:02 +02005258 kdu_instance=kdu.get("kdu-instance"),
5259 vca_id=vca_id,
5260 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01005261 timeout=timeout_ns_action,
David Garciac1fe90a2021-03-31 19:12:02 +02005262 )
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02005263 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005264 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5265 kdu["kdu-name"], nsr_id
5266 )
5267 params = self._map_primitive_params(
5268 config_primitive_desc, primitive_params, desc_params
5269 )
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02005270
5271 detailed_status = await asyncio.wait_for(
5272 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5273 cluster_uuid=kdu.get("k8scluster-uuid"),
5274 kdu_instance=kdu_instance,
tiernoa278b842020-07-08 15:33:55 +00005275 primitive_name=primitive_name,
garciadeblas5697b8b2021-03-24 09:17:02 +01005276 params=params,
5277 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02005278 timeout=timeout_ns_action,
5279 vca_id=vca_id,
5280 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01005281 timeout=timeout_ns_action,
David Garciac1fe90a2021-03-31 19:12:02 +02005282 )
tierno067e04a2020-03-31 12:53:13 +00005283
5284 if detailed_status:
garciadeblas5697b8b2021-03-24 09:17:02 +01005285 nslcmop_operation_state = "COMPLETED"
tierno067e04a2020-03-31 12:53:13 +00005286 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005287 detailed_status = ""
5288 nslcmop_operation_state = "FAILED"
tierno067e04a2020-03-31 12:53:13 +00005289 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005290 ee_id, vca_type = self._look_for_deployed_vca(
5291 nsr_deployed["VCA"],
5292 member_vnf_index=vnf_index,
5293 vdu_id=vdu_id,
5294 vdu_count_index=vdu_count_index,
5295 ee_descriptor_id=ee_descriptor_id,
5296 )
5297 for vca_index, vca_deployed in enumerate(
5298 db_nsr["_admin"]["deployed"]["VCA"]
5299 ):
ksaikiranrb1c9f372021-03-15 11:07:29 +05305300 if vca_deployed.get("member-vnf-index") == vnf_index:
garciadeblas5697b8b2021-03-24 09:17:02 +01005301 db_dict = {
5302 "collection": "nsrs",
5303 "filter": {"_id": nsr_id},
5304 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5305 }
ksaikiranrb1c9f372021-03-15 11:07:29 +05305306 break
garciadeblas5697b8b2021-03-24 09:17:02 +01005307 (
5308 nslcmop_operation_state,
5309 detailed_status,
5310 ) = await self._ns_execute_primitive(
tierno588547c2020-07-01 15:30:20 +00005311 ee_id,
tiernoa278b842020-07-08 15:33:55 +00005312 primitive=primitive_name,
garciadeblas5697b8b2021-03-24 09:17:02 +01005313 primitive_params=self._map_primitive_params(
5314 config_primitive_desc, primitive_params, desc_params
5315 ),
tierno588547c2020-07-01 15:30:20 +00005316 timeout=timeout_ns_action,
5317 vca_type=vca_type,
David Garciac1fe90a2021-03-31 19:12:02 +02005318 db_dict=db_dict,
5319 vca_id=vca_id,
5320 )
tierno067e04a2020-03-31 12:53:13 +00005321
5322 db_nslcmop_update["detailed-status"] = detailed_status
garciadeblas5697b8b2021-03-24 09:17:02 +01005323 error_description_nslcmop = (
5324 detailed_status if nslcmop_operation_state == "FAILED" else ""
5325 )
5326 self.logger.debug(
5327 logging_text
5328 + " task Done with result {} {}".format(
5329 nslcmop_operation_state, detailed_status
5330 )
5331 )
tierno59d22d22018-09-25 18:10:19 +02005332 return # database update is called inside finally
5333
tiernof59ad6c2020-04-08 12:50:52 +00005334 except (DbException, LcmException, N2VCException, K8sException) as e:
tierno59d22d22018-09-25 18:10:19 +02005335 self.logger.error(logging_text + "Exit Exception {}".format(e))
5336 exc = e
5337 except asyncio.CancelledError:
garciadeblas5697b8b2021-03-24 09:17:02 +01005338 self.logger.error(
5339 logging_text + "Cancelled Exception while '{}'".format(step)
5340 )
tierno59d22d22018-09-25 18:10:19 +02005341 exc = "Operation was cancelled"
tierno067e04a2020-03-31 12:53:13 +00005342 except asyncio.TimeoutError:
5343 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5344 exc = "Timeout"
tierno59d22d22018-09-25 18:10:19 +02005345 except Exception as e:
5346 exc = traceback.format_exc()
garciadeblas5697b8b2021-03-24 09:17:02 +01005347 self.logger.critical(
5348 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5349 exc_info=True,
5350 )
tierno59d22d22018-09-25 18:10:19 +02005351 finally:
tierno067e04a2020-03-31 12:53:13 +00005352 if exc:
garciadeblas5697b8b2021-03-24 09:17:02 +01005353 db_nslcmop_update[
5354 "detailed-status"
5355 ] = (
5356 detailed_status
5357 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
tierno067e04a2020-03-31 12:53:13 +00005358 nslcmop_operation_state = "FAILED"
5359 if db_nsr:
5360 self._write_ns_status(
5361 nsr_id=nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01005362 ns_state=db_nsr[
5363 "nsState"
5364 ], # TODO check if degraded. For the moment use previous status
tierno067e04a2020-03-31 12:53:13 +00005365 current_operation="IDLE",
5366 current_operation_id=None,
5367 # error_description=error_description_nsr,
5368 # error_detail=error_detail,
garciadeblas5697b8b2021-03-24 09:17:02 +01005369 other_update=db_nsr_update,
tierno067e04a2020-03-31 12:53:13 +00005370 )
5371
garciadeblas5697b8b2021-03-24 09:17:02 +01005372 self._write_op_status(
5373 op_id=nslcmop_id,
5374 stage="",
5375 error_message=error_description_nslcmop,
5376 operation_state=nslcmop_operation_state,
5377 other_update=db_nslcmop_update,
5378 )
tierno067e04a2020-03-31 12:53:13 +00005379
tierno59d22d22018-09-25 18:10:19 +02005380 if nslcmop_operation_state:
5381 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01005382 await self.msg.aiowrite(
5383 "ns",
5384 "actioned",
5385 {
5386 "nsr_id": nsr_id,
5387 "nslcmop_id": nslcmop_id,
5388 "operationState": nslcmop_operation_state,
5389 },
5390 loop=self.loop,
5391 )
tierno59d22d22018-09-25 18:10:19 +02005392 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01005393 self.logger.error(
5394 logging_text + "kafka_write notification Exception {}".format(e)
5395 )
tierno59d22d22018-09-25 18:10:19 +02005396 self.logger.debug(logging_text + "Exit")
5397 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
tierno067e04a2020-03-31 12:53:13 +00005398 return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02005399
elumalaica7ece02022-04-12 12:47:32 +05305400 async def terminate_vdus(
5401 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5402 ):
5403 """This method terminates VDUs
5404
5405 Args:
5406 db_vnfr: VNF instance record
5407 member_vnf_index: VNF index to identify the VDUs to be removed
5408 db_nsr: NS instance record
5409 update_db_nslcmops: Nslcmop update record
5410 """
5411 vca_scaling_info = []
5412 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5413 scaling_info["scaling_direction"] = "IN"
5414 scaling_info["vdu-delete"] = {}
5415 scaling_info["kdu-delete"] = {}
5416 db_vdur = db_vnfr.get("vdur")
5417 vdur_list = copy(db_vdur)
5418 count_index = 0
5419 for index, vdu in enumerate(vdur_list):
5420 vca_scaling_info.append(
5421 {
5422 "osm_vdu_id": vdu["vdu-id-ref"],
5423 "member-vnf-index": member_vnf_index,
5424 "type": "delete",
5425 "vdu_index": count_index,
5426 })
5427 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5428 scaling_info["vdu"].append(
5429 {
5430 "name": vdu.get("name") or vdu.get("vdu-name"),
5431 "vdu_id": vdu["vdu-id-ref"],
5432 "interface": [],
5433 })
5434 for interface in vdu["interfaces"]:
5435 scaling_info["vdu"][index]["interface"].append(
5436 {
5437 "name": interface["name"],
5438 "ip_address": interface["ip-address"],
5439 "mac_address": interface.get("mac-address"),
5440 })
5441 self.logger.info("NS update scaling info{}".format(scaling_info))
5442 stage[2] = "Terminating VDUs"
5443 if scaling_info.get("vdu-delete"):
5444 # scale_process = "RO"
5445 if self.ro_config.get("ng"):
5446 await self._scale_ng_ro(
5447 logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
5448 )
5449
5450 async def remove_vnf(
5451 self, nsr_id, nslcmop_id, vnf_instance_id
5452 ):
5453 """This method is to Remove VNF instances from NS.
5454
5455 Args:
5456 nsr_id: NS instance id
5457 nslcmop_id: nslcmop id of update
5458 vnf_instance_id: id of the VNF instance to be removed
5459
5460 Returns:
5461 result: (str, str) COMPLETED/FAILED, details
5462 """
5463 try:
5464 db_nsr_update = {}
5465 logging_text = "Task ns={} update ".format(nsr_id)
5466 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5467 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5468 if check_vnfr_count > 1:
5469 stage = ["", "", ""]
5470 step = "Getting nslcmop from database"
5471 self.logger.debug(step + " after having waited for previous tasks to be completed")
5472 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5473 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5474 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5475 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5476 """ db_vnfr = self.db.get_one(
5477 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5478
5479 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5480 await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
5481
5482 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5483 constituent_vnfr.remove(db_vnfr.get("_id"))
5484 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
5485 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5486 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5487 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5488 return "COMPLETED", "Done"
5489 else:
5490 step = "Terminate VNF Failed with"
5491 raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
5492 vnf_instance_id))
5493 except (LcmException, asyncio.CancelledError):
5494 raise
5495 except Exception as e:
5496 self.logger.debug("Error removing VNF {}".format(e))
5497 return "FAILED", "Error removing VNF {}".format(e)
5498
elumalaib9e357c2022-04-27 09:58:38 +05305499 async def _ns_redeploy_vnf(
5500 self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
5501 ):
5502 """This method updates and redeploys VNF instances
5503
5504 Args:
5505 nsr_id: NS instance id
5506 nslcmop_id: nslcmop id
5507 db_vnfd: VNF descriptor
5508 db_vnfr: VNF instance record
5509 db_nsr: NS instance record
5510
5511 Returns:
5512 result: (str, str) COMPLETED/FAILED, details
5513 """
5514 try:
5515 count_index = 0
5516 stage = ["", "", ""]
5517 logging_text = "Task ns={} update ".format(nsr_id)
5518 latest_vnfd_revision = db_vnfd["_admin"].get("revision")
5519 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5520
5521 # Terminate old VNF resources
5522 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5523 await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
5524
5525 # old_vnfd_id = db_vnfr["vnfd-id"]
5526 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5527 new_db_vnfd = db_vnfd
5528 # new_vnfd_ref = new_db_vnfd["id"]
5529 # new_vnfd_id = vnfd_id
5530
5531 # Create VDUR
5532 new_vnfr_cp = []
5533 for cp in new_db_vnfd.get("ext-cpd", ()):
5534 vnf_cp = {
5535 "name": cp.get("id"),
5536 "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
5537 "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
5538 "id": cp.get("id"),
5539 }
5540 new_vnfr_cp.append(vnf_cp)
5541 new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
5542 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5543 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5544 new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5545 self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
5546 updated_db_vnfr = self.db.get_one(
5547 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
5548 )
5549
5550 # Instantiate new VNF resources
5551 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5552 vca_scaling_info = []
5553 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5554 scaling_info["scaling_direction"] = "OUT"
5555 scaling_info["vdu-create"] = {}
5556 scaling_info["kdu-create"] = {}
5557 vdud_instantiate_list = db_vnfd["vdu"]
5558 for index, vdud in enumerate(vdud_instantiate_list):
5559 cloud_init_text = self._get_vdu_cloud_init_content(
5560 vdud, db_vnfd
5561 )
5562 if cloud_init_text:
5563 additional_params = (
5564 self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
5565 or {}
5566 )
5567 cloud_init_list = []
5568 if cloud_init_text:
5569 # TODO Information of its own ip is not available because db_vnfr is not updated.
5570 additional_params["OSM"] = get_osm_params(
5571 updated_db_vnfr, vdud["id"], 1
5572 )
5573 cloud_init_list.append(
5574 self._parse_cloud_init(
5575 cloud_init_text,
5576 additional_params,
5577 db_vnfd["id"],
5578 vdud["id"],
5579 )
5580 )
5581 vca_scaling_info.append(
5582 {
5583 "osm_vdu_id": vdud["id"],
5584 "member-vnf-index": member_vnf_index,
5585 "type": "create",
5586 "vdu_index": count_index,
5587 }
5588 )
5589 scaling_info["vdu-create"][vdud["id"]] = count_index
5590 if self.ro_config.get("ng"):
5591 self.logger.debug(
5592 "New Resources to be deployed: {}".format(scaling_info))
5593 await self._scale_ng_ro(
5594 logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
5595 )
5596 return "COMPLETED", "Done"
5597 except (LcmException, asyncio.CancelledError):
5598 raise
5599 except Exception as e:
5600 self.logger.debug("Error updating VNF {}".format(e))
5601 return "FAILED", "Error updating VNF {}".format(e)
5602
aticigdffa6212022-04-12 15:27:53 +03005603 async def _ns_charm_upgrade(
5604 self,
5605 ee_id,
5606 charm_id,
5607 charm_type,
5608 path,
5609 timeout: float = None,
5610 ) -> (str, str):
5611 """This method upgrade charms in VNF instances
5612
5613 Args:
5614 ee_id: Execution environment id
5615 path: Local path to the charm
5616 charm_id: charm-id
5617 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5618 timeout: (Float) Timeout for the ns update operation
5619
5620 Returns:
5621 result: (str, str) COMPLETED/FAILED, details
5622 """
5623 try:
5624 charm_type = charm_type or "lxc_proxy_charm"
5625 output = await self.vca_map[charm_type].upgrade_charm(
5626 ee_id=ee_id,
5627 path=path,
5628 charm_id=charm_id,
5629 charm_type=charm_type,
5630 timeout=timeout or self.timeout_ns_update,
5631 )
5632
5633 if output:
5634 return "COMPLETED", output
5635
5636 except (LcmException, asyncio.CancelledError):
5637 raise
5638
5639 except Exception as e:
5640
5641 self.logger.debug("Error upgrading charm {}".format(path))
5642
5643 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5644
5645 async def update(self, nsr_id, nslcmop_id):
5646 """Update NS according to different update types
5647
5648 This method performs upgrade of VNF instances then updates the revision
5649 number in VNF record
5650
5651 Args:
5652 nsr_id: Network service will be updated
5653 nslcmop_id: ns lcm operation id
5654
5655 Returns:
5656 It may raise DbException, LcmException, N2VCException, K8sException
5657
5658 """
5659 # Try to lock HA task here
5660 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5661 if not task_is_locked_by_me:
5662 return
5663
5664 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5665 self.logger.debug(logging_text + "Enter")
5666
5667 # Set the required variables to be filled up later
5668 db_nsr = None
5669 db_nslcmop_update = {}
5670 vnfr_update = {}
5671 nslcmop_operation_state = None
5672 db_nsr_update = {}
5673 error_description_nslcmop = ""
5674 exc = None
elumalaica7ece02022-04-12 12:47:32 +05305675 change_type = "updated"
aticigdffa6212022-04-12 15:27:53 +03005676 detailed_status = ""
5677
5678 try:
5679 # wait for any previous tasks in process
5680 step = "Waiting for previous operations to terminate"
5681 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5682 self._write_ns_status(
5683 nsr_id=nsr_id,
5684 ns_state=None,
5685 current_operation="UPDATING",
5686 current_operation_id=nslcmop_id,
5687 )
5688
5689 step = "Getting nslcmop from database"
5690 db_nslcmop = self.db.get_one(
5691 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5692 )
5693 update_type = db_nslcmop["operationParams"]["updateType"]
5694
5695 step = "Getting nsr from database"
5696 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5697 old_operational_status = db_nsr["operational-status"]
5698 db_nsr_update["operational-status"] = "updating"
5699 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5700 nsr_deployed = db_nsr["_admin"].get("deployed")
5701
5702 if update_type == "CHANGE_VNFPKG":
5703
5704 # Get the input parameters given through update request
5705 vnf_instance_id = db_nslcmop["operationParams"][
5706 "changeVnfPackageData"
5707 ].get("vnfInstanceId")
5708
5709 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5710 "vnfdId"
5711 )
5712 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5713
5714 step = "Getting vnfr from database"
5715 db_vnfr = self.db.get_one(
5716 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5717 )
5718
5719 step = "Getting vnfds from database"
5720 # Latest VNFD
5721 latest_vnfd = self.db.get_one(
5722 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5723 )
5724 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5725
5726 # Current VNFD
5727 current_vnf_revision = db_vnfr.get("revision", 1)
5728 current_vnfd = self.db.get_one(
5729 "vnfds_revisions",
5730 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5731 fail_on_empty=False,
5732 )
5733 # Charm artifact paths will be filled up later
5734 (
5735 current_charm_artifact_path,
5736 target_charm_artifact_path,
5737 charm_artifact_paths,
5738 ) = ([], [], [])
5739
5740 step = "Checking if revision has changed in VNFD"
5741 if current_vnf_revision != latest_vnfd_revision:
5742
elumalaib9e357c2022-04-27 09:58:38 +05305743 change_type = "policy_updated"
5744
aticigdffa6212022-04-12 15:27:53 +03005745 # There is new revision of VNFD, update operation is required
5746 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
aticigd7083542022-05-30 20:45:55 +03005747 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
aticigdffa6212022-04-12 15:27:53 +03005748
5749 step = "Removing the VNFD packages if they exist in the local path"
5750 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5751 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5752
5753 step = "Get the VNFD packages from FSMongo"
5754 self.fs.sync(from_path=latest_vnfd_path)
5755 self.fs.sync(from_path=current_vnfd_path)
5756
5757 step = (
5758 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5759 )
5760 base_folder = latest_vnfd["_admin"]["storage"]
5761
5762 for charm_index, charm_deployed in enumerate(
5763 get_iterable(nsr_deployed, "VCA")
5764 ):
5765 vnf_index = db_vnfr.get("member-vnf-index-ref")
5766
5767 # Getting charm-id and charm-type
5768 if charm_deployed.get("member-vnf-index") == vnf_index:
5769 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5770 charm_type = charm_deployed.get("type")
5771
5772 # Getting ee-id
5773 ee_id = charm_deployed.get("ee_id")
5774
5775 step = "Getting descriptor config"
5776 descriptor_config = get_configuration(
5777 current_vnfd, current_vnfd["id"]
5778 )
5779
5780 if "execution-environment-list" in descriptor_config:
5781 ee_list = descriptor_config.get(
5782 "execution-environment-list", []
5783 )
5784 else:
5785 ee_list = []
5786
5787 # There could be several charm used in the same VNF
5788 for ee_item in ee_list:
5789 if ee_item.get("juju"):
5790
5791 step = "Getting charm name"
5792 charm_name = ee_item["juju"].get("charm")
5793
5794 step = "Setting Charm artifact paths"
5795 current_charm_artifact_path.append(
5796 get_charm_artifact_path(
5797 base_folder,
5798 charm_name,
5799 charm_type,
5800 current_vnf_revision,
5801 )
5802 )
5803 target_charm_artifact_path.append(
5804 get_charm_artifact_path(
5805 base_folder,
5806 charm_name,
5807 charm_type,
aticigd7083542022-05-30 20:45:55 +03005808 latest_vnfd_revision,
aticigdffa6212022-04-12 15:27:53 +03005809 )
5810 )
5811
5812 charm_artifact_paths = zip(
5813 current_charm_artifact_path, target_charm_artifact_path
5814 )
5815
5816 step = "Checking if software version has changed in VNFD"
5817 if find_software_version(current_vnfd) != find_software_version(
5818 latest_vnfd
5819 ):
5820
5821 step = "Checking if existing VNF has charm"
5822 for current_charm_path, target_charm_path in list(
5823 charm_artifact_paths
5824 ):
5825 if current_charm_path:
5826 raise LcmException(
5827 "Software version change is not supported as VNF instance {} has charm.".format(
5828 vnf_instance_id
5829 )
5830 )
5831
5832 # There is no change in the charm package, then redeploy the VNF
5833 # based on new descriptor
5834 step = "Redeploying VNF"
elumalaib9e357c2022-04-27 09:58:38 +05305835 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5836 (
5837 result,
5838 detailed_status
5839 ) = await self._ns_redeploy_vnf(
5840 nsr_id,
5841 nslcmop_id,
5842 latest_vnfd,
5843 db_vnfr,
5844 db_nsr
5845 )
5846 if result == "FAILED":
5847 nslcmop_operation_state = result
5848 error_description_nslcmop = detailed_status
5849 db_nslcmop_update["detailed-status"] = detailed_status
5850 self.logger.debug(
5851 logging_text
5852 + " step {} Done with result {} {}".format(
5853 step, nslcmop_operation_state, detailed_status
5854 )
5855 )
aticigdffa6212022-04-12 15:27:53 +03005856
5857 else:
5858 step = "Checking if any charm package has changed or not"
5859 for current_charm_path, target_charm_path in list(
5860 charm_artifact_paths
5861 ):
5862 if (
5863 current_charm_path
5864 and target_charm_path
5865 and self.check_charm_hash_changed(
5866 current_charm_path, target_charm_path
5867 )
5868 ):
5869
5870 step = "Checking whether VNF uses juju bundle"
5871 if check_juju_bundle_existence(current_vnfd):
5872
5873 raise LcmException(
5874 "Charm upgrade is not supported for the instance which"
5875 " uses juju-bundle: {}".format(
5876 check_juju_bundle_existence(current_vnfd)
5877 )
5878 )
5879
5880 step = "Upgrading Charm"
5881 (
5882 result,
5883 detailed_status,
5884 ) = await self._ns_charm_upgrade(
5885 ee_id=ee_id,
5886 charm_id=charm_id,
5887 charm_type=charm_type,
5888 path=self.fs.path + target_charm_path,
5889 timeout=timeout_seconds,
5890 )
5891
5892 if result == "FAILED":
5893 nslcmop_operation_state = result
5894 error_description_nslcmop = detailed_status
5895
5896 db_nslcmop_update["detailed-status"] = detailed_status
5897 self.logger.debug(
5898 logging_text
5899 + " step {} Done with result {} {}".format(
5900 step, nslcmop_operation_state, detailed_status
5901 )
5902 )
5903
5904 step = "Updating policies"
elumalaib9e357c2022-04-27 09:58:38 +05305905 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5906 result = "COMPLETED"
5907 detailed_status = "Done"
5908 db_nslcmop_update["detailed-status"] = "Done"
aticigdffa6212022-04-12 15:27:53 +03005909
5910 # If nslcmop_operation_state is None, so any operation is not failed.
5911 if not nslcmop_operation_state:
5912 nslcmop_operation_state = "COMPLETED"
5913
5914 # If update CHANGE_VNFPKG nslcmop_operation is successful
5915 # vnf revision need to be updated
5916 vnfr_update["revision"] = latest_vnfd_revision
5917 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5918
5919 self.logger.debug(
5920 logging_text
5921 + " task Done with result {} {}".format(
5922 nslcmop_operation_state, detailed_status
5923 )
5924 )
5925 elif update_type == "REMOVE_VNF":
5926 # This part is included in https://osm.etsi.org/gerrit/11876
elumalaica7ece02022-04-12 12:47:32 +05305927 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5928 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5929 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5930 step = "Removing VNF"
5931 (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
5932 if result == "FAILED":
5933 nslcmop_operation_state = result
5934 error_description_nslcmop = detailed_status
5935 db_nslcmop_update["detailed-status"] = detailed_status
5936 change_type = "vnf_terminated"
5937 if not nslcmop_operation_state:
5938 nslcmop_operation_state = "COMPLETED"
5939 self.logger.debug(
5940 logging_text
5941 + " task Done with result {} {}".format(
5942 nslcmop_operation_state, detailed_status
5943 )
5944 )
aticigdffa6212022-04-12 15:27:53 +03005945
k4.rahulb827de92022-05-02 16:35:02 +00005946 elif update_type == "OPERATE_VNF":
5947 vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
5948 operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
5949 additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
5950 (result, detailed_status) = await self.rebuild_start_stop(
5951 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
5952 )
5953 if result == "FAILED":
5954 nslcmop_operation_state = result
5955 error_description_nslcmop = detailed_status
5956 db_nslcmop_update["detailed-status"] = detailed_status
5957 if not nslcmop_operation_state:
5958 nslcmop_operation_state = "COMPLETED"
5959 self.logger.debug(
5960 logging_text
5961 + " task Done with result {} {}".format(
5962 nslcmop_operation_state, detailed_status
5963 )
5964 )
5965
aticigdffa6212022-04-12 15:27:53 +03005966 # If nslcmop_operation_state is None, so any operation is not failed.
5967 # All operations are executed in overall.
5968 if not nslcmop_operation_state:
5969 nslcmop_operation_state = "COMPLETED"
5970 db_nsr_update["operational-status"] = old_operational_status
5971
5972 except (DbException, LcmException, N2VCException, K8sException) as e:
5973 self.logger.error(logging_text + "Exit Exception {}".format(e))
5974 exc = e
5975 except asyncio.CancelledError:
5976 self.logger.error(
5977 logging_text + "Cancelled Exception while '{}'".format(step)
5978 )
5979 exc = "Operation was cancelled"
5980 except asyncio.TimeoutError:
5981 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5982 exc = "Timeout"
5983 except Exception as e:
5984 exc = traceback.format_exc()
5985 self.logger.critical(
5986 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5987 exc_info=True,
5988 )
5989 finally:
5990 if exc:
5991 db_nslcmop_update[
5992 "detailed-status"
5993 ] = (
5994 detailed_status
5995 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5996 nslcmop_operation_state = "FAILED"
5997 db_nsr_update["operational-status"] = old_operational_status
5998 if db_nsr:
5999 self._write_ns_status(
6000 nsr_id=nsr_id,
6001 ns_state=db_nsr["nsState"],
6002 current_operation="IDLE",
6003 current_operation_id=None,
6004 other_update=db_nsr_update,
6005 )
6006
6007 self._write_op_status(
6008 op_id=nslcmop_id,
6009 stage="",
6010 error_message=error_description_nslcmop,
6011 operation_state=nslcmop_operation_state,
6012 other_update=db_nslcmop_update,
6013 )
6014
6015 if nslcmop_operation_state:
6016 try:
elumalaica7ece02022-04-12 12:47:32 +05306017 msg = {
elumalaib9e357c2022-04-27 09:58:38 +05306018 "nsr_id": nsr_id,
6019 "nslcmop_id": nslcmop_id,
6020 "operationState": nslcmop_operation_state,
6021 }
6022 if change_type in ("vnf_terminated", "policy_updated"):
elumalaica7ece02022-04-12 12:47:32 +05306023 msg.update({"vnf_member_index": member_vnf_index})
6024 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
aticigdffa6212022-04-12 15:27:53 +03006025 except Exception as e:
6026 self.logger.error(
6027 logging_text + "kafka_write notification Exception {}".format(e)
6028 )
6029 self.logger.debug(logging_text + "Exit")
6030 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6031 return nslcmop_operation_state, detailed_status
6032
tierno59d22d22018-09-25 18:10:19 +02006033 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02006034 # Try to lock HA task here
garciadeblas5697b8b2021-03-24 09:17:02 +01006035 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02006036 if not task_is_locked_by_me:
6037 return
6038
tierno59d22d22018-09-25 18:10:19 +02006039 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01006040 stage = ["", "", ""]
aktas13251562021-02-12 22:19:10 +03006041 tasks_dict_info = {}
tierno2357f4e2020-10-19 16:38:59 +00006042 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02006043 self.logger.debug(logging_text + "Enter")
6044 # get all needed from database
6045 db_nsr = None
tierno59d22d22018-09-25 18:10:19 +02006046 db_nslcmop_update = {}
tiernoe876f672020-02-13 14:34:48 +00006047 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02006048 exc = None
tierno9ab95942018-10-10 16:44:22 +02006049 # in case of error, indicates what part of scale was failed to put nsr at error status
6050 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02006051 old_operational_status = ""
6052 old_config_status = ""
aktas13251562021-02-12 22:19:10 +03006053 nsi_id = None
tierno59d22d22018-09-25 18:10:19 +02006054 try:
kuused124bfe2019-06-18 12:09:24 +02006055 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00006056 step = "Waiting for previous operations to terminate"
garciadeblas5697b8b2021-03-24 09:17:02 +01006057 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6058 self._write_ns_status(
6059 nsr_id=nsr_id,
6060 ns_state=None,
6061 current_operation="SCALING",
6062 current_operation_id=nslcmop_id,
6063 )
quilesj4cda56b2019-12-05 10:02:20 +00006064
ikalyvas02d9e7b2019-05-27 18:16:01 +03006065 step = "Getting nslcmop from database"
garciadeblas5697b8b2021-03-24 09:17:02 +01006066 self.logger.debug(
6067 step + " after having waited for previous tasks to be completed"
6068 )
ikalyvas02d9e7b2019-05-27 18:16:01 +03006069 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
bravof922c4172020-11-24 21:21:43 -03006070
ikalyvas02d9e7b2019-05-27 18:16:01 +03006071 step = "Getting nsr from database"
6072 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
ikalyvas02d9e7b2019-05-27 18:16:01 +03006073 old_operational_status = db_nsr["operational-status"]
6074 old_config_status = db_nsr["config-status"]
bravof922c4172020-11-24 21:21:43 -03006075
tierno59d22d22018-09-25 18:10:19 +02006076 step = "Parsing scaling parameters"
6077 db_nsr_update["operational-status"] = "scaling"
6078 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00006079 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01006080
garciadeblas5697b8b2021-03-24 09:17:02 +01006081 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6082 "scaleByStepData"
6083 ]["member-vnf-index"]
6084 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6085 "scaleByStepData"
6086 ]["scaling-group-descriptor"]
tierno59d22d22018-09-25 18:10:19 +02006087 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
tierno82974b22018-11-27 21:55:36 +00006088 # for backward compatibility
6089 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6090 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6091 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6092 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6093
tierno59d22d22018-09-25 18:10:19 +02006094 step = "Getting vnfr from database"
garciadeblas5697b8b2021-03-24 09:17:02 +01006095 db_vnfr = self.db.get_one(
6096 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6097 )
bravof922c4172020-11-24 21:21:43 -03006098
David Garciac1fe90a2021-03-31 19:12:02 +02006099 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6100
tierno59d22d22018-09-25 18:10:19 +02006101 step = "Getting vnfd from database"
6102 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03006103
aktas13251562021-02-12 22:19:10 +03006104 base_folder = db_vnfd["_admin"]["storage"]
6105
tierno59d22d22018-09-25 18:10:19 +02006106 step = "Getting scaling-group-descriptor"
bravof832f8992020-12-07 12:57:31 -03006107 scaling_descriptor = find_in_list(
garciadeblas5697b8b2021-03-24 09:17:02 +01006108 get_scaling_aspect(db_vnfd),
6109 lambda scale_desc: scale_desc["name"] == scaling_group,
bravof832f8992020-12-07 12:57:31 -03006110 )
6111 if not scaling_descriptor:
garciadeblas5697b8b2021-03-24 09:17:02 +01006112 raise LcmException(
6113 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6114 "at vnfd:scaling-group-descriptor".format(scaling_group)
6115 )
ikalyvas02d9e7b2019-05-27 18:16:01 +03006116
tierno15b1cf12019-08-29 13:21:40 +00006117 step = "Sending scale order to VIM"
bravof922c4172020-11-24 21:21:43 -03006118 # TODO check if ns is in a proper status
tierno59d22d22018-09-25 18:10:19 +02006119 nb_scale_op = 0
6120 if not db_nsr["_admin"].get("scaling-group"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006121 self.update_db_2(
6122 "nsrs",
6123 nsr_id,
6124 {
6125 "_admin.scaling-group": [
6126 {"name": scaling_group, "nb-scale-op": 0}
6127 ]
6128 },
6129 )
tierno59d22d22018-09-25 18:10:19 +02006130 admin_scale_index = 0
6131 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01006132 for admin_scale_index, admin_scale_info in enumerate(
6133 db_nsr["_admin"]["scaling-group"]
6134 ):
tierno59d22d22018-09-25 18:10:19 +02006135 if admin_scale_info["name"] == scaling_group:
6136 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6137 break
tierno9ab95942018-10-10 16:44:22 +02006138 else: # not found, set index one plus last element and add new entry with the name
6139 admin_scale_index += 1
garciadeblas5697b8b2021-03-24 09:17:02 +01006140 db_nsr_update[
6141 "_admin.scaling-group.{}.name".format(admin_scale_index)
6142 ] = scaling_group
aktas5f75f102021-03-15 11:26:10 +03006143
6144 vca_scaling_info = []
6145 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
tierno59d22d22018-09-25 18:10:19 +02006146 if scaling_type == "SCALE_OUT":
bravof832f8992020-12-07 12:57:31 -03006147 if "aspect-delta-details" not in scaling_descriptor:
6148 raise LcmException(
6149 "Aspect delta details not fount in scaling descriptor {}".format(
6150 scaling_descriptor["name"]
6151 )
6152 )
tierno59d22d22018-09-25 18:10:19 +02006153 # count if max-instance-count is reached
bravof832f8992020-12-07 12:57:31 -03006154 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
kuuse8b998e42019-07-30 15:22:16 +02006155
aktas5f75f102021-03-15 11:26:10 +03006156 scaling_info["scaling_direction"] = "OUT"
6157 scaling_info["vdu-create"] = {}
6158 scaling_info["kdu-create"] = {}
bravof832f8992020-12-07 12:57:31 -03006159 for delta in deltas:
aktas5f75f102021-03-15 11:26:10 +03006160 for vdu_delta in delta.get("vdu-delta", {}):
bravof832f8992020-12-07 12:57:31 -03006161 vdud = get_vdu(db_vnfd, vdu_delta["id"])
aktas5f75f102021-03-15 11:26:10 +03006162 # vdu_index also provides the number of instance of the targeted vdu
6163 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
garciadeblas5697b8b2021-03-24 09:17:02 +01006164 cloud_init_text = self._get_vdu_cloud_init_content(
6165 vdud, db_vnfd
6166 )
tierno72ef84f2020-10-06 08:22:07 +00006167 if cloud_init_text:
garciadeblas5697b8b2021-03-24 09:17:02 +01006168 additional_params = (
6169 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6170 or {}
6171 )
bravof832f8992020-12-07 12:57:31 -03006172 cloud_init_list = []
6173
6174 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6175 max_instance_count = 10
6176 if vdu_profile and "max-number-of-instances" in vdu_profile:
garciadeblas5697b8b2021-03-24 09:17:02 +01006177 max_instance_count = vdu_profile.get(
6178 "max-number-of-instances", 10
6179 )
6180
6181 default_instance_num = get_number_of_instances(
6182 db_vnfd, vdud["id"]
6183 )
aktas5f75f102021-03-15 11:26:10 +03006184 instances_number = vdu_delta.get("number-of-instances", 1)
6185 nb_scale_op += instances_number
bravof832f8992020-12-07 12:57:31 -03006186
aktas5f75f102021-03-15 11:26:10 +03006187 new_instance_count = nb_scale_op + default_instance_num
6188 # Control if new count is over max and vdu count is less than max.
6189 # Then assign new instance count
6190 if new_instance_count > max_instance_count > vdu_count:
6191 instances_number = new_instance_count - max_instance_count
6192 else:
6193 instances_number = instances_number
bravof832f8992020-12-07 12:57:31 -03006194
aktas5f75f102021-03-15 11:26:10 +03006195 if new_instance_count > max_instance_count:
bravof832f8992020-12-07 12:57:31 -03006196 raise LcmException(
6197 "reached the limit of {} (max-instance-count) "
6198 "scaling-out operations for the "
garciadeblas5697b8b2021-03-24 09:17:02 +01006199 "scaling-group-descriptor '{}'".format(
6200 nb_scale_op, scaling_group
6201 )
bravof922c4172020-11-24 21:21:43 -03006202 )
bravof832f8992020-12-07 12:57:31 -03006203 for x in range(vdu_delta.get("number-of-instances", 1)):
6204 if cloud_init_text:
6205 # TODO Information of its own ip is not available because db_vnfr is not updated.
6206 additional_params["OSM"] = get_osm_params(
garciadeblas5697b8b2021-03-24 09:17:02 +01006207 db_vnfr, vdu_delta["id"], vdu_index + x
bravof922c4172020-11-24 21:21:43 -03006208 )
bravof832f8992020-12-07 12:57:31 -03006209 cloud_init_list.append(
6210 self._parse_cloud_init(
6211 cloud_init_text,
6212 additional_params,
6213 db_vnfd["id"],
garciadeblas5697b8b2021-03-24 09:17:02 +01006214 vdud["id"],
bravof832f8992020-12-07 12:57:31 -03006215 )
6216 )
aktas5f75f102021-03-15 11:26:10 +03006217 vca_scaling_info.append(
aktas13251562021-02-12 22:19:10 +03006218 {
6219 "osm_vdu_id": vdu_delta["id"],
6220 "member-vnf-index": vnf_index,
6221 "type": "create",
garciadeblas5697b8b2021-03-24 09:17:02 +01006222 "vdu_index": vdu_index + x,
aktas13251562021-02-12 22:19:10 +03006223 }
6224 )
aktas5f75f102021-03-15 11:26:10 +03006225 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6226 for kdu_delta in delta.get("kdu-resource-delta", {}):
David Garciab4ebcd02021-10-28 02:00:43 +02006227 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
aktas5f75f102021-03-15 11:26:10 +03006228 kdu_name = kdu_profile["kdu-name"]
aktasc41fe832021-11-29 18:41:42 +03006229 resource_name = kdu_profile.get("resource-name", "")
aktas5f75f102021-03-15 11:26:10 +03006230
6231 # Might have different kdus in the same delta
6232 # Should have list for each kdu
6233 if not scaling_info["kdu-create"].get(kdu_name, None):
6234 scaling_info["kdu-create"][kdu_name] = []
6235
6236 kdur = get_kdur(db_vnfr, kdu_name)
6237 if kdur.get("helm-chart"):
6238 k8s_cluster_type = "helm-chart-v3"
6239 self.logger.debug("kdur: {}".format(kdur))
6240 if (
6241 kdur.get("helm-version")
6242 and kdur.get("helm-version") == "v2"
6243 ):
6244 k8s_cluster_type = "helm-chart"
aktas5f75f102021-03-15 11:26:10 +03006245 elif kdur.get("juju-bundle"):
6246 k8s_cluster_type = "juju-bundle"
6247 else:
6248 raise LcmException(
6249 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6250 "juju-bundle. Maybe an old NBI version is running".format(
6251 db_vnfr["member-vnf-index-ref"], kdu_name
6252 )
6253 )
6254
6255 max_instance_count = 10
6256 if kdu_profile and "max-number-of-instances" in kdu_profile:
6257 max_instance_count = kdu_profile.get(
6258 "max-number-of-instances", 10
6259 )
6260
6261 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6262 deployed_kdu, _ = get_deployed_kdu(
6263 nsr_deployed, kdu_name, vnf_index
bravof832f8992020-12-07 12:57:31 -03006264 )
aktas5f75f102021-03-15 11:26:10 +03006265 if deployed_kdu is None:
6266 raise LcmException(
6267 "KDU '{}' for vnf '{}' not deployed".format(
6268 kdu_name, vnf_index
6269 )
6270 )
6271 kdu_instance = deployed_kdu.get("kdu-instance")
6272 instance_num = await self.k8scluster_map[
6273 k8s_cluster_type
aktasc41fe832021-11-29 18:41:42 +03006274 ].get_scale_count(
6275 resource_name,
6276 kdu_instance,
6277 vca_id=vca_id,
6278 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6279 kdu_model=deployed_kdu.get("kdu-model"),
6280 )
aktas5f75f102021-03-15 11:26:10 +03006281 kdu_replica_count = instance_num + kdu_delta.get(
garciadeblas5697b8b2021-03-24 09:17:02 +01006282 "number-of-instances", 1
6283 )
ikalyvas02d9e7b2019-05-27 18:16:01 +03006284
aktas5f75f102021-03-15 11:26:10 +03006285 # Control if new count is over max and instance_num is less than max.
6286 # Then assign max instance number to kdu replica count
6287 if kdu_replica_count > max_instance_count > instance_num:
6288 kdu_replica_count = max_instance_count
6289 if kdu_replica_count > max_instance_count:
6290 raise LcmException(
6291 "reached the limit of {} (max-instance-count) "
6292 "scaling-out operations for the "
6293 "scaling-group-descriptor '{}'".format(
6294 instance_num, scaling_group
6295 )
6296 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006297
aktas5f75f102021-03-15 11:26:10 +03006298 for x in range(kdu_delta.get("number-of-instances", 1)):
6299 vca_scaling_info.append(
6300 {
6301 "osm_kdu_id": kdu_name,
6302 "member-vnf-index": vnf_index,
6303 "type": "create",
6304 "kdu_index": instance_num + x - 1,
6305 }
6306 )
6307 scaling_info["kdu-create"][kdu_name].append(
6308 {
6309 "member-vnf-index": vnf_index,
6310 "type": "create",
6311 "k8s-cluster-type": k8s_cluster_type,
6312 "resource-name": resource_name,
6313 "scale": kdu_replica_count,
6314 }
6315 )
6316 elif scaling_type == "SCALE_IN":
bravof832f8992020-12-07 12:57:31 -03006317 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
aktas5f75f102021-03-15 11:26:10 +03006318
6319 scaling_info["scaling_direction"] = "IN"
6320 scaling_info["vdu-delete"] = {}
6321 scaling_info["kdu-delete"] = {}
6322
bravof832f8992020-12-07 12:57:31 -03006323 for delta in deltas:
aktas5f75f102021-03-15 11:26:10 +03006324 for vdu_delta in delta.get("vdu-delta", {}):
6325 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
bravof832f8992020-12-07 12:57:31 -03006326 min_instance_count = 0
6327 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6328 if vdu_profile and "min-number-of-instances" in vdu_profile:
6329 min_instance_count = vdu_profile["min-number-of-instances"]
6330
garciadeblas5697b8b2021-03-24 09:17:02 +01006331 default_instance_num = get_number_of_instances(
6332 db_vnfd, vdu_delta["id"]
6333 )
aktas5f75f102021-03-15 11:26:10 +03006334 instance_num = vdu_delta.get("number-of-instances", 1)
6335 nb_scale_op -= instance_num
bravof832f8992020-12-07 12:57:31 -03006336
aktas5f75f102021-03-15 11:26:10 +03006337 new_instance_count = nb_scale_op + default_instance_num
6338
6339 if new_instance_count < min_instance_count < vdu_count:
6340 instances_number = min_instance_count - new_instance_count
6341 else:
6342 instances_number = instance_num
6343
6344 if new_instance_count < min_instance_count:
bravof832f8992020-12-07 12:57:31 -03006345 raise LcmException(
6346 "reached the limit of {} (min-instance-count) scaling-in operations for the "
garciadeblas5697b8b2021-03-24 09:17:02 +01006347 "scaling-group-descriptor '{}'".format(
6348 nb_scale_op, scaling_group
6349 )
bravof832f8992020-12-07 12:57:31 -03006350 )
aktas13251562021-02-12 22:19:10 +03006351 for x in range(vdu_delta.get("number-of-instances", 1)):
aktas5f75f102021-03-15 11:26:10 +03006352 vca_scaling_info.append(
aktas13251562021-02-12 22:19:10 +03006353 {
6354 "osm_vdu_id": vdu_delta["id"],
6355 "member-vnf-index": vnf_index,
6356 "type": "delete",
garciadeblas5697b8b2021-03-24 09:17:02 +01006357 "vdu_index": vdu_index - 1 - x,
aktas13251562021-02-12 22:19:10 +03006358 }
6359 )
aktas5f75f102021-03-15 11:26:10 +03006360 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6361 for kdu_delta in delta.get("kdu-resource-delta", {}):
David Garciab4ebcd02021-10-28 02:00:43 +02006362 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
aktas5f75f102021-03-15 11:26:10 +03006363 kdu_name = kdu_profile["kdu-name"]
aktasc41fe832021-11-29 18:41:42 +03006364 resource_name = kdu_profile.get("resource-name", "")
aktas5f75f102021-03-15 11:26:10 +03006365
6366 if not scaling_info["kdu-delete"].get(kdu_name, None):
6367 scaling_info["kdu-delete"][kdu_name] = []
6368
6369 kdur = get_kdur(db_vnfr, kdu_name)
6370 if kdur.get("helm-chart"):
6371 k8s_cluster_type = "helm-chart-v3"
6372 self.logger.debug("kdur: {}".format(kdur))
6373 if (
6374 kdur.get("helm-version")
6375 and kdur.get("helm-version") == "v2"
6376 ):
6377 k8s_cluster_type = "helm-chart"
aktas5f75f102021-03-15 11:26:10 +03006378 elif kdur.get("juju-bundle"):
6379 k8s_cluster_type = "juju-bundle"
6380 else:
6381 raise LcmException(
6382 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6383 "juju-bundle. Maybe an old NBI version is running".format(
6384 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6385 )
6386 )
6387
6388 min_instance_count = 0
6389 if kdu_profile and "min-number-of-instances" in kdu_profile:
6390 min_instance_count = kdu_profile["min-number-of-instances"]
6391
6392 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6393 deployed_kdu, _ = get_deployed_kdu(
6394 nsr_deployed, kdu_name, vnf_index
6395 )
6396 if deployed_kdu is None:
6397 raise LcmException(
6398 "KDU '{}' for vnf '{}' not deployed".format(
6399 kdu_name, vnf_index
6400 )
6401 )
6402 kdu_instance = deployed_kdu.get("kdu-instance")
6403 instance_num = await self.k8scluster_map[
6404 k8s_cluster_type
aktasc41fe832021-11-29 18:41:42 +03006405 ].get_scale_count(
6406 resource_name,
6407 kdu_instance,
6408 vca_id=vca_id,
6409 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6410 kdu_model=deployed_kdu.get("kdu-model"),
6411 )
aktas5f75f102021-03-15 11:26:10 +03006412 kdu_replica_count = instance_num - kdu_delta.get(
garciadeblas5697b8b2021-03-24 09:17:02 +01006413 "number-of-instances", 1
6414 )
tierno59d22d22018-09-25 18:10:19 +02006415
aktas5f75f102021-03-15 11:26:10 +03006416 if kdu_replica_count < min_instance_count < instance_num:
6417 kdu_replica_count = min_instance_count
6418 if kdu_replica_count < min_instance_count:
6419 raise LcmException(
6420 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6421 "scaling-group-descriptor '{}'".format(
6422 instance_num, scaling_group
6423 )
6424 )
6425
6426 for x in range(kdu_delta.get("number-of-instances", 1)):
6427 vca_scaling_info.append(
6428 {
6429 "osm_kdu_id": kdu_name,
6430 "member-vnf-index": vnf_index,
6431 "type": "delete",
6432 "kdu_index": instance_num - x - 1,
6433 }
6434 )
6435 scaling_info["kdu-delete"][kdu_name].append(
6436 {
6437 "member-vnf-index": vnf_index,
6438 "type": "delete",
6439 "k8s-cluster-type": k8s_cluster_type,
6440 "resource-name": resource_name,
6441 "scale": kdu_replica_count,
6442 }
6443 )
6444
tierno59d22d22018-09-25 18:10:19 +02006445 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
aktas5f75f102021-03-15 11:26:10 +03006446 vdu_delete = copy(scaling_info.get("vdu-delete"))
6447 if scaling_info["scaling_direction"] == "IN":
tierno59d22d22018-09-25 18:10:19 +02006448 for vdur in reversed(db_vnfr["vdur"]):
tierno27246d82018-09-27 15:59:09 +02006449 if vdu_delete.get(vdur["vdu-id-ref"]):
6450 vdu_delete[vdur["vdu-id-ref"]] -= 1
aktas5f75f102021-03-15 11:26:10 +03006451 scaling_info["vdu"].append(
garciadeblas5697b8b2021-03-24 09:17:02 +01006452 {
6453 "name": vdur.get("name") or vdur.get("vdu-name"),
6454 "vdu_id": vdur["vdu-id-ref"],
6455 "interface": [],
6456 }
6457 )
tierno59d22d22018-09-25 18:10:19 +02006458 for interface in vdur["interfaces"]:
aktas5f75f102021-03-15 11:26:10 +03006459 scaling_info["vdu"][-1]["interface"].append(
garciadeblas5697b8b2021-03-24 09:17:02 +01006460 {
6461 "name": interface["name"],
6462 "ip_address": interface["ip-address"],
6463 "mac_address": interface.get("mac-address"),
6464 }
6465 )
tierno2357f4e2020-10-19 16:38:59 +00006466 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
tierno59d22d22018-09-25 18:10:19 +02006467
kuuseac3a8882019-10-03 10:48:06 +02006468 # PRE-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02006469 step = "Executing pre-scale vnf-config-primitive"
6470 if scaling_descriptor.get("scaling-config-action"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006471 for scaling_config_action in scaling_descriptor[
6472 "scaling-config-action"
6473 ]:
6474 if (
6475 scaling_config_action.get("trigger") == "pre-scale-in"
6476 and scaling_type == "SCALE_IN"
6477 ) or (
6478 scaling_config_action.get("trigger") == "pre-scale-out"
6479 and scaling_type == "SCALE_OUT"
6480 ):
6481 vnf_config_primitive = scaling_config_action[
6482 "vnf-config-primitive-name-ref"
6483 ]
6484 step = db_nslcmop_update[
6485 "detailed-status"
6486 ] = "executing pre-scale scaling-config-action '{}'".format(
6487 vnf_config_primitive
6488 )
tiernoda964822019-01-14 15:53:47 +00006489
tierno59d22d22018-09-25 18:10:19 +02006490 # look for primitive
garciadeblas5697b8b2021-03-24 09:17:02 +01006491 for config_primitive in (
6492 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6493 ).get("config-primitive", ()):
tierno59d22d22018-09-25 18:10:19 +02006494 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02006495 break
6496 else:
6497 raise LcmException(
6498 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
tiernoda964822019-01-14 15:53:47 +00006499 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
garciadeblas5697b8b2021-03-24 09:17:02 +01006500 "primitive".format(scaling_group, vnf_config_primitive)
6501 )
tiernoda964822019-01-14 15:53:47 +00006502
aktas5f75f102021-03-15 11:26:10 +03006503 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
tiernoda964822019-01-14 15:53:47 +00006504 if db_vnfr.get("additionalParamsForVnf"):
6505 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
quilesj7e13aeb2019-10-08 13:34:55 +02006506
tierno9ab95942018-10-10 16:44:22 +02006507 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02006508 db_nsr_update["config-status"] = "configuring pre-scaling"
garciadeblas5697b8b2021-03-24 09:17:02 +01006509 primitive_params = self._map_primitive_params(
6510 config_primitive, {}, vnfr_params
6511 )
kuuseac3a8882019-10-03 10:48:06 +02006512
tierno7c4e24c2020-05-13 08:41:35 +00006513 # Pre-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02006514 op_index = self._check_or_add_scale_suboperation(
garciadeblas5697b8b2021-03-24 09:17:02 +01006515 db_nslcmop,
garciadeblas5697b8b2021-03-24 09:17:02 +01006516 vnf_index,
6517 vnf_config_primitive,
6518 primitive_params,
6519 "PRE-SCALE",
6520 )
tierno7c4e24c2020-05-13 08:41:35 +00006521 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02006522 # Skip sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006523 result = "COMPLETED"
6524 result_detail = "Done"
6525 self.logger.debug(
6526 logging_text
6527 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6528 vnf_config_primitive, result, result_detail
6529 )
6530 )
kuuseac3a8882019-10-03 10:48:06 +02006531 else:
tierno7c4e24c2020-05-13 08:41:35 +00006532 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02006533 # New sub-operation: Get index of this sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006534 op_index = (
6535 len(db_nslcmop.get("_admin", {}).get("operations"))
6536 - 1
6537 )
6538 self.logger.debug(
6539 logging_text
6540 + "vnf_config_primitive={} New sub-operation".format(
6541 vnf_config_primitive
6542 )
6543 )
kuuseac3a8882019-10-03 10:48:06 +02006544 else:
tierno7c4e24c2020-05-13 08:41:35 +00006545 # retry: Get registered params for this existing sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006546 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6547 op_index
6548 ]
6549 vnf_index = op.get("member_vnf_index")
6550 vnf_config_primitive = op.get("primitive")
6551 primitive_params = op.get("primitive_params")
6552 self.logger.debug(
6553 logging_text
6554 + "vnf_config_primitive={} Sub-operation retry".format(
6555 vnf_config_primitive
6556 )
6557 )
tierno588547c2020-07-01 15:30:20 +00006558 # Execute the primitive, either with new (first-time) or registered (reintent) args
garciadeblas5697b8b2021-03-24 09:17:02 +01006559 ee_descriptor_id = config_primitive.get(
6560 "execution-environment-ref"
6561 )
6562 primitive_name = config_primitive.get(
6563 "execution-environment-primitive", vnf_config_primitive
6564 )
6565 ee_id, vca_type = self._look_for_deployed_vca(
6566 nsr_deployed["VCA"],
6567 member_vnf_index=vnf_index,
6568 vdu_id=None,
6569 vdu_count_index=None,
6570 ee_descriptor_id=ee_descriptor_id,
6571 )
kuuseac3a8882019-10-03 10:48:06 +02006572 result, result_detail = await self._ns_execute_primitive(
garciadeblas5697b8b2021-03-24 09:17:02 +01006573 ee_id,
6574 primitive_name,
David Garciac1fe90a2021-03-31 19:12:02 +02006575 primitive_params,
6576 vca_type=vca_type,
6577 vca_id=vca_id,
6578 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006579 self.logger.debug(
6580 logging_text
6581 + "vnf_config_primitive={} Done with result {} {}".format(
6582 vnf_config_primitive, result, result_detail
6583 )
6584 )
kuuseac3a8882019-10-03 10:48:06 +02006585 # Update operationState = COMPLETED | FAILED
6586 self._update_suboperation_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01006587 db_nslcmop, op_index, result, result_detail
6588 )
kuuseac3a8882019-10-03 10:48:06 +02006589
tierno59d22d22018-09-25 18:10:19 +02006590 if result == "FAILED":
6591 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02006592 db_nsr_update["config-status"] = old_config_status
6593 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02006594 # PRE-SCALE END
tierno59d22d22018-09-25 18:10:19 +02006595
garciadeblas5697b8b2021-03-24 09:17:02 +01006596 db_nsr_update[
6597 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6598 ] = nb_scale_op
6599 db_nsr_update[
6600 "_admin.scaling-group.{}.time".format(admin_scale_index)
6601 ] = time()
tierno2357f4e2020-10-19 16:38:59 +00006602
aktas13251562021-02-12 22:19:10 +03006603 # SCALE-IN VCA - BEGIN
aktas5f75f102021-03-15 11:26:10 +03006604 if vca_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01006605 step = db_nslcmop_update[
6606 "detailed-status"
6607 ] = "Deleting the execution environments"
aktas13251562021-02-12 22:19:10 +03006608 scale_process = "VCA"
aktas5f75f102021-03-15 11:26:10 +03006609 for vca_info in vca_scaling_info:
Guillermo Calvinoa0c6baf2022-02-02 19:04:50 +01006610 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
aktas5f75f102021-03-15 11:26:10 +03006611 member_vnf_index = str(vca_info["member-vnf-index"])
garciadeblas5697b8b2021-03-24 09:17:02 +01006612 self.logger.debug(
aktas5f75f102021-03-15 11:26:10 +03006613 logging_text + "vdu info: {}".format(vca_info)
garciadeblas5697b8b2021-03-24 09:17:02 +01006614 )
aktas5f75f102021-03-15 11:26:10 +03006615 if vca_info.get("osm_vdu_id"):
6616 vdu_id = vca_info["osm_vdu_id"]
6617 vdu_index = int(vca_info["vdu_index"])
6618 stage[
6619 1
6620 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6621 member_vnf_index, vdu_id, vdu_index
6622 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006623 stage[2] = step = "Scaling in VCA"
6624 self._write_op_status(op_id=nslcmop_id, stage=stage)
aktas13251562021-02-12 22:19:10 +03006625 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6626 config_update = db_nsr["configurationStatus"]
6627 for vca_index, vca in enumerate(vca_update):
garciadeblas5697b8b2021-03-24 09:17:02 +01006628 if (
6629 (vca or vca.get("ee_id"))
6630 and vca["member-vnf-index"] == member_vnf_index
6631 and vca["vdu_count_index"] == vdu_index
6632 ):
aktas13251562021-02-12 22:19:10 +03006633 if vca.get("vdu_id"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006634 config_descriptor = get_configuration(
6635 db_vnfd, vca.get("vdu_id")
6636 )
aktas13251562021-02-12 22:19:10 +03006637 elif vca.get("kdu_name"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006638 config_descriptor = get_configuration(
6639 db_vnfd, vca.get("kdu_name")
6640 )
aktas13251562021-02-12 22:19:10 +03006641 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01006642 config_descriptor = get_configuration(
6643 db_vnfd, db_vnfd["id"]
6644 )
6645 operation_params = (
6646 db_nslcmop.get("operationParams") or {}
6647 )
6648 exec_terminate_primitives = not operation_params.get(
6649 "skip_terminate_primitives"
6650 ) and vca.get("needed_terminate")
David Garciac1fe90a2021-03-31 19:12:02 +02006651 task = asyncio.ensure_future(
6652 asyncio.wait_for(
6653 self.destroy_N2VC(
6654 logging_text,
6655 db_nslcmop,
6656 vca,
6657 config_descriptor,
6658 vca_index,
6659 destroy_ee=True,
6660 exec_primitives=exec_terminate_primitives,
6661 scaling_in=True,
6662 vca_id=vca_id,
6663 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01006664 timeout=self.timeout_charm_delete,
David Garciac1fe90a2021-03-31 19:12:02 +02006665 )
6666 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006667 tasks_dict_info[task] = "Terminating VCA {}".format(
6668 vca.get("ee_id")
6669 )
aktas13251562021-02-12 22:19:10 +03006670 del vca_update[vca_index]
6671 del config_update[vca_index]
6672 # wait for pending tasks of terminate primitives
6673 if tasks_dict_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01006674 self.logger.debug(
6675 logging_text
6676 + "Waiting for tasks {}".format(
6677 list(tasks_dict_info.keys())
6678 )
6679 )
6680 error_list = await self._wait_for_tasks(
6681 logging_text,
6682 tasks_dict_info,
6683 min(
6684 self.timeout_charm_delete, self.timeout_ns_terminate
6685 ),
6686 stage,
6687 nslcmop_id,
6688 )
aktas13251562021-02-12 22:19:10 +03006689 tasks_dict_info.clear()
6690 if error_list:
6691 raise LcmException("; ".join(error_list))
6692
6693 db_vca_and_config_update = {
6694 "_admin.deployed.VCA": vca_update,
garciadeblas5697b8b2021-03-24 09:17:02 +01006695 "configurationStatus": config_update,
aktas13251562021-02-12 22:19:10 +03006696 }
garciadeblas5697b8b2021-03-24 09:17:02 +01006697 self.update_db_2(
6698 "nsrs", db_nsr["_id"], db_vca_and_config_update
6699 )
aktas13251562021-02-12 22:19:10 +03006700 scale_process = None
6701 # SCALE-IN VCA - END
6702
kuuseac3a8882019-10-03 10:48:06 +02006703 # SCALE RO - BEGIN
aktas5f75f102021-03-15 11:26:10 +03006704 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
tierno9ab95942018-10-10 16:44:22 +02006705 scale_process = "RO"
tierno2357f4e2020-10-19 16:38:59 +00006706 if self.ro_config.get("ng"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006707 await self._scale_ng_ro(
aktas5f75f102021-03-15 11:26:10 +03006708 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
garciadeblas5697b8b2021-03-24 09:17:02 +01006709 )
aktas5f75f102021-03-15 11:26:10 +03006710 scaling_info.pop("vdu-create", None)
6711 scaling_info.pop("vdu-delete", None)
tierno59d22d22018-09-25 18:10:19 +02006712
tierno9ab95942018-10-10 16:44:22 +02006713 scale_process = None
aktas13251562021-02-12 22:19:10 +03006714 # SCALE RO - END
6715
aktas5f75f102021-03-15 11:26:10 +03006716 # SCALE KDU - BEGIN
6717 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6718 scale_process = "KDU"
6719 await self._scale_kdu(
6720 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6721 )
6722 scaling_info.pop("kdu-create", None)
6723 scaling_info.pop("kdu-delete", None)
6724
6725 scale_process = None
6726 # SCALE KDU - END
6727
6728 if db_nsr_update:
6729 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6730
aktas13251562021-02-12 22:19:10 +03006731 # SCALE-UP VCA - BEGIN
aktas5f75f102021-03-15 11:26:10 +03006732 if vca_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01006733 step = db_nslcmop_update[
6734 "detailed-status"
6735 ] = "Creating new execution environments"
aktas13251562021-02-12 22:19:10 +03006736 scale_process = "VCA"
aktas5f75f102021-03-15 11:26:10 +03006737 for vca_info in vca_scaling_info:
Guillermo Calvinoa0c6baf2022-02-02 19:04:50 +01006738 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
aktas5f75f102021-03-15 11:26:10 +03006739 member_vnf_index = str(vca_info["member-vnf-index"])
garciadeblas5697b8b2021-03-24 09:17:02 +01006740 self.logger.debug(
aktas5f75f102021-03-15 11:26:10 +03006741 logging_text + "vdu info: {}".format(vca_info)
garciadeblas5697b8b2021-03-24 09:17:02 +01006742 )
aktas13251562021-02-12 22:19:10 +03006743 vnfd_id = db_vnfr["vnfd-ref"]
aktas5f75f102021-03-15 11:26:10 +03006744 if vca_info.get("osm_vdu_id"):
6745 vdu_index = int(vca_info["vdu_index"])
6746 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6747 if db_vnfr.get("additionalParamsForVnf"):
6748 deploy_params.update(
6749 parse_yaml_strings(
6750 db_vnfr["additionalParamsForVnf"].copy()
6751 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006752 )
aktas5f75f102021-03-15 11:26:10 +03006753 descriptor_config = get_configuration(
6754 db_vnfd, db_vnfd["id"]
garciadeblas5697b8b2021-03-24 09:17:02 +01006755 )
aktas5f75f102021-03-15 11:26:10 +03006756 if descriptor_config:
6757 vdu_id = None
6758 vdu_name = None
6759 kdu_name = None
6760 self._deploy_n2vc(
6761 logging_text=logging_text
6762 + "member_vnf_index={} ".format(member_vnf_index),
6763 db_nsr=db_nsr,
6764 db_vnfr=db_vnfr,
6765 nslcmop_id=nslcmop_id,
6766 nsr_id=nsr_id,
6767 nsi_id=nsi_id,
6768 vnfd_id=vnfd_id,
6769 vdu_id=vdu_id,
6770 kdu_name=kdu_name,
6771 member_vnf_index=member_vnf_index,
6772 vdu_index=vdu_index,
6773 vdu_name=vdu_name,
6774 deploy_params=deploy_params,
6775 descriptor_config=descriptor_config,
6776 base_folder=base_folder,
6777 task_instantiation_info=tasks_dict_info,
6778 stage=stage,
6779 )
6780 vdu_id = vca_info["osm_vdu_id"]
6781 vdur = find_in_list(
6782 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
aktas13251562021-02-12 22:19:10 +03006783 )
aktas5f75f102021-03-15 11:26:10 +03006784 descriptor_config = get_configuration(db_vnfd, vdu_id)
6785 if vdur.get("additionalParams"):
6786 deploy_params_vdu = parse_yaml_strings(
6787 vdur["additionalParams"]
6788 )
6789 else:
6790 deploy_params_vdu = deploy_params
6791 deploy_params_vdu["OSM"] = get_osm_params(
6792 db_vnfr, vdu_id, vdu_count_index=vdu_index
garciadeblas5697b8b2021-03-24 09:17:02 +01006793 )
aktas5f75f102021-03-15 11:26:10 +03006794 if descriptor_config:
6795 vdu_name = None
6796 kdu_name = None
6797 stage[
6798 1
6799 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
garciadeblas5697b8b2021-03-24 09:17:02 +01006800 member_vnf_index, vdu_id, vdu_index
aktas5f75f102021-03-15 11:26:10 +03006801 )
6802 stage[2] = step = "Scaling out VCA"
6803 self._write_op_status(op_id=nslcmop_id, stage=stage)
6804 self._deploy_n2vc(
6805 logging_text=logging_text
6806 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6807 member_vnf_index, vdu_id, vdu_index
6808 ),
6809 db_nsr=db_nsr,
6810 db_vnfr=db_vnfr,
6811 nslcmop_id=nslcmop_id,
6812 nsr_id=nsr_id,
6813 nsi_id=nsi_id,
6814 vnfd_id=vnfd_id,
6815 vdu_id=vdu_id,
6816 kdu_name=kdu_name,
6817 member_vnf_index=member_vnf_index,
6818 vdu_index=vdu_index,
6819 vdu_name=vdu_name,
6820 deploy_params=deploy_params_vdu,
6821 descriptor_config=descriptor_config,
6822 base_folder=base_folder,
6823 task_instantiation_info=tasks_dict_info,
6824 stage=stage,
6825 )
aktas13251562021-02-12 22:19:10 +03006826 # SCALE-UP VCA - END
6827 scale_process = None
tierno59d22d22018-09-25 18:10:19 +02006828
kuuseac3a8882019-10-03 10:48:06 +02006829 # POST-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02006830 # execute primitive service POST-SCALING
6831 step = "Executing post-scale vnf-config-primitive"
6832 if scaling_descriptor.get("scaling-config-action"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006833 for scaling_config_action in scaling_descriptor[
6834 "scaling-config-action"
6835 ]:
6836 if (
6837 scaling_config_action.get("trigger") == "post-scale-in"
6838 and scaling_type == "SCALE_IN"
6839 ) or (
6840 scaling_config_action.get("trigger") == "post-scale-out"
6841 and scaling_type == "SCALE_OUT"
6842 ):
6843 vnf_config_primitive = scaling_config_action[
6844 "vnf-config-primitive-name-ref"
6845 ]
6846 step = db_nslcmop_update[
6847 "detailed-status"
6848 ] = "executing post-scale scaling-config-action '{}'".format(
6849 vnf_config_primitive
6850 )
tiernoda964822019-01-14 15:53:47 +00006851
aktas5f75f102021-03-15 11:26:10 +03006852 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
tiernoda964822019-01-14 15:53:47 +00006853 if db_vnfr.get("additionalParamsForVnf"):
6854 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6855
tierno59d22d22018-09-25 18:10:19 +02006856 # look for primitive
bravof9a256db2021-02-22 18:02:07 -03006857 for config_primitive in (
6858 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6859 ).get("config-primitive", ()):
tierno59d22d22018-09-25 18:10:19 +02006860 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02006861 break
6862 else:
tiernoa278b842020-07-08 15:33:55 +00006863 raise LcmException(
6864 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6865 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
garciadeblas5697b8b2021-03-24 09:17:02 +01006866 "config-primitive".format(
6867 scaling_group, vnf_config_primitive
6868 )
6869 )
tierno9ab95942018-10-10 16:44:22 +02006870 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02006871 db_nsr_update["config-status"] = "configuring post-scaling"
garciadeblas5697b8b2021-03-24 09:17:02 +01006872 primitive_params = self._map_primitive_params(
6873 config_primitive, {}, vnfr_params
6874 )
tiernod6de1992018-10-11 13:05:52 +02006875
tierno7c4e24c2020-05-13 08:41:35 +00006876 # Post-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02006877 op_index = self._check_or_add_scale_suboperation(
garciadeblas5697b8b2021-03-24 09:17:02 +01006878 db_nslcmop,
garciadeblas5697b8b2021-03-24 09:17:02 +01006879 vnf_index,
6880 vnf_config_primitive,
6881 primitive_params,
6882 "POST-SCALE",
6883 )
quilesj4cda56b2019-12-05 10:02:20 +00006884 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02006885 # Skip sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006886 result = "COMPLETED"
6887 result_detail = "Done"
6888 self.logger.debug(
6889 logging_text
6890 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6891 vnf_config_primitive, result, result_detail
6892 )
6893 )
kuuseac3a8882019-10-03 10:48:06 +02006894 else:
quilesj4cda56b2019-12-05 10:02:20 +00006895 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02006896 # New sub-operation: Get index of this sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006897 op_index = (
6898 len(db_nslcmop.get("_admin", {}).get("operations"))
6899 - 1
6900 )
6901 self.logger.debug(
6902 logging_text
6903 + "vnf_config_primitive={} New sub-operation".format(
6904 vnf_config_primitive
6905 )
6906 )
kuuseac3a8882019-10-03 10:48:06 +02006907 else:
tierno7c4e24c2020-05-13 08:41:35 +00006908 # retry: Get registered params for this existing sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006909 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6910 op_index
6911 ]
6912 vnf_index = op.get("member_vnf_index")
6913 vnf_config_primitive = op.get("primitive")
6914 primitive_params = op.get("primitive_params")
6915 self.logger.debug(
6916 logging_text
6917 + "vnf_config_primitive={} Sub-operation retry".format(
6918 vnf_config_primitive
6919 )
6920 )
tierno588547c2020-07-01 15:30:20 +00006921 # Execute the primitive, either with new (first-time) or registered (reintent) args
garciadeblas5697b8b2021-03-24 09:17:02 +01006922 ee_descriptor_id = config_primitive.get(
6923 "execution-environment-ref"
6924 )
6925 primitive_name = config_primitive.get(
6926 "execution-environment-primitive", vnf_config_primitive
6927 )
6928 ee_id, vca_type = self._look_for_deployed_vca(
6929 nsr_deployed["VCA"],
6930 member_vnf_index=vnf_index,
6931 vdu_id=None,
6932 vdu_count_index=None,
6933 ee_descriptor_id=ee_descriptor_id,
6934 )
kuuseac3a8882019-10-03 10:48:06 +02006935 result, result_detail = await self._ns_execute_primitive(
David Garciac1fe90a2021-03-31 19:12:02 +02006936 ee_id,
6937 primitive_name,
6938 primitive_params,
6939 vca_type=vca_type,
6940 vca_id=vca_id,
6941 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006942 self.logger.debug(
6943 logging_text
6944 + "vnf_config_primitive={} Done with result {} {}".format(
6945 vnf_config_primitive, result, result_detail
6946 )
6947 )
kuuseac3a8882019-10-03 10:48:06 +02006948 # Update operationState = COMPLETED | FAILED
6949 self._update_suboperation_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01006950 db_nslcmop, op_index, result, result_detail
6951 )
kuuseac3a8882019-10-03 10:48:06 +02006952
tierno59d22d22018-09-25 18:10:19 +02006953 if result == "FAILED":
6954 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02006955 db_nsr_update["config-status"] = old_config_status
6956 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02006957 # POST-SCALE END
tierno59d22d22018-09-25 18:10:19 +02006958
garciadeblas5697b8b2021-03-24 09:17:02 +01006959 db_nsr_update[
6960 "detailed-status"
6961 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6962 db_nsr_update["operational-status"] = (
6963 "running"
6964 if old_operational_status == "failed"
ikalyvas02d9e7b2019-05-27 18:16:01 +03006965 else old_operational_status
garciadeblas5697b8b2021-03-24 09:17:02 +01006966 )
tiernod6de1992018-10-11 13:05:52 +02006967 db_nsr_update["config-status"] = old_config_status
tierno59d22d22018-09-25 18:10:19 +02006968 return
garciadeblas5697b8b2021-03-24 09:17:02 +01006969 except (
6970 ROclient.ROClientException,
6971 DbException,
6972 LcmException,
6973 NgRoException,
6974 ) as e:
tierno59d22d22018-09-25 18:10:19 +02006975 self.logger.error(logging_text + "Exit Exception {}".format(e))
6976 exc = e
6977 except asyncio.CancelledError:
garciadeblas5697b8b2021-03-24 09:17:02 +01006978 self.logger.error(
6979 logging_text + "Cancelled Exception while '{}'".format(step)
6980 )
tierno59d22d22018-09-25 18:10:19 +02006981 exc = "Operation was cancelled"
6982 except Exception as e:
6983 exc = traceback.format_exc()
garciadeblas5697b8b2021-03-24 09:17:02 +01006984 self.logger.critical(
6985 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6986 exc_info=True,
6987 )
tierno59d22d22018-09-25 18:10:19 +02006988 finally:
garciadeblas5697b8b2021-03-24 09:17:02 +01006989 self._write_ns_status(
6990 nsr_id=nsr_id,
6991 ns_state=None,
6992 current_operation="IDLE",
6993 current_operation_id=None,
6994 )
aktas13251562021-02-12 22:19:10 +03006995 if tasks_dict_info:
6996 stage[1] = "Waiting for instantiate pending tasks."
6997 self.logger.debug(logging_text + stage[1])
garciadeblas5697b8b2021-03-24 09:17:02 +01006998 exc = await self._wait_for_tasks(
6999 logging_text,
7000 tasks_dict_info,
7001 self.timeout_ns_deploy,
7002 stage,
7003 nslcmop_id,
7004 nsr_id=nsr_id,
7005 )
tierno59d22d22018-09-25 18:10:19 +02007006 if exc:
garciadeblas5697b8b2021-03-24 09:17:02 +01007007 db_nslcmop_update[
7008 "detailed-status"
7009 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
tiernoa17d4f42020-04-28 09:59:23 +00007010 nslcmop_operation_state = "FAILED"
tierno59d22d22018-09-25 18:10:19 +02007011 if db_nsr:
tiernod6de1992018-10-11 13:05:52 +02007012 db_nsr_update["operational-status"] = old_operational_status
7013 db_nsr_update["config-status"] = old_config_status
7014 db_nsr_update["detailed-status"] = ""
7015 if scale_process:
7016 if "VCA" in scale_process:
7017 db_nsr_update["config-status"] = "failed"
7018 if "RO" in scale_process:
7019 db_nsr_update["operational-status"] = "failed"
garciadeblas5697b8b2021-03-24 09:17:02 +01007020 db_nsr_update[
7021 "detailed-status"
7022 ] = "FAILED scaling nslcmop={} {}: {}".format(
7023 nslcmop_id, step, exc
7024 )
tiernoa17d4f42020-04-28 09:59:23 +00007025 else:
7026 error_description_nslcmop = None
7027 nslcmop_operation_state = "COMPLETED"
7028 db_nslcmop_update["detailed-status"] = "Done"
quilesj4cda56b2019-12-05 10:02:20 +00007029
garciadeblas5697b8b2021-03-24 09:17:02 +01007030 self._write_op_status(
7031 op_id=nslcmop_id,
7032 stage="",
7033 error_message=error_description_nslcmop,
7034 operation_state=nslcmop_operation_state,
7035 other_update=db_nslcmop_update,
7036 )
tiernoa17d4f42020-04-28 09:59:23 +00007037 if db_nsr:
garciadeblas5697b8b2021-03-24 09:17:02 +01007038 self._write_ns_status(
7039 nsr_id=nsr_id,
7040 ns_state=None,
7041 current_operation="IDLE",
7042 current_operation_id=None,
7043 other_update=db_nsr_update,
7044 )
tiernoa17d4f42020-04-28 09:59:23 +00007045
tierno59d22d22018-09-25 18:10:19 +02007046 if nslcmop_operation_state:
7047 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01007048 msg = {
7049 "nsr_id": nsr_id,
7050 "nslcmop_id": nslcmop_id,
7051 "operationState": nslcmop_operation_state,
7052 }
bravof922c4172020-11-24 21:21:43 -03007053 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02007054 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01007055 self.logger.error(
7056 logging_text + "kafka_write notification Exception {}".format(e)
7057 )
tierno59d22d22018-09-25 18:10:19 +02007058 self.logger.debug(logging_text + "Exit")
7059 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
tiernob996d942020-07-03 14:52:28 +00007060
aktas5f75f102021-03-15 11:26:10 +03007061 async def _scale_kdu(
7062 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7063 ):
7064 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7065 for kdu_name in _scaling_info:
7066 for kdu_scaling_info in _scaling_info[kdu_name]:
7067 deployed_kdu, index = get_deployed_kdu(
7068 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7069 )
7070 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7071 kdu_instance = deployed_kdu["kdu-instance"]
aktasc41fe832021-11-29 18:41:42 +03007072 kdu_model = deployed_kdu.get("kdu-model")
aktas5f75f102021-03-15 11:26:10 +03007073 scale = int(kdu_scaling_info["scale"])
7074 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7075
7076 db_dict = {
7077 "collection": "nsrs",
7078 "filter": {"_id": nsr_id},
7079 "path": "_admin.deployed.K8s.{}".format(index),
7080 }
7081
7082 step = "scaling application {}".format(
7083 kdu_scaling_info["resource-name"]
7084 )
7085 self.logger.debug(logging_text + step)
7086
7087 if kdu_scaling_info["type"] == "delete":
7088 kdu_config = get_configuration(db_vnfd, kdu_name)
7089 if (
7090 kdu_config
7091 and kdu_config.get("terminate-config-primitive")
7092 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7093 ):
7094 terminate_config_primitive_list = kdu_config.get(
7095 "terminate-config-primitive"
7096 )
7097 terminate_config_primitive_list.sort(
7098 key=lambda val: int(val["seq"])
7099 )
7100
7101 for (
7102 terminate_config_primitive
7103 ) in terminate_config_primitive_list:
7104 primitive_params_ = self._map_primitive_params(
7105 terminate_config_primitive, {}, {}
7106 )
7107 step = "execute terminate config primitive"
7108 self.logger.debug(logging_text + step)
7109 await asyncio.wait_for(
7110 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7111 cluster_uuid=cluster_uuid,
7112 kdu_instance=kdu_instance,
7113 primitive_name=terminate_config_primitive["name"],
7114 params=primitive_params_,
7115 db_dict=db_dict,
7116 vca_id=vca_id,
7117 ),
7118 timeout=600,
7119 )
7120
7121 await asyncio.wait_for(
7122 self.k8scluster_map[k8s_cluster_type].scale(
7123 kdu_instance,
7124 scale,
7125 kdu_scaling_info["resource-name"],
7126 vca_id=vca_id,
aktasc41fe832021-11-29 18:41:42 +03007127 cluster_uuid=cluster_uuid,
7128 kdu_model=kdu_model,
7129 atomic=True,
7130 db_dict=db_dict,
aktas5f75f102021-03-15 11:26:10 +03007131 ),
7132 timeout=self.timeout_vca_on_error,
7133 )
7134
7135 if kdu_scaling_info["type"] == "create":
7136 kdu_config = get_configuration(db_vnfd, kdu_name)
7137 if (
7138 kdu_config
7139 and kdu_config.get("initial-config-primitive")
7140 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7141 ):
7142 initial_config_primitive_list = kdu_config.get(
7143 "initial-config-primitive"
7144 )
7145 initial_config_primitive_list.sort(
7146 key=lambda val: int(val["seq"])
7147 )
7148
7149 for initial_config_primitive in initial_config_primitive_list:
7150 primitive_params_ = self._map_primitive_params(
7151 initial_config_primitive, {}, {}
7152 )
7153 step = "execute initial config primitive"
7154 self.logger.debug(logging_text + step)
7155 await asyncio.wait_for(
7156 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7157 cluster_uuid=cluster_uuid,
7158 kdu_instance=kdu_instance,
7159 primitive_name=initial_config_primitive["name"],
7160 params=primitive_params_,
7161 db_dict=db_dict,
7162 vca_id=vca_id,
7163 ),
7164 timeout=600,
7165 )
7166
garciadeblas5697b8b2021-03-24 09:17:02 +01007167 async def _scale_ng_ro(
7168 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7169 ):
tierno2357f4e2020-10-19 16:38:59 +00007170 nsr_id = db_nslcmop["nsInstanceId"]
7171 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7172 db_vnfrs = {}
7173
7174 # read from db: vnfd's for every vnf
bravof832f8992020-12-07 12:57:31 -03007175 db_vnfds = []
tierno2357f4e2020-10-19 16:38:59 +00007176
7177 # for each vnf in ns, read vnfd
7178 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7179 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7180 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
tierno2357f4e2020-10-19 16:38:59 +00007181 # if we haven't this vnfd, read it from db
bravof832f8992020-12-07 12:57:31 -03007182 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
tierno2357f4e2020-10-19 16:38:59 +00007183 # read from db
7184 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
bravof832f8992020-12-07 12:57:31 -03007185 db_vnfds.append(vnfd)
tierno2357f4e2020-10-19 16:38:59 +00007186 n2vc_key = self.n2vc.get_public_key()
7187 n2vc_key_list = [n2vc_key]
garciadeblas5697b8b2021-03-24 09:17:02 +01007188 self.scale_vnfr(
7189 db_vnfr,
7190 vdu_scaling_info.get("vdu-create"),
7191 vdu_scaling_info.get("vdu-delete"),
7192 mark_delete=True,
7193 )
tierno2357f4e2020-10-19 16:38:59 +00007194 # db_vnfr has been updated, update db_vnfrs to use it
7195 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
garciadeblas5697b8b2021-03-24 09:17:02 +01007196 await self._instantiate_ng_ro(
7197 logging_text,
7198 nsr_id,
7199 db_nsd,
7200 db_nsr,
7201 db_nslcmop,
7202 db_vnfrs,
7203 db_vnfds,
7204 n2vc_key_list,
7205 stage=stage,
7206 start_deploy=time(),
7207 timeout_ns_deploy=self.timeout_ns_deploy,
7208 )
tierno2357f4e2020-10-19 16:38:59 +00007209 if vdu_scaling_info.get("vdu-delete"):
garciadeblas5697b8b2021-03-24 09:17:02 +01007210 self.scale_vnfr(
7211 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7212 )
tierno2357f4e2020-10-19 16:38:59 +00007213
bravof73bac502021-05-11 07:38:47 -04007214 async def extract_prometheus_scrape_jobs(
aticig15db6142022-01-24 12:51:26 +03007215 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
garciadeblas5697b8b2021-03-24 09:17:02 +01007216 ):
tiernob996d942020-07-03 14:52:28 +00007217 # look if exist a file called 'prometheus*.j2' and
7218 artifact_content = self.fs.dir_ls(artifact_path)
garciadeblas5697b8b2021-03-24 09:17:02 +01007219 job_file = next(
7220 (
7221 f
7222 for f in artifact_content
7223 if f.startswith("prometheus") and f.endswith(".j2")
7224 ),
7225 None,
7226 )
tiernob996d942020-07-03 14:52:28 +00007227 if not job_file:
7228 return
7229 with self.fs.file_open((artifact_path, job_file), "r") as f:
7230 job_data = f.read()
7231
7232 # TODO get_service
garciadeblas5697b8b2021-03-24 09:17:02 +01007233 _, _, service = ee_id.partition(".") # remove prefix "namespace."
tiernob996d942020-07-03 14:52:28 +00007234 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7235 host_port = "80"
7236 vnfr_id = vnfr_id.replace("-", "")
7237 variables = {
7238 "JOB_NAME": vnfr_id,
7239 "TARGET_IP": target_ip,
7240 "EXPORTER_POD_IP": host_name,
7241 "EXPORTER_POD_PORT": host_port,
7242 }
bravof73bac502021-05-11 07:38:47 -04007243 job_list = parse_job(job_data, variables)
tiernob996d942020-07-03 14:52:28 +00007244 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7245 for job in job_list:
garciadeblas5697b8b2021-03-24 09:17:02 +01007246 if (
7247 not isinstance(job.get("job_name"), str)
7248 or vnfr_id not in job["job_name"]
7249 ):
tiernob996d942020-07-03 14:52:28 +00007250 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7251 job["nsr_id"] = nsr_id
bravof73bac502021-05-11 07:38:47 -04007252 job["vnfr_id"] = vnfr_id
7253 return job_list
David Garciaaae391f2020-11-09 11:12:54 +01007254
k4.rahulb827de92022-05-02 16:35:02 +00007255 async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
7256 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7257 self.logger.info(logging_text + "Enter")
7258 stage = ["Preparing the environment", ""]
7259 # database nsrs record
7260 db_nsr_update = {}
7261 vdu_vim_name = None
7262 vim_vm_id = None
7263 # in case of error, indicates what part of scale was failed to put nsr at error status
7264 start_deploy = time()
7265 try:
7266 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7267 vim_account_id = db_vnfr.get("vim-account-id")
7268 vim_info_key = "vim:" + vim_account_id
7269 vdur = find_in_list(
7270 db_vnfr["vdur"], lambda vdu: vdu["count-index"] == additional_param["count-index"]
7271 )
7272 if vdur:
7273 vdu_vim_name = vdur["name"]
7274 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7275 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7276 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7277 # wait for any previous tasks in process
7278 stage[1] = "Waiting for previous operations to terminate"
7279 self.logger.info(stage[1])
7280 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
7281
7282 stage[1] = "Reading from database."
7283 self.logger.info(stage[1])
7284 self._write_ns_status(
7285 nsr_id=nsr_id,
7286 ns_state=None,
7287 current_operation=operation_type.upper(),
7288 current_operation_id=nslcmop_id
7289 )
7290 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7291
7292 # read from db: ns
7293 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7294 db_nsr_update["operational-status"] = operation_type
7295 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7296 # Payload for RO
7297 desc = {
7298 operation_type: {
7299 "vim_vm_id": vim_vm_id,
7300 "vnf_id": vnf_id,
7301 "vdu_index": additional_param["count-index"],
7302 "vdu_id": vdur["id"],
7303 "target_vim": target_vim,
7304 "vim_account_id": vim_account_id
7305 }
7306 }
7307 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7308 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7309 self.logger.info("ro nsr id: {}".format(nsr_id))
7310 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7311 self.logger.info("response from RO: {}".format(result_dict))
7312 action_id = result_dict["action_id"]
7313 await self._wait_ng_ro(
k4.rahul08cc70b2022-07-07 07:23:53 +00007314 nsr_id, action_id, nslcmop_id, start_deploy,
7315 self.timeout_operate, None, "start_stop_rebuild",
k4.rahulb827de92022-05-02 16:35:02 +00007316 )
7317 return "COMPLETED", "Done"
7318 except (ROclient.ROClientException, DbException, LcmException) as e:
7319 self.logger.error("Exit Exception {}".format(e))
7320 exc = e
7321 except asyncio.CancelledError:
7322 self.logger.error("Cancelled Exception while '{}'".format(stage))
7323 exc = "Operation was cancelled"
7324 except Exception as e:
7325 exc = traceback.format_exc()
7326 self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
7327 return "FAILED", "Error in operate VNF {}".format(exc)
7328
David Garciaaae391f2020-11-09 11:12:54 +01007329 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7330 """
7331 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7332
7333 :param: vim_account_id: VIM Account ID
7334
7335 :return: (cloud_name, cloud_credential)
7336 """
bravof922c4172020-11-24 21:21:43 -03007337 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
David Garciaaae391f2020-11-09 11:12:54 +01007338 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7339
7340 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7341 """
7342 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7343
7344 :param: vim_account_id: VIM Account ID
7345
7346 :return: (cloud_name, cloud_credential)
7347 """
bravof922c4172020-11-24 21:21:43 -03007348 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
David Garciaaae391f2020-11-09 11:12:54 +01007349 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
elumalai80bcf1c2022-04-28 18:05:01 +05307350
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        Delegates the migration to NG-RO and tracks the nslcmop lifecycle
        (HA lock, status writes, kafka notification).

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is populated below but never written to
        # the "nsrs" collection in this method — confirm whether an
        # update_db_2 / _write_ns_status(other_update=...) call is missing.
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # RO payload is the operation parameters plus nothing else
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # poll RO until the migrate action finishes or times out
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
                operation="migrate"
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            # NOTE(review): error_message is always "" here, even on failure,
            # unlike scale/heal which pass the error description — confirm.
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # notify subscribers of the final operation state
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
garciadeblas07f4e4c2022-06-09 09:42:58 +02007449
7450
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Launches the VIM heal (heal_RO) as a background task and, for each
        target VNF/VDU in the operation parameters, re-deploys/re-attaches the
        corresponding execution environments through _heal_n2vc. The finally
        clause awaits all launched tasks and records the final status.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # remember previous statuses to restore them on failure
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # launch VIM heal in background; awaited later in the finally clause
            step = "Sending heal order to VIM"
            task_ro = asyncio.ensure_future(
                self.heal_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    db_nslcmop=db_nslcmop,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            tasks_dict_info[task_ro] = "Healing at VIM"

            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    # NOTE(review): iterating target_vnf["additionalParams"].get("vdu", None)
                    # raises TypeError when "vdu" is absent — confirm callers always set it.
                    for target_vdu in target_vnf["additionalParams"].get("vdu", None):
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf["additionalParams"].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf["additionalParams"].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index",0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if ( instance["vdu-name"] == vdu_name and instance["count-index"] == vdu_index ):
                                    target_instance = instance
                                    break
                            # NOTE(review): if no vdur matched, target_instance is
                            # still None and .get() below raises AttributeError —
                            # confirm the healed VDU is always present in vdur.
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the RO/N2VC tasks launched above before closing the op
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore pre-heal statuses, then override per pending task type
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(
                        nslcmop_id, step, exc
                    )
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # notify subscribers of the final operation state
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7718
    async def heal_RO(
        self,
        logging_text,
        nsr_id,
        db_nslcmop,
        stage,
    ):
        """
        Heal at RO
        :param logging_text: preffix text to use at logging
        :param nsr_id: nsr identity
        :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
        :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
        :return: None or exception
        """
        # NOTE(review): this helper is never called within this method's body —
        # confirm whether it is dead code left over from a copy.
        def get_vim_account(vim_account_id):
            nonlocal db_vims
            # memoized read of a vim_account record
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        try:
            start_heal = time()
            # operation-level timeout overrides the configured/default one
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_heal"):
                timeout_ns_heal = ns_params["timeout_ns_heal"]
            else:
                timeout_ns_heal = self.timeout.get(
                    "ns_heal", self.timeout_ns_heal
                )

            db_vims = {}

            nslcmop_id = db_nslcmop["_id"]
            target = {
                "action_id": nslcmop_id,
            }
            # NOTE(review): warning-level dump of the whole nslcmop record is
            # very verbose — confirm whether debug level was intended.
            self.logger.warning("db_nslcmop={} and timeout_ns_heal={}".format(db_nslcmop,timeout_ns_heal))
            target.update(db_nslcmop.get("operationParams", {}))

            self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
            desc = await self.RO.recreate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_heal, timeout_ns_heal, stage,
                operation="healing"
            )

            # Updating NSR
            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "running",
                "detailed-status": " ".join(stage),
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self._write_op_status(nslcmop_id, stage)
            self.logger.debug(
                logging_text + "ns healed at RO. RO_id={}".format(action_id)
            )

        except Exception as e:
            stage[2] = "ERROR healing at VIM"
            #self.set_vnfr_at_error(db_vnfrs, str(e))
            # log with traceback only for unexpected exception types
            self.logger.error(
                "Error healing at VIM {}".format(e),
                exc_info=not isinstance(
                    e,
                    (
                        ROclient.ROClientException,
                        LcmException,
                        DbException,
                        NgRoException,
                    ),
                ),
            )
            raise
7798
7799 def _heal_n2vc(
7800 self,
7801 logging_text,
7802 db_nsr,
7803 db_vnfr,
7804 nslcmop_id,
7805 nsr_id,
7806 nsi_id,
7807 vnfd_id,
7808 vdu_id,
7809 kdu_name,
7810 member_vnf_index,
7811 vdu_index,
7812 vdu_name,
7813 deploy_params,
7814 descriptor_config,
7815 base_folder,
7816 task_instantiation_info,
7817 stage,
7818 ):
7819 # launch instantiate_N2VC in a asyncio task and register task object
7820 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
7821 # if not found, create one entry and update database
7822 # fill db_nsr._admin.deployed.VCA.<index>
7823
7824 self.logger.debug(
7825 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
7826 )
7827 if "execution-environment-list" in descriptor_config:
7828 ee_list = descriptor_config.get("execution-environment-list", [])
7829 elif "juju" in descriptor_config:
7830 ee_list = [descriptor_config] # ns charms
7831 else: # other types as script are not supported
7832 ee_list = []
7833
7834 for ee_item in ee_list:
7835 self.logger.debug(
7836 logging_text
7837 + "_deploy_n2vc ee_item juju={}, helm={}".format(
7838 ee_item.get("juju"), ee_item.get("helm-chart")
7839 )
7840 )
7841 ee_descriptor_id = ee_item.get("id")
7842 if ee_item.get("juju"):
7843 vca_name = ee_item["juju"].get("charm")
7844 vca_type = (
7845 "lxc_proxy_charm"
7846 if ee_item["juju"].get("charm") is not None
7847 else "native_charm"
7848 )
7849 if ee_item["juju"].get("cloud") == "k8s":
7850 vca_type = "k8s_proxy_charm"
7851 elif ee_item["juju"].get("proxy") is False:
7852 vca_type = "native_charm"
7853 elif ee_item.get("helm-chart"):
7854 vca_name = ee_item["helm-chart"]
7855 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
7856 vca_type = "helm"
7857 else:
7858 vca_type = "helm-v3"
7859 else:
7860 self.logger.debug(
7861 logging_text + "skipping non juju neither charm configuration"
7862 )
7863 continue
7864
7865 vca_index = -1
7866 for vca_index, vca_deployed in enumerate(
7867 db_nsr["_admin"]["deployed"]["VCA"]
7868 ):
7869 if not vca_deployed:
7870 continue
7871 if (
7872 vca_deployed.get("member-vnf-index") == member_vnf_index
7873 and vca_deployed.get("vdu_id") == vdu_id
7874 and vca_deployed.get("kdu_name") == kdu_name
7875 and vca_deployed.get("vdu_count_index", 0) == vdu_index
7876 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
7877 ):
7878 break
7879 else:
7880 # not found, create one.
7881 target = (
7882 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
7883 )
7884 if vdu_id:
7885 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
7886 elif kdu_name:
7887 target += "/kdu/{}".format(kdu_name)
7888 vca_deployed = {
7889 "target_element": target,
7890 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
7891 "member-vnf-index": member_vnf_index,
7892 "vdu_id": vdu_id,
7893 "kdu_name": kdu_name,
7894 "vdu_count_index": vdu_index,
7895 "operational-status": "init", # TODO revise
7896 "detailed-status": "", # TODO revise
7897 "step": "initial-deploy", # TODO revise
7898 "vnfd_id": vnfd_id,
7899 "vdu_name": vdu_name,
7900 "type": vca_type,
7901 "ee_descriptor_id": ee_descriptor_id,
7902 }
7903 vca_index += 1
7904
7905 # create VCA and configurationStatus in db
7906 db_dict = {
7907 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
7908 "configurationStatus.{}".format(vca_index): dict(),
7909 }
7910 self.update_db_2("nsrs", nsr_id, db_dict)
7911
7912 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
7913
7914 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
7915 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
7916 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
7917
7918 # Launch task
7919 task_n2vc = asyncio.ensure_future(
7920 self.heal_N2VC(
7921 logging_text=logging_text,
7922 vca_index=vca_index,
7923 nsi_id=nsi_id,
7924 db_nsr=db_nsr,
7925 db_vnfr=db_vnfr,
7926 vdu_id=vdu_id,
7927 kdu_name=kdu_name,
7928 vdu_index=vdu_index,
7929 deploy_params=deploy_params,
7930 config_descriptor=descriptor_config,
7931 base_folder=base_folder,
7932 nslcmop_id=nslcmop_id,
7933 stage=stage,
7934 vca_type=vca_type,
7935 vca_name=vca_name,
7936 ee_config_descriptor=ee_item,
7937 )
7938 )
7939 self.lcm_tasks.register(
7940 "ns",
7941 nsr_id,
7942 nslcmop_id,
7943 "instantiate_N2VC-{}".format(vca_index),
7944 task_n2vc,
7945 )
7946 task_instantiation_info[
7947 task_n2vc
7948 ] = self.task_name_deploy_vca + " {}.{}".format(
7949 member_vnf_index or "", vdu_id or ""
7950 )
7951
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-attach and re-configure one VCA after a heal operation.

        For a healed element (NS, VNF, VDU or KDU) this coroutine:
        - rebuilds the VCA namespace and artifact path for the charm/helm EE,
        - for native charms, waits for the healed VM and registers a new
          execution environment in the VCA,
        - re-installs the configuration software,
        - waits for RO to finish the healing, and
        - optionally (``deploy_params["run-day1"]``) re-runs the Day-1
          initial config primitives.

        Progress is reported through ``_write_configuration_status`` /
        ``_write_op_status`` and persisted under
        ``nsrs._admin.deployed.VCA.<vca_index>``.

        :param logging_text: prefix for every log line of this task
        :param vca_index: index into ``db_nsr._admin.deployed.VCA``
        :param nsi_id: network slice instance id ("" / None if none)
        :param db_nsr: NS record (dict) already read from the database
        :param db_vnfr: VNF record, or None when configuring at NS level
        :param vdu_id: VDU id when the charm targets a VDU, else None
        :param kdu_name: KDU name when the charm targets a KDU, else None
        :param vdu_index: VDU count index (None/0 for non-VDU targets)
        :param config_descriptor: descriptor section holding config-access
            and initial-config-primitive information
        :param deploy_params: parameters used to render primitive params;
            ``rw_mgmt_ip`` is injected here once known
        :param base_folder: dict with "folder" and optional "pkg-dir" used
            to locate the charm/helm artifact
        :param nslcmop_id: operation id (for status reporting)
        :param stage: 3-item list of stage strings, mutated in place
        :param vca_type: one of native_charm/lxc_proxy_charm/k8s_proxy_charm/
            helm/helm-v3
        :param vca_name: charm or helm-chart name inside the package
        :param ee_config_descriptor: execution-environment item (for its id)
        :raises LcmException: wrapping any failure, prefixed with the step
            that was being executed
        """
        nsr_id = db_nsr["_id"]
        # dot-terminated prefix for partial updates of this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        # "osm" context passed implicitly to charms (ns/vnf/vdu/kdu ids)
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        # location where N2VC writes its own status updates
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" tracks the current action so the except clause can report it
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            # juju model namespace: <nsi>.<ns>[.<vnf>-<idx>[.<vdu>-<idx> | .<kdu>]]
            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VDU/KDU granularity
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            # charms live under <pkg-dir>/charms (or helm-charts); packages
            # without pkg-dir keep artifacts under Scripts/
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                # no key injection here (user/pub_key None): we only need the
                # management IP of the healed VM to register the EE over SSH
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                # registering (rather than creating) reuses the healed VM as EE
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                #other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # native charms may take their initial config from the special
                # "config" primitive, rendered against deploy_params
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    # NOTE: double underscore is the actual N2VC API method name
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id,self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

            self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            # Day-1 primitives only re-run when the operation requests it
            runDay1 = deploy_params.get("run-day1",False)
            self.logger.debug(" Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id,vdu_id,runDay1))
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            # remember that terminate primitives must run on deletion
                            self.update_db_2(
                                "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                            )
                            check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # log full traceback only for unexpected exception types
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            # wrap every failure with the step that was running for context
            raise LcmException("{} {}".format(step, e)) from e
8355
8356 async def _wait_heal_ro(
8357 self,
8358 nsr_id,
8359 timeout=600,
8360 ):
8361 start_time = time()
8362 while time() <= start_time + timeout:
8363 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8364 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"]["operational-status"]
8365 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8366 if operational_status_ro != "healing":
8367 break
8368 await asyncio.sleep(15, loop=self.loop)
8369 else: # timeout_ns_deploy
8370 raise NgRoException("Timeout waiting ns to deploy")
govindarajul4ff4b512022-05-02 20:02:41 +05308371
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        Acquires the HA lock for the operation, waits for previous related
        operations, delegates the scaling to RO and waits for it to finish,
        then publishes the result on the "ns" kafka topic.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical-scale operation

        """
        # Try to lock HA task here
        # returns False when another LCM instance owns this operation
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below but never written to the
        # DB in this method — presumably an update_db_2 call is missing; verify
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id
            )
            step = "Getting nslcmop from database"
            self.logger.debug(step + " after having waited for previous tasks to be completed")
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # RO receives the operation parameters verbatim as the target
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # poll RO until the async action finishes (or times out)
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_verticalscale,
                operation="verticalscale"
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep full traceback for diagnosis
            exc = traceback.format_exc()
            self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            # always release the "current operation" marker on the NS
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    # notify subscribers (e.g. NBI) about the operation result
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    # notification failure must not mask the operation outcome
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")