blob: 6aed3043cc704b2adb8807aa3b30fb273840b4ed [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
aticigdffa6212022-04-12 15:27:53 +030020import shutil
David Garcia444bf962021-11-11 16:35:26 +010021from typing import Any, Dict, List
tierno59d22d22018-09-25 18:10:19 +020022import yaml
23import logging
24import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020025import traceback
David Garciad4816682019-12-09 14:57:43 +010026import json
garciadeblas5697b8b2021-03-24 09:17:02 +010027from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33)
tierno59d22d22018-09-25 18:10:19 +020034
tierno77677d92019-08-22 13:46:35 +000035from osm_lcm import ROclient
David Garciab4ebcd02021-10-28 02:00:43 +020036from osm_lcm.data_utils.nsr import (
37 get_deployed_kdu,
38 get_deployed_vca,
39 get_deployed_vca_list,
40 get_nsd,
41)
42from osm_lcm.data_utils.vca import (
43 DeployedComponent,
44 DeployedK8sResource,
45 DeployedVCA,
46 EELevel,
47 Relation,
48 EERelation,
49 safe_get_ee_relation,
50)
tierno69f0d382020-05-07 13:08:09 +000051from osm_lcm.ng_ro import NgRoClient, NgRoException
garciadeblas5697b8b2021-03-24 09:17:02 +010052from osm_lcm.lcm_utils import (
53 LcmException,
54 LcmExceptionNoMgmtIP,
55 LcmBase,
56 deep_get,
57 get_iterable,
58 populate_dict,
aticigdffa6212022-04-12 15:27:53 +030059 check_juju_bundle_existence,
60 get_charm_artifact_path,
garciadeblas5697b8b2021-03-24 09:17:02 +010061)
David Garciab4ebcd02021-10-28 02:00:43 +020062from osm_lcm.data_utils.nsd import (
63 get_ns_configuration_relation_list,
64 get_vnf_profile,
65 get_vnf_profiles,
66)
garciadeblas5697b8b2021-03-24 09:17:02 +010067from osm_lcm.data_utils.vnfd import (
David Garcia78b6e6d2022-04-29 05:50:46 +020068 get_kdu,
69 get_kdu_services,
David Garciab4ebcd02021-10-28 02:00:43 +020070 get_relation_list,
garciadeblas5697b8b2021-03-24 09:17:02 +010071 get_vdu_list,
72 get_vdu_profile,
73 get_ee_sorted_initial_config_primitive_list,
74 get_ee_sorted_terminate_config_primitive_list,
75 get_kdu_list,
76 get_virtual_link_profiles,
77 get_vdu,
78 get_configuration,
79 get_vdu_index,
80 get_scaling_aspect,
81 get_number_of_instances,
82 get_juju_ee_ref,
David Garciab4ebcd02021-10-28 02:00:43 +020083 get_kdu_resource_profile,
aticigdffa6212022-04-12 15:27:53 +030084 find_software_version,
garciadeblas5697b8b2021-03-24 09:17:02 +010085)
bravof922c4172020-11-24 21:21:43 -030086from osm_lcm.data_utils.list_utils import find_in_list
aticig349aa462022-05-19 12:29:35 +030087from osm_lcm.data_utils.vnfr import (
88 get_osm_params,
89 get_vdur_index,
90 get_kdur,
91 get_volumes_from_instantiation_params,
92)
bravof922c4172020-11-24 21:21:43 -030093from osm_lcm.data_utils.dict_utils import parse_yaml_strings
94from osm_lcm.data_utils.database.vim_account import VimAccountDB
David Garciab4ebcd02021-10-28 02:00:43 +020095from n2vc.definitions import RelationEndpoint
calvinosanch9f9c6f22019-11-04 13:37:39 +010096from n2vc.k8s_helm_conn import K8sHelmConnector
lloretgalleg18ebc3a2020-10-22 09:54:51 +000097from n2vc.k8s_helm3_conn import K8sHelm3Connector
Adam Israelbaacc302019-12-01 12:41:39 -050098from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020099
tierno27246d82018-09-27 15:59:09 +0200100from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +0200101from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +0200102
bravof922c4172020-11-24 21:21:43 -0300103from osm_lcm.data_utils.database.database import Database
104from osm_lcm.data_utils.filesystem.filesystem import Filesystem
105
quilesj7e13aeb2019-10-08 13:34:55 +0200106from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +0000107from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +0200108
tierno588547c2020-07-01 15:30:20 +0000109from osm_lcm.lcm_helm_conn import LCMHelmConn
David Garcia78b6e6d2022-04-29 05:50:46 +0200110from osm_lcm.osm_config import OsmConfigBuilder
bravof73bac502021-05-11 07:38:47 -0400111from osm_lcm.prometheus import parse_job
tierno588547c2020-07-01 15:30:20 +0000112
tierno27246d82018-09-27 15:59:09 +0200113from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +0200114from time import time
tierno27246d82018-09-27 15:59:09 +0200115from uuid import uuid4
lloretgalleg7c121132020-07-08 07:53:22 +0000116
tiernob996d942020-07-03 14:52:28 +0000117from random import randint
tierno59d22d22018-09-25 18:10:19 +0200118
tierno69f0d382020-05-07 13:08:09 +0000119__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
tierno59d22d22018-09-25 18:10:19 +0200120
121
122class NsLcm(LcmBase):
    # --- Default timeouts (seconds) for the different NS LCM operations ---
    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operate (start/stop) a ns
    timeout_verticalscale = 1800  # default global timeout for vertical scaling
    # Sentinel return values used when looking up sub-operations
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Label identifying the VCA deployment task
    task_name_deploy_vca = "Deploying VCA"
kuuseac3a8882019-10-03 10:48:06 +0200142
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: messaging (kafka) client, forwarded to LcmBase
        :param lcm_tasks: registry of running LCM tasks
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # database and filesystem are process-wide singletons
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copied so later local modifications do not leak into the shared config
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (Juju-based VCA); status changes are pushed
        # back to the nsrs collection through _on_update_n2vc_db
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution environment connector (same db-update callback)
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # K8s connectors: helm v2, helm v3 and juju-bundle.
        # The helm connectors do not register a db-update callback (None).
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # map from kdu model type (as stored in the descriptors/db) to connector;
        # note plain "chart" is routed to helm v3
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # map from VCA (execution environment) type to connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # RO status polling method per operation type; healing uses the
        # dedicated recreate_status endpoint
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
        }
229
tierno2357f4e2020-10-19 16:38:59 +0000230 @staticmethod
231 def increment_ip_mac(ip_mac, vm_index=1):
232 if not isinstance(ip_mac, str):
233 return ip_mac
234 try:
235 # try with ipv4 look for last dot
236 i = ip_mac.rfind(".")
237 if i > 0:
238 i += 1
239 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
240 # try with ipv6 or mac look for last colon. Operate in hex
241 i = ip_mac.rfind(":")
242 if i > 0:
243 i += 1
244 # format in hex, len can be 2 for mac or 4 for ipv6
garciadeblas5697b8b2021-03-24 09:17:02 +0100245 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
246 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
247 )
tierno2357f4e2020-10-19 16:38:59 +0000248 except Exception:
249 pass
250 return None
251
quilesj3655ae02019-12-12 16:08:35 +0000252 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200253
quilesj3655ae02019-12-12 16:08:35 +0000254 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
255
256 try:
257 # TODO filter RO descriptor fields...
258
259 # write to database
260 db_dict = dict()
261 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
garciadeblas5697b8b2021-03-24 09:17:02 +0100262 db_dict["deploymentStatus"] = ro_descriptor
quilesj3655ae02019-12-12 16:08:35 +0000263 self.update_db_2("nsrs", nsrs_id, db_dict)
264
265 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100266 self.logger.warn(
267 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
268 )
quilesj3655ae02019-12-12 16:08:35 +0000269
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback invoked by N2VC when VCA data changes; refresh NS status in db.

        Reads the nsrs record selected by `filter`, queries the current VCA
        status from juju, recomputes configurationStatus for the affected VCA
        and the global nsState (READY <-> DEGRADED), and writes the result
        back to the nsrs collection. Errors (except cancellation/timeout) are
        logged and swallowed.

        :param table: db table that triggered the callback (unused here)
        :param filter: db filter; must contain "_id" with the nsr id
        :param path: dotted path of the updated data; its last segment is
            expected to be the VCA index inside _admin.deployed.VCA
        :param updated_data: changed data (unused here)
        :param vca_id: optional VCA id to query status from
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # VCA index is the last dotted segment of `path`
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so these item-assignments raise KeyError and are
                # silently absorbed by the except below — confirm whether the
                # intent was a dotted db key like "configurationStatus.N.status"
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines: any machine not "started"/"running" degrades the NS
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications: any application not "active" degrades the NS
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                # transition READY -> DEGRADED or DEGRADED -> READY as needed
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeout must propagate to the task machinery
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200372
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id; must contain "_id".
            NOTE(review): despite the None default, filter=None would raise
            AttributeError at filter.get("_id") below — confirm callers always
            pass it.
        :param vca_id: optional VCA id forwarded to status_kdu
        :cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #                   .format(cluster_uuid, kdu_instance, filter))

        nsr_id = filter.get("_id")
        try:
            # query the full KDU status through the connector matching the cluster type
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            if cluster_type in ("juju-bundle", "juju"):
                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
                # status in a similar way between Juju Bundles and Helm Charts on this side
                await self.k8sclusterjuju.update_vca_status(
                    db_dict["vcaStatus"],
                    kdu_instance,
                    vca_id=vca_id,
                )

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeout must propagate to the task machinery
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
tierno72ef84f2020-10-06 08:22:07 +0000422 @staticmethod
423 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
424 try:
425 env = Environment(undefined=StrictUndefined)
426 template = env.from_string(cloud_init_text)
427 return template.render(additional_params or {})
428 except UndefinedError as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100429 raise LcmException(
430 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
431 "file, must be provided in the instantiation parameters inside the "
432 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
433 )
tierno72ef84f2020-10-06 08:22:07 +0000434 except (TemplateError, TemplateNotFound) as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100435 raise LcmException(
436 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
437 vnfd_id, vdu_id, e
438 )
439 )
tierno72ef84f2020-10-06 08:22:07 +0000440
bravof922c4172020-11-24 21:21:43 -0300441 def _get_vdu_cloud_init_content(self, vdu, vnfd):
442 cloud_init_content = cloud_init_file = None
tierno72ef84f2020-10-06 08:22:07 +0000443 try:
tierno72ef84f2020-10-06 08:22:07 +0000444 if vdu.get("cloud-init-file"):
445 base_folder = vnfd["_admin"]["storage"]
bravof486707f2021-11-08 17:18:50 -0300446 if base_folder["pkg-dir"]:
447 cloud_init_file = "{}/{}/cloud_init/{}".format(
448 base_folder["folder"],
449 base_folder["pkg-dir"],
450 vdu["cloud-init-file"],
451 )
452 else:
453 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
454 base_folder["folder"],
455 vdu["cloud-init-file"],
456 )
tierno72ef84f2020-10-06 08:22:07 +0000457 with self.fs.file_open(cloud_init_file, "r") as ci_file:
458 cloud_init_content = ci_file.read()
459 elif vdu.get("cloud-init"):
460 cloud_init_content = vdu["cloud-init"]
461
462 return cloud_init_content
463 except FsException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100464 raise LcmException(
465 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
466 vnfd["id"], vdu["id"], cloud_init_file, e
467 )
468 )
tierno72ef84f2020-10-06 08:22:07 +0000469
tierno72ef84f2020-10-06 08:22:07 +0000470 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
garciadeblas5697b8b2021-03-24 09:17:02 +0100471 vdur = next(
aticig349aa462022-05-19 12:29:35 +0300472 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
garciadeblas5697b8b2021-03-24 09:17:02 +0100473 )
tierno72ef84f2020-10-06 08:22:07 +0000474 additional_params = vdur.get("additionalParams")
bravof922c4172020-11-24 21:21:43 -0300475 return parse_yaml_strings(additional_params)
tierno72ef84f2020-10-06 08:22:07 +0000476
gcalvino35be9152018-12-20 09:33:12 +0100477 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
tierno59d22d22018-09-25 18:10:19 +0200478 """
479 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
480 :param vnfd: input vnfd
481 :param new_id: overrides vnf id if provided
tierno8a518872018-12-21 13:42:14 +0000482 :param additionalParams: Instantiation params for VNFs provided
gcalvino35be9152018-12-20 09:33:12 +0100483 :param nsrId: Id of the NSR
tierno59d22d22018-09-25 18:10:19 +0200484 :return: copy of vnfd
485 """
tierno72ef84f2020-10-06 08:22:07 +0000486 vnfd_RO = deepcopy(vnfd)
487 # remove unused by RO configuration, monitoring, scaling and internal keys
488 vnfd_RO.pop("_id", None)
489 vnfd_RO.pop("_admin", None)
tierno72ef84f2020-10-06 08:22:07 +0000490 vnfd_RO.pop("monitoring-param", None)
491 vnfd_RO.pop("scaling-group-descriptor", None)
492 vnfd_RO.pop("kdu", None)
493 vnfd_RO.pop("k8s-cluster", None)
494 if new_id:
495 vnfd_RO["id"] = new_id
tierno8a518872018-12-21 13:42:14 +0000496
tierno72ef84f2020-10-06 08:22:07 +0000497 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
498 for vdu in get_iterable(vnfd_RO, "vdu"):
499 vdu.pop("cloud-init-file", None)
500 vdu.pop("cloud-init", None)
501 return vnfd_RO
tierno59d22d22018-09-25 18:10:19 +0200502
tierno2357f4e2020-10-19 16:38:59 +0000503 @staticmethod
504 def ip_profile_2_RO(ip_profile):
505 RO_ip_profile = deepcopy(ip_profile)
506 if "dns-server" in RO_ip_profile:
507 if isinstance(RO_ip_profile["dns-server"], list):
508 RO_ip_profile["dns-address"] = []
509 for ds in RO_ip_profile.pop("dns-server"):
garciadeblas5697b8b2021-03-24 09:17:02 +0100510 RO_ip_profile["dns-address"].append(ds["address"])
tierno2357f4e2020-10-19 16:38:59 +0000511 else:
512 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
513 if RO_ip_profile.get("ip-version") == "ipv4":
514 RO_ip_profile["ip-version"] = "IPv4"
515 if RO_ip_profile.get("ip-version") == "ipv6":
516 RO_ip_profile["ip-version"] = "IPv6"
517 if "dhcp-params" in RO_ip_profile:
518 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
519 return RO_ip_profile
520
bravof922c4172020-11-24 21:21:43 -0300521 def _get_ro_vim_id_for_vim_account(self, vim_account):
522 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
523 if db_vim["_admin"]["operationalState"] != "ENABLED":
garciadeblas5697b8b2021-03-24 09:17:02 +0100524 raise LcmException(
525 "VIM={} is not available. operationalState={}".format(
526 vim_account, db_vim["_admin"]["operationalState"]
527 )
528 )
bravof922c4172020-11-24 21:21:43 -0300529 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
530 return RO_vim_id
tierno59d22d22018-09-25 18:10:19 +0200531
bravof922c4172020-11-24 21:21:43 -0300532 def get_ro_wim_id_for_wim_account(self, wim_account):
533 if isinstance(wim_account, str):
534 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
535 if db_wim["_admin"]["operationalState"] != "ENABLED":
garciadeblas5697b8b2021-03-24 09:17:02 +0100536 raise LcmException(
537 "WIM={} is not available. operationalState={}".format(
538 wim_account, db_wim["_admin"]["operationalState"]
539 )
540 )
bravof922c4172020-11-24 21:21:43 -0300541 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
542 return RO_wim_id
543 else:
544 return wim_account
tierno59d22d22018-09-25 18:10:19 +0200545
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scale-out/scale-in operation to a vnfr's vdur list in the db.

        :param db_vnfr: vnfr record; modified in place (its "vdur" list is
            refreshed from the database at the end)
        :param vdu_create: dict {vdu_id: count} of instances to add
        :param vdu_delete: dict {vdu_id: count} of instances to remove
        :param mark_delete: when True, vdurs are only marked status=DELETING
            in the db instead of being pulled from the array
        :raises LcmException: scaling out a vdu with no existing vdur and no
            saved vdur-template
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # take the newest existing vdur for this vdu as the clone source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # clone the source vdur, resetting per-instance state
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per new instance;
                        # dynamic ones are cleared so the VIM reassigns them
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be management of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                # keep the last vdur as a template so a later scale-out can clone it
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the newest vdu_count matching vdurs as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
tiernof578e552018-11-08 19:07:20 +0100658 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
659 """
660 Updates database nsr with the RO info for the created vld
661 :param ns_update_nsr: dictionary to be filled with the updated info
662 :param db_nsr: content of db_nsr. This is also modified
663 :param nsr_desc_RO: nsr descriptor from RO
664 :return: Nothing, LcmException is raised on errors
665 """
666
667 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
668 for net_RO in get_iterable(nsr_desc_RO, "nets"):
669 if vld["id"] != net_RO.get("ns_net_osm_id"):
670 continue
671 vld["vim-id"] = net_RO.get("vim_net_id")
672 vld["name"] = net_RO.get("vim_name")
673 vld["status"] = net_RO.get("status")
674 vld["status-detailed"] = net_RO.get("error_msg")
675 ns_update_nsr["vld.{}".format(vld_index)] = vld
676 break
677 else:
garciadeblas5697b8b2021-03-24 09:17:02 +0100678 raise LcmException(
679 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
680 )
tiernof578e552018-11-08 19:07:20 +0100681
tiernoe876f672020-02-13 14:34:48 +0000682 def set_vnfr_at_error(self, db_vnfrs, error_text):
683 try:
684 for db_vnfr in db_vnfrs.values():
685 vnfr_update = {"status": "ERROR"}
686 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
687 if "status" not in vdur:
688 vdur["status"] = "ERROR"
689 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
690 if error_text:
691 vdur["status-detailed"] = str(error_text)
garciadeblas5697b8b2021-03-24 09:17:02 +0100692 vnfr_update[
693 "vdur.{}.status-detailed".format(vdu_index)
694 ] = "ERROR"
tiernoe876f672020-02-13 14:34:48 +0000695 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
696 except DbException as e:
697 self.logger.error("Cannot update vnf. {}".format(e))
698
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # locate the RO vnf entry matching this member-vnf-index (for/else raises if absent)
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may return several addresses separated by ';'; keep only the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    # PDUs are not deployed at the VIM, so they carry no RO vm info
                    if vdur.get("pdu-type"):
                        continue
                    # find the RO vm with matching vdu id AND replica (count-index);
                    # vdur_RO_count_index counts previously seen replicas of the same vdu
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            # keep only the first of possibly ';'-separated addresses
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # map each vdur interface to the RO interface by internal_name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # update internal vlds of the vnf with the RO network info
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
tierno59d22d22018-09-25 18:10:19 +0200795
tierno5ee02052019-12-05 19:55:02 +0000796 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000797 """
798 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000799 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000800 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
801 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
802 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
803 """
tierno5ee02052019-12-05 19:55:02 +0000804 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
805 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000806 mapping = {}
807 ns_config_info = {"osm-config-mapping": mapping}
808 for vca in vca_deployed_list:
809 if not vca["member-vnf-index"]:
810 continue
811 if not vca["vdu_id"]:
812 mapping[vca["member-vnf-index"]] = vca["application"]
813 else:
garciadeblas5697b8b2021-03-24 09:17:02 +0100814 mapping[
815 "{}.{}.{}".format(
816 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
817 )
818 ] = vca["application"]
tiernoc3f2a822019-11-05 13:45:04 +0000819 return ns_config_info
820
    async def _instantiate_ng_ro(
        self,
        logging_text,
        nsr_id,
        nsd,
        db_nsr,
        db_nslcmop,
        db_vnfrs,
        db_vnfds,
        n2vc_key_list,
        stage,
        start_deploy,
        timeout_ns_deploy,
    ):
        """
        Builds the NG-RO "target" dictionary from the nsr/vnfr database records, the
        descriptors and the instantiation parameters, sends it to RO with self.RO.deploy
        and waits until RO reports completion, updating nsr/operation status meanwhile.
        :param logging_text: prefix for log messages
        :param nsr_id: id of the ns record being deployed
        :param nsd: ns descriptor content
        :param db_nsr: nsr database record; its vld/image/flavor lists seed the target
        :param db_nslcmop: current operation record; operationParams come from it, or from
            the last "instantiate" operation when this operation is not an instantiate
        :param db_vnfrs: dict member-vnf-index -> vnfr content
        :param db_vnfds: list of vnfd contents referenced by the vnfrs
        :param n2vc_key_list: ssh public keys to inject besides the instantiation ones
        :param stage: 3-item progress list persisted while waiting for RO
        :param start_deploy: epoch seconds when the deployment started
        :param timeout_ns_deploy: max seconds to wait for RO to finish
        :return: None. Exceptions from RO/_wait_ng_ro propagate to the caller.
        """

        # per-call cache of vim_accounts records, filled lazily by get_vim_account
        db_vims = {}

        def get_vim_account(vim_account_id):
            # returns the vim_accounts record, reading the database only once per id
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        # modify target_vld info with instantiation parameters
        def parse_vld_instantiation_params(
            target_vim, target_vld, vld_params, target_sdn
        ):
            if vld_params.get("ip-profile"):
                target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
                    "ip-profile"
                ]
            if vld_params.get("provider-network"):
                target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
                    "provider-network"
                ]
                if "sdn-ports" in vld_params["provider-network"] and target_sdn:
                    target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                        "provider-network"
                    ]["sdn-ports"]
            if vld_params.get("wimAccountId"):
                target_wim = "wim:{}".format(vld_params["wimAccountId"])
                target_vld["vim_info"][target_wim] = {}
            for param in ("vim-network-name", "vim-network-id"):
                if vld_params.get(param):
                    if isinstance(vld_params[param], dict):
                        # dict form: one network name/id per vim account
                        for vim, vim_net in vld_params[param].items():
                            other_target_vim = "vim:" + vim
                            populate_dict(
                                target_vld["vim_info"],
                                (other_target_vim, param.replace("-", "_")),
                                vim_net,
                            )
                    else:  # isinstance str
                        target_vld["vim_info"][target_vim][
                            param.replace("-", "_")
                        ] = vld_params[param]
            if vld_params.get("common_id"):
                target_vld["common_id"] = vld_params.get("common_id")

        # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
        def update_ns_vld_target(target, ns_params):
            for vnf_params in ns_params.get("vnf", ()):
                if vnf_params.get("vimAccountId"):
                    target_vnf = next(
                        (
                            vnfr
                            for vnfr in db_vnfrs.values()
                            if vnf_params["member-vnf-index"]
                            == vnfr["member-vnf-index-ref"]
                        ),
                        None,
                    )
                    # only the first vdur is inspected for ns-vld connectivity
                    vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
                    for a_index, a_vld in enumerate(target["ns"]["vld"]):
                        target_vld = find_in_list(
                            get_iterable(vdur, "interfaces"),
                            lambda iface: iface.get("ns-vld-id") == a_vld["name"],
                        )

                        vld_params = find_in_list(
                            get_iterable(ns_params, "vld"),
                            lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
                        )
                        if target_vld:

                            if vnf_params.get("vimAccountId") not in a_vld.get(
                                "vim_info", {}
                            ):
                                # reuse any already-known vim network name as default
                                target_vim_network_list = [
                                    v for _, v in a_vld.get("vim_info").items()
                                ]
                                target_vim_network_name = next(
                                    (
                                        item.get("vim_network_name", "")
                                        for item in target_vim_network_list
                                    ),
                                    "",
                                )

                                target["ns"]["vld"][a_index].get("vim_info").update(
                                    {
                                        "vim:{}".format(vnf_params["vimAccountId"]): {
                                            "vim_network_name": target_vim_network_name,
                                        }
                                    }
                                )

                                if vld_params:
                                    for param in ("vim-network-name", "vim-network-id"):
                                        if vld_params.get(param) and isinstance(
                                            vld_params[param], dict
                                        ):
                                            for vim, vim_net in vld_params[
                                                param
                                            ].items():
                                                other_target_vim = "vim:" + vim
                                                populate_dict(
                                                    target["ns"]["vld"][a_index].get(
                                                        "vim_info"
                                                    ),
                                                    (
                                                        other_target_vim,
                                                        param.replace("-", "_"),
                                                    ),
                                                    vim_net,
                                                )

        nslcmop_id = db_nslcmop["_id"]
        # skeleton of the RO deploy request; filled below
        target = {
            "name": db_nsr["name"],
            "ns": {"vld": []},
            "vnf": [],
            "image": deepcopy(db_nsr["image"]),
            "flavor": deepcopy(db_nsr["flavor"]),
            "action_id": nslcmop_id,
            "cloud_init_content": {},
        }
        for image in target["image"]:
            image["vim_info"] = {}
        for flavor in target["flavor"]:
            flavor["vim_info"] = {}
        if db_nsr.get("affinity-or-anti-affinity-group"):
            target["affinity-or-anti-affinity-group"] = deepcopy(
                db_nsr["affinity-or-anti-affinity-group"]
            )
            for affinity_or_anti_affinity_group in target[
                "affinity-or-anti-affinity-group"
            ]:
                affinity_or_anti_affinity_group["vim_info"] = {}

        if db_nslcmop.get("lcmOperationType") != "instantiate":
            # get parameters of instantiation:
            db_nslcmop_instantiate = self.db.get_list(
                "nslcmops",
                {
                    "nsInstanceId": db_nslcmop["nsInstanceId"],
                    "lcmOperationType": "instantiate",
                },
            )[-1]
            ns_params = db_nslcmop_instantiate.get("operationParams")
        else:
            ns_params = db_nslcmop.get("operationParams")
        ssh_keys_instantiation = ns_params.get("ssh_keys") or []
        ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])

        # cp2target maps "member_vnf:<index>.<cpd-id>" to the ns vld it connects to
        cp2target = {}
        for vld_index, vld in enumerate(db_nsr.get("vld")):
            target_vim = "vim:{}".format(ns_params["vimAccountId"])
            target_vld = {
                "id": vld["id"],
                "name": vld["name"],
                "mgmt-network": vld.get("mgmt-network", False),
                "type": vld.get("type"),
                "vim_info": {
                    target_vim: {
                        "vim_network_name": vld.get("vim-network-name"),
                        "vim_account_id": ns_params["vimAccountId"],
                    }
                },
            }
            # check if this network needs SDN assist
            if vld.get("pci-interfaces"):
                db_vim = get_vim_account(ns_params["vimAccountId"])
                sdnc_id = db_vim["config"].get("sdn-controller")
                if sdnc_id:
                    sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
                    target_sdn = "sdn:{}".format(sdnc_id)
                    target_vld["vim_info"][target_sdn] = {
                        "sdn": True,
                        "target_vim": target_vim,
                        "vlds": [sdn_vld],
                        "type": vld.get("type"),
                    }

            nsd_vnf_profiles = get_vnf_profiles(nsd)
            for nsd_vnf_profile in nsd_vnf_profiles:
                for cp in nsd_vnf_profile["virtual-link-connectivity"]:
                    if cp["virtual-link-profile-id"] == vld["id"]:
                        cp2target[
                            "member_vnf:{}.{}".format(
                                cp["constituent-cpd-id"][0][
                                    "constituent-base-element-id"
                                ],
                                cp["constituent-cpd-id"][0]["constituent-cpd-id"],
                            )
                        ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)

            # check at nsd descriptor, if there is an ip-profile
            vld_params = {}
            nsd_vlp = find_in_list(
                get_virtual_link_profiles(nsd),
                lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
                == vld["id"],
            )
            if (
                nsd_vlp
                and nsd_vlp.get("virtual-link-protocol-data")
                and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
            ):
                # translate descriptor l3-protocol-data keys to RO ip-profile keys
                ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
                    "l3-protocol-data"
                ]
                ip_profile_dest_data = {}
                if "ip-version" in ip_profile_source_data:
                    ip_profile_dest_data["ip-version"] = ip_profile_source_data[
                        "ip-version"
                    ]
                if "cidr" in ip_profile_source_data:
                    ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
                        "cidr"
                    ]
                if "gateway-ip" in ip_profile_source_data:
                    ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
                        "gateway-ip"
                    ]
                if "dhcp-enabled" in ip_profile_source_data:
                    ip_profile_dest_data["dhcp-params"] = {
                        "enabled": ip_profile_source_data["dhcp-enabled"]
                    }
                vld_params["ip-profile"] = ip_profile_dest_data

            # update vld_params with instantiation params
            vld_instantiation_params = find_in_list(
                get_iterable(ns_params, "vld"),
                lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
            )
            if vld_instantiation_params:
                vld_params.update(vld_instantiation_params)
            parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
            target["ns"]["vld"].append(target_vld)
        # Update the target ns_vld if vnf vim_account is overriden by instantiation params
        update_ns_vld_target(target, ns_params)

        for vnfr in db_vnfrs.values():
            vnfd = find_in_list(
                db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
            )
            vnf_params = find_in_list(
                get_iterable(ns_params, "vnf"),
                lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
            )
            target_vnf = deepcopy(vnfr)
            target_vim = "vim:{}".format(vnfr["vim-account-id"])
            for vld in target_vnf.get("vld", ()):
                # check if connected to a ns.vld, to fill target'
                vnf_cp = find_in_list(
                    vnfd.get("int-virtual-link-desc", ()),
                    lambda cpd: cpd.get("id") == vld["id"],
                )
                if vnf_cp:
                    ns_cp = "member_vnf:{}.{}".format(
                        vnfr["member-vnf-index-ref"], vnf_cp["id"]
                    )
                    if cp2target.get(ns_cp):
                        vld["target"] = cp2target[ns_cp]

                vld["vim_info"] = {
                    target_vim: {"vim_network_name": vld.get("vim-network-name")}
                }
                # check if this network needs SDN assist
                target_sdn = None
                if vld.get("pci-interfaces"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    sdnc_id = db_vim["config"].get("sdn-controller")
                    if sdnc_id:
                        sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
                        target_sdn = "sdn:{}".format(sdnc_id)
                        vld["vim_info"][target_sdn] = {
                            "sdn": True,
                            "target_vim": target_vim,
                            "vlds": [sdn_vld],
                            "type": vld.get("type"),
                        }

                # check at vnfd descriptor, if there is an ip-profile
                vld_params = {}
                vnfd_vlp = find_in_list(
                    get_virtual_link_profiles(vnfd),
                    lambda a_link_profile: a_link_profile["id"] == vld["id"],
                )
                if (
                    vnfd_vlp
                    and vnfd_vlp.get("virtual-link-protocol-data")
                    and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
                ):
                    # same descriptor-to-RO ip-profile key translation as for ns vlds
                    ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
                        "l3-protocol-data"
                    ]
                    ip_profile_dest_data = {}
                    if "ip-version" in ip_profile_source_data:
                        ip_profile_dest_data["ip-version"] = ip_profile_source_data[
                            "ip-version"
                        ]
                    if "cidr" in ip_profile_source_data:
                        ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
                            "cidr"
                        ]
                    if "gateway-ip" in ip_profile_source_data:
                        ip_profile_dest_data[
                            "gateway-address"
                        ] = ip_profile_source_data["gateway-ip"]
                    if "dhcp-enabled" in ip_profile_source_data:
                        ip_profile_dest_data["dhcp-params"] = {
                            "enabled": ip_profile_source_data["dhcp-enabled"]
                        }

                    vld_params["ip-profile"] = ip_profile_dest_data
                # update vld_params with instantiation params
                if vnf_params:
                    vld_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "internal-vld"),
                        lambda i_vld: i_vld["name"] == vld["id"],
                    )
                    if vld_instantiation_params:
                        vld_params.update(vld_instantiation_params)
                parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)

            vdur_list = []
            for vdur in target_vnf.get("vdur", ()):
                if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
                    continue  # This vdu must not be created
                vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}

                self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))

                if ssh_keys_all:
                    # inject ssh keys when vdu/vnf configuration declares ssh-access,
                    # or (instantiation keys only) when the vdu has a mgmt interface
                    vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
                    vnf_configuration = get_configuration(vnfd, vnfd["id"])
                    if (
                        vdu_configuration
                        and vdu_configuration.get("config-access")
                        and vdu_configuration.get("config-access").get("ssh-access")
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vdu_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif (
                        vnf_configuration
                        and vnf_configuration.get("config-access")
                        and vnf_configuration.get("config-access").get("ssh-access")
                        and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vnf_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif ssh_keys_instantiation and find_in_list(
                        vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
                    ):
                        vdur["ssh-keys"] = ssh_keys_instantiation

                self.logger.debug("NS > vdur > {}".format(vdur))

                vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
                # cloud-init
                if vdud.get("cloud-init-file"):
                    vdur["cloud-init"] = "{}:file:{}".format(
                        vnfd["_id"], vdud.get("cloud-init-file")
                    )
                    # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
                    if vdur["cloud-init"] not in target["cloud_init_content"]:
                        base_folder = vnfd["_admin"]["storage"]
                        if base_folder["pkg-dir"]:
                            cloud_init_file = "{}/{}/cloud_init/{}".format(
                                base_folder["folder"],
                                base_folder["pkg-dir"],
                                vdud.get("cloud-init-file"),
                            )
                        else:
                            cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                                base_folder["folder"],
                                vdud.get("cloud-init-file"),
                            )
                        with self.fs.file_open(cloud_init_file, "r") as ci_file:
                            target["cloud_init_content"][
                                vdur["cloud-init"]
                            ] = ci_file.read()
                elif vdud.get("cloud-init"):
                    vdur["cloud-init"] = "{}:vdu:{}".format(
                        vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
                    )
                    # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
                    target["cloud_init_content"][vdur["cloud-init"]] = vdud[
                        "cloud-init"
                    ]
                vdur["additionalParams"] = vdur.get("additionalParams") or {}
                deploy_params_vdu = self._format_additional_params(
                    vdur.get("additionalParams") or {}
                )
                deploy_params_vdu["OSM"] = get_osm_params(
                    vnfr, vdur["vdu-id-ref"], vdur["count-index"]
                )
                vdur["additionalParams"] = deploy_params_vdu

                # flavor
                ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
                if target_vim not in ns_flavor["vim_info"]:
                    ns_flavor["vim_info"][target_vim] = {}

                # deal with images
                # in case alternative images are provided we must check if they should be applied
                # for the vim_type, modify the vim_type taking into account
                ns_image_id = int(vdur["ns-image-id"])
                if vdur.get("alt-image-ids"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    vim_type = db_vim["vim_type"]
                    for alt_image_id in vdur.get("alt-image-ids"):
                        ns_alt_image = target["image"][int(alt_image_id)]
                        if vim_type == ns_alt_image.get("vim-type"):
                            # must use alternative image
                            self.logger.debug(
                                "use alternative image id: {}".format(alt_image_id)
                            )
                            ns_image_id = alt_image_id
                            vdur["ns-image-id"] = ns_image_id
                            break
                ns_image = target["image"][int(ns_image_id)]
                if target_vim not in ns_image["vim_info"]:
                    ns_image["vim_info"][target_vim] = {}

                # Affinity groups
                if vdur.get("affinity-or-anti-affinity-group-id"):
                    for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
                        ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
                        if target_vim not in ns_ags["vim_info"]:
                            ns_ags["vim_info"][target_vim] = {}

                vdur["vim_info"] = {target_vim: {}}
                # instantiation parameters
                if vnf_params:
                    vdu_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "vdu"),
                        lambda i_vdu: i_vdu["id"] == vdud["id"],
                    )
                    if vdu_instantiation_params:
                        # Parse the vdu_volumes from the instantiation params
                        vdu_volumes = get_volumes_from_instantiation_params(
                            vdu_instantiation_params, vdud
                        )
                        vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
                vdur_list.append(vdur)
            target_vnf["vdur"] = vdur_list
            target["vnf"].append(target_vnf)

        self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
        desc = await self.RO.deploy(nsr_id, target)
        self.logger.debug("RO return > {}".format(desc))
        action_id = desc["action_id"]
        await self._wait_ng_ro(
            nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage,
            operation="instantiation"
        )

        # Updating NSR
        db_nsr_update = {
            "_admin.deployed.RO.operational-status": "running",
            "detailed-status": " ".join(stage),
        }
        # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)
        self.logger.debug(
            logging_text + "ns deployed at RO. RO_id={}".format(action_id)
        )
        return
1309
garciadeblas5697b8b2021-03-24 09:17:02 +01001310 async def _wait_ng_ro(
1311 self,
1312 nsr_id,
1313 action_id,
1314 nslcmop_id=None,
1315 start_time=None,
1316 timeout=600,
1317 stage=None,
garciadeblas07f4e4c2022-06-09 09:42:58 +02001318 operation=None,
garciadeblas5697b8b2021-03-24 09:17:02 +01001319 ):
tierno69f0d382020-05-07 13:08:09 +00001320 detailed_status_old = None
1321 db_nsr_update = {}
tierno2357f4e2020-10-19 16:38:59 +00001322 start_time = start_time or time()
tierno69f0d382020-05-07 13:08:09 +00001323 while time() <= start_time + timeout:
garciadeblas07f4e4c2022-06-09 09:42:58 +02001324 desc_status = await self.op_status_map[operation](nsr_id, action_id)
bravof922c4172020-11-24 21:21:43 -03001325 self.logger.debug("Wait NG RO > {}".format(desc_status))
tierno69f0d382020-05-07 13:08:09 +00001326 if desc_status["status"] == "FAILED":
1327 raise NgRoException(desc_status["details"])
1328 elif desc_status["status"] == "BUILD":
tierno2357f4e2020-10-19 16:38:59 +00001329 if stage:
1330 stage[2] = "VIM: ({})".format(desc_status["details"])
tierno69f0d382020-05-07 13:08:09 +00001331 elif desc_status["status"] == "DONE":
tierno2357f4e2020-10-19 16:38:59 +00001332 if stage:
1333 stage[2] = "Deployed at VIM"
tierno69f0d382020-05-07 13:08:09 +00001334 break
1335 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001336 assert False, "ROclient.check_ns_status returns unknown {}".format(
1337 desc_status["status"]
1338 )
tierno2357f4e2020-10-19 16:38:59 +00001339 if stage and nslcmop_id and stage[2] != detailed_status_old:
tierno69f0d382020-05-07 13:08:09 +00001340 detailed_status_old = stage[2]
1341 db_nsr_update["detailed-status"] = " ".join(stage)
1342 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1343 self._write_op_status(nslcmop_id, stage)
bravof922c4172020-11-24 21:21:43 -03001344 await asyncio.sleep(15, loop=self.loop)
tierno69f0d382020-05-07 13:08:09 +00001345 else: # timeout_ns_deploy
1346 raise NgRoException("Timeout waiting ns to deploy")
1347
garciadeblas5697b8b2021-03-24 09:17:02 +01001348 async def _terminate_ng_ro(
1349 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1350 ):
tierno69f0d382020-05-07 13:08:09 +00001351 db_nsr_update = {}
1352 failed_detail = []
1353 action_id = None
1354 start_deploy = time()
1355 try:
1356 target = {
1357 "ns": {"vld": []},
1358 "vnf": [],
1359 "image": [],
1360 "flavor": [],
garciadeblas5697b8b2021-03-24 09:17:02 +01001361 "action_id": nslcmop_id,
tierno69f0d382020-05-07 13:08:09 +00001362 }
1363 desc = await self.RO.deploy(nsr_id, target)
1364 action_id = desc["action_id"]
1365 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1366 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
garciadeblas5697b8b2021-03-24 09:17:02 +01001367 self.logger.debug(
1368 logging_text
1369 + "ns terminate action at RO. action_id={}".format(action_id)
1370 )
tierno69f0d382020-05-07 13:08:09 +00001371
1372 # wait until done
1373 delete_timeout = 20 * 60 # 20 minutes
garciadeblas5697b8b2021-03-24 09:17:02 +01001374 await self._wait_ng_ro(
garciadeblas07f4e4c2022-06-09 09:42:58 +02001375 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage,
1376 operation="termination"
garciadeblas5697b8b2021-03-24 09:17:02 +01001377 )
tierno69f0d382020-05-07 13:08:09 +00001378
1379 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1380 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1381 # delete all nsr
1382 await self.RO.delete(nsr_id)
1383 except Exception as e:
1384 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1385 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1386 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1387 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
garciadeblas5697b8b2021-03-24 09:17:02 +01001388 self.logger.debug(
1389 logging_text + "RO_action_id={} already deleted".format(action_id)
1390 )
tierno69f0d382020-05-07 13:08:09 +00001391 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1392 failed_detail.append("delete conflict: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01001393 self.logger.debug(
1394 logging_text
1395 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1396 )
tierno69f0d382020-05-07 13:08:09 +00001397 else:
1398 failed_detail.append("delete error: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01001399 self.logger.error(
1400 logging_text
1401 + "RO_action_id={} delete error: {}".format(action_id, e)
1402 )
tierno69f0d382020-05-07 13:08:09 +00001403
1404 if failed_detail:
1405 stage[2] = "Error deleting from VIM"
1406 else:
1407 stage[2] = "Deleted from VIM"
1408 db_nsr_update["detailed-status"] = " ".join(stage)
1409 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1410 self._write_op_status(nslcmop_id, stage)
1411
1412 if failed_detail:
1413 raise LcmException("; ".join(failed_detail))
1414 return
1415
garciadeblas5697b8b2021-03-24 09:17:02 +01001416 async def instantiate_RO(
1417 self,
1418 logging_text,
1419 nsr_id,
1420 nsd,
1421 db_nsr,
1422 db_nslcmop,
1423 db_vnfrs,
1424 db_vnfds,
1425 n2vc_key_list,
1426 stage,
1427 ):
tiernoe95ed362020-04-23 08:24:57 +00001428 """
1429 Instantiate at RO
1430 :param logging_text: preffix text to use at logging
1431 :param nsr_id: nsr identity
1432 :param nsd: database content of ns descriptor
1433 :param db_nsr: database content of ns record
1434 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1435 :param db_vnfrs:
bravof922c4172020-11-24 21:21:43 -03001436 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
tiernoe95ed362020-04-23 08:24:57 +00001437 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1438 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1439 :return: None or exception
1440 """
tiernoe876f672020-02-13 14:34:48 +00001441 try:
tiernoe876f672020-02-13 14:34:48 +00001442 start_deploy = time()
1443 ns_params = db_nslcmop.get("operationParams")
1444 if ns_params and ns_params.get("timeout_ns_deploy"):
1445 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1446 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001447 timeout_ns_deploy = self.timeout.get(
1448 "ns_deploy", self.timeout_ns_deploy
1449 )
quilesj7e13aeb2019-10-08 13:34:55 +02001450
tiernoe876f672020-02-13 14:34:48 +00001451 # Check for and optionally request placement optimization. Database will be updated if placement activated
1452 stage[2] = "Waiting for Placement."
tierno8790a3d2020-04-23 22:49:52 +00001453 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1454 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1455 for vnfr in db_vnfrs.values():
1456 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1457 break
1458 else:
1459 ns_params["vimAccountId"] == vnfr["vim-account-id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001460
garciadeblas5697b8b2021-03-24 09:17:02 +01001461 return await self._instantiate_ng_ro(
1462 logging_text,
1463 nsr_id,
1464 nsd,
1465 db_nsr,
1466 db_nslcmop,
1467 db_vnfrs,
1468 db_vnfds,
1469 n2vc_key_list,
1470 stage,
1471 start_deploy,
1472 timeout_ns_deploy,
1473 )
tierno2357f4e2020-10-19 16:38:59 +00001474 except Exception as e:
tierno067e04a2020-03-31 12:53:13 +00001475 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +00001476 self.set_vnfr_at_error(db_vnfrs, str(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01001477 self.logger.error(
1478 "Error deploying at VIM {}".format(e),
1479 exc_info=not isinstance(
1480 e,
1481 (
1482 ROclient.ROClientException,
1483 LcmException,
1484 DbException,
1485 NgRoException,
1486 ),
1487 ),
1488 )
tiernoe876f672020-02-13 14:34:48 +00001489 raise
quilesj7e13aeb2019-10-08 13:34:55 +02001490
tierno7ecbc342020-09-21 14:05:39 +00001491 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1492 """
1493 Wait for kdu to be up, get ip address
1494 :param logging_text: prefix use for logging
1495 :param nsr_id:
1496 :param vnfr_id:
1497 :param kdu_name:
David Garcia78b6e6d2022-04-29 05:50:46 +02001498 :return: IP address, K8s services
tierno7ecbc342020-09-21 14:05:39 +00001499 """
1500
1501 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1502 nb_tries = 0
1503
1504 while nb_tries < 360:
1505 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
garciadeblas5697b8b2021-03-24 09:17:02 +01001506 kdur = next(
1507 (
1508 x
1509 for x in get_iterable(db_vnfr, "kdur")
1510 if x.get("kdu-name") == kdu_name
1511 ),
1512 None,
1513 )
tierno7ecbc342020-09-21 14:05:39 +00001514 if not kdur:
garciadeblas5697b8b2021-03-24 09:17:02 +01001515 raise LcmException(
1516 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1517 )
tierno7ecbc342020-09-21 14:05:39 +00001518 if kdur.get("status"):
1519 if kdur["status"] in ("READY", "ENABLED"):
David Garcia78b6e6d2022-04-29 05:50:46 +02001520 return kdur.get("ip-address"), kdur.get("services")
tierno7ecbc342020-09-21 14:05:39 +00001521 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001522 raise LcmException(
1523 "target KDU={} is in error state".format(kdu_name)
1524 )
tierno7ecbc342020-09-21 14:05:39 +00001525
1526 await asyncio.sleep(10, loop=self.loop)
1527 nb_tries += 1
1528 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1529
garciadeblas5697b8b2021-03-24 09:17:02 +01001530 async def wait_vm_up_insert_key_ro(
1531 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1532 ):
tiernoa5088192019-11-26 16:12:53 +00001533 """
1534 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1535 :param logging_text: prefix use for logging
1536 :param nsr_id:
1537 :param vnfr_id:
1538 :param vdu_id:
1539 :param vdu_index:
1540 :param pub_key: public ssh key to inject, None to skip
1541 :param user: user to apply the public ssh key
1542 :return: IP address
1543 """
quilesj7e13aeb2019-10-08 13:34:55 +02001544
tierno2357f4e2020-10-19 16:38:59 +00001545 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
tiernod8323042019-08-09 11:32:23 +00001546 ro_nsr_id = None
1547 ip_address = None
1548 nb_tries = 0
1549 target_vdu_id = None
quilesj3149f262019-12-03 10:58:10 +00001550 ro_retries = 0
quilesj7e13aeb2019-10-08 13:34:55 +02001551
tiernod8323042019-08-09 11:32:23 +00001552 while True:
quilesj7e13aeb2019-10-08 13:34:55 +02001553
quilesj3149f262019-12-03 10:58:10 +00001554 ro_retries += 1
1555 if ro_retries >= 360: # 1 hour
garciadeblas5697b8b2021-03-24 09:17:02 +01001556 raise LcmException(
1557 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1558 )
quilesj3149f262019-12-03 10:58:10 +00001559
tiernod8323042019-08-09 11:32:23 +00001560 await asyncio.sleep(10, loop=self.loop)
quilesj7e13aeb2019-10-08 13:34:55 +02001561
1562 # get ip address
tiernod8323042019-08-09 11:32:23 +00001563 if not target_vdu_id:
1564 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
quilesj3149f262019-12-03 10:58:10 +00001565
1566 if not vdu_id: # for the VNF case
tiernoe876f672020-02-13 14:34:48 +00001567 if db_vnfr.get("status") == "ERROR":
garciadeblas5697b8b2021-03-24 09:17:02 +01001568 raise LcmException(
1569 "Cannot inject ssh-key because target VNF is in error state"
1570 )
tiernod8323042019-08-09 11:32:23 +00001571 ip_address = db_vnfr.get("ip-address")
1572 if not ip_address:
1573 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01001574 vdur = next(
1575 (
1576 x
1577 for x in get_iterable(db_vnfr, "vdur")
1578 if x.get("ip-address") == ip_address
1579 ),
1580 None,
1581 )
quilesj3149f262019-12-03 10:58:10 +00001582 else: # VDU case
garciadeblas5697b8b2021-03-24 09:17:02 +01001583 vdur = next(
1584 (
1585 x
1586 for x in get_iterable(db_vnfr, "vdur")
1587 if x.get("vdu-id-ref") == vdu_id
1588 and x.get("count-index") == vdu_index
1589 ),
1590 None,
1591 )
quilesj3149f262019-12-03 10:58:10 +00001592
garciadeblas5697b8b2021-03-24 09:17:02 +01001593 if (
1594 not vdur and len(db_vnfr.get("vdur", ())) == 1
1595 ): # If only one, this should be the target vdu
tierno0e8c3f02020-03-12 17:18:21 +00001596 vdur = db_vnfr["vdur"][0]
quilesj3149f262019-12-03 10:58:10 +00001597 if not vdur:
garciadeblas5697b8b2021-03-24 09:17:02 +01001598 raise LcmException(
1599 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1600 vnfr_id, vdu_id, vdu_index
1601 )
1602 )
tierno2357f4e2020-10-19 16:38:59 +00001603 # New generation RO stores information at "vim_info"
1604 ng_ro_status = None
David Garciaa8bbe672020-11-19 13:06:54 +01001605 target_vim = None
tierno2357f4e2020-10-19 16:38:59 +00001606 if vdur.get("vim_info"):
garciadeblas5697b8b2021-03-24 09:17:02 +01001607 target_vim = next(
1608 t for t in vdur["vim_info"]
1609 ) # there should be only one key
tierno2357f4e2020-10-19 16:38:59 +00001610 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
garciadeblas5697b8b2021-03-24 09:17:02 +01001611 if (
1612 vdur.get("pdu-type")
1613 or vdur.get("status") == "ACTIVE"
1614 or ng_ro_status == "ACTIVE"
1615 ):
quilesj3149f262019-12-03 10:58:10 +00001616 ip_address = vdur.get("ip-address")
1617 if not ip_address:
1618 continue
1619 target_vdu_id = vdur["vdu-id-ref"]
bravof922c4172020-11-24 21:21:43 -03001620 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
garciadeblas5697b8b2021-03-24 09:17:02 +01001621 raise LcmException(
1622 "Cannot inject ssh-key because target VM is in error state"
1623 )
quilesj3149f262019-12-03 10:58:10 +00001624
tiernod8323042019-08-09 11:32:23 +00001625 if not target_vdu_id:
1626 continue
tiernod8323042019-08-09 11:32:23 +00001627
quilesj7e13aeb2019-10-08 13:34:55 +02001628 # inject public key into machine
1629 if pub_key and user:
tierno2357f4e2020-10-19 16:38:59 +00001630 self.logger.debug(logging_text + "Inserting RO key")
bravof922c4172020-11-24 21:21:43 -03001631 self.logger.debug("SSH > PubKey > {}".format(pub_key))
tierno0e8c3f02020-03-12 17:18:21 +00001632 if vdur.get("pdu-type"):
1633 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1634 return ip_address
quilesj7e13aeb2019-10-08 13:34:55 +02001635 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01001636 ro_vm_id = "{}-{}".format(
1637 db_vnfr["member-vnf-index-ref"], target_vdu_id
1638 ) # TODO add vdu_index
tierno69f0d382020-05-07 13:08:09 +00001639 if self.ng_ro:
garciadeblas5697b8b2021-03-24 09:17:02 +01001640 target = {
1641 "action": {
1642 "action": "inject_ssh_key",
1643 "key": pub_key,
1644 "user": user,
1645 },
1646 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1647 }
tierno2357f4e2020-10-19 16:38:59 +00001648 desc = await self.RO.deploy(nsr_id, target)
1649 action_id = desc["action_id"]
garciadeblas07f4e4c2022-06-09 09:42:58 +02001650 await self._wait_ng_ro(nsr_id, action_id, timeout=600, operation="instantiation")
tierno2357f4e2020-10-19 16:38:59 +00001651 break
tierno69f0d382020-05-07 13:08:09 +00001652 else:
tierno2357f4e2020-10-19 16:38:59 +00001653 # wait until NS is deployed at RO
1654 if not ro_nsr_id:
1655 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
garciadeblas5697b8b2021-03-24 09:17:02 +01001656 ro_nsr_id = deep_get(
1657 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1658 )
tierno2357f4e2020-10-19 16:38:59 +00001659 if not ro_nsr_id:
1660 continue
tierno69f0d382020-05-07 13:08:09 +00001661 result_dict = await self.RO.create_action(
1662 item="ns",
1663 item_id_name=ro_nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01001664 descriptor={
1665 "add_public_key": pub_key,
1666 "vms": [ro_vm_id],
1667 "user": user,
1668 },
tierno69f0d382020-05-07 13:08:09 +00001669 )
1670 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1671 if not result_dict or not isinstance(result_dict, dict):
garciadeblas5697b8b2021-03-24 09:17:02 +01001672 raise LcmException(
1673 "Unknown response from RO when injecting key"
1674 )
tierno69f0d382020-05-07 13:08:09 +00001675 for result in result_dict.values():
1676 if result.get("vim_result") == 200:
1677 break
1678 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001679 raise ROclient.ROClientException(
1680 "error injecting key: {}".format(
1681 result.get("description")
1682 )
1683 )
tierno69f0d382020-05-07 13:08:09 +00001684 break
1685 except NgRoException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01001686 raise LcmException(
1687 "Reaching max tries injecting key. Error: {}".format(e)
1688 )
quilesj7e13aeb2019-10-08 13:34:55 +02001689 except ROclient.ROClientException as e:
tiernoa5088192019-11-26 16:12:53 +00001690 if not nb_tries:
garciadeblas5697b8b2021-03-24 09:17:02 +01001691 self.logger.debug(
1692 logging_text
1693 + "error injecting key: {}. Retrying until {} seconds".format(
1694 e, 20 * 10
1695 )
1696 )
quilesj7e13aeb2019-10-08 13:34:55 +02001697 nb_tries += 1
tiernoa5088192019-11-26 16:12:53 +00001698 if nb_tries >= 20:
garciadeblas5697b8b2021-03-24 09:17:02 +01001699 raise LcmException(
1700 "Reaching max tries injecting key. Error: {}".format(e)
1701 )
quilesj7e13aeb2019-10-08 13:34:55 +02001702 else:
quilesj7e13aeb2019-10-08 13:34:55 +02001703 break
1704
1705 return ip_address
1706
tierno5ee02052019-12-05 19:55:02 +00001707 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1708 """
1709 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1710 """
1711 my_vca = vca_deployed_list[vca_index]
1712 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
quilesj3655ae02019-12-12 16:08:35 +00001713 # vdu or kdu: no dependencies
tierno5ee02052019-12-05 19:55:02 +00001714 return
1715 timeout = 300
1716 while timeout >= 0:
quilesj3655ae02019-12-12 16:08:35 +00001717 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1718 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1719 configuration_status_list = db_nsr["configurationStatus"]
1720 for index, vca_deployed in enumerate(configuration_status_list):
tierno5ee02052019-12-05 19:55:02 +00001721 if index == vca_index:
quilesj3655ae02019-12-12 16:08:35 +00001722 # myself
tierno5ee02052019-12-05 19:55:02 +00001723 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01001724 if not my_vca.get("member-vnf-index") or (
1725 vca_deployed.get("member-vnf-index")
1726 == my_vca.get("member-vnf-index")
1727 ):
quilesj3655ae02019-12-12 16:08:35 +00001728 internal_status = configuration_status_list[index].get("status")
garciadeblas5697b8b2021-03-24 09:17:02 +01001729 if internal_status == "READY":
quilesj3655ae02019-12-12 16:08:35 +00001730 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01001731 elif internal_status == "BROKEN":
1732 raise LcmException(
1733 "Configuration aborted because dependent charm/s has failed"
1734 )
quilesj3655ae02019-12-12 16:08:35 +00001735 else:
1736 break
tierno5ee02052019-12-05 19:55:02 +00001737 else:
quilesj3655ae02019-12-12 16:08:35 +00001738 # no dependencies, return
tierno5ee02052019-12-05 19:55:02 +00001739 return
1740 await asyncio.sleep(10)
1741 timeout -= 1
tierno5ee02052019-12-05 19:55:02 +00001742
1743 raise LcmException("Configuration aborted because dependent charm/s timeout")
1744
David Garciac1fe90a2021-03-31 19:12:02 +02001745 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
David Garcia5506c182021-10-21 17:03:48 +02001746 vca_id = None
1747 if db_vnfr:
1748 vca_id = deep_get(db_vnfr, ("vca-id",))
1749 elif db_nsr:
1750 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1751 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1752 return vca_id
David Garciac1fe90a2021-03-31 19:12:02 +02001753
garciadeblas5697b8b2021-03-24 09:17:02 +01001754 async def instantiate_N2VC(
1755 self,
1756 logging_text,
1757 vca_index,
1758 nsi_id,
1759 db_nsr,
1760 db_vnfr,
1761 vdu_id,
1762 kdu_name,
1763 vdu_index,
1764 config_descriptor,
1765 deploy_params,
1766 base_folder,
1767 nslcmop_id,
1768 stage,
1769 vca_type,
1770 vca_name,
1771 ee_config_descriptor,
1772 ):
tiernod8323042019-08-09 11:32:23 +00001773 nsr_id = db_nsr["_id"]
1774 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
tiernoda6fb102019-11-23 00:36:52 +00001775 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernod8323042019-08-09 11:32:23 +00001776 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
tiernob996d942020-07-03 14:52:28 +00001777 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
quilesj7e13aeb2019-10-08 13:34:55 +02001778 db_dict = {
garciadeblas5697b8b2021-03-24 09:17:02 +01001779 "collection": "nsrs",
1780 "filter": {"_id": nsr_id},
1781 "path": db_update_entry,
quilesj7e13aeb2019-10-08 13:34:55 +02001782 }
tiernod8323042019-08-09 11:32:23 +00001783 step = ""
1784 try:
quilesj3655ae02019-12-12 16:08:35 +00001785
garciadeblas5697b8b2021-03-24 09:17:02 +01001786 element_type = "NS"
quilesj3655ae02019-12-12 16:08:35 +00001787 element_under_configuration = nsr_id
1788
tiernod8323042019-08-09 11:32:23 +00001789 vnfr_id = None
1790 if db_vnfr:
1791 vnfr_id = db_vnfr["_id"]
tiernob996d942020-07-03 14:52:28 +00001792 osm_config["osm"]["vnf_id"] = vnfr_id
tiernod8323042019-08-09 11:32:23 +00001793
garciadeblas5697b8b2021-03-24 09:17:02 +01001794 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001795
aktas98488ed2021-07-29 17:42:49 +03001796 if vca_type == "native_charm":
1797 index_number = 0
1798 else:
1799 index_number = vdu_index or 0
1800
tiernod8323042019-08-09 11:32:23 +00001801 if vnfr_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01001802 element_type = "VNF"
quilesj3655ae02019-12-12 16:08:35 +00001803 element_under_configuration = vnfr_id
aktas98488ed2021-07-29 17:42:49 +03001804 namespace += ".{}-{}".format(vnfr_id, index_number)
tiernod8323042019-08-09 11:32:23 +00001805 if vdu_id:
aktas98488ed2021-07-29 17:42:49 +03001806 namespace += ".{}-{}".format(vdu_id, index_number)
garciadeblas5697b8b2021-03-24 09:17:02 +01001807 element_type = "VDU"
aktas98488ed2021-07-29 17:42:49 +03001808 element_under_configuration = "{}-{}".format(vdu_id, index_number)
tiernob996d942020-07-03 14:52:28 +00001809 osm_config["osm"]["vdu_id"] = vdu_id
tierno51183952020-04-03 15:48:18 +00001810 elif kdu_name:
aktas98488ed2021-07-29 17:42:49 +03001811 namespace += ".{}".format(kdu_name)
garciadeblas5697b8b2021-03-24 09:17:02 +01001812 element_type = "KDU"
tierno51183952020-04-03 15:48:18 +00001813 element_under_configuration = kdu_name
tiernob996d942020-07-03 14:52:28 +00001814 osm_config["osm"]["kdu_name"] = kdu_name
tiernod8323042019-08-09 11:32:23 +00001815
1816 # Get artifact path
bravof486707f2021-11-08 17:18:50 -03001817 if base_folder["pkg-dir"]:
1818 artifact_path = "{}/{}/{}/{}".format(
1819 base_folder["folder"],
1820 base_folder["pkg-dir"],
1821 "charms"
aticig15db6142022-01-24 12:51:26 +03001822 if vca_type
1823 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
bravof486707f2021-11-08 17:18:50 -03001824 else "helm-charts",
1825 vca_name,
1826 )
1827 else:
1828 artifact_path = "{}/Scripts/{}/{}/".format(
1829 base_folder["folder"],
1830 "charms"
aticig15db6142022-01-24 12:51:26 +03001831 if vca_type
1832 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
bravof486707f2021-11-08 17:18:50 -03001833 else "helm-charts",
1834 vca_name,
1835 )
bravof922c4172020-11-24 21:21:43 -03001836
1837 self.logger.debug("Artifact path > {}".format(artifact_path))
1838
tiernoa278b842020-07-08 15:33:55 +00001839 # get initial_config_primitive_list that applies to this element
garciadeblas5697b8b2021-03-24 09:17:02 +01001840 initial_config_primitive_list = config_descriptor.get(
1841 "initial-config-primitive"
1842 )
tiernoa278b842020-07-08 15:33:55 +00001843
garciadeblas5697b8b2021-03-24 09:17:02 +01001844 self.logger.debug(
1845 "Initial config primitive list > {}".format(
1846 initial_config_primitive_list
1847 )
1848 )
bravof922c4172020-11-24 21:21:43 -03001849
tiernoa278b842020-07-08 15:33:55 +00001850 # add config if not present for NS charm
1851 ee_descriptor_id = ee_config_descriptor.get("id")
bravof922c4172020-11-24 21:21:43 -03001852 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
garciadeblas5697b8b2021-03-24 09:17:02 +01001853 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1854 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1855 )
tiernod8323042019-08-09 11:32:23 +00001856
garciadeblas5697b8b2021-03-24 09:17:02 +01001857 self.logger.debug(
1858 "Initial config primitive list #2 > {}".format(
1859 initial_config_primitive_list
1860 )
1861 )
tierno588547c2020-07-01 15:30:20 +00001862 # n2vc_redesign STEP 3.1
tierno588547c2020-07-01 15:30:20 +00001863 # find old ee_id if exists
1864 ee_id = vca_deployed.get("ee_id")
tiernod8323042019-08-09 11:32:23 +00001865
David Garciac1fe90a2021-03-31 19:12:02 +02001866 vca_id = self.get_vca_id(db_vnfr, db_nsr)
tierno588547c2020-07-01 15:30:20 +00001867 # create or register execution environment in VCA
lloretgalleg18ebc3a2020-10-22 09:54:51 +00001868 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
quilesj7e13aeb2019-10-08 13:34:55 +02001869
tierno588547c2020-07-01 15:30:20 +00001870 self._write_configuration_status(
1871 nsr_id=nsr_id,
1872 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01001873 status="CREATING",
tierno588547c2020-07-01 15:30:20 +00001874 element_under_configuration=element_under_configuration,
garciadeblas5697b8b2021-03-24 09:17:02 +01001875 element_type=element_type,
tierno588547c2020-07-01 15:30:20 +00001876 )
tiernod8323042019-08-09 11:32:23 +00001877
tierno588547c2020-07-01 15:30:20 +00001878 step = "create execution environment"
garciadeblas5697b8b2021-03-24 09:17:02 +01001879 self.logger.debug(logging_text + step)
David Garciaaae391f2020-11-09 11:12:54 +01001880
1881 ee_id = None
1882 credentials = None
1883 if vca_type == "k8s_proxy_charm":
1884 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
garciadeblas5697b8b2021-03-24 09:17:02 +01001885 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
David Garciaaae391f2020-11-09 11:12:54 +01001886 namespace=namespace,
1887 artifact_path=artifact_path,
1888 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02001889 vca_id=vca_id,
David Garciaaae391f2020-11-09 11:12:54 +01001890 )
garciadeblas5697b8b2021-03-24 09:17:02 +01001891 elif vca_type == "helm" or vca_type == "helm-v3":
1892 ee_id, credentials = await self.vca_map[
1893 vca_type
1894 ].create_execution_environment(
bravof922c4172020-11-24 21:21:43 -03001895 namespace=namespace,
1896 reuse_ee_id=ee_id,
1897 db_dict=db_dict,
lloretgalleg18cb3cb2020-12-10 14:21:10 +00001898 config=osm_config,
1899 artifact_path=artifact_path,
garciadeblas5697b8b2021-03-24 09:17:02 +01001900 vca_type=vca_type,
bravof922c4172020-11-24 21:21:43 -03001901 )
garciadeblas5697b8b2021-03-24 09:17:02 +01001902 else:
1903 ee_id, credentials = await self.vca_map[
1904 vca_type
1905 ].create_execution_environment(
David Garciaaae391f2020-11-09 11:12:54 +01001906 namespace=namespace,
1907 reuse_ee_id=ee_id,
1908 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02001909 vca_id=vca_id,
David Garciaaae391f2020-11-09 11:12:54 +01001910 )
quilesj3655ae02019-12-12 16:08:35 +00001911
tierno588547c2020-07-01 15:30:20 +00001912 elif vca_type == "native_charm":
1913 step = "Waiting to VM being up and getting IP address"
1914 self.logger.debug(logging_text + step)
garciadeblas5697b8b2021-03-24 09:17:02 +01001915 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1916 logging_text,
1917 nsr_id,
1918 vnfr_id,
1919 vdu_id,
1920 vdu_index,
1921 user=None,
1922 pub_key=None,
1923 )
tierno588547c2020-07-01 15:30:20 +00001924 credentials = {"hostname": rw_mgmt_ip}
1925 # get username
garciadeblas5697b8b2021-03-24 09:17:02 +01001926 username = deep_get(
1927 config_descriptor, ("config-access", "ssh-access", "default-user")
1928 )
tierno588547c2020-07-01 15:30:20 +00001929 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1930 # merged. Meanwhile let's get username from initial-config-primitive
tiernoa278b842020-07-08 15:33:55 +00001931 if not username and initial_config_primitive_list:
1932 for config_primitive in initial_config_primitive_list:
tierno588547c2020-07-01 15:30:20 +00001933 for param in config_primitive.get("parameter", ()):
1934 if param["name"] == "ssh-username":
1935 username = param["value"]
1936 break
1937 if not username:
garciadeblas5697b8b2021-03-24 09:17:02 +01001938 raise LcmException(
1939 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1940 "'config-access.ssh-access.default-user'"
1941 )
tierno588547c2020-07-01 15:30:20 +00001942 credentials["username"] = username
1943 # n2vc_redesign STEP 3.2
quilesj3655ae02019-12-12 16:08:35 +00001944
tierno588547c2020-07-01 15:30:20 +00001945 self._write_configuration_status(
1946 nsr_id=nsr_id,
1947 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01001948 status="REGISTERING",
tierno588547c2020-07-01 15:30:20 +00001949 element_under_configuration=element_under_configuration,
garciadeblas5697b8b2021-03-24 09:17:02 +01001950 element_type=element_type,
tierno588547c2020-07-01 15:30:20 +00001951 )
quilesj3655ae02019-12-12 16:08:35 +00001952
tierno588547c2020-07-01 15:30:20 +00001953 step = "register execution environment {}".format(credentials)
1954 self.logger.debug(logging_text + step)
1955 ee_id = await self.vca_map[vca_type].register_execution_environment(
David Garciaaae391f2020-11-09 11:12:54 +01001956 credentials=credentials,
1957 namespace=namespace,
1958 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02001959 vca_id=vca_id,
David Garciaaae391f2020-11-09 11:12:54 +01001960 )
tierno3bedc9b2019-11-27 15:46:57 +00001961
tierno588547c2020-07-01 15:30:20 +00001962 # for compatibility with MON/POL modules, the need model and application name at database
1963 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
garciadeblas5697b8b2021-03-24 09:17:02 +01001964 ee_id_parts = ee_id.split(".")
tierno588547c2020-07-01 15:30:20 +00001965 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1966 if len(ee_id_parts) >= 2:
1967 model_name = ee_id_parts[0]
1968 application_name = ee_id_parts[1]
1969 db_nsr_update[db_update_entry + "model"] = model_name
1970 db_nsr_update[db_update_entry + "application"] = application_name
tiernod8323042019-08-09 11:32:23 +00001971
1972 # n2vc_redesign STEP 3.3
tiernod8323042019-08-09 11:32:23 +00001973 step = "Install configuration Software"
quilesj3655ae02019-12-12 16:08:35 +00001974
tiernoc231a872020-01-21 08:49:05 +00001975 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001976 nsr_id=nsr_id,
1977 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01001978 status="INSTALLING SW",
quilesj3655ae02019-12-12 16:08:35 +00001979 element_under_configuration=element_under_configuration,
tierno51183952020-04-03 15:48:18 +00001980 element_type=element_type,
garciadeblas5697b8b2021-03-24 09:17:02 +01001981 other_update=db_nsr_update,
quilesj3655ae02019-12-12 16:08:35 +00001982 )
1983
tierno3bedc9b2019-11-27 15:46:57 +00001984 # TODO check if already done
quilesj7e13aeb2019-10-08 13:34:55 +02001985 self.logger.debug(logging_text + step)
David Garcia18a63322020-04-01 16:14:59 +02001986 config = None
tierno588547c2020-07-01 15:30:20 +00001987 if vca_type == "native_charm":
garciadeblas5697b8b2021-03-24 09:17:02 +01001988 config_primitive = next(
1989 (p for p in initial_config_primitive_list if p["name"] == "config"),
1990 None,
1991 )
tiernoa278b842020-07-08 15:33:55 +00001992 if config_primitive:
1993 config = self._map_primitive_params(
garciadeblas5697b8b2021-03-24 09:17:02 +01001994 config_primitive, {}, deploy_params
tiernoa278b842020-07-08 15:33:55 +00001995 )
tierno588547c2020-07-01 15:30:20 +00001996 num_units = 1
1997 if vca_type == "lxc_proxy_charm":
1998 if element_type == "NS":
1999 num_units = db_nsr.get("config-units") or 1
2000 elif element_type == "VNF":
2001 num_units = db_vnfr.get("config-units") or 1
2002 elif element_type == "VDU":
2003 for v in db_vnfr["vdur"]:
2004 if vdu_id == v["vdu-id-ref"]:
2005 num_units = v.get("config-units") or 1
2006 break
David Garciaaae391f2020-11-09 11:12:54 +01002007 if vca_type != "k8s_proxy_charm":
2008 await self.vca_map[vca_type].install_configuration_sw(
2009 ee_id=ee_id,
2010 artifact_path=artifact_path,
2011 db_dict=db_dict,
2012 config=config,
2013 num_units=num_units,
David Garciac1fe90a2021-03-31 19:12:02 +02002014 vca_id=vca_id,
aktas98488ed2021-07-29 17:42:49 +03002015 vca_type=vca_type,
David Garciaaae391f2020-11-09 11:12:54 +01002016 )
quilesj7e13aeb2019-10-08 13:34:55 +02002017
quilesj63f90042020-01-17 09:53:55 +00002018 # write in db flag of configuration_sw already installed
garciadeblas5697b8b2021-03-24 09:17:02 +01002019 self.update_db_2(
2020 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2021 )
quilesj63f90042020-01-17 09:53:55 +00002022
2023 # add relations for this VCA (wait for other peers related with this VCA)
garciadeblas5697b8b2021-03-24 09:17:02 +01002024 await self._add_vca_relations(
2025 logging_text=logging_text,
2026 nsr_id=nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01002027 vca_type=vca_type,
David Garciab4ebcd02021-10-28 02:00:43 +02002028 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01002029 )
quilesj63f90042020-01-17 09:53:55 +00002030
quilesj7e13aeb2019-10-08 13:34:55 +02002031 # if SSH access is required, then get execution environment SSH public
David Garciaa27e20a2020-07-10 13:12:44 +02002032 # if native charm we have waited already to VM be UP
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002033 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
tierno3bedc9b2019-11-27 15:46:57 +00002034 pub_key = None
2035 user = None
tierno588547c2020-07-01 15:30:20 +00002036 # self.logger.debug("get ssh key block")
garciadeblas5697b8b2021-03-24 09:17:02 +01002037 if deep_get(
2038 config_descriptor, ("config-access", "ssh-access", "required")
2039 ):
tierno588547c2020-07-01 15:30:20 +00002040 # self.logger.debug("ssh key needed")
tierno3bedc9b2019-11-27 15:46:57 +00002041 # Needed to inject a ssh key
garciadeblas5697b8b2021-03-24 09:17:02 +01002042 user = deep_get(
2043 config_descriptor,
2044 ("config-access", "ssh-access", "default-user"),
2045 )
tierno3bedc9b2019-11-27 15:46:57 +00002046 step = "Install configuration Software, getting public ssh key"
David Garciac1fe90a2021-03-31 19:12:02 +02002047 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
garciadeblas5697b8b2021-03-24 09:17:02 +01002048 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
David Garciac1fe90a2021-03-31 19:12:02 +02002049 )
quilesj7e13aeb2019-10-08 13:34:55 +02002050
garciadeblas5697b8b2021-03-24 09:17:02 +01002051 step = "Insert public key into VM user={} ssh_key={}".format(
2052 user, pub_key
2053 )
tierno3bedc9b2019-11-27 15:46:57 +00002054 else:
tierno588547c2020-07-01 15:30:20 +00002055 # self.logger.debug("no need to get ssh key")
tierno3bedc9b2019-11-27 15:46:57 +00002056 step = "Waiting to VM being up and getting IP address"
2057 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02002058
Pedro Escaleira1e9c3e32022-05-30 15:37:01 +01002059 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2060 rw_mgmt_ip = None
2061
tierno3bedc9b2019-11-27 15:46:57 +00002062 # n2vc_redesign STEP 5.1
2063 # wait for RO (ip-address) Insert pub_key into VM
tierno5ee02052019-12-05 19:55:02 +00002064 if vnfr_id:
tierno7ecbc342020-09-21 14:05:39 +00002065 if kdu_name:
David Garcia78b6e6d2022-04-29 05:50:46 +02002066 rw_mgmt_ip, services = await self.wait_kdu_up(
garciadeblas5697b8b2021-03-24 09:17:02 +01002067 logging_text, nsr_id, vnfr_id, kdu_name
2068 )
David Garcia78b6e6d2022-04-29 05:50:46 +02002069 vnfd = self.db.get_one(
2070 "vnfds_revisions",
2071 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2072 )
2073 kdu = get_kdu(vnfd, kdu_name)
2074 kdu_services = [
2075 service["name"] for service in get_kdu_services(kdu)
2076 ]
2077 exposed_services = []
2078 for service in services:
2079 if any(s in service["name"] for s in kdu_services):
2080 exposed_services.append(service)
2081 await self.vca_map[vca_type].exec_primitive(
2082 ee_id=ee_id,
2083 primitive_name="config",
2084 params_dict={
2085 "osm-config": json.dumps(
2086 OsmConfigBuilder(
2087 k8s={"services": exposed_services}
2088 ).build()
2089 )
2090 },
2091 vca_id=vca_id,
2092 )
Pedro Escaleira1e9c3e32022-05-30 15:37:01 +01002093
2094 # This verification is needed in order to avoid trying to add a public key
2095 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2096 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2097 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2098 # or it is a KNF)
2099 elif db_vnfr.get('vdur'):
garciadeblas5697b8b2021-03-24 09:17:02 +01002100 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2101 logging_text,
2102 nsr_id,
2103 vnfr_id,
2104 vdu_id,
2105 vdu_index,
2106 user=user,
2107 pub_key=pub_key,
2108 )
David Garcia78b6e6d2022-04-29 05:50:46 +02002109
garciadeblas5697b8b2021-03-24 09:17:02 +01002110 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
quilesj7e13aeb2019-10-08 13:34:55 +02002111
tiernoa5088192019-11-26 16:12:53 +00002112 # store rw_mgmt_ip in deploy params for later replacement
quilesj7e13aeb2019-10-08 13:34:55 +02002113 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
tiernod8323042019-08-09 11:32:23 +00002114
2115 # n2vc_redesign STEP 6 Execute initial config primitive
garciadeblas5697b8b2021-03-24 09:17:02 +01002116 step = "execute initial config primitive"
quilesj3655ae02019-12-12 16:08:35 +00002117
2118 # wait for dependent primitives execution (NS -> VNF -> VDU)
tierno5ee02052019-12-05 19:55:02 +00002119 if initial_config_primitive_list:
2120 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
quilesj3655ae02019-12-12 16:08:35 +00002121
2122 # stage, in function of element type: vdu, kdu, vnf or ns
2123 my_vca = vca_deployed_list[vca_index]
2124 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2125 # VDU or KDU
garciadeblas5697b8b2021-03-24 09:17:02 +01002126 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
quilesj3655ae02019-12-12 16:08:35 +00002127 elif my_vca.get("member-vnf-index"):
2128 # VNF
garciadeblas5697b8b2021-03-24 09:17:02 +01002129 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
quilesj3655ae02019-12-12 16:08:35 +00002130 else:
2131 # NS
garciadeblas5697b8b2021-03-24 09:17:02 +01002132 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
quilesj3655ae02019-12-12 16:08:35 +00002133
tiernoc231a872020-01-21 08:49:05 +00002134 self._write_configuration_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01002135 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
quilesj3655ae02019-12-12 16:08:35 +00002136 )
2137
garciadeblas5697b8b2021-03-24 09:17:02 +01002138 self._write_op_status(op_id=nslcmop_id, stage=stage)
quilesj3655ae02019-12-12 16:08:35 +00002139
tiernoe876f672020-02-13 14:34:48 +00002140 check_if_terminated_needed = True
tiernod8323042019-08-09 11:32:23 +00002141 for initial_config_primitive in initial_config_primitive_list:
tiernoda6fb102019-11-23 00:36:52 +00002142 # adding information on the vca_deployed if it is a NS execution environment
2143 if not vca_deployed["member-vnf-index"]:
garciadeblas5697b8b2021-03-24 09:17:02 +01002144 deploy_params["ns_config_info"] = json.dumps(
2145 self._get_ns_config_info(nsr_id)
2146 )
tiernod8323042019-08-09 11:32:23 +00002147 # TODO check if already done
garciadeblas5697b8b2021-03-24 09:17:02 +01002148 primitive_params_ = self._map_primitive_params(
2149 initial_config_primitive, {}, deploy_params
2150 )
tierno3bedc9b2019-11-27 15:46:57 +00002151
garciadeblas5697b8b2021-03-24 09:17:02 +01002152 step = "execute primitive '{}' params '{}'".format(
2153 initial_config_primitive["name"], primitive_params_
2154 )
tiernod8323042019-08-09 11:32:23 +00002155 self.logger.debug(logging_text + step)
tierno588547c2020-07-01 15:30:20 +00002156 await self.vca_map[vca_type].exec_primitive(
quilesj7e13aeb2019-10-08 13:34:55 +02002157 ee_id=ee_id,
2158 primitive_name=initial_config_primitive["name"],
2159 params_dict=primitive_params_,
David Garciac1fe90a2021-03-31 19:12:02 +02002160 db_dict=db_dict,
2161 vca_id=vca_id,
aktas98488ed2021-07-29 17:42:49 +03002162 vca_type=vca_type,
quilesj7e13aeb2019-10-08 13:34:55 +02002163 )
tiernoe876f672020-02-13 14:34:48 +00002164 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2165 if check_if_terminated_needed:
garciadeblas5697b8b2021-03-24 09:17:02 +01002166 if config_descriptor.get("terminate-config-primitive"):
2167 self.update_db_2(
2168 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2169 )
tiernoe876f672020-02-13 14:34:48 +00002170 check_if_terminated_needed = False
quilesj3655ae02019-12-12 16:08:35 +00002171
tiernod8323042019-08-09 11:32:23 +00002172 # TODO register in database that primitive is done
quilesj7e13aeb2019-10-08 13:34:55 +02002173
tiernob996d942020-07-03 14:52:28 +00002174 # STEP 7 Configure metrics
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002175 if vca_type == "helm" or vca_type == "helm-v3":
bravof73bac502021-05-11 07:38:47 -04002176 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
tiernob996d942020-07-03 14:52:28 +00002177 ee_id=ee_id,
2178 artifact_path=artifact_path,
2179 ee_config_descriptor=ee_config_descriptor,
2180 vnfr_id=vnfr_id,
2181 nsr_id=nsr_id,
2182 target_ip=rw_mgmt_ip,
2183 )
2184 if prometheus_jobs:
garciadeblas5697b8b2021-03-24 09:17:02 +01002185 self.update_db_2(
2186 "nsrs",
2187 nsr_id,
2188 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2189 )
tiernob996d942020-07-03 14:52:28 +00002190
bravof73bac502021-05-11 07:38:47 -04002191 for job in prometheus_jobs:
2192 self.db.set_one(
2193 "prometheus_jobs",
aticig15db6142022-01-24 12:51:26 +03002194 {"job_name": job["job_name"]},
bravof73bac502021-05-11 07:38:47 -04002195 job,
2196 upsert=True,
aticig15db6142022-01-24 12:51:26 +03002197 fail_on_empty=False,
bravof73bac502021-05-11 07:38:47 -04002198 )
2199
quilesj7e13aeb2019-10-08 13:34:55 +02002200 step = "instantiated at VCA"
2201 self.logger.debug(logging_text + step)
2202
tiernoc231a872020-01-21 08:49:05 +00002203 self._write_configuration_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01002204 nsr_id=nsr_id, vca_index=vca_index, status="READY"
quilesj3655ae02019-12-12 16:08:35 +00002205 )
2206
tiernod8323042019-08-09 11:32:23 +00002207 except Exception as e: # TODO not use Exception but N2VC exception
quilesj3655ae02019-12-12 16:08:35 +00002208 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
garciadeblas5697b8b2021-03-24 09:17:02 +01002209 if not isinstance(
2210 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2211 ):
2212 self.logger.error(
2213 "Exception while {} : {}".format(step, e), exc_info=True
2214 )
tiernoc231a872020-01-21 08:49:05 +00002215 self._write_configuration_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01002216 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
quilesj3655ae02019-12-12 16:08:35 +00002217 )
tiernoe876f672020-02-13 14:34:48 +00002218 raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00002219
garciadeblas5697b8b2021-03-24 09:17:02 +01002220 def _write_ns_status(
2221 self,
2222 nsr_id: str,
2223 ns_state: str,
2224 current_operation: str,
2225 current_operation_id: str,
2226 error_description: str = None,
2227 error_detail: str = None,
2228 other_update: dict = None,
2229 ):
tiernoe876f672020-02-13 14:34:48 +00002230 """
2231 Update db_nsr fields.
2232 :param nsr_id:
2233 :param ns_state:
2234 :param current_operation:
2235 :param current_operation_id:
2236 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00002237 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00002238 :param other_update: Other required changes at database if provided, will be cleared
2239 :return:
2240 """
quilesj4cda56b2019-12-05 10:02:20 +00002241 try:
tiernoe876f672020-02-13 14:34:48 +00002242 db_dict = other_update or {}
garciadeblas5697b8b2021-03-24 09:17:02 +01002243 db_dict[
2244 "_admin.nslcmop"
2245 ] = current_operation_id # for backward compatibility
tiernoe876f672020-02-13 14:34:48 +00002246 db_dict["_admin.current-operation"] = current_operation_id
garciadeblas5697b8b2021-03-24 09:17:02 +01002247 db_dict["_admin.operation-type"] = (
2248 current_operation if current_operation != "IDLE" else None
2249 )
quilesj4cda56b2019-12-05 10:02:20 +00002250 db_dict["currentOperation"] = current_operation
2251 db_dict["currentOperationID"] = current_operation_id
2252 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00002253 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00002254
2255 if ns_state:
2256 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00002257 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00002258 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002259 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
quilesj3655ae02019-12-12 16:08:35 +00002260
garciadeblas5697b8b2021-03-24 09:17:02 +01002261 def _write_op_status(
2262 self,
2263 op_id: str,
2264 stage: list = None,
2265 error_message: str = None,
2266 queuePosition: int = 0,
2267 operation_state: str = None,
2268 other_update: dict = None,
2269 ):
quilesj3655ae02019-12-12 16:08:35 +00002270 try:
tiernoe876f672020-02-13 14:34:48 +00002271 db_dict = other_update or {}
garciadeblas5697b8b2021-03-24 09:17:02 +01002272 db_dict["queuePosition"] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00002273 if isinstance(stage, list):
garciadeblas5697b8b2021-03-24 09:17:02 +01002274 db_dict["stage"] = stage[0]
2275 db_dict["detailed-status"] = " ".join(stage)
tiernoe876f672020-02-13 14:34:48 +00002276 elif stage is not None:
garciadeblas5697b8b2021-03-24 09:17:02 +01002277 db_dict["stage"] = str(stage)
tiernoe876f672020-02-13 14:34:48 +00002278
2279 if error_message is not None:
garciadeblas5697b8b2021-03-24 09:17:02 +01002280 db_dict["errorMessage"] = error_message
tiernoe876f672020-02-13 14:34:48 +00002281 if operation_state is not None:
garciadeblas5697b8b2021-03-24 09:17:02 +01002282 db_dict["operationState"] = operation_state
tiernoe876f672020-02-13 14:34:48 +00002283 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00002284 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00002285 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002286 self.logger.warn(
2287 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2288 )
quilesj3655ae02019-12-12 16:08:35 +00002289
tierno51183952020-04-03 15:48:18 +00002290 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00002291 try:
tierno51183952020-04-03 15:48:18 +00002292 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00002293 # configurationStatus
garciadeblas5697b8b2021-03-24 09:17:02 +01002294 config_status = db_nsr.get("configurationStatus")
quilesj3655ae02019-12-12 16:08:35 +00002295 if config_status:
garciadeblas5697b8b2021-03-24 09:17:02 +01002296 db_nsr_update = {
2297 "configurationStatus.{}.status".format(index): status
2298 for index, v in enumerate(config_status)
2299 if v
2300 }
quilesj3655ae02019-12-12 16:08:35 +00002301 # update status
tierno51183952020-04-03 15:48:18 +00002302 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00002303
tiernoe876f672020-02-13 14:34:48 +00002304 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002305 self.logger.warn(
2306 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2307 )
quilesj3655ae02019-12-12 16:08:35 +00002308
garciadeblas5697b8b2021-03-24 09:17:02 +01002309 def _write_configuration_status(
2310 self,
2311 nsr_id: str,
2312 vca_index: int,
2313 status: str = None,
2314 element_under_configuration: str = None,
2315 element_type: str = None,
2316 other_update: dict = None,
2317 ):
quilesj3655ae02019-12-12 16:08:35 +00002318
2319 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2320 # .format(vca_index, status))
2321
2322 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01002323 db_path = "configurationStatus.{}.".format(vca_index)
tierno51183952020-04-03 15:48:18 +00002324 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00002325 if status:
garciadeblas5697b8b2021-03-24 09:17:02 +01002326 db_dict[db_path + "status"] = status
quilesj3655ae02019-12-12 16:08:35 +00002327 if element_under_configuration:
garciadeblas5697b8b2021-03-24 09:17:02 +01002328 db_dict[
2329 db_path + "elementUnderConfiguration"
2330 ] = element_under_configuration
quilesj3655ae02019-12-12 16:08:35 +00002331 if element_type:
garciadeblas5697b8b2021-03-24 09:17:02 +01002332 db_dict[db_path + "elementType"] = element_type
quilesj3655ae02019-12-12 16:08:35 +00002333 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00002334 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002335 self.logger.warn(
2336 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2337 status, nsr_id, vca_index, e
2338 )
2339 )
quilesj4cda56b2019-12-05 10:02:20 +00002340
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        # only the external "PLA" engine is supported; any other value skips placement
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # ask the PLA module (via kafka) to compute placement for this operation
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            db_poll_interval = 5
            # total wait budget: 10 polls of db_poll_interval seconds (~50s)
            wait = db_poll_interval * 10
            pla_result = None
            # poll the database instead of kafka: with HA the PLA answer may have
            # been consumed and stored at "_admin.pla" by a different LCM worker
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            # apply the computed vim-account-id to each matching VNF record
            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # skip entries without a decision or not belonging to this NS
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
magnussonle9198bb2020-01-21 13:00:51 +01002389
2390 def update_nsrs_with_pla_result(self, params):
2391 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01002392 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2393 self.update_db_2(
2394 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2395 )
magnussonle9198bb2020-01-21 13:00:51 +01002396 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002397 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
magnussonle9198bb2020-01-21 13:00:51 +01002398
tierno59d22d22018-09-25 18:10:19 +02002399 async def instantiate(self, nsr_id, nslcmop_id):
quilesj7e13aeb2019-10-08 13:34:55 +02002400 """
2401
2402 :param nsr_id: ns instance to deploy
2403 :param nslcmop_id: operation to run
2404 :return:
2405 """
kuused124bfe2019-06-18 12:09:24 +02002406
2407 # Try to lock HA task here
garciadeblas5697b8b2021-03-24 09:17:02 +01002408 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02002409 if not task_is_locked_by_me:
garciadeblas5697b8b2021-03-24 09:17:02 +01002410 self.logger.debug(
2411 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2412 )
kuused124bfe2019-06-18 12:09:24 +02002413 return
2414
tierno59d22d22018-09-25 18:10:19 +02002415 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2416 self.logger.debug(logging_text + "Enter")
quilesj7e13aeb2019-10-08 13:34:55 +02002417
tierno59d22d22018-09-25 18:10:19 +02002418 # get all needed from database
quilesj7e13aeb2019-10-08 13:34:55 +02002419
2420 # database nsrs record
tierno59d22d22018-09-25 18:10:19 +02002421 db_nsr = None
quilesj7e13aeb2019-10-08 13:34:55 +02002422
2423 # database nslcmops record
tierno59d22d22018-09-25 18:10:19 +02002424 db_nslcmop = None
quilesj7e13aeb2019-10-08 13:34:55 +02002425
2426 # update operation on nsrs
tiernoe876f672020-02-13 14:34:48 +00002427 db_nsr_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02002428 # update operation on nslcmops
tierno59d22d22018-09-25 18:10:19 +02002429 db_nslcmop_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02002430
tierno59d22d22018-09-25 18:10:19 +02002431 nslcmop_operation_state = None
garciadeblas5697b8b2021-03-24 09:17:02 +01002432 db_vnfrs = {} # vnf's info indexed by member-index
quilesj7e13aeb2019-10-08 13:34:55 +02002433 # n2vc_info = {}
tiernoe876f672020-02-13 14:34:48 +00002434 tasks_dict_info = {} # from task to info text
tierno59d22d22018-09-25 18:10:19 +02002435 exc = None
tiernoe876f672020-02-13 14:34:48 +00002436 error_list = []
garciadeblas5697b8b2021-03-24 09:17:02 +01002437 stage = [
2438 "Stage 1/5: preparation of the environment.",
2439 "Waiting for previous operations to terminate.",
2440 "",
2441 ]
tiernoe876f672020-02-13 14:34:48 +00002442 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02002443 try:
kuused124bfe2019-06-18 12:09:24 +02002444 # wait for any previous tasks in process
garciadeblas5697b8b2021-03-24 09:17:02 +01002445 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02002446
quilesj7e13aeb2019-10-08 13:34:55 +02002447 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
tiernob5203912020-08-11 11:20:13 +00002448 stage[1] = "Reading from database."
quilesj4cda56b2019-12-05 10:02:20 +00002449 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
tiernoe876f672020-02-13 14:34:48 +00002450 db_nsr_update["detailed-status"] = "creating"
2451 db_nsr_update["operational-status"] = "init"
quilesj4cda56b2019-12-05 10:02:20 +00002452 self._write_ns_status(
2453 nsr_id=nsr_id,
2454 ns_state="BUILDING",
2455 current_operation="INSTANTIATING",
tiernoe876f672020-02-13 14:34:48 +00002456 current_operation_id=nslcmop_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01002457 other_update=db_nsr_update,
tiernoe876f672020-02-13 14:34:48 +00002458 )
garciadeblas5697b8b2021-03-24 09:17:02 +01002459 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
quilesj4cda56b2019-12-05 10:02:20 +00002460
quilesj7e13aeb2019-10-08 13:34:55 +02002461 # read from db: operation
tiernob5203912020-08-11 11:20:13 +00002462 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
tierno59d22d22018-09-25 18:10:19 +02002463 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
Guillermo Calvino57c68152022-01-26 17:40:31 +01002464 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2465 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2466 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2467 )
tierno744303e2020-01-13 16:46:31 +00002468 ns_params = db_nslcmop.get("operationParams")
2469 if ns_params and ns_params.get("timeout_ns_deploy"):
2470 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2471 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01002472 timeout_ns_deploy = self.timeout.get(
2473 "ns_deploy", self.timeout_ns_deploy
2474 )
quilesj7e13aeb2019-10-08 13:34:55 +02002475
2476 # read from db: ns
tiernob5203912020-08-11 11:20:13 +00002477 stage[1] = "Getting nsr={} from db.".format(nsr_id)
garciadeblascd509f52021-11-23 10:04:12 +01002478 self.logger.debug(logging_text + stage[1])
tierno59d22d22018-09-25 18:10:19 +02002479 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernob5203912020-08-11 11:20:13 +00002480 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
garciadeblascd509f52021-11-23 10:04:12 +01002481 self.logger.debug(logging_text + stage[1])
tiernod732fb82020-05-21 13:18:23 +00002482 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
bravof021e70d2021-03-11 12:03:30 -03002483 self.fs.sync(db_nsr["nsd-id"])
tiernod732fb82020-05-21 13:18:23 +00002484 db_nsr["nsd"] = nsd
tiernod8323042019-08-09 11:32:23 +00002485 # nsr_name = db_nsr["name"] # TODO short-name??
tierno47e86b52018-10-10 14:05:55 +02002486
quilesj7e13aeb2019-10-08 13:34:55 +02002487 # read from db: vnf's of this ns
tiernob5203912020-08-11 11:20:13 +00002488 stage[1] = "Getting vnfrs from db."
tiernoe876f672020-02-13 14:34:48 +00002489 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02002490 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
tierno27246d82018-09-27 15:59:09 +02002491
quilesj7e13aeb2019-10-08 13:34:55 +02002492 # read from db: vnfd's for every vnf
garciadeblas5697b8b2021-03-24 09:17:02 +01002493 db_vnfds = [] # every vnfd data
quilesj7e13aeb2019-10-08 13:34:55 +02002494
2495 # for each vnf in ns, read vnfd
tierno27246d82018-09-27 15:59:09 +02002496 for vnfr in db_vnfrs_list:
Guillermo Calvino57c68152022-01-26 17:40:31 +01002497 if vnfr.get("kdur"):
2498 kdur_list = []
2499 for kdur in vnfr["kdur"]:
2500 if kdur.get("additionalParams"):
Pedro Escaleirab9a7c4d2022-03-31 00:08:05 +01002501 kdur["additionalParams"] = json.loads(
2502 kdur["additionalParams"]
2503 )
Guillermo Calvino57c68152022-01-26 17:40:31 +01002504 kdur_list.append(kdur)
2505 vnfr["kdur"] = kdur_list
2506
bravof922c4172020-11-24 21:21:43 -03002507 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2508 vnfd_id = vnfr["vnfd-id"]
2509 vnfd_ref = vnfr["vnfd-ref"]
bravof021e70d2021-03-11 12:03:30 -03002510 self.fs.sync(vnfd_id)
lloretgalleg6d488782020-07-22 10:13:46 +00002511
quilesj7e13aeb2019-10-08 13:34:55 +02002512 # if we haven't this vnfd, read it from db
tierno27246d82018-09-27 15:59:09 +02002513 if vnfd_id not in db_vnfds:
quilesj63f90042020-01-17 09:53:55 +00002514 # read from db
garciadeblas5697b8b2021-03-24 09:17:02 +01002515 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2516 vnfd_id, vnfd_ref
2517 )
tiernoe876f672020-02-13 14:34:48 +00002518 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02002519 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
tierno27246d82018-09-27 15:59:09 +02002520
quilesj7e13aeb2019-10-08 13:34:55 +02002521 # store vnfd
David Garciad41dbd62020-12-10 12:52:52 +01002522 db_vnfds.append(vnfd)
quilesj7e13aeb2019-10-08 13:34:55 +02002523
2524 # Get or generates the _admin.deployed.VCA list
tiernoe4f7e6c2018-11-27 14:55:30 +00002525 vca_deployed_list = None
2526 if db_nsr["_admin"].get("deployed"):
2527 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2528 if vca_deployed_list is None:
2529 vca_deployed_list = []
quilesj3655ae02019-12-12 16:08:35 +00002530 configuration_status_list = []
tiernoe4f7e6c2018-11-27 14:55:30 +00002531 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
quilesj3655ae02019-12-12 16:08:35 +00002532 db_nsr_update["configurationStatus"] = configuration_status_list
quilesj7e13aeb2019-10-08 13:34:55 +02002533 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00002534 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00002535 elif isinstance(vca_deployed_list, dict):
2536 # maintain backward compatibility. Change a dict to list at database
2537 vca_deployed_list = list(vca_deployed_list.values())
2538 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00002539 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00002540
garciadeblas5697b8b2021-03-24 09:17:02 +01002541 if not isinstance(
2542 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2543 ):
tiernoa009e552019-01-30 16:45:44 +00002544 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2545 db_nsr_update["_admin.deployed.RO.vnfd"] = []
tierno59d22d22018-09-25 18:10:19 +02002546
tiernobaa51102018-12-14 13:16:18 +00002547 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2548 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2549 self.update_db_2("nsrs", nsr_id, db_nsr_update)
garciadeblas5697b8b2021-03-24 09:17:02 +01002550 self.db.set_list(
2551 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2552 )
quilesj3655ae02019-12-12 16:08:35 +00002553
2554 # n2vc_redesign STEP 2 Deploy Network Scenario
garciadeblas5697b8b2021-03-24 09:17:02 +01002555 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2556 self._write_op_status(op_id=nslcmop_id, stage=stage)
quilesj3655ae02019-12-12 16:08:35 +00002557
tiernob5203912020-08-11 11:20:13 +00002558 stage[1] = "Deploying KDUs."
tiernoe876f672020-02-13 14:34:48 +00002559 # self.logger.debug(logging_text + "Before deploy_kdus")
calvinosanch9f9c6f22019-11-04 13:37:39 +01002560 # Call to deploy_kdus in case exists the "vdu:kdu" param
tiernoe876f672020-02-13 14:34:48 +00002561 await self.deploy_kdus(
2562 logging_text=logging_text,
2563 nsr_id=nsr_id,
2564 nslcmop_id=nslcmop_id,
2565 db_vnfrs=db_vnfrs,
2566 db_vnfds=db_vnfds,
2567 task_instantiation_info=tasks_dict_info,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002568 )
tiernoe876f672020-02-13 14:34:48 +00002569
2570 stage[1] = "Getting VCA public key."
tiernod8323042019-08-09 11:32:23 +00002571 # n2vc_redesign STEP 1 Get VCA public ssh-key
2572 # feature 1429. Add n2vc public key to needed VMs
tierno3bedc9b2019-11-27 15:46:57 +00002573 n2vc_key = self.n2vc.get_public_key()
tiernoa5088192019-11-26 16:12:53 +00002574 n2vc_key_list = [n2vc_key]
2575 if self.vca_config.get("public_key"):
2576 n2vc_key_list.append(self.vca_config["public_key"])
tierno98ad6ea2019-05-30 17:16:28 +00002577
tiernoe876f672020-02-13 14:34:48 +00002578 stage[1] = "Deploying NS at VIM."
tiernod8323042019-08-09 11:32:23 +00002579 task_ro = asyncio.ensure_future(
quilesj7e13aeb2019-10-08 13:34:55 +02002580 self.instantiate_RO(
2581 logging_text=logging_text,
2582 nsr_id=nsr_id,
2583 nsd=nsd,
2584 db_nsr=db_nsr,
2585 db_nslcmop=db_nslcmop,
2586 db_vnfrs=db_vnfrs,
bravof922c4172020-11-24 21:21:43 -03002587 db_vnfds=db_vnfds,
tiernoe876f672020-02-13 14:34:48 +00002588 n2vc_key_list=n2vc_key_list,
garciadeblas5697b8b2021-03-24 09:17:02 +01002589 stage=stage,
tierno98ad6ea2019-05-30 17:16:28 +00002590 )
tiernod8323042019-08-09 11:32:23 +00002591 )
2592 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
tiernoa2143262020-03-27 16:20:40 +00002593 tasks_dict_info[task_ro] = "Deploying at VIM"
tierno98ad6ea2019-05-30 17:16:28 +00002594
tiernod8323042019-08-09 11:32:23 +00002595 # n2vc_redesign STEP 3 to 6 Deploy N2VC
tiernoe876f672020-02-13 14:34:48 +00002596 stage[1] = "Deploying Execution Environments."
2597 self.logger.debug(logging_text + stage[1])
tierno98ad6ea2019-05-30 17:16:28 +00002598
tiernod8323042019-08-09 11:32:23 +00002599 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
bravof922c4172020-11-24 21:21:43 -03002600 for vnf_profile in get_vnf_profiles(nsd):
2601 vnfd_id = vnf_profile["vnfd-id"]
2602 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2603 member_vnf_index = str(vnf_profile["id"])
tiernod8323042019-08-09 11:32:23 +00002604 db_vnfr = db_vnfrs[member_vnf_index]
2605 base_folder = vnfd["_admin"]["storage"]
2606 vdu_id = None
2607 vdu_index = 0
tierno98ad6ea2019-05-30 17:16:28 +00002608 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002609 kdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002610
tierno8a518872018-12-21 13:42:14 +00002611 # Get additional parameters
bravof922c4172020-11-24 21:21:43 -03002612 deploy_params = {"OSM": get_osm_params(db_vnfr)}
tiernod8323042019-08-09 11:32:23 +00002613 if db_vnfr.get("additionalParamsForVnf"):
garciadeblas5697b8b2021-03-24 09:17:02 +01002614 deploy_params.update(
2615 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2616 )
tierno8a518872018-12-21 13:42:14 +00002617
bravofe5a31bc2021-02-17 19:09:12 -03002618 descriptor_config = get_configuration(vnfd, vnfd["id"])
tierno588547c2020-07-01 15:30:20 +00002619 if descriptor_config:
quilesj7e13aeb2019-10-08 13:34:55 +02002620 self._deploy_n2vc(
garciadeblas5697b8b2021-03-24 09:17:02 +01002621 logging_text=logging_text
2622 + "member_vnf_index={} ".format(member_vnf_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002623 db_nsr=db_nsr,
2624 db_vnfr=db_vnfr,
2625 nslcmop_id=nslcmop_id,
2626 nsr_id=nsr_id,
2627 nsi_id=nsi_id,
2628 vnfd_id=vnfd_id,
2629 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002630 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002631 member_vnf_index=member_vnf_index,
2632 vdu_index=vdu_index,
2633 vdu_name=vdu_name,
2634 deploy_params=deploy_params,
2635 descriptor_config=descriptor_config,
2636 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00002637 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002638 stage=stage,
quilesj7e13aeb2019-10-08 13:34:55 +02002639 )
tierno59d22d22018-09-25 18:10:19 +02002640
2641 # Deploy charms for each VDU that supports one.
bravof922c4172020-11-24 21:21:43 -03002642 for vdud in get_vdu_list(vnfd):
tiernod8323042019-08-09 11:32:23 +00002643 vdu_id = vdud["id"]
bravofe5a31bc2021-02-17 19:09:12 -03002644 descriptor_config = get_configuration(vnfd, vdu_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01002645 vdur = find_in_list(
2646 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2647 )
bravof922c4172020-11-24 21:21:43 -03002648
tierno626e0152019-11-29 14:16:16 +00002649 if vdur.get("additionalParams"):
bravof922c4172020-11-24 21:21:43 -03002650 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
tierno626e0152019-11-29 14:16:16 +00002651 else:
2652 deploy_params_vdu = deploy_params
garciadeblas5697b8b2021-03-24 09:17:02 +01002653 deploy_params_vdu["OSM"] = get_osm_params(
2654 db_vnfr, vdu_id, vdu_count_index=0
2655 )
endika76ba9232021-06-21 18:55:07 +02002656 vdud_count = get_number_of_instances(vnfd, vdu_id)
bravof922c4172020-11-24 21:21:43 -03002657
2658 self.logger.debug("VDUD > {}".format(vdud))
garciadeblas5697b8b2021-03-24 09:17:02 +01002659 self.logger.debug(
2660 "Descriptor config > {}".format(descriptor_config)
2661 )
tierno588547c2020-07-01 15:30:20 +00002662 if descriptor_config:
tiernod8323042019-08-09 11:32:23 +00002663 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002664 kdu_name = None
bravof922c4172020-11-24 21:21:43 -03002665 for vdu_index in range(vdud_count):
tiernod8323042019-08-09 11:32:23 +00002666 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
quilesj7e13aeb2019-10-08 13:34:55 +02002667 self._deploy_n2vc(
garciadeblas5697b8b2021-03-24 09:17:02 +01002668 logging_text=logging_text
2669 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2670 member_vnf_index, vdu_id, vdu_index
2671 ),
quilesj7e13aeb2019-10-08 13:34:55 +02002672 db_nsr=db_nsr,
2673 db_vnfr=db_vnfr,
2674 nslcmop_id=nslcmop_id,
2675 nsr_id=nsr_id,
2676 nsi_id=nsi_id,
2677 vnfd_id=vnfd_id,
2678 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002679 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002680 member_vnf_index=member_vnf_index,
2681 vdu_index=vdu_index,
2682 vdu_name=vdu_name,
tierno626e0152019-11-29 14:16:16 +00002683 deploy_params=deploy_params_vdu,
quilesj7e13aeb2019-10-08 13:34:55 +02002684 descriptor_config=descriptor_config,
2685 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002686 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002687 stage=stage,
quilesj7e13aeb2019-10-08 13:34:55 +02002688 )
bravof922c4172020-11-24 21:21:43 -03002689 for kdud in get_kdu_list(vnfd):
calvinosanch9f9c6f22019-11-04 13:37:39 +01002690 kdu_name = kdud["name"]
bravofe5a31bc2021-02-17 19:09:12 -03002691 descriptor_config = get_configuration(vnfd, kdu_name)
tierno588547c2020-07-01 15:30:20 +00002692 if descriptor_config:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002693 vdu_id = None
2694 vdu_index = 0
2695 vdu_name = None
garciadeblas5697b8b2021-03-24 09:17:02 +01002696 kdur = next(
2697 x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
2698 )
bravof922c4172020-11-24 21:21:43 -03002699 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
tierno72ef84f2020-10-06 08:22:07 +00002700 if kdur.get("additionalParams"):
Pedro Escaleirab9a7c4d2022-03-31 00:08:05 +01002701 deploy_params_kdu.update(
2702 parse_yaml_strings(kdur["additionalParams"].copy())
garciadeblas5697b8b2021-03-24 09:17:02 +01002703 )
tierno59d22d22018-09-25 18:10:19 +02002704
calvinosanch9f9c6f22019-11-04 13:37:39 +01002705 self._deploy_n2vc(
2706 logging_text=logging_text,
2707 db_nsr=db_nsr,
2708 db_vnfr=db_vnfr,
2709 nslcmop_id=nslcmop_id,
2710 nsr_id=nsr_id,
2711 nsi_id=nsi_id,
2712 vnfd_id=vnfd_id,
2713 vdu_id=vdu_id,
2714 kdu_name=kdu_name,
2715 member_vnf_index=member_vnf_index,
2716 vdu_index=vdu_index,
2717 vdu_name=vdu_name,
tierno72ef84f2020-10-06 08:22:07 +00002718 deploy_params=deploy_params_kdu,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002719 descriptor_config=descriptor_config,
2720 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002721 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002722 stage=stage,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002723 )
tierno59d22d22018-09-25 18:10:19 +02002724
tierno1b633412019-02-25 16:48:23 +00002725 # Check if this NS has a charm configuration
tiernod8323042019-08-09 11:32:23 +00002726 descriptor_config = nsd.get("ns-configuration")
2727 if descriptor_config and descriptor_config.get("juju"):
2728 vnfd_id = None
2729 db_vnfr = None
2730 member_vnf_index = None
2731 vdu_id = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002732 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002733 vdu_index = 0
2734 vdu_name = None
tierno1b633412019-02-25 16:48:23 +00002735
tiernod8323042019-08-09 11:32:23 +00002736 # Get additional parameters
David Garcia40603572020-12-10 20:10:53 +01002737 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
tiernod8323042019-08-09 11:32:23 +00002738 if db_nsr.get("additionalParamsForNs"):
garciadeblas5697b8b2021-03-24 09:17:02 +01002739 deploy_params.update(
2740 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2741 )
tiernod8323042019-08-09 11:32:23 +00002742 base_folder = nsd["_admin"]["storage"]
quilesj7e13aeb2019-10-08 13:34:55 +02002743 self._deploy_n2vc(
2744 logging_text=logging_text,
2745 db_nsr=db_nsr,
2746 db_vnfr=db_vnfr,
2747 nslcmop_id=nslcmop_id,
2748 nsr_id=nsr_id,
2749 nsi_id=nsi_id,
2750 vnfd_id=vnfd_id,
2751 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002752 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002753 member_vnf_index=member_vnf_index,
2754 vdu_index=vdu_index,
2755 vdu_name=vdu_name,
2756 deploy_params=deploy_params,
2757 descriptor_config=descriptor_config,
2758 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002759 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002760 stage=stage,
quilesj7e13aeb2019-10-08 13:34:55 +02002761 )
tierno1b633412019-02-25 16:48:23 +00002762
tiernoe876f672020-02-13 14:34:48 +00002763 # rest of staff will be done at finally
tierno1b633412019-02-25 16:48:23 +00002764
garciadeblas5697b8b2021-03-24 09:17:02 +01002765 except (
2766 ROclient.ROClientException,
2767 DbException,
2768 LcmException,
2769 N2VCException,
2770 ) as e:
2771 self.logger.error(
2772 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2773 )
tierno59d22d22018-09-25 18:10:19 +02002774 exc = e
2775 except asyncio.CancelledError:
garciadeblas5697b8b2021-03-24 09:17:02 +01002776 self.logger.error(
2777 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2778 )
tierno59d22d22018-09-25 18:10:19 +02002779 exc = "Operation was cancelled"
2780 except Exception as e:
2781 exc = traceback.format_exc()
garciadeblas5697b8b2021-03-24 09:17:02 +01002782 self.logger.critical(
2783 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2784 exc_info=True,
2785 )
tierno59d22d22018-09-25 18:10:19 +02002786 finally:
2787 if exc:
tiernoe876f672020-02-13 14:34:48 +00002788 error_list.append(str(exc))
tiernobaa51102018-12-14 13:16:18 +00002789 try:
tiernoe876f672020-02-13 14:34:48 +00002790 # wait for pending tasks
2791 if tasks_dict_info:
2792 stage[1] = "Waiting for instantiate pending tasks."
2793 self.logger.debug(logging_text + stage[1])
garciadeblas5697b8b2021-03-24 09:17:02 +01002794 error_list += await self._wait_for_tasks(
2795 logging_text,
2796 tasks_dict_info,
2797 timeout_ns_deploy,
2798 stage,
2799 nslcmop_id,
2800 nsr_id=nsr_id,
2801 )
tiernoe876f672020-02-13 14:34:48 +00002802 stage[1] = stage[2] = ""
2803 except asyncio.CancelledError:
2804 error_list.append("Cancelled")
2805 # TODO cancel all tasks
2806 except Exception as exc:
2807 error_list.append(str(exc))
quilesj4cda56b2019-12-05 10:02:20 +00002808
tiernoe876f672020-02-13 14:34:48 +00002809 # update operation-status
2810 db_nsr_update["operational-status"] = "running"
2811 # let's begin with VCA 'configured' status (later we can change it)
2812 db_nsr_update["config-status"] = "configured"
2813 for task, task_name in tasks_dict_info.items():
2814 if not task.done() or task.cancelled() or task.exception():
2815 if task_name.startswith(self.task_name_deploy_vca):
2816 # A N2VC task is pending
2817 db_nsr_update["config-status"] = "failed"
quilesj4cda56b2019-12-05 10:02:20 +00002818 else:
tiernoe876f672020-02-13 14:34:48 +00002819 # RO or KDU task is pending
2820 db_nsr_update["operational-status"] = "failed"
quilesj3655ae02019-12-12 16:08:35 +00002821
tiernoe876f672020-02-13 14:34:48 +00002822 # update status at database
2823 if error_list:
tiernoa2143262020-03-27 16:20:40 +00002824 error_detail = ". ".join(error_list)
tiernoe876f672020-02-13 14:34:48 +00002825 self.logger.error(logging_text + error_detail)
garciadeblas5697b8b2021-03-24 09:17:02 +01002826 error_description_nslcmop = "{} Detail: {}".format(
2827 stage[0], error_detail
2828 )
2829 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2830 nslcmop_id, stage[0]
2831 )
quilesj3655ae02019-12-12 16:08:35 +00002832
garciadeblas5697b8b2021-03-24 09:17:02 +01002833 db_nsr_update["detailed-status"] = (
2834 error_description_nsr + " Detail: " + error_detail
2835 )
tiernoe876f672020-02-13 14:34:48 +00002836 db_nslcmop_update["detailed-status"] = error_detail
2837 nslcmop_operation_state = "FAILED"
2838 ns_state = "BROKEN"
2839 else:
tiernoa2143262020-03-27 16:20:40 +00002840 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00002841 error_description_nsr = error_description_nslcmop = None
2842 ns_state = "READY"
2843 db_nsr_update["detailed-status"] = "Done"
2844 db_nslcmop_update["detailed-status"] = "Done"
2845 nslcmop_operation_state = "COMPLETED"
quilesj4cda56b2019-12-05 10:02:20 +00002846
tiernoe876f672020-02-13 14:34:48 +00002847 if db_nsr:
2848 self._write_ns_status(
2849 nsr_id=nsr_id,
2850 ns_state=ns_state,
2851 current_operation="IDLE",
2852 current_operation_id=None,
2853 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00002854 error_detail=error_detail,
garciadeblas5697b8b2021-03-24 09:17:02 +01002855 other_update=db_nsr_update,
tiernoe876f672020-02-13 14:34:48 +00002856 )
tiernoa17d4f42020-04-28 09:59:23 +00002857 self._write_op_status(
2858 op_id=nslcmop_id,
2859 stage="",
2860 error_message=error_description_nslcmop,
2861 operation_state=nslcmop_operation_state,
2862 other_update=db_nslcmop_update,
2863 )
quilesj3655ae02019-12-12 16:08:35 +00002864
tierno59d22d22018-09-25 18:10:19 +02002865 if nslcmop_operation_state:
2866 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01002867 await self.msg.aiowrite(
2868 "ns",
2869 "instantiated",
2870 {
2871 "nsr_id": nsr_id,
2872 "nslcmop_id": nslcmop_id,
2873 "operationState": nslcmop_operation_state,
2874 },
2875 loop=self.loop,
2876 )
tierno59d22d22018-09-25 18:10:19 +02002877 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002878 self.logger.error(
2879 logging_text + "kafka_write notification Exception {}".format(e)
2880 )
tierno59d22d22018-09-25 18:10:19 +02002881
2882 self.logger.debug(logging_text + "Exit")
2883 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2884
David Garciab4ebcd02021-10-28 02:00:43 +02002885 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2886 if vnfd_id not in cached_vnfds:
2887 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2888 return cached_vnfds[vnfd_id]
2889
2890 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2891 if vnf_profile_id not in cached_vnfrs:
2892 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2893 "vnfrs",
2894 {
2895 "member-vnf-index-ref": vnf_profile_id,
2896 "nsr-id-ref": nsr_id,
2897 },
2898 )
2899 return cached_vnfrs[vnf_profile_id]
2900
2901 def _is_deployed_vca_in_relation(
2902 self, vca: DeployedVCA, relation: Relation
2903 ) -> bool:
2904 found = False
2905 for endpoint in (relation.provider, relation.requirer):
2906 if endpoint["kdu-resource-profile-id"]:
2907 continue
2908 found = (
2909 vca.vnf_profile_id == endpoint.vnf_profile_id
2910 and vca.vdu_profile_id == endpoint.vdu_profile_id
2911 and vca.execution_environment_ref == endpoint.execution_environment_ref
2912 )
2913 if found:
2914 break
2915 return found
2916
2917 def _update_ee_relation_data_with_implicit_data(
2918 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2919 ):
2920 ee_relation_data = safe_get_ee_relation(
2921 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2922 )
2923 ee_relation_level = EELevel.get_level(ee_relation_data)
2924 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2925 "execution-environment-ref"
2926 ]:
2927 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2928 vnfd_id = vnf_profile["vnfd-id"]
2929 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2930 entity_id = (
2931 vnfd_id
2932 if ee_relation_level == EELevel.VNF
2933 else ee_relation_data["vdu-profile-id"]
2934 )
2935 ee = get_juju_ee_ref(db_vnfd, entity_id)
2936 if not ee:
2937 raise Exception(
2938 f"not execution environments found for ee_relation {ee_relation_data}"
2939 )
2940 ee_relation_data["execution-environment-ref"] = ee["id"]
2941 return ee_relation_data
2942
2943 def _get_ns_relations(
2944 self,
2945 nsr_id: str,
2946 nsd: Dict[str, Any],
2947 vca: DeployedVCA,
2948 cached_vnfds: Dict[str, Any],
David Garcia444bf962021-11-11 16:35:26 +01002949 ) -> List[Relation]:
David Garciab4ebcd02021-10-28 02:00:43 +02002950 relations = []
2951 db_ns_relations = get_ns_configuration_relation_list(nsd)
2952 for r in db_ns_relations:
David Garcia444bf962021-11-11 16:35:26 +01002953 provider_dict = None
2954 requirer_dict = None
2955 if all(key in r for key in ("provider", "requirer")):
2956 provider_dict = r["provider"]
2957 requirer_dict = r["requirer"]
2958 elif "entities" in r:
2959 provider_id = r["entities"][0]["id"]
2960 provider_dict = {
2961 "nsr-id": nsr_id,
2962 "endpoint": r["entities"][0]["endpoint"],
2963 }
2964 if provider_id != nsd["id"]:
2965 provider_dict["vnf-profile-id"] = provider_id
2966 requirer_id = r["entities"][1]["id"]
2967 requirer_dict = {
2968 "nsr-id": nsr_id,
2969 "endpoint": r["entities"][1]["endpoint"],
2970 }
2971 if requirer_id != nsd["id"]:
2972 requirer_dict["vnf-profile-id"] = requirer_id
2973 else:
aticig15db6142022-01-24 12:51:26 +03002974 raise Exception(
2975 "provider/requirer or entities must be included in the relation."
2976 )
David Garciab4ebcd02021-10-28 02:00:43 +02002977 relation_provider = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01002978 nsr_id, nsd, provider_dict, cached_vnfds
David Garciab4ebcd02021-10-28 02:00:43 +02002979 )
2980 relation_requirer = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01002981 nsr_id, nsd, requirer_dict, cached_vnfds
David Garciab4ebcd02021-10-28 02:00:43 +02002982 )
2983 provider = EERelation(relation_provider)
2984 requirer = EERelation(relation_requirer)
2985 relation = Relation(r["name"], provider, requirer)
2986 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2987 if vca_in_relation:
2988 relations.append(relation)
2989 return relations
2990
2991 def _get_vnf_relations(
2992 self,
2993 nsr_id: str,
2994 nsd: Dict[str, Any],
2995 vca: DeployedVCA,
2996 cached_vnfds: Dict[str, Any],
David Garcia444bf962021-11-11 16:35:26 +01002997 ) -> List[Relation]:
David Garciab4ebcd02021-10-28 02:00:43 +02002998 relations = []
2999 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3000 vnf_profile_id = vnf_profile["id"]
3001 vnfd_id = vnf_profile["vnfd-id"]
3002 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3003 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3004 for r in db_vnf_relations:
David Garcia444bf962021-11-11 16:35:26 +01003005 provider_dict = None
3006 requirer_dict = None
3007 if all(key in r for key in ("provider", "requirer")):
3008 provider_dict = r["provider"]
3009 requirer_dict = r["requirer"]
3010 elif "entities" in r:
3011 provider_id = r["entities"][0]["id"]
3012 provider_dict = {
3013 "nsr-id": nsr_id,
3014 "vnf-profile-id": vnf_profile_id,
3015 "endpoint": r["entities"][0]["endpoint"],
3016 }
3017 if provider_id != vnfd_id:
3018 provider_dict["vdu-profile-id"] = provider_id
3019 requirer_id = r["entities"][1]["id"]
3020 requirer_dict = {
3021 "nsr-id": nsr_id,
3022 "vnf-profile-id": vnf_profile_id,
3023 "endpoint": r["entities"][1]["endpoint"],
3024 }
3025 if requirer_id != vnfd_id:
3026 requirer_dict["vdu-profile-id"] = requirer_id
3027 else:
aticig15db6142022-01-24 12:51:26 +03003028 raise Exception(
3029 "provider/requirer or entities must be included in the relation."
3030 )
David Garciab4ebcd02021-10-28 02:00:43 +02003031 relation_provider = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01003032 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
David Garciab4ebcd02021-10-28 02:00:43 +02003033 )
3034 relation_requirer = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01003035 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
David Garciab4ebcd02021-10-28 02:00:43 +02003036 )
3037 provider = EERelation(relation_provider)
3038 requirer = EERelation(relation_requirer)
3039 relation = Relation(r["name"], provider, requirer)
3040 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3041 if vca_in_relation:
3042 relations.append(relation)
3043 return relations
3044
3045 def _get_kdu_resource_data(
3046 self,
3047 ee_relation: EERelation,
3048 db_nsr: Dict[str, Any],
3049 cached_vnfds: Dict[str, Any],
3050 ) -> DeployedK8sResource:
3051 nsd = get_nsd(db_nsr)
3052 vnf_profiles = get_vnf_profiles(nsd)
3053 vnfd_id = find_in_list(
3054 vnf_profiles,
3055 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3056 )["vnfd-id"]
3057 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3058 kdu_resource_profile = get_kdu_resource_profile(
3059 db_vnfd, ee_relation.kdu_resource_profile_id
3060 )
3061 kdu_name = kdu_resource_profile["kdu-name"]
3062 deployed_kdu, _ = get_deployed_kdu(
3063 db_nsr.get("_admin", ()).get("deployed", ()),
3064 kdu_name,
3065 ee_relation.vnf_profile_id,
3066 )
3067 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3068 return deployed_kdu
3069
3070 def _get_deployed_component(
3071 self,
3072 ee_relation: EERelation,
3073 db_nsr: Dict[str, Any],
3074 cached_vnfds: Dict[str, Any],
3075 ) -> DeployedComponent:
3076 nsr_id = db_nsr["_id"]
3077 deployed_component = None
3078 ee_level = EELevel.get_level(ee_relation)
3079 if ee_level == EELevel.NS:
3080 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3081 if vca:
3082 deployed_component = DeployedVCA(nsr_id, vca)
3083 elif ee_level == EELevel.VNF:
3084 vca = get_deployed_vca(
3085 db_nsr,
3086 {
3087 "vdu_id": None,
3088 "member-vnf-index": ee_relation.vnf_profile_id,
3089 "ee_descriptor_id": ee_relation.execution_environment_ref,
3090 },
3091 )
3092 if vca:
3093 deployed_component = DeployedVCA(nsr_id, vca)
3094 elif ee_level == EELevel.VDU:
3095 vca = get_deployed_vca(
3096 db_nsr,
3097 {
3098 "vdu_id": ee_relation.vdu_profile_id,
3099 "member-vnf-index": ee_relation.vnf_profile_id,
3100 "ee_descriptor_id": ee_relation.execution_environment_ref,
3101 },
3102 )
3103 if vca:
3104 deployed_component = DeployedVCA(nsr_id, vca)
3105 elif ee_level == EELevel.KDU:
3106 kdu_resource_data = self._get_kdu_resource_data(
3107 ee_relation, db_nsr, cached_vnfds
3108 )
3109 if kdu_resource_data:
3110 deployed_component = DeployedK8sResource(kdu_resource_data)
3111 return deployed_component
3112
3113 async def _add_relation(
3114 self,
3115 relation: Relation,
3116 vca_type: str,
3117 db_nsr: Dict[str, Any],
3118 cached_vnfds: Dict[str, Any],
3119 cached_vnfrs: Dict[str, Any],
3120 ) -> bool:
3121 deployed_provider = self._get_deployed_component(
3122 relation.provider, db_nsr, cached_vnfds
3123 )
3124 deployed_requirer = self._get_deployed_component(
3125 relation.requirer, db_nsr, cached_vnfds
3126 )
3127 if (
3128 deployed_provider
3129 and deployed_requirer
3130 and deployed_provider.config_sw_installed
3131 and deployed_requirer.config_sw_installed
3132 ):
3133 provider_db_vnfr = (
3134 self._get_vnfr(
3135 relation.provider.nsr_id,
3136 relation.provider.vnf_profile_id,
3137 cached_vnfrs,
3138 )
3139 if relation.provider.vnf_profile_id
3140 else None
3141 )
3142 requirer_db_vnfr = (
3143 self._get_vnfr(
3144 relation.requirer.nsr_id,
3145 relation.requirer.vnf_profile_id,
3146 cached_vnfrs,
3147 )
3148 if relation.requirer.vnf_profile_id
3149 else None
3150 )
3151 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3152 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3153 provider_relation_endpoint = RelationEndpoint(
3154 deployed_provider.ee_id,
3155 provider_vca_id,
3156 relation.provider.endpoint,
3157 )
3158 requirer_relation_endpoint = RelationEndpoint(
3159 deployed_requirer.ee_id,
3160 requirer_vca_id,
3161 relation.requirer.endpoint,
3162 )
3163 await self.vca_map[vca_type].add_relation(
3164 provider=provider_relation_endpoint,
3165 requirer=requirer_relation_endpoint,
3166 )
3167 # remove entry from relations list
3168 return True
3169 return False
3170
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Find and establish all relations involving the VCA at *vca_index*.

        Polls until every relation of this VCA (NS-level and VNF-level) has
        been added, retrying every 5 seconds while the peer components finish
        deploying, up to *timeout* seconds.

        :param logging_text: prefix for log messages.
        :param nsr_id: id of the NS record in the "nsrs" collection.
        :param vca_type: key into self.vca_map selecting the VCA connector.
        :param vca_index: position of this VCA in the NSR's deployed-VCA list.
        :param timeout: maximum seconds to wait for all relations to be added.
        :return: True when all relations were added (or there were none);
            False on timeout or on any error (errors are logged, not raised).
        """

        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            # caches shared across all lookups in this call to avoid
            # re-reading the same VNFD/VNFR records from the database
            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # (iterate over a copy because successfully added relations
                # are removed from the list inside the loop)
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            # best-effort: relation failures are reported via the return
            # value, never propagated to the caller
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3243
garciadeblas5697b8b2021-03-24 09:17:02 +01003244 async def _install_kdu(
3245 self,
3246 nsr_id: str,
3247 nsr_db_path: str,
3248 vnfr_data: dict,
3249 kdu_index: int,
3250 kdud: dict,
3251 vnfd: dict,
3252 k8s_instance_info: dict,
3253 k8params: dict = None,
3254 timeout: int = 600,
3255 vca_id: str = None,
3256 ):
lloretgalleg7c121132020-07-08 07:53:22 +00003257
tiernob9018152020-04-16 14:18:24 +00003258 try:
lloretgalleg7c121132020-07-08 07:53:22 +00003259 k8sclustertype = k8s_instance_info["k8scluster-type"]
3260 # Instantiate kdu
garciadeblas5697b8b2021-03-24 09:17:02 +01003261 db_dict_install = {
3262 "collection": "nsrs",
3263 "filter": {"_id": nsr_id},
3264 "path": nsr_db_path,
3265 }
lloretgalleg7c121132020-07-08 07:53:22 +00003266
romeromonser4554a702021-05-28 12:00:08 +02003267 if k8s_instance_info.get("kdu-deployment-name"):
3268 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3269 else:
3270 kdu_instance = self.k8scluster_map[
3271 k8sclustertype
3272 ].generate_kdu_instance_name(
3273 db_dict=db_dict_install,
3274 kdu_model=k8s_instance_info["kdu-model"],
3275 kdu_name=k8s_instance_info["kdu-name"],
3276 )
Pedro Escaleirada21d262022-04-21 16:31:06 +01003277
3278 # Update the nsrs table with the kdu-instance value
garciadeblas5697b8b2021-03-24 09:17:02 +01003279 self.update_db_2(
Pedro Escaleirada21d262022-04-21 16:31:06 +01003280 item="nsrs",
3281 _id=nsr_id,
3282 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
garciadeblas5697b8b2021-03-24 09:17:02 +01003283 )
Pedro Escaleirada21d262022-04-21 16:31:06 +01003284
3285 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3286 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3287 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3288 # namespace, this first verification could be removed, and the next step would be done for any kind
3289 # of KNF.
3290 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3291 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3292 if k8sclustertype in ("juju", "juju-bundle"):
3293 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3294 # that the user passed a namespace which he wants its KDU to be deployed in)
3295 if (
3296 self.db.count(
3297 table="nsrs",
3298 q_filter={
3299 "_id": nsr_id,
3300 "_admin.projects_write": k8s_instance_info["namespace"],
3301 "_admin.projects_read": k8s_instance_info["namespace"],
3302 },
3303 )
3304 > 0
3305 ):
3306 self.logger.debug(
3307 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3308 )
3309 self.update_db_2(
3310 item="nsrs",
3311 _id=nsr_id,
3312 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3313 )
3314 k8s_instance_info["namespace"] = kdu_instance
3315
David Garciad64e2742021-02-25 20:19:18 +01003316 await self.k8scluster_map[k8sclustertype].install(
lloretgalleg7c121132020-07-08 07:53:22 +00003317 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3318 kdu_model=k8s_instance_info["kdu-model"],
3319 atomic=True,
3320 params=k8params,
3321 db_dict=db_dict_install,
3322 timeout=timeout,
3323 kdu_name=k8s_instance_info["kdu-name"],
David Garciad64e2742021-02-25 20:19:18 +01003324 namespace=k8s_instance_info["namespace"],
3325 kdu_instance=kdu_instance,
David Garciac1fe90a2021-03-31 19:12:02 +02003326 vca_id=vca_id,
David Garciad64e2742021-02-25 20:19:18 +01003327 )
lloretgalleg7c121132020-07-08 07:53:22 +00003328
3329 # Obtain services to obtain management service ip
3330 services = await self.k8scluster_map[k8sclustertype].get_services(
3331 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3332 kdu_instance=kdu_instance,
garciadeblas5697b8b2021-03-24 09:17:02 +01003333 namespace=k8s_instance_info["namespace"],
3334 )
lloretgalleg7c121132020-07-08 07:53:22 +00003335
3336 # Obtain management service info (if exists)
tierno7ecbc342020-09-21 14:05:39 +00003337 vnfr_update_dict = {}
bravof6ec62b72021-02-25 17:20:35 -03003338 kdu_config = get_configuration(vnfd, kdud["name"])
3339 if kdu_config:
3340 target_ee_list = kdu_config.get("execution-environment-list", [])
3341 else:
3342 target_ee_list = []
3343
lloretgalleg7c121132020-07-08 07:53:22 +00003344 if services:
tierno7ecbc342020-09-21 14:05:39 +00003345 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
garciadeblas5697b8b2021-03-24 09:17:02 +01003346 mgmt_services = [
3347 service
3348 for service in kdud.get("service", [])
3349 if service.get("mgmt-service")
3350 ]
lloretgalleg7c121132020-07-08 07:53:22 +00003351 for mgmt_service in mgmt_services:
3352 for service in services:
3353 if service["name"].startswith(mgmt_service["name"]):
3354 # Mgmt service found, Obtain service ip
3355 ip = service.get("external_ip", service.get("cluster_ip"))
3356 if isinstance(ip, list) and len(ip) == 1:
3357 ip = ip[0]
3358
garciadeblas5697b8b2021-03-24 09:17:02 +01003359 vnfr_update_dict[
3360 "kdur.{}.ip-address".format(kdu_index)
3361 ] = ip
lloretgalleg7c121132020-07-08 07:53:22 +00003362
3363 # Check if must update also mgmt ip at the vnf
garciadeblas5697b8b2021-03-24 09:17:02 +01003364 service_external_cp = mgmt_service.get(
3365 "external-connection-point-ref"
3366 )
lloretgalleg7c121132020-07-08 07:53:22 +00003367 if service_external_cp:
garciadeblas5697b8b2021-03-24 09:17:02 +01003368 if (
3369 deep_get(vnfd, ("mgmt-interface", "cp"))
3370 == service_external_cp
3371 ):
lloretgalleg7c121132020-07-08 07:53:22 +00003372 vnfr_update_dict["ip-address"] = ip
3373
bravof6ec62b72021-02-25 17:20:35 -03003374 if find_in_list(
3375 target_ee_list,
garciadeblas5697b8b2021-03-24 09:17:02 +01003376 lambda ee: ee.get(
3377 "external-connection-point-ref", ""
3378 )
3379 == service_external_cp,
bravof6ec62b72021-02-25 17:20:35 -03003380 ):
garciadeblas5697b8b2021-03-24 09:17:02 +01003381 vnfr_update_dict[
3382 "kdur.{}.ip-address".format(kdu_index)
3383 ] = ip
lloretgalleg7c121132020-07-08 07:53:22 +00003384 break
3385 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01003386 self.logger.warn(
3387 "Mgmt service name: {} not found".format(
3388 mgmt_service["name"]
3389 )
3390 )
lloretgalleg7c121132020-07-08 07:53:22 +00003391
tierno7ecbc342020-09-21 14:05:39 +00003392 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3393 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
lloretgalleg7c121132020-07-08 07:53:22 +00003394
bravof9a256db2021-02-22 18:02:07 -03003395 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
garciadeblas5697b8b2021-03-24 09:17:02 +01003396 if (
3397 kdu_config
3398 and kdu_config.get("initial-config-primitive")
3399 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3400 ):
3401 initial_config_primitive_list = kdu_config.get(
3402 "initial-config-primitive"
3403 )
Dominik Fleischmannc1975dd2020-08-19 12:17:51 +02003404 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3405
3406 for initial_config_primitive in initial_config_primitive_list:
garciadeblas5697b8b2021-03-24 09:17:02 +01003407 primitive_params_ = self._map_primitive_params(
3408 initial_config_primitive, {}, {}
3409 )
Dominik Fleischmannc1975dd2020-08-19 12:17:51 +02003410
3411 await asyncio.wait_for(
3412 self.k8scluster_map[k8sclustertype].exec_primitive(
3413 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3414 kdu_instance=kdu_instance,
3415 primitive_name=initial_config_primitive["name"],
garciadeblas5697b8b2021-03-24 09:17:02 +01003416 params=primitive_params_,
3417 db_dict=db_dict_install,
David Garciac1fe90a2021-03-31 19:12:02 +02003418 vca_id=vca_id,
3419 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01003420 timeout=timeout,
David Garciac1fe90a2021-03-31 19:12:02 +02003421 )
Dominik Fleischmannc1975dd2020-08-19 12:17:51 +02003422
tiernob9018152020-04-16 14:18:24 +00003423 except Exception as e:
lloretgalleg7c121132020-07-08 07:53:22 +00003424 # Prepare update db with error and raise exception
tiernob9018152020-04-16 14:18:24 +00003425 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01003426 self.update_db_2(
3427 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3428 )
3429 self.update_db_2(
3430 "vnfrs",
3431 vnfr_data.get("_id"),
3432 {"kdur.{}.status".format(kdu_index): "ERROR"},
3433 )
tiernob9018152020-04-16 14:18:24 +00003434 except Exception:
lloretgalleg7c121132020-07-08 07:53:22 +00003435 # ignore to keep original exception
tiernob9018152020-04-16 14:18:24 +00003436 pass
lloretgalleg7c121132020-07-08 07:53:22 +00003437 # reraise original error
3438 raise
3439
3440 return kdu_instance
tiernob9018152020-04-16 14:18:24 +00003441
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """
        Deploy every KDU (helm chart or juju bundle) present in the VNF records.

        For each "kdur" entry in db_vnfrs it resolves the target k8s cluster id,
        synchronizes helm repos once per cluster, records the deployment at
        nsrs._admin.deployed.K8s.<index> and launches an asynchronous
        _install_kdu task registered in self.lcm_tasks.

        :param logging_text: prefix for all log messages of this operation
        :param nsr_id: NS record id, used for DB updates and task registration
        :param nslcmop_id: NS LCM operation id, used for task registration
        :param db_vnfrs: dict of VNF records; each value may contain a "kdur" list
        :param db_vnfds: list of VNF descriptors, looked up by "_id"
        :param task_instantiation_info: dict populated with task -> description
        :raises LcmException: on unknown kdu type or any deployment error
        """
        # Launch kdus if present in the descriptor

        # cache of cluster-id -> internal uuid, one sub-dict per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the internal k8s cluster uuid for cluster_type."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if the K8s cluster is still being created; wait for any related in-flight tasks
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    # descriptor of this concrete kdu inside the vnfd
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkg-dir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (only once per cluster and helm flavour)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # fire-and-register the real installation; it runs concurrently
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00003713
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        Launch one instantiate_N2VC asyncio task per execution environment found
        in descriptor_config, registering each task in self.lcm_tasks.

        Looks for the charm information at db <nsrs>._admin.deployed.VCA; if an
        entry matching (member_vnf_index, vdu_id, kdu_name, vdu_index,
        ee_descriptor_id) is not found, a new one is created in the DB and
        appended to db_nsr in memory (fills db_nsr._admin.deployed.VCA.<index>).

        :param descriptor_config: configuration section of the NSD/VNFD/vdu/kdu
        :param task_instantiation_info: dict populated with task -> description
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                # proxy charm when a charm name is given, native otherwise;
                # "cloud: k8s" / "proxy: false" override the default
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # search an existing matching VCA entry; the for..else creates one
            # when the loop completes without break
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
tiernobaa51102018-12-14 13:16:18 +00003866
tiernoc9556972019-07-05 15:25:25 +00003867 @staticmethod
kuuse0ca67472019-05-13 15:59:27 +02003868 def _create_nslcmop(nsr_id, operation, params):
3869 """
3870 Creates a ns-lcm-opp content to be stored at database.
3871 :param nsr_id: internal id of the instance
3872 :param operation: instantiate, terminate, scale, action, ...
3873 :param params: user parameters for the operation
3874 :return: dictionary following SOL005 format
3875 """
3876 # Raise exception if invalid arguments
3877 if not (nsr_id and operation and params):
3878 raise LcmException(
garciadeblas5697b8b2021-03-24 09:17:02 +01003879 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3880 )
kuuse0ca67472019-05-13 15:59:27 +02003881 now = time()
3882 _id = str(uuid4())
3883 nslcmop = {
3884 "id": _id,
3885 "_id": _id,
3886 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3887 "operationState": "PROCESSING",
3888 "statusEnteredTime": now,
3889 "nsInstanceId": nsr_id,
3890 "lcmOperationType": operation,
3891 "startTime": now,
3892 "isAutomaticInvocation": False,
3893 "operationParams": params,
3894 "isCancelPending": False,
3895 "links": {
3896 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3897 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01003898 },
kuuse0ca67472019-05-13 15:59:27 +02003899 }
3900 return nslcmop
3901
calvinosanch9f9c6f22019-11-04 13:37:39 +01003902 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00003903 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01003904 for key, value in params.items():
3905 if str(value).startswith("!!yaml "):
3906 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01003907 return params
3908
kuuse8b998e42019-07-30 15:22:16 +02003909 def _get_terminate_primitive_params(self, seq, vnf_index):
garciadeblas5697b8b2021-03-24 09:17:02 +01003910 primitive = seq.get("name")
kuuse8b998e42019-07-30 15:22:16 +02003911 primitive_params = {}
3912 params = {
3913 "member_vnf_index": vnf_index,
3914 "primitive": primitive,
3915 "primitive_params": primitive_params,
3916 }
3917 desc_params = {}
3918 return self._map_primitive_params(seq, params, desc_params)
3919
kuuseac3a8882019-10-03 10:48:06 +02003920 # sub-operations
3921
tierno51183952020-04-03 15:48:18 +00003922 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
garciadeblas5697b8b2021-03-24 09:17:02 +01003923 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3924 if op.get("operationState") == "COMPLETED":
kuuseac3a8882019-10-03 10:48:06 +02003925 # b. Skip sub-operation
3926 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3927 return self.SUBOPERATION_STATUS_SKIP
3928 else:
tierno7c4e24c2020-05-13 08:41:35 +00003929 # c. retry executing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02003930 # The sub-operation exists, and operationState != 'COMPLETED'
tierno7c4e24c2020-05-13 08:41:35 +00003931 # Update operationState = 'PROCESSING' to indicate a retry.
garciadeblas5697b8b2021-03-24 09:17:02 +01003932 operationState = "PROCESSING"
3933 detailed_status = "In progress"
kuuseac3a8882019-10-03 10:48:06 +02003934 self._update_suboperation_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01003935 db_nslcmop, op_index, operationState, detailed_status
3936 )
kuuseac3a8882019-10-03 10:48:06 +02003937 # Return the sub-operation index
3938 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3939 # with arguments extracted from the sub-operation
3940 return op_index
3941
3942 # Find a sub-operation where all keys in a matching dictionary must match
3943 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3944 def _find_suboperation(self, db_nslcmop, match):
tierno7c4e24c2020-05-13 08:41:35 +00003945 if db_nslcmop and match:
garciadeblas5697b8b2021-03-24 09:17:02 +01003946 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
kuuseac3a8882019-10-03 10:48:06 +02003947 for i, op in enumerate(op_list):
3948 if all(op.get(k) == match[k] for k in match):
3949 return i
3950 return self.SUBOPERATION_STATUS_NOT_FOUND
3951
3952 # Update status for a sub-operation given its index
garciadeblas5697b8b2021-03-24 09:17:02 +01003953 def _update_suboperation_status(
3954 self, db_nslcmop, op_index, operationState, detailed_status
3955 ):
kuuseac3a8882019-10-03 10:48:06 +02003956 # Update DB for HA tasks
garciadeblas5697b8b2021-03-24 09:17:02 +01003957 q_filter = {"_id": db_nslcmop["_id"]}
3958 update_dict = {
3959 "_admin.operations.{}.operationState".format(op_index): operationState,
3960 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3961 }
3962 self.db.set_one(
3963 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3964 )
kuuseac3a8882019-10-03 10:48:06 +02003965
3966 # Add sub-operation, return the index of the added sub-operation
3967 # Optionally, set operationState, detailed-status, and operationType
3968 # Status and type are currently set for 'scale' sub-operations:
3969 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3970 # 'detailed-status' : status message
3971 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3972 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
garciadeblas5697b8b2021-03-24 09:17:02 +01003973 def _add_suboperation(
3974 self,
3975 db_nslcmop,
3976 vnf_index,
3977 vdu_id,
3978 vdu_count_index,
3979 vdu_name,
3980 primitive,
3981 mapped_primitive_params,
3982 operationState=None,
3983 detailed_status=None,
3984 operationType=None,
3985 RO_nsr_id=None,
3986 RO_scaling_info=None,
3987 ):
tiernoe876f672020-02-13 14:34:48 +00003988 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02003989 return self.SUBOPERATION_STATUS_NOT_FOUND
3990 # Get the "_admin.operations" list, if it exists
garciadeblas5697b8b2021-03-24 09:17:02 +01003991 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3992 op_list = db_nslcmop_admin.get("operations")
kuuseac3a8882019-10-03 10:48:06 +02003993 # Create or append to the "_admin.operations" list
garciadeblas5697b8b2021-03-24 09:17:02 +01003994 new_op = {
3995 "member_vnf_index": vnf_index,
3996 "vdu_id": vdu_id,
3997 "vdu_count_index": vdu_count_index,
3998 "primitive": primitive,
3999 "primitive_params": mapped_primitive_params,
4000 }
kuuseac3a8882019-10-03 10:48:06 +02004001 if operationState:
garciadeblas5697b8b2021-03-24 09:17:02 +01004002 new_op["operationState"] = operationState
kuuseac3a8882019-10-03 10:48:06 +02004003 if detailed_status:
garciadeblas5697b8b2021-03-24 09:17:02 +01004004 new_op["detailed-status"] = detailed_status
kuuseac3a8882019-10-03 10:48:06 +02004005 if operationType:
garciadeblas5697b8b2021-03-24 09:17:02 +01004006 new_op["lcmOperationType"] = operationType
kuuseac3a8882019-10-03 10:48:06 +02004007 if RO_nsr_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01004008 new_op["RO_nsr_id"] = RO_nsr_id
kuuseac3a8882019-10-03 10:48:06 +02004009 if RO_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01004010 new_op["RO_scaling_info"] = RO_scaling_info
kuuseac3a8882019-10-03 10:48:06 +02004011 if not op_list:
4012 # No existing operations, create key 'operations' with current operation as first list element
garciadeblas5697b8b2021-03-24 09:17:02 +01004013 db_nslcmop_admin.update({"operations": [new_op]})
4014 op_list = db_nslcmop_admin.get("operations")
kuuseac3a8882019-10-03 10:48:06 +02004015 else:
4016 # Existing operations, append operation to list
4017 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02004018
garciadeblas5697b8b2021-03-24 09:17:02 +01004019 db_nslcmop_update = {"_admin.operations": op_list}
4020 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
kuuseac3a8882019-10-03 10:48:06 +02004021 op_index = len(op_list) - 1
4022 return op_index
4023
4024 # Helper methods for scale() sub-operations
4025
4026 # pre-scale/post-scale:
4027 # Check for 3 different cases:
4028 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4029 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
tierno7c4e24c2020-05-13 08:41:35 +00004030 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
garciadeblas5697b8b2021-03-24 09:17:02 +01004031 def _check_or_add_scale_suboperation(
4032 self,
4033 db_nslcmop,
4034 vnf_index,
4035 vnf_config_primitive,
4036 primitive_params,
4037 operationType,
4038 RO_nsr_id=None,
4039 RO_scaling_info=None,
4040 ):
kuuseac3a8882019-10-03 10:48:06 +02004041 # Find this sub-operation
tierno7c4e24c2020-05-13 08:41:35 +00004042 if RO_nsr_id and RO_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01004043 operationType = "SCALE-RO"
kuuseac3a8882019-10-03 10:48:06 +02004044 match = {
garciadeblas5697b8b2021-03-24 09:17:02 +01004045 "member_vnf_index": vnf_index,
4046 "RO_nsr_id": RO_nsr_id,
4047 "RO_scaling_info": RO_scaling_info,
kuuseac3a8882019-10-03 10:48:06 +02004048 }
4049 else:
4050 match = {
garciadeblas5697b8b2021-03-24 09:17:02 +01004051 "member_vnf_index": vnf_index,
4052 "primitive": vnf_config_primitive,
4053 "primitive_params": primitive_params,
4054 "lcmOperationType": operationType,
kuuseac3a8882019-10-03 10:48:06 +02004055 }
4056 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00004057 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02004058 # a. New sub-operation
4059 # The sub-operation does not exist, add it.
4060 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4061 # The following parameters are set to None for all kind of scaling:
4062 vdu_id = None
4063 vdu_count_index = None
4064 vdu_name = None
tierno51183952020-04-03 15:48:18 +00004065 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02004066 vnf_config_primitive = None
4067 primitive_params = None
4068 else:
4069 RO_nsr_id = None
4070 RO_scaling_info = None
4071 # Initial status for sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01004072 operationState = "PROCESSING"
4073 detailed_status = "In progress"
kuuseac3a8882019-10-03 10:48:06 +02004074 # Add sub-operation for pre/post-scaling (zero or more operations)
garciadeblas5697b8b2021-03-24 09:17:02 +01004075 self._add_suboperation(
4076 db_nslcmop,
4077 vnf_index,
4078 vdu_id,
4079 vdu_count_index,
4080 vdu_name,
4081 vnf_config_primitive,
4082 primitive_params,
4083 operationState,
4084 detailed_status,
4085 operationType,
4086 RO_nsr_id,
4087 RO_scaling_info,
4088 )
kuuseac3a8882019-10-03 10:48:06 +02004089 return self.SUBOPERATION_STATUS_NEW
4090 else:
4091 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4092 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00004093 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02004094
preethika.pdf7d8e02019-12-10 13:10:48 +00004095 # Function to return execution_environment id
4096
4097 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00004098 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00004099 for vca in vca_deployed_list:
4100 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4101 return vca["ee_id"]
4102
David Garciac1fe90a2021-03-31 19:12:02 +02004103 async def destroy_N2VC(
4104 self,
4105 logging_text,
4106 db_nslcmop,
4107 vca_deployed,
4108 config_descriptor,
4109 vca_index,
4110 destroy_ee=True,
4111 exec_primitives=True,
4112 scaling_in=False,
4113 vca_id: str = None,
4114 ):
tiernoe876f672020-02-13 14:34:48 +00004115 """
4116 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
4117 :param logging_text:
4118 :param db_nslcmop:
4119 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
4120 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4121 :param vca_index: index in the database _admin.deployed.VCA
4122 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
tierno588547c2020-07-01 15:30:20 +00004123 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
4124 not executed properly
aktas13251562021-02-12 22:19:10 +03004125 :param scaling_in: True destroys the application, False destroys the model
tiernoe876f672020-02-13 14:34:48 +00004126 :return: None or exception
4127 """
tiernoe876f672020-02-13 14:34:48 +00004128
tierno588547c2020-07-01 15:30:20 +00004129 self.logger.debug(
garciadeblas5697b8b2021-03-24 09:17:02 +01004130 logging_text
4131 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
tierno588547c2020-07-01 15:30:20 +00004132 vca_index, vca_deployed, config_descriptor, destroy_ee
4133 )
4134 )
4135
4136 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
4137
4138 # execute terminate_primitives
4139 if exec_primitives:
bravof922c4172020-11-24 21:21:43 -03004140 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
garciadeblas5697b8b2021-03-24 09:17:02 +01004141 config_descriptor.get("terminate-config-primitive"),
4142 vca_deployed.get("ee_descriptor_id"),
4143 )
tierno588547c2020-07-01 15:30:20 +00004144 vdu_id = vca_deployed.get("vdu_id")
4145 vdu_count_index = vca_deployed.get("vdu_count_index")
4146 vdu_name = vca_deployed.get("vdu_name")
4147 vnf_index = vca_deployed.get("member-vnf-index")
4148 if terminate_primitives and vca_deployed.get("needed_terminate"):
tierno588547c2020-07-01 15:30:20 +00004149 for seq in terminate_primitives:
4150 # For each sequence in list, get primitive and call _ns_execute_primitive()
4151 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
garciadeblas5697b8b2021-03-24 09:17:02 +01004152 vnf_index, seq.get("name")
4153 )
tierno588547c2020-07-01 15:30:20 +00004154 self.logger.debug(logging_text + step)
4155 # Create the primitive for each sequence, i.e. "primitive": "touch"
garciadeblas5697b8b2021-03-24 09:17:02 +01004156 primitive = seq.get("name")
4157 mapped_primitive_params = self._get_terminate_primitive_params(
4158 seq, vnf_index
4159 )
tierno588547c2020-07-01 15:30:20 +00004160
4161 # Add sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01004162 self._add_suboperation(
4163 db_nslcmop,
4164 vnf_index,
4165 vdu_id,
4166 vdu_count_index,
4167 vdu_name,
4168 primitive,
4169 mapped_primitive_params,
4170 )
tierno588547c2020-07-01 15:30:20 +00004171 # Sub-operations: Call _ns_execute_primitive() instead of action()
4172 try:
David Garciac1fe90a2021-03-31 19:12:02 +02004173 result, result_detail = await self._ns_execute_primitive(
garciadeblas5697b8b2021-03-24 09:17:02 +01004174 vca_deployed["ee_id"],
4175 primitive,
David Garciac1fe90a2021-03-31 19:12:02 +02004176 mapped_primitive_params,
4177 vca_type=vca_type,
4178 vca_id=vca_id,
4179 )
tierno588547c2020-07-01 15:30:20 +00004180 except LcmException:
4181 # this happens when VCA is not deployed. In this case it is not needed to terminate
4182 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01004183 result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
tierno588547c2020-07-01 15:30:20 +00004184 if result not in result_ok:
garciadeblas5697b8b2021-03-24 09:17:02 +01004185 raise LcmException(
4186 "terminate_primitive {} for vnf_member_index={} fails with "
4187 "error {}".format(seq.get("name"), vnf_index, result_detail)
4188 )
tierno588547c2020-07-01 15:30:20 +00004189 # set that this VCA do not need terminated
garciadeblas5697b8b2021-03-24 09:17:02 +01004190 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
4191 vca_index
4192 )
4193 self.update_db_2(
4194 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
4195 )
tiernoe876f672020-02-13 14:34:48 +00004196
bravof73bac502021-05-11 07:38:47 -04004197 # Delete Prometheus Jobs if any
4198 # This uses NSR_ID, so it will destroy any jobs under this index
4199 self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
tiernob996d942020-07-03 14:52:28 +00004200
tiernoe876f672020-02-13 14:34:48 +00004201 if destroy_ee:
David Garciac1fe90a2021-03-31 19:12:02 +02004202 await self.vca_map[vca_type].delete_execution_environment(
4203 vca_deployed["ee_id"],
4204 scaling_in=scaling_in,
aktas98488ed2021-07-29 17:42:49 +03004205 vca_type=vca_type,
David Garciac1fe90a2021-03-31 19:12:02 +02004206 vca_id=vca_id,
4207 )
kuuse0ca67472019-05-13 15:59:27 +02004208
David Garciac1fe90a2021-03-31 19:12:02 +02004209 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
garciadeblas5697b8b2021-03-24 09:17:02 +01004210 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
tierno51183952020-04-03 15:48:18 +00004211 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00004212 try:
David Garciac1fe90a2021-03-31 19:12:02 +02004213 await self.n2vc.delete_namespace(
4214 namespace=namespace,
4215 total_timeout=self.timeout_charm_delete,
4216 vca_id=vca_id,
4217 )
tiernof59ad6c2020-04-08 12:50:52 +00004218 except N2VCNotFound: # already deleted. Skip
4219 pass
garciadeblas5697b8b2021-03-24 09:17:02 +01004220 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
quilesj3655ae02019-12-12 16:08:35 +00004221
garciadeblas5697b8b2021-03-24 09:17:02 +01004222 async def _terminate_RO(
4223 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4224 ):
tiernoe876f672020-02-13 14:34:48 +00004225 """
4226 Terminates a deployment from RO
4227 :param logging_text:
4228 :param nsr_deployed: db_nsr._admin.deployed
4229 :param nsr_id:
4230 :param nslcmop_id:
4231 :param stage: list of string with the content to write on db_nslcmop.detailed-status.
4232 this method will update only the index 2, but it will write on database the concatenated content of the list
4233 :return:
4234 """
4235 db_nsr_update = {}
4236 failed_detail = []
4237 ro_nsr_id = ro_delete_action = None
4238 if nsr_deployed and nsr_deployed.get("RO"):
4239 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
4240 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
4241 try:
4242 if ro_nsr_id:
4243 stage[2] = "Deleting ns from VIM."
4244 db_nsr_update["detailed-status"] = " ".join(stage)
4245 self._write_op_status(nslcmop_id, stage)
4246 self.logger.debug(logging_text + stage[2])
4247 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4248 self._write_op_status(nslcmop_id, stage)
4249 desc = await self.RO.delete("ns", ro_nsr_id)
4250 ro_delete_action = desc["action_id"]
garciadeblas5697b8b2021-03-24 09:17:02 +01004251 db_nsr_update[
4252 "_admin.deployed.RO.nsr_delete_action_id"
4253 ] = ro_delete_action
tiernoe876f672020-02-13 14:34:48 +00004254 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4255 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4256 if ro_delete_action:
4257 # wait until NS is deleted from VIM
4258 stage[2] = "Waiting ns deleted from VIM."
4259 detailed_status_old = None
garciadeblas5697b8b2021-03-24 09:17:02 +01004260 self.logger.debug(
4261 logging_text
4262 + stage[2]
4263 + " RO_id={} ro_delete_action={}".format(
4264 ro_nsr_id, ro_delete_action
4265 )
4266 )
tiernoe876f672020-02-13 14:34:48 +00004267 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4268 self._write_op_status(nslcmop_id, stage)
kuused124bfe2019-06-18 12:09:24 +02004269
tiernoe876f672020-02-13 14:34:48 +00004270 delete_timeout = 20 * 60 # 20 minutes
4271 while delete_timeout > 0:
4272 desc = await self.RO.show(
4273 "ns",
4274 item_id_name=ro_nsr_id,
4275 extra_item="action",
garciadeblas5697b8b2021-03-24 09:17:02 +01004276 extra_item_id=ro_delete_action,
4277 )
tiernoe876f672020-02-13 14:34:48 +00004278
4279 # deploymentStatus
4280 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4281
4282 ns_status, ns_status_info = self.RO.check_action_status(desc)
4283 if ns_status == "ERROR":
4284 raise ROclient.ROClientException(ns_status_info)
4285 elif ns_status == "BUILD":
4286 stage[2] = "Deleting from VIM {}".format(ns_status_info)
4287 elif ns_status == "ACTIVE":
4288 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
4289 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4290 break
4291 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004292 assert (
4293 False
4294 ), "ROclient.check_action_status returns unknown {}".format(
4295 ns_status
4296 )
tiernoe876f672020-02-13 14:34:48 +00004297 if stage[2] != detailed_status_old:
4298 detailed_status_old = stage[2]
4299 db_nsr_update["detailed-status"] = " ".join(stage)
4300 self._write_op_status(nslcmop_id, stage)
4301 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4302 await asyncio.sleep(5, loop=self.loop)
4303 delete_timeout -= 5
4304 else: # delete_timeout <= 0:
garciadeblas5697b8b2021-03-24 09:17:02 +01004305 raise ROclient.ROClientException(
4306 "Timeout waiting ns deleted from VIM"
4307 )
tiernoe876f672020-02-13 14:34:48 +00004308
4309 except Exception as e:
4310 self.update_db_2("nsrs", nsr_id, db_nsr_update)
garciadeblas5697b8b2021-03-24 09:17:02 +01004311 if (
4312 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4313 ): # not found
tiernoe876f672020-02-13 14:34:48 +00004314 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4315 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4316 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
garciadeblas5697b8b2021-03-24 09:17:02 +01004317 self.logger.debug(
4318 logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
4319 )
4320 elif (
4321 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4322 ): # conflict
tiernoa2143262020-03-27 16:20:40 +00004323 failed_detail.append("delete conflict: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01004324 self.logger.debug(
4325 logging_text
4326 + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
4327 )
tiernoe876f672020-02-13 14:34:48 +00004328 else:
tiernoa2143262020-03-27 16:20:40 +00004329 failed_detail.append("delete error: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01004330 self.logger.error(
4331 logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
4332 )
tiernoe876f672020-02-13 14:34:48 +00004333
4334 # Delete nsd
4335 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
4336 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
4337 try:
4338 stage[2] = "Deleting nsd from RO."
4339 db_nsr_update["detailed-status"] = " ".join(stage)
4340 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4341 self._write_op_status(nslcmop_id, stage)
4342 await self.RO.delete("nsd", ro_nsd_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01004343 self.logger.debug(
4344 logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
4345 )
tiernoe876f672020-02-13 14:34:48 +00004346 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
4347 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01004348 if (
4349 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4350 ): # not found
tiernoe876f672020-02-13 14:34:48 +00004351 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
garciadeblas5697b8b2021-03-24 09:17:02 +01004352 self.logger.debug(
4353 logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
4354 )
4355 elif (
4356 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4357 ): # conflict
4358 failed_detail.append(
4359 "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
4360 )
tiernoe876f672020-02-13 14:34:48 +00004361 self.logger.debug(logging_text + failed_detail[-1])
4362 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004363 failed_detail.append(
4364 "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
4365 )
tiernoe876f672020-02-13 14:34:48 +00004366 self.logger.error(logging_text + failed_detail[-1])
4367
4368 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
4369 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
4370 if not vnf_deployed or not vnf_deployed["id"]:
4371 continue
4372 try:
4373 ro_vnfd_id = vnf_deployed["id"]
garciadeblas5697b8b2021-03-24 09:17:02 +01004374 stage[
4375 2
4376 ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
4377 vnf_deployed["member-vnf-index"], ro_vnfd_id
4378 )
tiernoe876f672020-02-13 14:34:48 +00004379 db_nsr_update["detailed-status"] = " ".join(stage)
4380 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4381 self._write_op_status(nslcmop_id, stage)
4382 await self.RO.delete("vnfd", ro_vnfd_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01004383 self.logger.debug(
4384 logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
4385 )
tiernoe876f672020-02-13 14:34:48 +00004386 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
4387 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01004388 if (
4389 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4390 ): # not found
4391 db_nsr_update[
4392 "_admin.deployed.RO.vnfd.{}.id".format(index)
4393 ] = None
4394 self.logger.debug(
4395 logging_text
4396 + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
4397 )
4398 elif (
4399 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4400 ): # conflict
4401 failed_detail.append(
4402 "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
4403 )
tiernoe876f672020-02-13 14:34:48 +00004404 self.logger.debug(logging_text + failed_detail[-1])
4405 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004406 failed_detail.append(
4407 "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
4408 )
tiernoe876f672020-02-13 14:34:48 +00004409 self.logger.error(logging_text + failed_detail[-1])
4410
tiernoa2143262020-03-27 16:20:40 +00004411 if failed_detail:
4412 stage[2] = "Error deleting from VIM"
4413 else:
4414 stage[2] = "Deleted from VIM"
tiernoe876f672020-02-13 14:34:48 +00004415 db_nsr_update["detailed-status"] = " ".join(stage)
4416 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4417 self._write_op_status(nslcmop_id, stage)
4418
4419 if failed_detail:
tiernoa2143262020-03-27 16:20:40 +00004420 raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00004421
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate an NS instance.

        Three stages (mirrored in `stage`): 1) prepare and load records from
        the database, 2) run per-VCA terminate primitives, 3) delete all
        execution environments, KDUs and the RO/VIM deployment in parallel
        tasks. The `finally` section waits for pending tasks, consolidates
        errors, writes the final NS/operation status and notifies via kafka.

        :param nsr_id: ns record database id
        :param nslcmop_id: operation record database id
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human-readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # per-operation timeout overrides the instance default
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was ever deployed; final status is written in `finally`
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            # cache vnfds by id so each descriptor is fetched only once
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                # NOTE(review): vca.get() below runs before the `if not vca`
                # guard; if the VCA list can contain None entries this would
                # raise AttributeError first — confirm the list invariant.
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the config descriptor matching the VCA scope:
                # ns-level, vdu-level, kdu-level or vnf-level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # abort here; the finally block writes the failed status
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            # new-generation RO and legacy RO use different termination paths
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                    stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                # propagate the final state to every VNFR of this NS
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                # notify subscribers (e.g. NBI) about the operation outcome
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4750
garciadeblas5697b8b2021-03-24 09:17:02 +01004751 async def _wait_for_tasks(
4752 self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4753 ):
tiernoe876f672020-02-13 14:34:48 +00004754 time_start = time()
tiernoa2143262020-03-27 16:20:40 +00004755 error_detail_list = []
tiernoe876f672020-02-13 14:34:48 +00004756 error_list = []
4757 pending_tasks = list(created_tasks_info.keys())
4758 num_tasks = len(pending_tasks)
4759 num_done = 0
4760 stage[1] = "{}/{}.".format(num_done, num_tasks)
4761 self._write_op_status(nslcmop_id, stage)
tiernoe876f672020-02-13 14:34:48 +00004762 while pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00004763 new_error = None
tiernoe876f672020-02-13 14:34:48 +00004764 _timeout = timeout + time_start - time()
garciadeblas5697b8b2021-03-24 09:17:02 +01004765 done, pending_tasks = await asyncio.wait(
4766 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4767 )
tiernoe876f672020-02-13 14:34:48 +00004768 num_done += len(done)
garciadeblas5697b8b2021-03-24 09:17:02 +01004769 if not done: # Timeout
tiernoe876f672020-02-13 14:34:48 +00004770 for task in pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00004771 new_error = created_tasks_info[task] + ": Timeout"
4772 error_detail_list.append(new_error)
4773 error_list.append(new_error)
tiernoe876f672020-02-13 14:34:48 +00004774 break
4775 for task in done:
4776 if task.cancelled():
tierno067e04a2020-03-31 12:53:13 +00004777 exc = "Cancelled"
tiernoe876f672020-02-13 14:34:48 +00004778 else:
4779 exc = task.exception()
tierno067e04a2020-03-31 12:53:13 +00004780 if exc:
4781 if isinstance(exc, asyncio.TimeoutError):
4782 exc = "Timeout"
4783 new_error = created_tasks_info[task] + ": {}".format(exc)
4784 error_list.append(created_tasks_info[task])
4785 error_detail_list.append(new_error)
garciadeblas5697b8b2021-03-24 09:17:02 +01004786 if isinstance(
4787 exc,
4788 (
4789 str,
4790 DbException,
4791 N2VCException,
4792 ROclient.ROClientException,
4793 LcmException,
4794 K8sException,
4795 NgRoException,
4796 ),
4797 ):
tierno067e04a2020-03-31 12:53:13 +00004798 self.logger.error(logging_text + new_error)
tiernoe876f672020-02-13 14:34:48 +00004799 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004800 exc_traceback = "".join(
4801 traceback.format_exception(None, exc, exc.__traceback__)
4802 )
4803 self.logger.error(
4804 logging_text
4805 + created_tasks_info[task]
4806 + " "
4807 + exc_traceback
4808 )
tierno067e04a2020-03-31 12:53:13 +00004809 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004810 self.logger.debug(
4811 logging_text + created_tasks_info[task] + ": Done"
4812 )
tiernoe876f672020-02-13 14:34:48 +00004813 stage[1] = "{}/{}.".format(num_done, num_tasks)
4814 if new_error:
tiernoa2143262020-03-27 16:20:40 +00004815 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
tiernoe876f672020-02-13 14:34:48 +00004816 if nsr_id: # update also nsr
garciadeblas5697b8b2021-03-24 09:17:02 +01004817 self.update_db_2(
4818 "nsrs",
4819 nsr_id,
4820 {
4821 "errorDescription": "Error at: " + ", ".join(error_list),
4822 "errorDetail": ". ".join(error_detail_list),
4823 },
4824 )
tiernoe876f672020-02-13 14:34:48 +00004825 self._write_op_status(nslcmop_id, stage)
tiernoa2143262020-03-27 16:20:40 +00004826 return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00004827
tiernoda1ff8c2020-10-22 14:12:46 +00004828 @staticmethod
4829 def _map_primitive_params(primitive_desc, params, instantiation_params):
tiernoda964822019-01-14 15:53:47 +00004830 """
4831 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4832 The default-value is used. If it is between < > it look for a value at instantiation_params
4833 :param primitive_desc: portion of VNFD/NSD that describes primitive
4834 :param params: Params provided by user
4835 :param instantiation_params: Instantiation params provided by user
4836 :return: a dictionary with the calculated params
4837 """
4838 calculated_params = {}
4839 for parameter in primitive_desc.get("parameter", ()):
4840 param_name = parameter["name"]
4841 if param_name in params:
4842 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00004843 elif "default-value" in parameter or "value" in parameter:
4844 if "value" in parameter:
4845 calculated_params[param_name] = parameter["value"]
4846 else:
4847 calculated_params[param_name] = parameter["default-value"]
garciadeblas5697b8b2021-03-24 09:17:02 +01004848 if (
4849 isinstance(calculated_params[param_name], str)
4850 and calculated_params[param_name].startswith("<")
4851 and calculated_params[param_name].endswith(">")
4852 ):
tierno98ad6ea2019-05-30 17:16:28 +00004853 if calculated_params[param_name][1:-1] in instantiation_params:
garciadeblas5697b8b2021-03-24 09:17:02 +01004854 calculated_params[param_name] = instantiation_params[
4855 calculated_params[param_name][1:-1]
4856 ]
tiernoda964822019-01-14 15:53:47 +00004857 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004858 raise LcmException(
4859 "Parameter {} needed to execute primitive {} not provided".format(
4860 calculated_params[param_name], primitive_desc["name"]
4861 )
4862 )
tiernoda964822019-01-14 15:53:47 +00004863 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004864 raise LcmException(
4865 "Parameter {} needed to execute primitive {} not provided".format(
4866 param_name, primitive_desc["name"]
4867 )
4868 )
tierno59d22d22018-09-25 18:10:19 +02004869
tiernoda964822019-01-14 15:53:47 +00004870 if isinstance(calculated_params[param_name], (dict, list, tuple)):
garciadeblas5697b8b2021-03-24 09:17:02 +01004871 calculated_params[param_name] = yaml.safe_dump(
4872 calculated_params[param_name], default_flow_style=True, width=256
4873 )
4874 elif isinstance(calculated_params[param_name], str) and calculated_params[
4875 param_name
4876 ].startswith("!!yaml "):
tiernoda964822019-01-14 15:53:47 +00004877 calculated_params[param_name] = calculated_params[param_name][7:]
tiernofa40e692020-10-14 14:59:36 +00004878 if parameter.get("data-type") == "INTEGER":
4879 try:
4880 calculated_params[param_name] = int(calculated_params[param_name])
4881 except ValueError: # error converting string to int
4882 raise LcmException(
garciadeblas5697b8b2021-03-24 09:17:02 +01004883 "Parameter {} of primitive {} must be integer".format(
4884 param_name, primitive_desc["name"]
4885 )
4886 )
tiernofa40e692020-10-14 14:59:36 +00004887 elif parameter.get("data-type") == "BOOLEAN":
garciadeblas5697b8b2021-03-24 09:17:02 +01004888 calculated_params[param_name] = not (
4889 (str(calculated_params[param_name])).lower() == "false"
4890 )
tiernoc3f2a822019-11-05 13:45:04 +00004891
4892 # add always ns_config_info if primitive name is config
4893 if primitive_desc["name"] == "config":
4894 if "ns_config_info" in instantiation_params:
garciadeblas5697b8b2021-03-24 09:17:02 +01004895 calculated_params["ns_config_info"] = instantiation_params[
4896 "ns_config_info"
4897 ]
tiernoda964822019-01-14 15:53:47 +00004898 return calculated_params
4899
garciadeblas5697b8b2021-03-24 09:17:02 +01004900 def _look_for_deployed_vca(
4901 self,
4902 deployed_vca,
4903 member_vnf_index,
4904 vdu_id,
4905 vdu_count_index,
4906 kdu_name=None,
4907 ee_descriptor_id=None,
4908 ):
tiernoe876f672020-02-13 14:34:48 +00004909 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4910 for vca in deployed_vca:
4911 if not vca:
4912 continue
4913 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4914 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01004915 if (
4916 vdu_count_index is not None
4917 and vdu_count_index != vca["vdu_count_index"]
4918 ):
tiernoe876f672020-02-13 14:34:48 +00004919 continue
4920 if kdu_name and kdu_name != vca["kdu_name"]:
4921 continue
tiernoa278b842020-07-08 15:33:55 +00004922 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4923 continue
tiernoe876f672020-02-13 14:34:48 +00004924 break
4925 else:
4926 # vca_deployed not found
garciadeblas5697b8b2021-03-24 09:17:02 +01004927 raise LcmException(
4928 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4929 " is not deployed".format(
4930 member_vnf_index,
4931 vdu_id,
4932 vdu_count_index,
4933 kdu_name,
4934 ee_descriptor_id,
4935 )
4936 )
tiernoe876f672020-02-13 14:34:48 +00004937 # get ee_id
4938 ee_id = vca.get("ee_id")
garciadeblas5697b8b2021-03-24 09:17:02 +01004939 vca_type = vca.get(
4940 "type", "lxc_proxy_charm"
4941 ) # default value for backward compatibility - proxy charm
tiernoe876f672020-02-13 14:34:48 +00004942 if not ee_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01004943 raise LcmException(
4944 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4945 "execution environment".format(
4946 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4947 )
4948 )
tierno588547c2020-07-01 15:30:20 +00004949 return ee_id, vca_type
tiernoe876f672020-02-13 14:34:48 +00004950
David Garciac1fe90a2021-03-31 19:12:02 +02004951 async def _ns_execute_primitive(
4952 self,
4953 ee_id,
4954 primitive,
4955 primitive_params,
4956 retries=0,
4957 retries_interval=30,
4958 timeout=None,
4959 vca_type=None,
4960 db_dict=None,
4961 vca_id: str = None,
4962 ) -> (str, str):
tiernoda964822019-01-14 15:53:47 +00004963 try:
tierno98ad6ea2019-05-30 17:16:28 +00004964 if primitive == "config":
4965 primitive_params = {"params": primitive_params}
tierno2fc7ce52019-06-11 22:50:01 +00004966
tierno588547c2020-07-01 15:30:20 +00004967 vca_type = vca_type or "lxc_proxy_charm"
4968
quilesj7e13aeb2019-10-08 13:34:55 +02004969 while retries >= 0:
4970 try:
tierno067e04a2020-03-31 12:53:13 +00004971 output = await asyncio.wait_for(
tierno588547c2020-07-01 15:30:20 +00004972 self.vca_map[vca_type].exec_primitive(
tierno067e04a2020-03-31 12:53:13 +00004973 ee_id=ee_id,
4974 primitive_name=primitive,
4975 params_dict=primitive_params,
4976 progress_timeout=self.timeout_progress_primitive,
tierno588547c2020-07-01 15:30:20 +00004977 total_timeout=self.timeout_primitive,
David Garciac1fe90a2021-03-31 19:12:02 +02004978 db_dict=db_dict,
4979 vca_id=vca_id,
aktas98488ed2021-07-29 17:42:49 +03004980 vca_type=vca_type,
David Garciac1fe90a2021-03-31 19:12:02 +02004981 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01004982 timeout=timeout or self.timeout_primitive,
4983 )
quilesj7e13aeb2019-10-08 13:34:55 +02004984 # execution was OK
4985 break
tierno067e04a2020-03-31 12:53:13 +00004986 except asyncio.CancelledError:
4987 raise
4988 except Exception as e: # asyncio.TimeoutError
4989 if isinstance(e, asyncio.TimeoutError):
4990 e = "Timeout"
quilesj7e13aeb2019-10-08 13:34:55 +02004991 retries -= 1
4992 if retries >= 0:
garciadeblas5697b8b2021-03-24 09:17:02 +01004993 self.logger.debug(
4994 "Error executing action {} on {} -> {}".format(
4995 primitive, ee_id, e
4996 )
4997 )
quilesj7e13aeb2019-10-08 13:34:55 +02004998 # wait and retry
4999 await asyncio.sleep(retries_interval, loop=self.loop)
tierno73d8bd02019-11-18 17:33:27 +00005000 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005001 return "FAILED", str(e)
quilesj7e13aeb2019-10-08 13:34:55 +02005002
garciadeblas5697b8b2021-03-24 09:17:02 +01005003 return "COMPLETED", output
quilesj7e13aeb2019-10-08 13:34:55 +02005004
tierno067e04a2020-03-31 12:53:13 +00005005 except (LcmException, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00005006 raise
quilesj7e13aeb2019-10-08 13:34:55 +02005007 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01005008 return "FAIL", "Error executing action {}: {}".format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02005009
ksaikiranr3fde2c72021-03-15 10:39:06 +05305010 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5011 """
5012 Updating the vca_status with latest juju information in nsrs record
5013 :param: nsr_id: Id of the nsr
5014 :param: nslcmop_id: Id of the nslcmop
5015 :return: None
5016 """
5017
5018 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5019 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garciac1fe90a2021-03-31 19:12:02 +02005020 vca_id = self.get_vca_id({}, db_nsr)
garciadeblas5697b8b2021-03-24 09:17:02 +01005021 if db_nsr["_admin"]["deployed"]["K8s"]:
Pedro Escaleira75b620d2022-04-01 01:49:22 +01005022 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5023 cluster_uuid, kdu_instance, cluster_type = (
5024 k8s["k8scluster-uuid"],
5025 k8s["kdu-instance"],
5026 k8s["k8scluster-type"],
5027 )
garciadeblas5697b8b2021-03-24 09:17:02 +01005028 await self._on_update_k8s_db(
Pedro Escaleira75b620d2022-04-01 01:49:22 +01005029 cluster_uuid=cluster_uuid,
5030 kdu_instance=kdu_instance,
5031 filter={"_id": nsr_id},
5032 vca_id=vca_id,
5033 cluster_type=cluster_type,
garciadeblas5697b8b2021-03-24 09:17:02 +01005034 )
ksaikiranr656b6dd2021-02-19 10:25:18 +05305035 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005036 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
ksaikiranr656b6dd2021-02-19 10:25:18 +05305037 table, filter = "nsrs", {"_id": nsr_id}
5038 path = "_admin.deployed.VCA.{}.".format(vca_index)
5039 await self._on_update_n2vc_db(table, filter, path, {})
ksaikiranr3fde2c72021-03-15 10:39:06 +05305040
5041 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5042 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5043
tierno59d22d22018-09-25 18:10:19 +02005044 async def action(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02005045 # Try to lock HA task here
garciadeblas5697b8b2021-03-24 09:17:02 +01005046 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02005047 if not task_is_locked_by_me:
5048 return
5049
tierno59d22d22018-09-25 18:10:19 +02005050 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
5051 self.logger.debug(logging_text + "Enter")
5052 # get all needed from database
5053 db_nsr = None
5054 db_nslcmop = None
tiernoe876f672020-02-13 14:34:48 +00005055 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02005056 db_nslcmop_update = {}
5057 nslcmop_operation_state = None
tierno067e04a2020-03-31 12:53:13 +00005058 error_description_nslcmop = None
tierno59d22d22018-09-25 18:10:19 +02005059 exc = None
5060 try:
kuused124bfe2019-06-18 12:09:24 +02005061 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00005062 step = "Waiting for previous operations to terminate"
garciadeblas5697b8b2021-03-24 09:17:02 +01005063 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02005064
quilesj4cda56b2019-12-05 10:02:20 +00005065 self._write_ns_status(
5066 nsr_id=nsr_id,
5067 ns_state=None,
5068 current_operation="RUNNING ACTION",
garciadeblas5697b8b2021-03-24 09:17:02 +01005069 current_operation_id=nslcmop_id,
quilesj4cda56b2019-12-05 10:02:20 +00005070 )
5071
tierno59d22d22018-09-25 18:10:19 +02005072 step = "Getting information from database"
5073 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5074 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
Guillermo Calvino57c68152022-01-26 17:40:31 +01005075 if db_nslcmop["operationParams"].get("primitive_params"):
5076 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
5077 db_nslcmop["operationParams"]["primitive_params"]
5078 )
tiernoda964822019-01-14 15:53:47 +00005079
tiernoe4f7e6c2018-11-27 14:55:30 +00005080 nsr_deployed = db_nsr["_admin"].get("deployed")
tierno1b633412019-02-25 16:48:23 +00005081 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tierno59d22d22018-09-25 18:10:19 +02005082 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
calvinosanch9f9c6f22019-11-04 13:37:39 +01005083 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
tiernoe4f7e6c2018-11-27 14:55:30 +00005084 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
tierno067e04a2020-03-31 12:53:13 +00005085 primitive = db_nslcmop["operationParams"]["primitive"]
5086 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
garciadeblas5697b8b2021-03-24 09:17:02 +01005087 timeout_ns_action = db_nslcmop["operationParams"].get(
5088 "timeout_ns_action", self.timeout_primitive
5089 )
tierno59d22d22018-09-25 18:10:19 +02005090
tierno1b633412019-02-25 16:48:23 +00005091 if vnf_index:
5092 step = "Getting vnfr from database"
garciadeblas5697b8b2021-03-24 09:17:02 +01005093 db_vnfr = self.db.get_one(
5094 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5095 )
Guillermo Calvino48aee4c2022-02-01 18:59:50 +01005096 if db_vnfr.get("kdur"):
5097 kdur_list = []
5098 for kdur in db_vnfr["kdur"]:
5099 if kdur.get("additionalParams"):
Pedro Escaleirab9a7c4d2022-03-31 00:08:05 +01005100 kdur["additionalParams"] = json.loads(
5101 kdur["additionalParams"]
5102 )
Guillermo Calvino48aee4c2022-02-01 18:59:50 +01005103 kdur_list.append(kdur)
5104 db_vnfr["kdur"] = kdur_list
tierno1b633412019-02-25 16:48:23 +00005105 step = "Getting vnfd from database"
5106 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
bravofa96dd9c2021-10-13 17:37:36 -03005107
5108 # Sync filesystem before running a primitive
5109 self.fs.sync(db_vnfr["vnfd-id"])
tierno1b633412019-02-25 16:48:23 +00005110 else:
tierno067e04a2020-03-31 12:53:13 +00005111 step = "Getting nsd from database"
5112 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
tiernoda964822019-01-14 15:53:47 +00005113
David Garciac1fe90a2021-03-31 19:12:02 +02005114 vca_id = self.get_vca_id(db_vnfr, db_nsr)
tierno82974b22018-11-27 21:55:36 +00005115 # for backward compatibility
5116 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5117 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5118 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5120
tiernoda964822019-01-14 15:53:47 +00005121 # look for primitive
tiernoa278b842020-07-08 15:33:55 +00005122 config_primitive_desc = descriptor_configuration = None
tiernoda964822019-01-14 15:53:47 +00005123 if vdu_id:
bravofe5a31bc2021-02-17 19:09:12 -03005124 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
calvinosanch9f9c6f22019-11-04 13:37:39 +01005125 elif kdu_name:
bravofe5a31bc2021-02-17 19:09:12 -03005126 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
tierno1b633412019-02-25 16:48:23 +00005127 elif vnf_index:
bravofe5a31bc2021-02-17 19:09:12 -03005128 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
tierno1b633412019-02-25 16:48:23 +00005129 else:
tiernoa278b842020-07-08 15:33:55 +00005130 descriptor_configuration = db_nsd.get("ns-configuration")
5131
garciadeblas5697b8b2021-03-24 09:17:02 +01005132 if descriptor_configuration and descriptor_configuration.get(
5133 "config-primitive"
5134 ):
tiernoa278b842020-07-08 15:33:55 +00005135 for config_primitive in descriptor_configuration["config-primitive"]:
tierno1b633412019-02-25 16:48:23 +00005136 if config_primitive["name"] == primitive:
5137 config_primitive_desc = config_primitive
5138 break
tiernoda964822019-01-14 15:53:47 +00005139
garciadeblas6bed6b32020-07-20 11:05:42 +00005140 if not config_primitive_desc:
5141 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
garciadeblas5697b8b2021-03-24 09:17:02 +01005142 raise LcmException(
5143 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5144 primitive
5145 )
5146 )
garciadeblas6bed6b32020-07-20 11:05:42 +00005147 primitive_name = primitive
5148 ee_descriptor_id = None
5149 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005150 primitive_name = config_primitive_desc.get(
5151 "execution-environment-primitive", primitive
5152 )
5153 ee_descriptor_id = config_primitive_desc.get(
5154 "execution-environment-ref"
5155 )
tierno1b633412019-02-25 16:48:23 +00005156
tierno1b633412019-02-25 16:48:23 +00005157 if vnf_index:
tierno626e0152019-11-29 14:16:16 +00005158 if vdu_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01005159 vdur = next(
5160 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
5161 )
bravof922c4172020-11-24 21:21:43 -03005162 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
tierno067e04a2020-03-31 12:53:13 +00005163 elif kdu_name:
garciadeblas5697b8b2021-03-24 09:17:02 +01005164 kdur = next(
5165 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5166 )
bravof922c4172020-11-24 21:21:43 -03005167 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
tierno067e04a2020-03-31 12:53:13 +00005168 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005169 desc_params = parse_yaml_strings(
5170 db_vnfr.get("additionalParamsForVnf")
5171 )
tierno1b633412019-02-25 16:48:23 +00005172 else:
bravof922c4172020-11-24 21:21:43 -03005173 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
bravofe5a31bc2021-02-17 19:09:12 -03005174 if kdu_name and get_configuration(db_vnfd, kdu_name):
5175 kdu_configuration = get_configuration(db_vnfd, kdu_name)
David Garciad41dbd62020-12-10 12:52:52 +01005176 actions = set()
David Garciaa1003662021-02-16 21:07:58 +01005177 for primitive in kdu_configuration.get("initial-config-primitive", []):
David Garciad41dbd62020-12-10 12:52:52 +01005178 actions.add(primitive["name"])
David Garciaa1003662021-02-16 21:07:58 +01005179 for primitive in kdu_configuration.get("config-primitive", []):
David Garciad41dbd62020-12-10 12:52:52 +01005180 actions.add(primitive["name"])
David Garciaae230232022-05-10 14:07:12 +02005181 kdu = find_in_list(
5182 nsr_deployed["K8s"],
5183 lambda kdu: kdu_name == kdu["kdu-name"]
5184 and kdu["member-vnf-index"] == vnf_index,
5185 )
5186 kdu_action = (
5187 True
5188 if primitive_name in actions
5189 and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
5190 else False
5191 )
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02005192
tiernoda964822019-01-14 15:53:47 +00005193 # TODO check if ns is in a proper status
garciadeblas5697b8b2021-03-24 09:17:02 +01005194 if kdu_name and (
5195 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5196 ):
tierno067e04a2020-03-31 12:53:13 +00005197 # kdur and desc_params already set from before
5198 if primitive_params:
5199 desc_params.update(primitive_params)
5200 # TODO Check if we will need something at vnf level
5201 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
garciadeblas5697b8b2021-03-24 09:17:02 +01005202 if (
5203 kdu_name == kdu["kdu-name"]
5204 and kdu["member-vnf-index"] == vnf_index
5205 ):
tierno067e04a2020-03-31 12:53:13 +00005206 break
5207 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005208 raise LcmException(
5209 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5210 )
quilesj7e13aeb2019-10-08 13:34:55 +02005211
tierno067e04a2020-03-31 12:53:13 +00005212 if kdu.get("k8scluster-type") not in self.k8scluster_map:
garciadeblas5697b8b2021-03-24 09:17:02 +01005213 msg = "unknown k8scluster-type '{}'".format(
5214 kdu.get("k8scluster-type")
5215 )
tierno067e04a2020-03-31 12:53:13 +00005216 raise LcmException(msg)
5217
garciadeblas5697b8b2021-03-24 09:17:02 +01005218 db_dict = {
5219 "collection": "nsrs",
5220 "filter": {"_id": nsr_id},
5221 "path": "_admin.deployed.K8s.{}".format(index),
5222 }
5223 self.logger.debug(
5224 logging_text
5225 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5226 )
tiernoa278b842020-07-08 15:33:55 +00005227 step = "Executing kdu {}".format(primitive_name)
5228 if primitive_name == "upgrade":
tierno067e04a2020-03-31 12:53:13 +00005229 if desc_params.get("kdu_model"):
5230 kdu_model = desc_params.get("kdu_model")
5231 del desc_params["kdu_model"]
5232 else:
5233 kdu_model = kdu.get("kdu-model")
5234 parts = kdu_model.split(sep=":")
5235 if len(parts) == 2:
5236 kdu_model = parts[0]
5237
5238 detailed_status = await asyncio.wait_for(
5239 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5240 cluster_uuid=kdu.get("k8scluster-uuid"),
5241 kdu_instance=kdu.get("kdu-instance"),
garciadeblas5697b8b2021-03-24 09:17:02 +01005242 atomic=True,
5243 kdu_model=kdu_model,
5244 params=desc_params,
5245 db_dict=db_dict,
5246 timeout=timeout_ns_action,
5247 ),
5248 timeout=timeout_ns_action + 10,
5249 )
5250 self.logger.debug(
5251 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5252 )
tiernoa278b842020-07-08 15:33:55 +00005253 elif primitive_name == "rollback":
tierno067e04a2020-03-31 12:53:13 +00005254 detailed_status = await asyncio.wait_for(
5255 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5256 cluster_uuid=kdu.get("k8scluster-uuid"),
5257 kdu_instance=kdu.get("kdu-instance"),
garciadeblas5697b8b2021-03-24 09:17:02 +01005258 db_dict=db_dict,
5259 ),
5260 timeout=timeout_ns_action,
5261 )
tiernoa278b842020-07-08 15:33:55 +00005262 elif primitive_name == "status":
tierno067e04a2020-03-31 12:53:13 +00005263 detailed_status = await asyncio.wait_for(
5264 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5265 cluster_uuid=kdu.get("k8scluster-uuid"),
David Garciac1fe90a2021-03-31 19:12:02 +02005266 kdu_instance=kdu.get("kdu-instance"),
5267 vca_id=vca_id,
5268 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01005269 timeout=timeout_ns_action,
David Garciac1fe90a2021-03-31 19:12:02 +02005270 )
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02005271 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005272 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5273 kdu["kdu-name"], nsr_id
5274 )
5275 params = self._map_primitive_params(
5276 config_primitive_desc, primitive_params, desc_params
5277 )
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02005278
5279 detailed_status = await asyncio.wait_for(
5280 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5281 cluster_uuid=kdu.get("k8scluster-uuid"),
5282 kdu_instance=kdu_instance,
tiernoa278b842020-07-08 15:33:55 +00005283 primitive_name=primitive_name,
garciadeblas5697b8b2021-03-24 09:17:02 +01005284 params=params,
5285 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02005286 timeout=timeout_ns_action,
5287 vca_id=vca_id,
5288 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01005289 timeout=timeout_ns_action,
David Garciac1fe90a2021-03-31 19:12:02 +02005290 )
tierno067e04a2020-03-31 12:53:13 +00005291
5292 if detailed_status:
garciadeblas5697b8b2021-03-24 09:17:02 +01005293 nslcmop_operation_state = "COMPLETED"
tierno067e04a2020-03-31 12:53:13 +00005294 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005295 detailed_status = ""
5296 nslcmop_operation_state = "FAILED"
tierno067e04a2020-03-31 12:53:13 +00005297 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005298 ee_id, vca_type = self._look_for_deployed_vca(
5299 nsr_deployed["VCA"],
5300 member_vnf_index=vnf_index,
5301 vdu_id=vdu_id,
5302 vdu_count_index=vdu_count_index,
5303 ee_descriptor_id=ee_descriptor_id,
5304 )
5305 for vca_index, vca_deployed in enumerate(
5306 db_nsr["_admin"]["deployed"]["VCA"]
5307 ):
ksaikiranrb1c9f372021-03-15 11:07:29 +05305308 if vca_deployed.get("member-vnf-index") == vnf_index:
garciadeblas5697b8b2021-03-24 09:17:02 +01005309 db_dict = {
5310 "collection": "nsrs",
5311 "filter": {"_id": nsr_id},
5312 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5313 }
ksaikiranrb1c9f372021-03-15 11:07:29 +05305314 break
garciadeblas5697b8b2021-03-24 09:17:02 +01005315 (
5316 nslcmop_operation_state,
5317 detailed_status,
5318 ) = await self._ns_execute_primitive(
tierno588547c2020-07-01 15:30:20 +00005319 ee_id,
tiernoa278b842020-07-08 15:33:55 +00005320 primitive=primitive_name,
garciadeblas5697b8b2021-03-24 09:17:02 +01005321 primitive_params=self._map_primitive_params(
5322 config_primitive_desc, primitive_params, desc_params
5323 ),
tierno588547c2020-07-01 15:30:20 +00005324 timeout=timeout_ns_action,
5325 vca_type=vca_type,
David Garciac1fe90a2021-03-31 19:12:02 +02005326 db_dict=db_dict,
5327 vca_id=vca_id,
5328 )
tierno067e04a2020-03-31 12:53:13 +00005329
5330 db_nslcmop_update["detailed-status"] = detailed_status
garciadeblas5697b8b2021-03-24 09:17:02 +01005331 error_description_nslcmop = (
5332 detailed_status if nslcmop_operation_state == "FAILED" else ""
5333 )
5334 self.logger.debug(
5335 logging_text
5336 + " task Done with result {} {}".format(
5337 nslcmop_operation_state, detailed_status
5338 )
5339 )
tierno59d22d22018-09-25 18:10:19 +02005340 return # database update is called inside finally
5341
tiernof59ad6c2020-04-08 12:50:52 +00005342 except (DbException, LcmException, N2VCException, K8sException) as e:
tierno59d22d22018-09-25 18:10:19 +02005343 self.logger.error(logging_text + "Exit Exception {}".format(e))
5344 exc = e
5345 except asyncio.CancelledError:
garciadeblas5697b8b2021-03-24 09:17:02 +01005346 self.logger.error(
5347 logging_text + "Cancelled Exception while '{}'".format(step)
5348 )
tierno59d22d22018-09-25 18:10:19 +02005349 exc = "Operation was cancelled"
tierno067e04a2020-03-31 12:53:13 +00005350 except asyncio.TimeoutError:
5351 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5352 exc = "Timeout"
tierno59d22d22018-09-25 18:10:19 +02005353 except Exception as e:
5354 exc = traceback.format_exc()
garciadeblas5697b8b2021-03-24 09:17:02 +01005355 self.logger.critical(
5356 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5357 exc_info=True,
5358 )
tierno59d22d22018-09-25 18:10:19 +02005359 finally:
tierno067e04a2020-03-31 12:53:13 +00005360 if exc:
garciadeblas5697b8b2021-03-24 09:17:02 +01005361 db_nslcmop_update[
5362 "detailed-status"
5363 ] = (
5364 detailed_status
5365 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
tierno067e04a2020-03-31 12:53:13 +00005366 nslcmop_operation_state = "FAILED"
5367 if db_nsr:
5368 self._write_ns_status(
5369 nsr_id=nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01005370 ns_state=db_nsr[
5371 "nsState"
5372 ], # TODO check if degraded. For the moment use previous status
tierno067e04a2020-03-31 12:53:13 +00005373 current_operation="IDLE",
5374 current_operation_id=None,
5375 # error_description=error_description_nsr,
5376 # error_detail=error_detail,
garciadeblas5697b8b2021-03-24 09:17:02 +01005377 other_update=db_nsr_update,
tierno067e04a2020-03-31 12:53:13 +00005378 )
5379
garciadeblas5697b8b2021-03-24 09:17:02 +01005380 self._write_op_status(
5381 op_id=nslcmop_id,
5382 stage="",
5383 error_message=error_description_nslcmop,
5384 operation_state=nslcmop_operation_state,
5385 other_update=db_nslcmop_update,
5386 )
tierno067e04a2020-03-31 12:53:13 +00005387
tierno59d22d22018-09-25 18:10:19 +02005388 if nslcmop_operation_state:
5389 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01005390 await self.msg.aiowrite(
5391 "ns",
5392 "actioned",
5393 {
5394 "nsr_id": nsr_id,
5395 "nslcmop_id": nslcmop_id,
5396 "operationState": nslcmop_operation_state,
5397 },
5398 loop=self.loop,
5399 )
tierno59d22d22018-09-25 18:10:19 +02005400 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01005401 self.logger.error(
5402 logging_text + "kafka_write notification Exception {}".format(e)
5403 )
tierno59d22d22018-09-25 18:10:19 +02005404 self.logger.debug(logging_text + "Exit")
5405 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
tierno067e04a2020-03-31 12:53:13 +00005406 return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02005407
elumalaica7ece02022-04-12 12:47:32 +05305408 async def terminate_vdus(
5409 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5410 ):
5411 """This method terminates VDUs
5412
5413 Args:
5414 db_vnfr: VNF instance record
5415 member_vnf_index: VNF index to identify the VDUs to be removed
5416 db_nsr: NS instance record
5417 update_db_nslcmops: Nslcmop update record
5418 """
5419 vca_scaling_info = []
5420 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5421 scaling_info["scaling_direction"] = "IN"
5422 scaling_info["vdu-delete"] = {}
5423 scaling_info["kdu-delete"] = {}
5424 db_vdur = db_vnfr.get("vdur")
5425 vdur_list = copy(db_vdur)
5426 count_index = 0
5427 for index, vdu in enumerate(vdur_list):
5428 vca_scaling_info.append(
5429 {
5430 "osm_vdu_id": vdu["vdu-id-ref"],
5431 "member-vnf-index": member_vnf_index,
5432 "type": "delete",
5433 "vdu_index": count_index,
5434 })
5435 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5436 scaling_info["vdu"].append(
5437 {
5438 "name": vdu.get("name") or vdu.get("vdu-name"),
5439 "vdu_id": vdu["vdu-id-ref"],
5440 "interface": [],
5441 })
5442 for interface in vdu["interfaces"]:
5443 scaling_info["vdu"][index]["interface"].append(
5444 {
5445 "name": interface["name"],
5446 "ip_address": interface["ip-address"],
5447 "mac_address": interface.get("mac-address"),
5448 })
5449 self.logger.info("NS update scaling info{}".format(scaling_info))
5450 stage[2] = "Terminating VDUs"
5451 if scaling_info.get("vdu-delete"):
5452 # scale_process = "RO"
5453 if self.ro_config.get("ng"):
5454 await self._scale_ng_ro(
5455 logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
5456 )
5457
5458 async def remove_vnf(
5459 self, nsr_id, nslcmop_id, vnf_instance_id
5460 ):
5461 """This method is to Remove VNF instances from NS.
5462
5463 Args:
5464 nsr_id: NS instance id
5465 nslcmop_id: nslcmop id of update
5466 vnf_instance_id: id of the VNF instance to be removed
5467
5468 Returns:
5469 result: (str, str) COMPLETED/FAILED, details
5470 """
5471 try:
5472 db_nsr_update = {}
5473 logging_text = "Task ns={} update ".format(nsr_id)
5474 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5475 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5476 if check_vnfr_count > 1:
5477 stage = ["", "", ""]
5478 step = "Getting nslcmop from database"
5479 self.logger.debug(step + " after having waited for previous tasks to be completed")
5480 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5481 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5482 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5483 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5484 """ db_vnfr = self.db.get_one(
5485 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5486
5487 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5488 await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
5489
5490 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5491 constituent_vnfr.remove(db_vnfr.get("_id"))
5492 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
5493 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5494 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5495 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5496 return "COMPLETED", "Done"
5497 else:
5498 step = "Terminate VNF Failed with"
5499 raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
5500 vnf_instance_id))
5501 except (LcmException, asyncio.CancelledError):
5502 raise
5503 except Exception as e:
5504 self.logger.debug("Error removing VNF {}".format(e))
5505 return "FAILED", "Error removing VNF {}".format(e)
5506
    async def _ns_redeploy_vnf(
        self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the currently deployed VDUs of the VNF, rewrites the vnfr
        (connection points, vdur taken from operationParams["newVdur"], new
        revision) and then re-instantiates the VDUs through the NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor (latest revision)
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the vnfr connection-point list from the descriptor ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # The new vdur list is pre-computed by the caller and carried in the
            # operation parameters ("newVdur")
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # Re-read the vnfr so subsequent helpers see the persisted state
            updated_db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # NOTE(review): vca_scaling_info and cloud_init_list are built below
            # but never consumed in this method (code inherited from scale()) --
            # candidates for removal; verify before cleaning up
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(
                    vdud, db_vnfd
                )
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                # NOTE(review): count_index is never incremented, so every vdu
                # is registered with index 0 -- TODO confirm intended
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info))
                await self._scale_ng_ro(
                    logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            # deliberate failures and cancellation propagate to the caller
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5610
aticigdffa6212022-04-12 15:27:53 +03005611 async def _ns_charm_upgrade(
5612 self,
5613 ee_id,
5614 charm_id,
5615 charm_type,
5616 path,
5617 timeout: float = None,
5618 ) -> (str, str):
5619 """This method upgrade charms in VNF instances
5620
5621 Args:
5622 ee_id: Execution environment id
5623 path: Local path to the charm
5624 charm_id: charm-id
5625 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5626 timeout: (Float) Timeout for the ns update operation
5627
5628 Returns:
5629 result: (str, str) COMPLETED/FAILED, details
5630 """
5631 try:
5632 charm_type = charm_type or "lxc_proxy_charm"
5633 output = await self.vca_map[charm_type].upgrade_charm(
5634 ee_id=ee_id,
5635 path=path,
5636 charm_id=charm_id,
5637 charm_type=charm_type,
5638 timeout=timeout or self.timeout_ns_update,
5639 )
5640
5641 if output:
5642 return "COMPLETED", output
5643
5644 except (LcmException, asyncio.CancelledError):
5645 raise
5646
5647 except Exception as e:
5648
5649 self.logger.debug("Error upgrading charm {}".format(path))
5650
5651 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5652
    async def update(self, nsr_id, nslcmop_id):
        """Update NS according to different update types

        This method performs upgrade of VNF instances then updates the revision
        number in VNF record

        Supported updateType values (read from the nslcmop operationParams):
        CHANGE_VNFPKG (charm upgrade or full VNF redeploy), REMOVE_VNF and
        OPERATE_VNF (start/stop/rebuild).

        Args:
            nsr_id: Network service will be updated
            nslcmop_id: ns lcm operation id

        Returns:
             It may raise DbException, LcmException, N2VCException, K8sException

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # Set the required variables to be filled up later
        db_nsr = None
        db_nslcmop_update = {}
        vnfr_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        error_description_nslcmop = ""
        exc = None
        # change_type selects the kafka topic event emitted in the finally block
        change_type = "updated"
        detailed_status = ""

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="UPDATING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            db_nslcmop = self.db.get_one(
                "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
            )
            update_type = db_nslcmop["operationParams"]["updateType"]

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # remember the previous status to restore it when the update ends
            old_operational_status = db_nsr["operational-status"]
            db_nsr_update["operational-status"] = "updating"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            nsr_deployed = db_nsr["_admin"].get("deployed")

            if update_type == "CHANGE_VNFPKG":

                # Get the input parameters given through update request
                vnf_instance_id = db_nslcmop["operationParams"][
                    "changeVnfPackageData"
                ].get("vnfInstanceId")

                vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
                    "vnfdId"
                )
                timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")

                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
                )

                step = "Getting vnfds from database"
                # Latest VNFD
                latest_vnfd = self.db.get_one(
                    "vnfds", {"_id": vnfd_id}, fail_on_empty=False
                )
                latest_vnfd_revision = latest_vnfd["_admin"].get("revision")

                # Current VNFD
                current_vnf_revision = db_vnfr.get("revision", 1)
                current_vnfd = self.db.get_one(
                    "vnfds_revisions",
                    {"_id": vnfd_id + ":" + str(current_vnf_revision)},
                    fail_on_empty=False,
                )
                # Charm artifact paths will be filled up later
                (
                    current_charm_artifact_path,
                    target_charm_artifact_path,
                    charm_artifact_paths,
                ) = ([], [], [])

                step = "Checking if revision has changed in VNFD"
                if current_vnf_revision != latest_vnfd_revision:

                    change_type = "policy_updated"

                    # There is new revision of VNFD, update operation is required
                    current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
                    latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)

                    step = "Removing the VNFD packages if they exist in the local path"
                    shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
                    shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)

                    step = "Get the VNFD packages from FSMongo"
                    self.fs.sync(from_path=latest_vnfd_path)
                    self.fs.sync(from_path=current_vnfd_path)

                    step = (
                        "Get the charm-type, charm-id, ee-id if there is deployed VCA"
                    )
                    base_folder = latest_vnfd["_admin"]["storage"]

                    for charm_index, charm_deployed in enumerate(
                        get_iterable(nsr_deployed, "VCA")
                    ):
                        vnf_index = db_vnfr.get("member-vnf-index-ref")

                        # Getting charm-id and charm-type
                        if charm_deployed.get("member-vnf-index") == vnf_index:
                            charm_id = self.get_vca_id(db_vnfr, db_nsr)
                            charm_type = charm_deployed.get("type")

                            # Getting ee-id
                            ee_id = charm_deployed.get("ee_id")

                            step = "Getting descriptor config"
                            descriptor_config = get_configuration(
                                current_vnfd, current_vnfd["id"]
                            )

                            if "execution-environment-list" in descriptor_config:
                                ee_list = descriptor_config.get(
                                    "execution-environment-list", []
                                )
                            else:
                                ee_list = []

                            # There could be several charm used in the same VNF
                            for ee_item in ee_list:
                                if ee_item.get("juju"):

                                    step = "Getting charm name"
                                    charm_name = ee_item["juju"].get("charm")

                                    step = "Setting Charm artifact paths"
                                    current_charm_artifact_path.append(
                                        get_charm_artifact_path(
                                            base_folder,
                                            charm_name,
                                            charm_type,
                                            current_vnf_revision,
                                        )
                                    )
                                    target_charm_artifact_path.append(
                                        get_charm_artifact_path(
                                            base_folder,
                                            charm_name,
                                            charm_type,
                                            latest_vnfd_revision,
                                        )
                                    )

                            # pair each deployed charm path with its upgrade target
                            charm_artifact_paths = zip(
                                current_charm_artifact_path, target_charm_artifact_path
                            )

                    step = "Checking if software version has changed in VNFD"
                    if find_software_version(current_vnfd) != find_software_version(
                        latest_vnfd
                    ):

                        step = "Checking if existing VNF has charm"
                        for current_charm_path, target_charm_path in list(
                            charm_artifact_paths
                        ):
                            if current_charm_path:
                                raise LcmException(
                                    "Software version change is not supported as VNF instance {} has charm.".format(
                                        vnf_instance_id
                                    )
                                )

                        # There is no change in the charm package, then redeploy the VNF
                        # based on new descriptor
                        step = "Redeploying VNF"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        (
                            result,
                            detailed_status
                        ) = await self._ns_redeploy_vnf(
                            nsr_id,
                            nslcmop_id,
                            latest_vnfd,
                            db_vnfr,
                            db_nsr
                        )
                        if result == "FAILED":
                            nslcmop_operation_state = result
                            error_description_nslcmop = detailed_status
                        db_nslcmop_update["detailed-status"] = detailed_status
                        self.logger.debug(
                            logging_text
                            + " step {} Done with result {} {}".format(
                                step, nslcmop_operation_state, detailed_status
                            )
                        )

                    else:
                        step = "Checking if any charm package has changed or not"
                        for current_charm_path, target_charm_path in list(
                            charm_artifact_paths
                        ):
                            if (
                                current_charm_path
                                and target_charm_path
                                and self.check_charm_hash_changed(
                                    current_charm_path, target_charm_path
                                )
                            ):

                                step = "Checking whether VNF uses juju bundle"
                                if check_juju_bundle_existence(current_vnfd):

                                    raise LcmException(
                                        "Charm upgrade is not supported for the instance which"
                                        " uses juju-bundle: {}".format(
                                            check_juju_bundle_existence(current_vnfd)
                                        )
                                    )

                                step = "Upgrading Charm"
                                (
                                    result,
                                    detailed_status,
                                ) = await self._ns_charm_upgrade(
                                    ee_id=ee_id,
                                    charm_id=charm_id,
                                    charm_type=charm_type,
                                    path=self.fs.path + target_charm_path,
                                    timeout=timeout_seconds,
                                )

                                if result == "FAILED":
                                    nslcmop_operation_state = result
                                    error_description_nslcmop = detailed_status

                                db_nslcmop_update["detailed-status"] = detailed_status
                                self.logger.debug(
                                    logging_text
                                    + " step {} Done with result {} {}".format(
                                        step, nslcmop_operation_state, detailed_status
                                    )
                                )

                        step = "Updating policies"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        result = "COMPLETED"
                        detailed_status = "Done"
                        db_nslcmop_update["detailed-status"] = "Done"

                    # If nslcmop_operation_state is None, so any operation is not failed.
                    if not nslcmop_operation_state:
                        nslcmop_operation_state = "COMPLETED"

                        # If update CHANGE_VNFPKG nslcmop_operation is successful
                        # vnf revision need to be updated
                        vnfr_update["revision"] = latest_vnfd_revision
                        self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)

                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )
            elif update_type == "REMOVE_VNF":
                # This part is included in https://osm.etsi.org/gerrit/11876
                vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
                member_vnf_index = db_vnfr["member-vnf-index-ref"]
                step = "Removing VNF"
                (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
                if result == "FAILED":
                    nslcmop_operation_state = result
                    error_description_nslcmop = detailed_status
                db_nslcmop_update["detailed-status"] = detailed_status
                change_type = "vnf_terminated"
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"
                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )

            elif update_type == "OPERATE_VNF":
                vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
                operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
                additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
                (result, detailed_status) = await self.rebuild_start_stop(
                    nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
                )
                if result == "FAILED":
                    nslcmop_operation_state = result
                    error_description_nslcmop = detailed_status
                db_nslcmop_update["detailed-status"] = detailed_status
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"
                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )

            # If nslcmop_operation_state is None, so any operation is not failed.
            # All operations are executed in overall.
            if not nslcmop_operation_state:
                nslcmop_operation_state = "COMPLETED"
            db_nsr_update["operational-status"] = old_operational_status

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                # record the failing step in all status fields at once
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                db_nsr_update["operational-status"] = old_operational_status
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr["nsState"],
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    # notify the operation result on the kafka bus; the event
                    # name depends on the update type handled above
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    if change_type in ("vnf_terminated", "policy_updated"):
                        msg.update({"vnf_member_index": member_vnf_index})
                    await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
        return nslcmop_operation_state, detailed_status
6040
tierno59d22d22018-09-25 18:10:19 +02006041 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02006042 # Try to lock HA task here
garciadeblas5697b8b2021-03-24 09:17:02 +01006043 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02006044 if not task_is_locked_by_me:
6045 return
6046
tierno59d22d22018-09-25 18:10:19 +02006047 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01006048 stage = ["", "", ""]
aktas13251562021-02-12 22:19:10 +03006049 tasks_dict_info = {}
tierno2357f4e2020-10-19 16:38:59 +00006050 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02006051 self.logger.debug(logging_text + "Enter")
6052 # get all needed from database
6053 db_nsr = None
tierno59d22d22018-09-25 18:10:19 +02006054 db_nslcmop_update = {}
tiernoe876f672020-02-13 14:34:48 +00006055 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02006056 exc = None
tierno9ab95942018-10-10 16:44:22 +02006057 # in case of error, indicates what part of scale was failed to put nsr at error status
6058 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02006059 old_operational_status = ""
6060 old_config_status = ""
aktas13251562021-02-12 22:19:10 +03006061 nsi_id = None
tierno59d22d22018-09-25 18:10:19 +02006062 try:
kuused124bfe2019-06-18 12:09:24 +02006063 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00006064 step = "Waiting for previous operations to terminate"
garciadeblas5697b8b2021-03-24 09:17:02 +01006065 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6066 self._write_ns_status(
6067 nsr_id=nsr_id,
6068 ns_state=None,
6069 current_operation="SCALING",
6070 current_operation_id=nslcmop_id,
6071 )
quilesj4cda56b2019-12-05 10:02:20 +00006072
ikalyvas02d9e7b2019-05-27 18:16:01 +03006073 step = "Getting nslcmop from database"
garciadeblas5697b8b2021-03-24 09:17:02 +01006074 self.logger.debug(
6075 step + " after having waited for previous tasks to be completed"
6076 )
ikalyvas02d9e7b2019-05-27 18:16:01 +03006077 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
bravof922c4172020-11-24 21:21:43 -03006078
ikalyvas02d9e7b2019-05-27 18:16:01 +03006079 step = "Getting nsr from database"
6080 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
ikalyvas02d9e7b2019-05-27 18:16:01 +03006081 old_operational_status = db_nsr["operational-status"]
6082 old_config_status = db_nsr["config-status"]
bravof922c4172020-11-24 21:21:43 -03006083
tierno59d22d22018-09-25 18:10:19 +02006084 step = "Parsing scaling parameters"
6085 db_nsr_update["operational-status"] = "scaling"
6086 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00006087 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01006088
garciadeblas5697b8b2021-03-24 09:17:02 +01006089 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6090 "scaleByStepData"
6091 ]["member-vnf-index"]
6092 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6093 "scaleByStepData"
6094 ]["scaling-group-descriptor"]
tierno59d22d22018-09-25 18:10:19 +02006095 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
tierno82974b22018-11-27 21:55:36 +00006096 # for backward compatibility
6097 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6098 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6099 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6100 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6101
tierno59d22d22018-09-25 18:10:19 +02006102 step = "Getting vnfr from database"
garciadeblas5697b8b2021-03-24 09:17:02 +01006103 db_vnfr = self.db.get_one(
6104 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6105 )
bravof922c4172020-11-24 21:21:43 -03006106
David Garciac1fe90a2021-03-31 19:12:02 +02006107 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6108
tierno59d22d22018-09-25 18:10:19 +02006109 step = "Getting vnfd from database"
6110 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03006111
aktas13251562021-02-12 22:19:10 +03006112 base_folder = db_vnfd["_admin"]["storage"]
6113
tierno59d22d22018-09-25 18:10:19 +02006114 step = "Getting scaling-group-descriptor"
bravof832f8992020-12-07 12:57:31 -03006115 scaling_descriptor = find_in_list(
garciadeblas5697b8b2021-03-24 09:17:02 +01006116 get_scaling_aspect(db_vnfd),
6117 lambda scale_desc: scale_desc["name"] == scaling_group,
bravof832f8992020-12-07 12:57:31 -03006118 )
6119 if not scaling_descriptor:
garciadeblas5697b8b2021-03-24 09:17:02 +01006120 raise LcmException(
6121 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6122 "at vnfd:scaling-group-descriptor".format(scaling_group)
6123 )
ikalyvas02d9e7b2019-05-27 18:16:01 +03006124
tierno15b1cf12019-08-29 13:21:40 +00006125 step = "Sending scale order to VIM"
bravof922c4172020-11-24 21:21:43 -03006126 # TODO check if ns is in a proper status
tierno59d22d22018-09-25 18:10:19 +02006127 nb_scale_op = 0
6128 if not db_nsr["_admin"].get("scaling-group"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006129 self.update_db_2(
6130 "nsrs",
6131 nsr_id,
6132 {
6133 "_admin.scaling-group": [
6134 {"name": scaling_group, "nb-scale-op": 0}
6135 ]
6136 },
6137 )
tierno59d22d22018-09-25 18:10:19 +02006138 admin_scale_index = 0
6139 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01006140 for admin_scale_index, admin_scale_info in enumerate(
6141 db_nsr["_admin"]["scaling-group"]
6142 ):
tierno59d22d22018-09-25 18:10:19 +02006143 if admin_scale_info["name"] == scaling_group:
6144 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6145 break
tierno9ab95942018-10-10 16:44:22 +02006146 else: # not found, set index one plus last element and add new entry with the name
6147 admin_scale_index += 1
garciadeblas5697b8b2021-03-24 09:17:02 +01006148 db_nsr_update[
6149 "_admin.scaling-group.{}.name".format(admin_scale_index)
6150 ] = scaling_group
aktas5f75f102021-03-15 11:26:10 +03006151
6152 vca_scaling_info = []
6153 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
tierno59d22d22018-09-25 18:10:19 +02006154 if scaling_type == "SCALE_OUT":
bravof832f8992020-12-07 12:57:31 -03006155 if "aspect-delta-details" not in scaling_descriptor:
6156 raise LcmException(
6157 "Aspect delta details not fount in scaling descriptor {}".format(
6158 scaling_descriptor["name"]
6159 )
6160 )
tierno59d22d22018-09-25 18:10:19 +02006161 # count if max-instance-count is reached
bravof832f8992020-12-07 12:57:31 -03006162 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
kuuse8b998e42019-07-30 15:22:16 +02006163
aktas5f75f102021-03-15 11:26:10 +03006164 scaling_info["scaling_direction"] = "OUT"
6165 scaling_info["vdu-create"] = {}
6166 scaling_info["kdu-create"] = {}
bravof832f8992020-12-07 12:57:31 -03006167 for delta in deltas:
aktas5f75f102021-03-15 11:26:10 +03006168 for vdu_delta in delta.get("vdu-delta", {}):
bravof832f8992020-12-07 12:57:31 -03006169 vdud = get_vdu(db_vnfd, vdu_delta["id"])
aktas5f75f102021-03-15 11:26:10 +03006170 # vdu_index also provides the number of instance of the targeted vdu
6171 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
garciadeblas5697b8b2021-03-24 09:17:02 +01006172 cloud_init_text = self._get_vdu_cloud_init_content(
6173 vdud, db_vnfd
6174 )
tierno72ef84f2020-10-06 08:22:07 +00006175 if cloud_init_text:
garciadeblas5697b8b2021-03-24 09:17:02 +01006176 additional_params = (
6177 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6178 or {}
6179 )
bravof832f8992020-12-07 12:57:31 -03006180 cloud_init_list = []
6181
6182 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6183 max_instance_count = 10
6184 if vdu_profile and "max-number-of-instances" in vdu_profile:
garciadeblas5697b8b2021-03-24 09:17:02 +01006185 max_instance_count = vdu_profile.get(
6186 "max-number-of-instances", 10
6187 )
6188
6189 default_instance_num = get_number_of_instances(
6190 db_vnfd, vdud["id"]
6191 )
aktas5f75f102021-03-15 11:26:10 +03006192 instances_number = vdu_delta.get("number-of-instances", 1)
6193 nb_scale_op += instances_number
bravof832f8992020-12-07 12:57:31 -03006194
aktas5f75f102021-03-15 11:26:10 +03006195 new_instance_count = nb_scale_op + default_instance_num
6196 # Control if new count is over max and vdu count is less than max.
6197 # Then assign new instance count
6198 if new_instance_count > max_instance_count > vdu_count:
6199 instances_number = new_instance_count - max_instance_count
6200 else:
6201 instances_number = instances_number
bravof832f8992020-12-07 12:57:31 -03006202
aktas5f75f102021-03-15 11:26:10 +03006203 if new_instance_count > max_instance_count:
bravof832f8992020-12-07 12:57:31 -03006204 raise LcmException(
6205 "reached the limit of {} (max-instance-count) "
6206 "scaling-out operations for the "
garciadeblas5697b8b2021-03-24 09:17:02 +01006207 "scaling-group-descriptor '{}'".format(
6208 nb_scale_op, scaling_group
6209 )
bravof922c4172020-11-24 21:21:43 -03006210 )
bravof832f8992020-12-07 12:57:31 -03006211 for x in range(vdu_delta.get("number-of-instances", 1)):
6212 if cloud_init_text:
6213 # TODO Information of its own ip is not available because db_vnfr is not updated.
6214 additional_params["OSM"] = get_osm_params(
garciadeblas5697b8b2021-03-24 09:17:02 +01006215 db_vnfr, vdu_delta["id"], vdu_index + x
bravof922c4172020-11-24 21:21:43 -03006216 )
bravof832f8992020-12-07 12:57:31 -03006217 cloud_init_list.append(
6218 self._parse_cloud_init(
6219 cloud_init_text,
6220 additional_params,
6221 db_vnfd["id"],
garciadeblas5697b8b2021-03-24 09:17:02 +01006222 vdud["id"],
bravof832f8992020-12-07 12:57:31 -03006223 )
6224 )
aktas5f75f102021-03-15 11:26:10 +03006225 vca_scaling_info.append(
aktas13251562021-02-12 22:19:10 +03006226 {
6227 "osm_vdu_id": vdu_delta["id"],
6228 "member-vnf-index": vnf_index,
6229 "type": "create",
garciadeblas5697b8b2021-03-24 09:17:02 +01006230 "vdu_index": vdu_index + x,
aktas13251562021-02-12 22:19:10 +03006231 }
6232 )
aktas5f75f102021-03-15 11:26:10 +03006233 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6234 for kdu_delta in delta.get("kdu-resource-delta", {}):
David Garciab4ebcd02021-10-28 02:00:43 +02006235 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
aktas5f75f102021-03-15 11:26:10 +03006236 kdu_name = kdu_profile["kdu-name"]
aktasc41fe832021-11-29 18:41:42 +03006237 resource_name = kdu_profile.get("resource-name", "")
aktas5f75f102021-03-15 11:26:10 +03006238
6239 # Might have different kdus in the same delta
6240 # Should have list for each kdu
6241 if not scaling_info["kdu-create"].get(kdu_name, None):
6242 scaling_info["kdu-create"][kdu_name] = []
6243
6244 kdur = get_kdur(db_vnfr, kdu_name)
6245 if kdur.get("helm-chart"):
6246 k8s_cluster_type = "helm-chart-v3"
6247 self.logger.debug("kdur: {}".format(kdur))
6248 if (
6249 kdur.get("helm-version")
6250 and kdur.get("helm-version") == "v2"
6251 ):
6252 k8s_cluster_type = "helm-chart"
aktas5f75f102021-03-15 11:26:10 +03006253 elif kdur.get("juju-bundle"):
6254 k8s_cluster_type = "juju-bundle"
6255 else:
6256 raise LcmException(
6257 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6258 "juju-bundle. Maybe an old NBI version is running".format(
6259 db_vnfr["member-vnf-index-ref"], kdu_name
6260 )
6261 )
6262
6263 max_instance_count = 10
6264 if kdu_profile and "max-number-of-instances" in kdu_profile:
6265 max_instance_count = kdu_profile.get(
6266 "max-number-of-instances", 10
6267 )
6268
6269 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6270 deployed_kdu, _ = get_deployed_kdu(
6271 nsr_deployed, kdu_name, vnf_index
bravof832f8992020-12-07 12:57:31 -03006272 )
aktas5f75f102021-03-15 11:26:10 +03006273 if deployed_kdu is None:
6274 raise LcmException(
6275 "KDU '{}' for vnf '{}' not deployed".format(
6276 kdu_name, vnf_index
6277 )
6278 )
6279 kdu_instance = deployed_kdu.get("kdu-instance")
6280 instance_num = await self.k8scluster_map[
6281 k8s_cluster_type
aktasc41fe832021-11-29 18:41:42 +03006282 ].get_scale_count(
6283 resource_name,
6284 kdu_instance,
6285 vca_id=vca_id,
6286 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6287 kdu_model=deployed_kdu.get("kdu-model"),
6288 )
aktas5f75f102021-03-15 11:26:10 +03006289 kdu_replica_count = instance_num + kdu_delta.get(
garciadeblas5697b8b2021-03-24 09:17:02 +01006290 "number-of-instances", 1
6291 )
ikalyvas02d9e7b2019-05-27 18:16:01 +03006292
aktas5f75f102021-03-15 11:26:10 +03006293 # Control if new count is over max and instance_num is less than max.
6294 # Then assign max instance number to kdu replica count
6295 if kdu_replica_count > max_instance_count > instance_num:
6296 kdu_replica_count = max_instance_count
6297 if kdu_replica_count > max_instance_count:
6298 raise LcmException(
6299 "reached the limit of {} (max-instance-count) "
6300 "scaling-out operations for the "
6301 "scaling-group-descriptor '{}'".format(
6302 instance_num, scaling_group
6303 )
6304 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006305
aktas5f75f102021-03-15 11:26:10 +03006306 for x in range(kdu_delta.get("number-of-instances", 1)):
6307 vca_scaling_info.append(
6308 {
6309 "osm_kdu_id": kdu_name,
6310 "member-vnf-index": vnf_index,
6311 "type": "create",
6312 "kdu_index": instance_num + x - 1,
6313 }
6314 )
6315 scaling_info["kdu-create"][kdu_name].append(
6316 {
6317 "member-vnf-index": vnf_index,
6318 "type": "create",
6319 "k8s-cluster-type": k8s_cluster_type,
6320 "resource-name": resource_name,
6321 "scale": kdu_replica_count,
6322 }
6323 )
6324 elif scaling_type == "SCALE_IN":
bravof832f8992020-12-07 12:57:31 -03006325 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
aktas5f75f102021-03-15 11:26:10 +03006326
6327 scaling_info["scaling_direction"] = "IN"
6328 scaling_info["vdu-delete"] = {}
6329 scaling_info["kdu-delete"] = {}
6330
bravof832f8992020-12-07 12:57:31 -03006331 for delta in deltas:
aktas5f75f102021-03-15 11:26:10 +03006332 for vdu_delta in delta.get("vdu-delta", {}):
6333 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
bravof832f8992020-12-07 12:57:31 -03006334 min_instance_count = 0
6335 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6336 if vdu_profile and "min-number-of-instances" in vdu_profile:
6337 min_instance_count = vdu_profile["min-number-of-instances"]
6338
garciadeblas5697b8b2021-03-24 09:17:02 +01006339 default_instance_num = get_number_of_instances(
6340 db_vnfd, vdu_delta["id"]
6341 )
aktas5f75f102021-03-15 11:26:10 +03006342 instance_num = vdu_delta.get("number-of-instances", 1)
6343 nb_scale_op -= instance_num
bravof832f8992020-12-07 12:57:31 -03006344
aktas5f75f102021-03-15 11:26:10 +03006345 new_instance_count = nb_scale_op + default_instance_num
6346
6347 if new_instance_count < min_instance_count < vdu_count:
6348 instances_number = min_instance_count - new_instance_count
6349 else:
6350 instances_number = instance_num
6351
6352 if new_instance_count < min_instance_count:
bravof832f8992020-12-07 12:57:31 -03006353 raise LcmException(
6354 "reached the limit of {} (min-instance-count) scaling-in operations for the "
garciadeblas5697b8b2021-03-24 09:17:02 +01006355 "scaling-group-descriptor '{}'".format(
6356 nb_scale_op, scaling_group
6357 )
bravof832f8992020-12-07 12:57:31 -03006358 )
aktas13251562021-02-12 22:19:10 +03006359 for x in range(vdu_delta.get("number-of-instances", 1)):
aktas5f75f102021-03-15 11:26:10 +03006360 vca_scaling_info.append(
aktas13251562021-02-12 22:19:10 +03006361 {
6362 "osm_vdu_id": vdu_delta["id"],
6363 "member-vnf-index": vnf_index,
6364 "type": "delete",
garciadeblas5697b8b2021-03-24 09:17:02 +01006365 "vdu_index": vdu_index - 1 - x,
aktas13251562021-02-12 22:19:10 +03006366 }
6367 )
aktas5f75f102021-03-15 11:26:10 +03006368 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6369 for kdu_delta in delta.get("kdu-resource-delta", {}):
David Garciab4ebcd02021-10-28 02:00:43 +02006370 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
aktas5f75f102021-03-15 11:26:10 +03006371 kdu_name = kdu_profile["kdu-name"]
aktasc41fe832021-11-29 18:41:42 +03006372 resource_name = kdu_profile.get("resource-name", "")
aktas5f75f102021-03-15 11:26:10 +03006373
6374 if not scaling_info["kdu-delete"].get(kdu_name, None):
6375 scaling_info["kdu-delete"][kdu_name] = []
6376
6377 kdur = get_kdur(db_vnfr, kdu_name)
6378 if kdur.get("helm-chart"):
6379 k8s_cluster_type = "helm-chart-v3"
6380 self.logger.debug("kdur: {}".format(kdur))
6381 if (
6382 kdur.get("helm-version")
6383 and kdur.get("helm-version") == "v2"
6384 ):
6385 k8s_cluster_type = "helm-chart"
aktas5f75f102021-03-15 11:26:10 +03006386 elif kdur.get("juju-bundle"):
6387 k8s_cluster_type = "juju-bundle"
6388 else:
6389 raise LcmException(
6390 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6391 "juju-bundle. Maybe an old NBI version is running".format(
6392 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6393 )
6394 )
6395
6396 min_instance_count = 0
6397 if kdu_profile and "min-number-of-instances" in kdu_profile:
6398 min_instance_count = kdu_profile["min-number-of-instances"]
6399
6400 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6401 deployed_kdu, _ = get_deployed_kdu(
6402 nsr_deployed, kdu_name, vnf_index
6403 )
6404 if deployed_kdu is None:
6405 raise LcmException(
6406 "KDU '{}' for vnf '{}' not deployed".format(
6407 kdu_name, vnf_index
6408 )
6409 )
6410 kdu_instance = deployed_kdu.get("kdu-instance")
6411 instance_num = await self.k8scluster_map[
6412 k8s_cluster_type
aktasc41fe832021-11-29 18:41:42 +03006413 ].get_scale_count(
6414 resource_name,
6415 kdu_instance,
6416 vca_id=vca_id,
6417 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6418 kdu_model=deployed_kdu.get("kdu-model"),
6419 )
aktas5f75f102021-03-15 11:26:10 +03006420 kdu_replica_count = instance_num - kdu_delta.get(
garciadeblas5697b8b2021-03-24 09:17:02 +01006421 "number-of-instances", 1
6422 )
tierno59d22d22018-09-25 18:10:19 +02006423
aktas5f75f102021-03-15 11:26:10 +03006424 if kdu_replica_count < min_instance_count < instance_num:
6425 kdu_replica_count = min_instance_count
6426 if kdu_replica_count < min_instance_count:
6427 raise LcmException(
6428 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6429 "scaling-group-descriptor '{}'".format(
6430 instance_num, scaling_group
6431 )
6432 )
6433
6434 for x in range(kdu_delta.get("number-of-instances", 1)):
6435 vca_scaling_info.append(
6436 {
6437 "osm_kdu_id": kdu_name,
6438 "member-vnf-index": vnf_index,
6439 "type": "delete",
6440 "kdu_index": instance_num - x - 1,
6441 }
6442 )
6443 scaling_info["kdu-delete"][kdu_name].append(
6444 {
6445 "member-vnf-index": vnf_index,
6446 "type": "delete",
6447 "k8s-cluster-type": k8s_cluster_type,
6448 "resource-name": resource_name,
6449 "scale": kdu_replica_count,
6450 }
6451 )
6452
tierno59d22d22018-09-25 18:10:19 +02006453 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
aktas5f75f102021-03-15 11:26:10 +03006454 vdu_delete = copy(scaling_info.get("vdu-delete"))
6455 if scaling_info["scaling_direction"] == "IN":
tierno59d22d22018-09-25 18:10:19 +02006456 for vdur in reversed(db_vnfr["vdur"]):
tierno27246d82018-09-27 15:59:09 +02006457 if vdu_delete.get(vdur["vdu-id-ref"]):
6458 vdu_delete[vdur["vdu-id-ref"]] -= 1
aktas5f75f102021-03-15 11:26:10 +03006459 scaling_info["vdu"].append(
garciadeblas5697b8b2021-03-24 09:17:02 +01006460 {
6461 "name": vdur.get("name") or vdur.get("vdu-name"),
6462 "vdu_id": vdur["vdu-id-ref"],
6463 "interface": [],
6464 }
6465 )
tierno59d22d22018-09-25 18:10:19 +02006466 for interface in vdur["interfaces"]:
aktas5f75f102021-03-15 11:26:10 +03006467 scaling_info["vdu"][-1]["interface"].append(
garciadeblas5697b8b2021-03-24 09:17:02 +01006468 {
6469 "name": interface["name"],
6470 "ip_address": interface["ip-address"],
6471 "mac_address": interface.get("mac-address"),
6472 }
6473 )
tierno2357f4e2020-10-19 16:38:59 +00006474 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
tierno59d22d22018-09-25 18:10:19 +02006475
kuuseac3a8882019-10-03 10:48:06 +02006476 # PRE-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02006477 step = "Executing pre-scale vnf-config-primitive"
6478 if scaling_descriptor.get("scaling-config-action"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006479 for scaling_config_action in scaling_descriptor[
6480 "scaling-config-action"
6481 ]:
6482 if (
6483 scaling_config_action.get("trigger") == "pre-scale-in"
6484 and scaling_type == "SCALE_IN"
6485 ) or (
6486 scaling_config_action.get("trigger") == "pre-scale-out"
6487 and scaling_type == "SCALE_OUT"
6488 ):
6489 vnf_config_primitive = scaling_config_action[
6490 "vnf-config-primitive-name-ref"
6491 ]
6492 step = db_nslcmop_update[
6493 "detailed-status"
6494 ] = "executing pre-scale scaling-config-action '{}'".format(
6495 vnf_config_primitive
6496 )
tiernoda964822019-01-14 15:53:47 +00006497
tierno59d22d22018-09-25 18:10:19 +02006498 # look for primitive
garciadeblas5697b8b2021-03-24 09:17:02 +01006499 for config_primitive in (
6500 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6501 ).get("config-primitive", ()):
tierno59d22d22018-09-25 18:10:19 +02006502 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02006503 break
6504 else:
6505 raise LcmException(
6506 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
tiernoda964822019-01-14 15:53:47 +00006507 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
garciadeblas5697b8b2021-03-24 09:17:02 +01006508 "primitive".format(scaling_group, vnf_config_primitive)
6509 )
tiernoda964822019-01-14 15:53:47 +00006510
aktas5f75f102021-03-15 11:26:10 +03006511 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
tiernoda964822019-01-14 15:53:47 +00006512 if db_vnfr.get("additionalParamsForVnf"):
6513 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
quilesj7e13aeb2019-10-08 13:34:55 +02006514
tierno9ab95942018-10-10 16:44:22 +02006515 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02006516 db_nsr_update["config-status"] = "configuring pre-scaling"
garciadeblas5697b8b2021-03-24 09:17:02 +01006517 primitive_params = self._map_primitive_params(
6518 config_primitive, {}, vnfr_params
6519 )
kuuseac3a8882019-10-03 10:48:06 +02006520
tierno7c4e24c2020-05-13 08:41:35 +00006521 # Pre-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02006522 op_index = self._check_or_add_scale_suboperation(
garciadeblas5697b8b2021-03-24 09:17:02 +01006523 db_nslcmop,
garciadeblas5697b8b2021-03-24 09:17:02 +01006524 vnf_index,
6525 vnf_config_primitive,
6526 primitive_params,
6527 "PRE-SCALE",
6528 )
tierno7c4e24c2020-05-13 08:41:35 +00006529 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02006530 # Skip sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006531 result = "COMPLETED"
6532 result_detail = "Done"
6533 self.logger.debug(
6534 logging_text
6535 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6536 vnf_config_primitive, result, result_detail
6537 )
6538 )
kuuseac3a8882019-10-03 10:48:06 +02006539 else:
tierno7c4e24c2020-05-13 08:41:35 +00006540 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02006541 # New sub-operation: Get index of this sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006542 op_index = (
6543 len(db_nslcmop.get("_admin", {}).get("operations"))
6544 - 1
6545 )
6546 self.logger.debug(
6547 logging_text
6548 + "vnf_config_primitive={} New sub-operation".format(
6549 vnf_config_primitive
6550 )
6551 )
kuuseac3a8882019-10-03 10:48:06 +02006552 else:
tierno7c4e24c2020-05-13 08:41:35 +00006553 # retry: Get registered params for this existing sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006554 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6555 op_index
6556 ]
6557 vnf_index = op.get("member_vnf_index")
6558 vnf_config_primitive = op.get("primitive")
6559 primitive_params = op.get("primitive_params")
6560 self.logger.debug(
6561 logging_text
6562 + "vnf_config_primitive={} Sub-operation retry".format(
6563 vnf_config_primitive
6564 )
6565 )
tierno588547c2020-07-01 15:30:20 +00006566 # Execute the primitive, either with new (first-time) or registered (reintent) args
garciadeblas5697b8b2021-03-24 09:17:02 +01006567 ee_descriptor_id = config_primitive.get(
6568 "execution-environment-ref"
6569 )
6570 primitive_name = config_primitive.get(
6571 "execution-environment-primitive", vnf_config_primitive
6572 )
6573 ee_id, vca_type = self._look_for_deployed_vca(
6574 nsr_deployed["VCA"],
6575 member_vnf_index=vnf_index,
6576 vdu_id=None,
6577 vdu_count_index=None,
6578 ee_descriptor_id=ee_descriptor_id,
6579 )
kuuseac3a8882019-10-03 10:48:06 +02006580 result, result_detail = await self._ns_execute_primitive(
garciadeblas5697b8b2021-03-24 09:17:02 +01006581 ee_id,
6582 primitive_name,
David Garciac1fe90a2021-03-31 19:12:02 +02006583 primitive_params,
6584 vca_type=vca_type,
6585 vca_id=vca_id,
6586 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006587 self.logger.debug(
6588 logging_text
6589 + "vnf_config_primitive={} Done with result {} {}".format(
6590 vnf_config_primitive, result, result_detail
6591 )
6592 )
kuuseac3a8882019-10-03 10:48:06 +02006593 # Update operationState = COMPLETED | FAILED
6594 self._update_suboperation_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01006595 db_nslcmop, op_index, result, result_detail
6596 )
kuuseac3a8882019-10-03 10:48:06 +02006597
tierno59d22d22018-09-25 18:10:19 +02006598 if result == "FAILED":
6599 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02006600 db_nsr_update["config-status"] = old_config_status
6601 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02006602 # PRE-SCALE END
tierno59d22d22018-09-25 18:10:19 +02006603
garciadeblas5697b8b2021-03-24 09:17:02 +01006604 db_nsr_update[
6605 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6606 ] = nb_scale_op
6607 db_nsr_update[
6608 "_admin.scaling-group.{}.time".format(admin_scale_index)
6609 ] = time()
tierno2357f4e2020-10-19 16:38:59 +00006610
aktas13251562021-02-12 22:19:10 +03006611 # SCALE-IN VCA - BEGIN
aktas5f75f102021-03-15 11:26:10 +03006612 if vca_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01006613 step = db_nslcmop_update[
6614 "detailed-status"
6615 ] = "Deleting the execution environments"
aktas13251562021-02-12 22:19:10 +03006616 scale_process = "VCA"
aktas5f75f102021-03-15 11:26:10 +03006617 for vca_info in vca_scaling_info:
Guillermo Calvinoa0c6baf2022-02-02 19:04:50 +01006618 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
aktas5f75f102021-03-15 11:26:10 +03006619 member_vnf_index = str(vca_info["member-vnf-index"])
garciadeblas5697b8b2021-03-24 09:17:02 +01006620 self.logger.debug(
aktas5f75f102021-03-15 11:26:10 +03006621 logging_text + "vdu info: {}".format(vca_info)
garciadeblas5697b8b2021-03-24 09:17:02 +01006622 )
aktas5f75f102021-03-15 11:26:10 +03006623 if vca_info.get("osm_vdu_id"):
6624 vdu_id = vca_info["osm_vdu_id"]
6625 vdu_index = int(vca_info["vdu_index"])
6626 stage[
6627 1
6628 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6629 member_vnf_index, vdu_id, vdu_index
6630 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006631 stage[2] = step = "Scaling in VCA"
6632 self._write_op_status(op_id=nslcmop_id, stage=stage)
aktas13251562021-02-12 22:19:10 +03006633 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6634 config_update = db_nsr["configurationStatus"]
6635 for vca_index, vca in enumerate(vca_update):
garciadeblas5697b8b2021-03-24 09:17:02 +01006636 if (
6637 (vca or vca.get("ee_id"))
6638 and vca["member-vnf-index"] == member_vnf_index
6639 and vca["vdu_count_index"] == vdu_index
6640 ):
aktas13251562021-02-12 22:19:10 +03006641 if vca.get("vdu_id"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006642 config_descriptor = get_configuration(
6643 db_vnfd, vca.get("vdu_id")
6644 )
aktas13251562021-02-12 22:19:10 +03006645 elif vca.get("kdu_name"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006646 config_descriptor = get_configuration(
6647 db_vnfd, vca.get("kdu_name")
6648 )
aktas13251562021-02-12 22:19:10 +03006649 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01006650 config_descriptor = get_configuration(
6651 db_vnfd, db_vnfd["id"]
6652 )
6653 operation_params = (
6654 db_nslcmop.get("operationParams") or {}
6655 )
6656 exec_terminate_primitives = not operation_params.get(
6657 "skip_terminate_primitives"
6658 ) and vca.get("needed_terminate")
David Garciac1fe90a2021-03-31 19:12:02 +02006659 task = asyncio.ensure_future(
6660 asyncio.wait_for(
6661 self.destroy_N2VC(
6662 logging_text,
6663 db_nslcmop,
6664 vca,
6665 config_descriptor,
6666 vca_index,
6667 destroy_ee=True,
6668 exec_primitives=exec_terminate_primitives,
6669 scaling_in=True,
6670 vca_id=vca_id,
6671 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01006672 timeout=self.timeout_charm_delete,
David Garciac1fe90a2021-03-31 19:12:02 +02006673 )
6674 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006675 tasks_dict_info[task] = "Terminating VCA {}".format(
6676 vca.get("ee_id")
6677 )
aktas13251562021-02-12 22:19:10 +03006678 del vca_update[vca_index]
6679 del config_update[vca_index]
6680 # wait for pending tasks of terminate primitives
6681 if tasks_dict_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01006682 self.logger.debug(
6683 logging_text
6684 + "Waiting for tasks {}".format(
6685 list(tasks_dict_info.keys())
6686 )
6687 )
6688 error_list = await self._wait_for_tasks(
6689 logging_text,
6690 tasks_dict_info,
6691 min(
6692 self.timeout_charm_delete, self.timeout_ns_terminate
6693 ),
6694 stage,
6695 nslcmop_id,
6696 )
aktas13251562021-02-12 22:19:10 +03006697 tasks_dict_info.clear()
6698 if error_list:
6699 raise LcmException("; ".join(error_list))
6700
6701 db_vca_and_config_update = {
6702 "_admin.deployed.VCA": vca_update,
garciadeblas5697b8b2021-03-24 09:17:02 +01006703 "configurationStatus": config_update,
aktas13251562021-02-12 22:19:10 +03006704 }
garciadeblas5697b8b2021-03-24 09:17:02 +01006705 self.update_db_2(
6706 "nsrs", db_nsr["_id"], db_vca_and_config_update
6707 )
aktas13251562021-02-12 22:19:10 +03006708 scale_process = None
6709 # SCALE-IN VCA - END
6710
kuuseac3a8882019-10-03 10:48:06 +02006711 # SCALE RO - BEGIN
aktas5f75f102021-03-15 11:26:10 +03006712 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
tierno9ab95942018-10-10 16:44:22 +02006713 scale_process = "RO"
tierno2357f4e2020-10-19 16:38:59 +00006714 if self.ro_config.get("ng"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006715 await self._scale_ng_ro(
aktas5f75f102021-03-15 11:26:10 +03006716 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
garciadeblas5697b8b2021-03-24 09:17:02 +01006717 )
aktas5f75f102021-03-15 11:26:10 +03006718 scaling_info.pop("vdu-create", None)
6719 scaling_info.pop("vdu-delete", None)
tierno59d22d22018-09-25 18:10:19 +02006720
tierno9ab95942018-10-10 16:44:22 +02006721 scale_process = None
aktas13251562021-02-12 22:19:10 +03006722 # SCALE RO - END
6723
aktas5f75f102021-03-15 11:26:10 +03006724 # SCALE KDU - BEGIN
6725 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6726 scale_process = "KDU"
6727 await self._scale_kdu(
6728 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6729 )
6730 scaling_info.pop("kdu-create", None)
6731 scaling_info.pop("kdu-delete", None)
6732
6733 scale_process = None
6734 # SCALE KDU - END
6735
6736 if db_nsr_update:
6737 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6738
aktas13251562021-02-12 22:19:10 +03006739 # SCALE-UP VCA - BEGIN
aktas5f75f102021-03-15 11:26:10 +03006740 if vca_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01006741 step = db_nslcmop_update[
6742 "detailed-status"
6743 ] = "Creating new execution environments"
aktas13251562021-02-12 22:19:10 +03006744 scale_process = "VCA"
aktas5f75f102021-03-15 11:26:10 +03006745 for vca_info in vca_scaling_info:
Guillermo Calvinoa0c6baf2022-02-02 19:04:50 +01006746 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
aktas5f75f102021-03-15 11:26:10 +03006747 member_vnf_index = str(vca_info["member-vnf-index"])
garciadeblas5697b8b2021-03-24 09:17:02 +01006748 self.logger.debug(
aktas5f75f102021-03-15 11:26:10 +03006749 logging_text + "vdu info: {}".format(vca_info)
garciadeblas5697b8b2021-03-24 09:17:02 +01006750 )
aktas13251562021-02-12 22:19:10 +03006751 vnfd_id = db_vnfr["vnfd-ref"]
aktas5f75f102021-03-15 11:26:10 +03006752 if vca_info.get("osm_vdu_id"):
6753 vdu_index = int(vca_info["vdu_index"])
6754 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6755 if db_vnfr.get("additionalParamsForVnf"):
6756 deploy_params.update(
6757 parse_yaml_strings(
6758 db_vnfr["additionalParamsForVnf"].copy()
6759 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006760 )
aktas5f75f102021-03-15 11:26:10 +03006761 descriptor_config = get_configuration(
6762 db_vnfd, db_vnfd["id"]
garciadeblas5697b8b2021-03-24 09:17:02 +01006763 )
aktas5f75f102021-03-15 11:26:10 +03006764 if descriptor_config:
6765 vdu_id = None
6766 vdu_name = None
6767 kdu_name = None
6768 self._deploy_n2vc(
6769 logging_text=logging_text
6770 + "member_vnf_index={} ".format(member_vnf_index),
6771 db_nsr=db_nsr,
6772 db_vnfr=db_vnfr,
6773 nslcmop_id=nslcmop_id,
6774 nsr_id=nsr_id,
6775 nsi_id=nsi_id,
6776 vnfd_id=vnfd_id,
6777 vdu_id=vdu_id,
6778 kdu_name=kdu_name,
6779 member_vnf_index=member_vnf_index,
6780 vdu_index=vdu_index,
6781 vdu_name=vdu_name,
6782 deploy_params=deploy_params,
6783 descriptor_config=descriptor_config,
6784 base_folder=base_folder,
6785 task_instantiation_info=tasks_dict_info,
6786 stage=stage,
6787 )
6788 vdu_id = vca_info["osm_vdu_id"]
6789 vdur = find_in_list(
6790 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
aktas13251562021-02-12 22:19:10 +03006791 )
aktas5f75f102021-03-15 11:26:10 +03006792 descriptor_config = get_configuration(db_vnfd, vdu_id)
6793 if vdur.get("additionalParams"):
6794 deploy_params_vdu = parse_yaml_strings(
6795 vdur["additionalParams"]
6796 )
6797 else:
6798 deploy_params_vdu = deploy_params
6799 deploy_params_vdu["OSM"] = get_osm_params(
6800 db_vnfr, vdu_id, vdu_count_index=vdu_index
garciadeblas5697b8b2021-03-24 09:17:02 +01006801 )
aktas5f75f102021-03-15 11:26:10 +03006802 if descriptor_config:
6803 vdu_name = None
6804 kdu_name = None
6805 stage[
6806 1
6807 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
garciadeblas5697b8b2021-03-24 09:17:02 +01006808 member_vnf_index, vdu_id, vdu_index
aktas5f75f102021-03-15 11:26:10 +03006809 )
6810 stage[2] = step = "Scaling out VCA"
6811 self._write_op_status(op_id=nslcmop_id, stage=stage)
6812 self._deploy_n2vc(
6813 logging_text=logging_text
6814 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6815 member_vnf_index, vdu_id, vdu_index
6816 ),
6817 db_nsr=db_nsr,
6818 db_vnfr=db_vnfr,
6819 nslcmop_id=nslcmop_id,
6820 nsr_id=nsr_id,
6821 nsi_id=nsi_id,
6822 vnfd_id=vnfd_id,
6823 vdu_id=vdu_id,
6824 kdu_name=kdu_name,
6825 member_vnf_index=member_vnf_index,
6826 vdu_index=vdu_index,
6827 vdu_name=vdu_name,
6828 deploy_params=deploy_params_vdu,
6829 descriptor_config=descriptor_config,
6830 base_folder=base_folder,
6831 task_instantiation_info=tasks_dict_info,
6832 stage=stage,
6833 )
aktas13251562021-02-12 22:19:10 +03006834 # SCALE-UP VCA - END
6835 scale_process = None
tierno59d22d22018-09-25 18:10:19 +02006836
kuuseac3a8882019-10-03 10:48:06 +02006837 # POST-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02006838 # execute primitive service POST-SCALING
6839 step = "Executing post-scale vnf-config-primitive"
6840 if scaling_descriptor.get("scaling-config-action"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006841 for scaling_config_action in scaling_descriptor[
6842 "scaling-config-action"
6843 ]:
6844 if (
6845 scaling_config_action.get("trigger") == "post-scale-in"
6846 and scaling_type == "SCALE_IN"
6847 ) or (
6848 scaling_config_action.get("trigger") == "post-scale-out"
6849 and scaling_type == "SCALE_OUT"
6850 ):
6851 vnf_config_primitive = scaling_config_action[
6852 "vnf-config-primitive-name-ref"
6853 ]
6854 step = db_nslcmop_update[
6855 "detailed-status"
6856 ] = "executing post-scale scaling-config-action '{}'".format(
6857 vnf_config_primitive
6858 )
tiernoda964822019-01-14 15:53:47 +00006859
aktas5f75f102021-03-15 11:26:10 +03006860 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
tiernoda964822019-01-14 15:53:47 +00006861 if db_vnfr.get("additionalParamsForVnf"):
6862 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6863
tierno59d22d22018-09-25 18:10:19 +02006864 # look for primitive
bravof9a256db2021-02-22 18:02:07 -03006865 for config_primitive in (
6866 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6867 ).get("config-primitive", ()):
tierno59d22d22018-09-25 18:10:19 +02006868 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02006869 break
6870 else:
tiernoa278b842020-07-08 15:33:55 +00006871 raise LcmException(
6872 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6873 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
garciadeblas5697b8b2021-03-24 09:17:02 +01006874 "config-primitive".format(
6875 scaling_group, vnf_config_primitive
6876 )
6877 )
tierno9ab95942018-10-10 16:44:22 +02006878 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02006879 db_nsr_update["config-status"] = "configuring post-scaling"
garciadeblas5697b8b2021-03-24 09:17:02 +01006880 primitive_params = self._map_primitive_params(
6881 config_primitive, {}, vnfr_params
6882 )
tiernod6de1992018-10-11 13:05:52 +02006883
tierno7c4e24c2020-05-13 08:41:35 +00006884 # Post-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02006885 op_index = self._check_or_add_scale_suboperation(
garciadeblas5697b8b2021-03-24 09:17:02 +01006886 db_nslcmop,
garciadeblas5697b8b2021-03-24 09:17:02 +01006887 vnf_index,
6888 vnf_config_primitive,
6889 primitive_params,
6890 "POST-SCALE",
6891 )
quilesj4cda56b2019-12-05 10:02:20 +00006892 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02006893 # Skip sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006894 result = "COMPLETED"
6895 result_detail = "Done"
6896 self.logger.debug(
6897 logging_text
6898 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6899 vnf_config_primitive, result, result_detail
6900 )
6901 )
kuuseac3a8882019-10-03 10:48:06 +02006902 else:
quilesj4cda56b2019-12-05 10:02:20 +00006903 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02006904 # New sub-operation: Get index of this sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006905 op_index = (
6906 len(db_nslcmop.get("_admin", {}).get("operations"))
6907 - 1
6908 )
6909 self.logger.debug(
6910 logging_text
6911 + "vnf_config_primitive={} New sub-operation".format(
6912 vnf_config_primitive
6913 )
6914 )
kuuseac3a8882019-10-03 10:48:06 +02006915 else:
tierno7c4e24c2020-05-13 08:41:35 +00006916 # retry: Get registered params for this existing sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006917 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6918 op_index
6919 ]
6920 vnf_index = op.get("member_vnf_index")
6921 vnf_config_primitive = op.get("primitive")
6922 primitive_params = op.get("primitive_params")
6923 self.logger.debug(
6924 logging_text
6925 + "vnf_config_primitive={} Sub-operation retry".format(
6926 vnf_config_primitive
6927 )
6928 )
tierno588547c2020-07-01 15:30:20 +00006929 # Execute the primitive, either with new (first-time) or registered (reintent) args
garciadeblas5697b8b2021-03-24 09:17:02 +01006930 ee_descriptor_id = config_primitive.get(
6931 "execution-environment-ref"
6932 )
6933 primitive_name = config_primitive.get(
6934 "execution-environment-primitive", vnf_config_primitive
6935 )
6936 ee_id, vca_type = self._look_for_deployed_vca(
6937 nsr_deployed["VCA"],
6938 member_vnf_index=vnf_index,
6939 vdu_id=None,
6940 vdu_count_index=None,
6941 ee_descriptor_id=ee_descriptor_id,
6942 )
kuuseac3a8882019-10-03 10:48:06 +02006943 result, result_detail = await self._ns_execute_primitive(
David Garciac1fe90a2021-03-31 19:12:02 +02006944 ee_id,
6945 primitive_name,
6946 primitive_params,
6947 vca_type=vca_type,
6948 vca_id=vca_id,
6949 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006950 self.logger.debug(
6951 logging_text
6952 + "vnf_config_primitive={} Done with result {} {}".format(
6953 vnf_config_primitive, result, result_detail
6954 )
6955 )
kuuseac3a8882019-10-03 10:48:06 +02006956 # Update operationState = COMPLETED | FAILED
6957 self._update_suboperation_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01006958 db_nslcmop, op_index, result, result_detail
6959 )
kuuseac3a8882019-10-03 10:48:06 +02006960
tierno59d22d22018-09-25 18:10:19 +02006961 if result == "FAILED":
6962 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02006963 db_nsr_update["config-status"] = old_config_status
6964 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02006965 # POST-SCALE END
tierno59d22d22018-09-25 18:10:19 +02006966
garciadeblas5697b8b2021-03-24 09:17:02 +01006967 db_nsr_update[
6968 "detailed-status"
6969 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6970 db_nsr_update["operational-status"] = (
6971 "running"
6972 if old_operational_status == "failed"
ikalyvas02d9e7b2019-05-27 18:16:01 +03006973 else old_operational_status
garciadeblas5697b8b2021-03-24 09:17:02 +01006974 )
tiernod6de1992018-10-11 13:05:52 +02006975 db_nsr_update["config-status"] = old_config_status
tierno59d22d22018-09-25 18:10:19 +02006976 return
garciadeblas5697b8b2021-03-24 09:17:02 +01006977 except (
6978 ROclient.ROClientException,
6979 DbException,
6980 LcmException,
6981 NgRoException,
6982 ) as e:
tierno59d22d22018-09-25 18:10:19 +02006983 self.logger.error(logging_text + "Exit Exception {}".format(e))
6984 exc = e
6985 except asyncio.CancelledError:
garciadeblas5697b8b2021-03-24 09:17:02 +01006986 self.logger.error(
6987 logging_text + "Cancelled Exception while '{}'".format(step)
6988 )
tierno59d22d22018-09-25 18:10:19 +02006989 exc = "Operation was cancelled"
6990 except Exception as e:
6991 exc = traceback.format_exc()
garciadeblas5697b8b2021-03-24 09:17:02 +01006992 self.logger.critical(
6993 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6994 exc_info=True,
6995 )
tierno59d22d22018-09-25 18:10:19 +02006996 finally:
garciadeblas5697b8b2021-03-24 09:17:02 +01006997 self._write_ns_status(
6998 nsr_id=nsr_id,
6999 ns_state=None,
7000 current_operation="IDLE",
7001 current_operation_id=None,
7002 )
aktas13251562021-02-12 22:19:10 +03007003 if tasks_dict_info:
7004 stage[1] = "Waiting for instantiate pending tasks."
7005 self.logger.debug(logging_text + stage[1])
garciadeblas5697b8b2021-03-24 09:17:02 +01007006 exc = await self._wait_for_tasks(
7007 logging_text,
7008 tasks_dict_info,
7009 self.timeout_ns_deploy,
7010 stage,
7011 nslcmop_id,
7012 nsr_id=nsr_id,
7013 )
tierno59d22d22018-09-25 18:10:19 +02007014 if exc:
garciadeblas5697b8b2021-03-24 09:17:02 +01007015 db_nslcmop_update[
7016 "detailed-status"
7017 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
tiernoa17d4f42020-04-28 09:59:23 +00007018 nslcmop_operation_state = "FAILED"
tierno59d22d22018-09-25 18:10:19 +02007019 if db_nsr:
tiernod6de1992018-10-11 13:05:52 +02007020 db_nsr_update["operational-status"] = old_operational_status
7021 db_nsr_update["config-status"] = old_config_status
7022 db_nsr_update["detailed-status"] = ""
7023 if scale_process:
7024 if "VCA" in scale_process:
7025 db_nsr_update["config-status"] = "failed"
7026 if "RO" in scale_process:
7027 db_nsr_update["operational-status"] = "failed"
garciadeblas5697b8b2021-03-24 09:17:02 +01007028 db_nsr_update[
7029 "detailed-status"
7030 ] = "FAILED scaling nslcmop={} {}: {}".format(
7031 nslcmop_id, step, exc
7032 )
tiernoa17d4f42020-04-28 09:59:23 +00007033 else:
7034 error_description_nslcmop = None
7035 nslcmop_operation_state = "COMPLETED"
7036 db_nslcmop_update["detailed-status"] = "Done"
quilesj4cda56b2019-12-05 10:02:20 +00007037
garciadeblas5697b8b2021-03-24 09:17:02 +01007038 self._write_op_status(
7039 op_id=nslcmop_id,
7040 stage="",
7041 error_message=error_description_nslcmop,
7042 operation_state=nslcmop_operation_state,
7043 other_update=db_nslcmop_update,
7044 )
tiernoa17d4f42020-04-28 09:59:23 +00007045 if db_nsr:
garciadeblas5697b8b2021-03-24 09:17:02 +01007046 self._write_ns_status(
7047 nsr_id=nsr_id,
7048 ns_state=None,
7049 current_operation="IDLE",
7050 current_operation_id=None,
7051 other_update=db_nsr_update,
7052 )
tiernoa17d4f42020-04-28 09:59:23 +00007053
tierno59d22d22018-09-25 18:10:19 +02007054 if nslcmop_operation_state:
7055 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01007056 msg = {
7057 "nsr_id": nsr_id,
7058 "nslcmop_id": nslcmop_id,
7059 "operationState": nslcmop_operation_state,
7060 }
bravof922c4172020-11-24 21:21:43 -03007061 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02007062 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01007063 self.logger.error(
7064 logging_text + "kafka_write notification Exception {}".format(e)
7065 )
tierno59d22d22018-09-25 18:10:19 +02007066 self.logger.debug(logging_text + "Exit")
7067 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
tiernob996d942020-07-03 14:52:28 +00007068
aktas5f75f102021-03-15 11:26:10 +03007069 async def _scale_kdu(
7070 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7071 ):
7072 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7073 for kdu_name in _scaling_info:
7074 for kdu_scaling_info in _scaling_info[kdu_name]:
7075 deployed_kdu, index = get_deployed_kdu(
7076 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7077 )
7078 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7079 kdu_instance = deployed_kdu["kdu-instance"]
aktasc41fe832021-11-29 18:41:42 +03007080 kdu_model = deployed_kdu.get("kdu-model")
aktas5f75f102021-03-15 11:26:10 +03007081 scale = int(kdu_scaling_info["scale"])
7082 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7083
7084 db_dict = {
7085 "collection": "nsrs",
7086 "filter": {"_id": nsr_id},
7087 "path": "_admin.deployed.K8s.{}".format(index),
7088 }
7089
7090 step = "scaling application {}".format(
7091 kdu_scaling_info["resource-name"]
7092 )
7093 self.logger.debug(logging_text + step)
7094
7095 if kdu_scaling_info["type"] == "delete":
7096 kdu_config = get_configuration(db_vnfd, kdu_name)
7097 if (
7098 kdu_config
7099 and kdu_config.get("terminate-config-primitive")
7100 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7101 ):
7102 terminate_config_primitive_list = kdu_config.get(
7103 "terminate-config-primitive"
7104 )
7105 terminate_config_primitive_list.sort(
7106 key=lambda val: int(val["seq"])
7107 )
7108
7109 for (
7110 terminate_config_primitive
7111 ) in terminate_config_primitive_list:
7112 primitive_params_ = self._map_primitive_params(
7113 terminate_config_primitive, {}, {}
7114 )
7115 step = "execute terminate config primitive"
7116 self.logger.debug(logging_text + step)
7117 await asyncio.wait_for(
7118 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7119 cluster_uuid=cluster_uuid,
7120 kdu_instance=kdu_instance,
7121 primitive_name=terminate_config_primitive["name"],
7122 params=primitive_params_,
7123 db_dict=db_dict,
7124 vca_id=vca_id,
7125 ),
7126 timeout=600,
7127 )
7128
7129 await asyncio.wait_for(
7130 self.k8scluster_map[k8s_cluster_type].scale(
7131 kdu_instance,
7132 scale,
7133 kdu_scaling_info["resource-name"],
7134 vca_id=vca_id,
aktasc41fe832021-11-29 18:41:42 +03007135 cluster_uuid=cluster_uuid,
7136 kdu_model=kdu_model,
7137 atomic=True,
7138 db_dict=db_dict,
aktas5f75f102021-03-15 11:26:10 +03007139 ),
7140 timeout=self.timeout_vca_on_error,
7141 )
7142
7143 if kdu_scaling_info["type"] == "create":
7144 kdu_config = get_configuration(db_vnfd, kdu_name)
7145 if (
7146 kdu_config
7147 and kdu_config.get("initial-config-primitive")
7148 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7149 ):
7150 initial_config_primitive_list = kdu_config.get(
7151 "initial-config-primitive"
7152 )
7153 initial_config_primitive_list.sort(
7154 key=lambda val: int(val["seq"])
7155 )
7156
7157 for initial_config_primitive in initial_config_primitive_list:
7158 primitive_params_ = self._map_primitive_params(
7159 initial_config_primitive, {}, {}
7160 )
7161 step = "execute initial config primitive"
7162 self.logger.debug(logging_text + step)
7163 await asyncio.wait_for(
7164 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7165 cluster_uuid=cluster_uuid,
7166 kdu_instance=kdu_instance,
7167 primitive_name=initial_config_primitive["name"],
7168 params=primitive_params_,
7169 db_dict=db_dict,
7170 vca_id=vca_id,
7171 ),
7172 timeout=600,
7173 )
7174
garciadeblas5697b8b2021-03-24 09:17:02 +01007175 async def _scale_ng_ro(
7176 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7177 ):
tierno2357f4e2020-10-19 16:38:59 +00007178 nsr_id = db_nslcmop["nsInstanceId"]
7179 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7180 db_vnfrs = {}
7181
7182 # read from db: vnfd's for every vnf
bravof832f8992020-12-07 12:57:31 -03007183 db_vnfds = []
tierno2357f4e2020-10-19 16:38:59 +00007184
7185 # for each vnf in ns, read vnfd
7186 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7187 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7188 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
tierno2357f4e2020-10-19 16:38:59 +00007189 # if we haven't this vnfd, read it from db
bravof832f8992020-12-07 12:57:31 -03007190 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
tierno2357f4e2020-10-19 16:38:59 +00007191 # read from db
7192 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
bravof832f8992020-12-07 12:57:31 -03007193 db_vnfds.append(vnfd)
tierno2357f4e2020-10-19 16:38:59 +00007194 n2vc_key = self.n2vc.get_public_key()
7195 n2vc_key_list = [n2vc_key]
garciadeblas5697b8b2021-03-24 09:17:02 +01007196 self.scale_vnfr(
7197 db_vnfr,
7198 vdu_scaling_info.get("vdu-create"),
7199 vdu_scaling_info.get("vdu-delete"),
7200 mark_delete=True,
7201 )
tierno2357f4e2020-10-19 16:38:59 +00007202 # db_vnfr has been updated, update db_vnfrs to use it
7203 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
garciadeblas5697b8b2021-03-24 09:17:02 +01007204 await self._instantiate_ng_ro(
7205 logging_text,
7206 nsr_id,
7207 db_nsd,
7208 db_nsr,
7209 db_nslcmop,
7210 db_vnfrs,
7211 db_vnfds,
7212 n2vc_key_list,
7213 stage=stage,
7214 start_deploy=time(),
7215 timeout_ns_deploy=self.timeout_ns_deploy,
7216 )
tierno2357f4e2020-10-19 16:38:59 +00007217 if vdu_scaling_info.get("vdu-delete"):
garciadeblas5697b8b2021-03-24 09:17:02 +01007218 self.scale_vnfr(
7219 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7220 )
tierno2357f4e2020-10-19 16:38:59 +00007221
bravof73bac502021-05-11 07:38:47 -04007222 async def extract_prometheus_scrape_jobs(
aticig15db6142022-01-24 12:51:26 +03007223 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
garciadeblas5697b8b2021-03-24 09:17:02 +01007224 ):
tiernob996d942020-07-03 14:52:28 +00007225 # look if exist a file called 'prometheus*.j2' and
7226 artifact_content = self.fs.dir_ls(artifact_path)
garciadeblas5697b8b2021-03-24 09:17:02 +01007227 job_file = next(
7228 (
7229 f
7230 for f in artifact_content
7231 if f.startswith("prometheus") and f.endswith(".j2")
7232 ),
7233 None,
7234 )
tiernob996d942020-07-03 14:52:28 +00007235 if not job_file:
7236 return
7237 with self.fs.file_open((artifact_path, job_file), "r") as f:
7238 job_data = f.read()
7239
7240 # TODO get_service
garciadeblas5697b8b2021-03-24 09:17:02 +01007241 _, _, service = ee_id.partition(".") # remove prefix "namespace."
tiernob996d942020-07-03 14:52:28 +00007242 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7243 host_port = "80"
7244 vnfr_id = vnfr_id.replace("-", "")
7245 variables = {
7246 "JOB_NAME": vnfr_id,
7247 "TARGET_IP": target_ip,
7248 "EXPORTER_POD_IP": host_name,
7249 "EXPORTER_POD_PORT": host_port,
7250 }
bravof73bac502021-05-11 07:38:47 -04007251 job_list = parse_job(job_data, variables)
tiernob996d942020-07-03 14:52:28 +00007252 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7253 for job in job_list:
garciadeblas5697b8b2021-03-24 09:17:02 +01007254 if (
7255 not isinstance(job.get("job_name"), str)
7256 or vnfr_id not in job["job_name"]
7257 ):
tiernob996d942020-07-03 14:52:28 +00007258 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7259 job["nsr_id"] = nsr_id
bravof73bac502021-05-11 07:38:47 -04007260 job["vnfr_id"] = vnfr_id
7261 return job_list
David Garciaaae391f2020-11-09 11:12:54 +01007262
k4.rahulb827de92022-05-02 16:35:02 +00007263 async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
7264 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7265 self.logger.info(logging_text + "Enter")
7266 stage = ["Preparing the environment", ""]
7267 # database nsrs record
7268 db_nsr_update = {}
7269 vdu_vim_name = None
7270 vim_vm_id = None
7271 # in case of error, indicates what part of scale was failed to put nsr at error status
7272 start_deploy = time()
7273 try:
7274 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7275 vim_account_id = db_vnfr.get("vim-account-id")
7276 vim_info_key = "vim:" + vim_account_id
7277 vdur = find_in_list(
7278 db_vnfr["vdur"], lambda vdu: vdu["count-index"] == additional_param["count-index"]
7279 )
7280 if vdur:
7281 vdu_vim_name = vdur["name"]
7282 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7283 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7284 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7285 # wait for any previous tasks in process
7286 stage[1] = "Waiting for previous operations to terminate"
7287 self.logger.info(stage[1])
7288 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
7289
7290 stage[1] = "Reading from database."
7291 self.logger.info(stage[1])
7292 self._write_ns_status(
7293 nsr_id=nsr_id,
7294 ns_state=None,
7295 current_operation=operation_type.upper(),
7296 current_operation_id=nslcmop_id
7297 )
7298 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7299
7300 # read from db: ns
7301 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7302 db_nsr_update["operational-status"] = operation_type
7303 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7304 # Payload for RO
7305 desc = {
7306 operation_type: {
7307 "vim_vm_id": vim_vm_id,
7308 "vnf_id": vnf_id,
7309 "vdu_index": additional_param["count-index"],
7310 "vdu_id": vdur["id"],
7311 "target_vim": target_vim,
7312 "vim_account_id": vim_account_id
7313 }
7314 }
7315 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7316 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7317 self.logger.info("ro nsr id: {}".format(nsr_id))
7318 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7319 self.logger.info("response from RO: {}".format(result_dict))
7320 action_id = result_dict["action_id"]
7321 await self._wait_ng_ro(
7322 nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_operate
7323 )
7324 return "COMPLETED", "Done"
7325 except (ROclient.ROClientException, DbException, LcmException) as e:
7326 self.logger.error("Exit Exception {}".format(e))
7327 exc = e
7328 except asyncio.CancelledError:
7329 self.logger.error("Cancelled Exception while '{}'".format(stage))
7330 exc = "Operation was cancelled"
7331 except Exception as e:
7332 exc = traceback.format_exc()
7333 self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
7334 return "FAILED", "Error in operate VNF {}".format(exc)
7335
David Garciaaae391f2020-11-09 11:12:54 +01007336 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7337 """
7338 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7339
7340 :param: vim_account_id: VIM Account ID
7341
7342 :return: (cloud_name, cloud_credential)
7343 """
bravof922c4172020-11-24 21:21:43 -03007344 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
David Garciaaae391f2020-11-09 11:12:54 +01007345 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7346
7347 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7348 """
7349 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7350
7351 :param: vim_account_id: VIM Account ID
7352
7353 :return: (cloud_name, cloud_credential)
7354 """
bravof922c4172020-11-24 21:21:43 -03007355 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
David Garciaaae391f2020-11-09 11:12:54 +01007356 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
elumalai80bcf1c2022-04-28 18:05:01 +05307357
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        Delegates the whole migration to the RO and waits for the RO action
        to finish; finally publishes the result on the kafka "ns" topic.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # RO target: the operation parameters as provided by the caller
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until the RO action finishes (or times out)
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
                operation="migrate"
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # always return the ns to IDLE and record the operation result;
            # "step" holds the last action attempted, for error reporting
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                # NOTE(review): db_nsr_update is filled here but never written
                # to the "nsrs" collection in this method — confirm whether an
                # update_db_2/_write_ns_status(other_update=...) call is missing
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    # notify subscribers (e.g. NBI) of the final state
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    # notification failure must not fail the operation itself
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
garciadeblas07f4e4c2022-06-09 09:42:58 +02007456
7457
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Launches the VIM healing through the RO as a background task and, for
        each target VNF/VDU in the operation parameters, re-deploys/repairs
        the associated execution environments via _heal_n2vc; finally waits
        for all pending tasks and publishes the result on the "ns" topic.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        # previous statuses are restored on failure
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # launch VIM healing in background; awaited later in the finally
            # block through _wait_for_tasks
            step = "Sending heal order to VIM"
            task_ro = asyncio.ensure_future(
                self.heal_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    db_nslcmop=db_nslcmop,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            tasks_dict_info[task_ro] = "Healing at VIM"

            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    for target_vdu in target_vnf["additionalParams"].get("vdu", None):
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf["additionalParams"].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf["additionalParams"].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index",0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            # NOTE(review): db_vnfr.get("vdur", None) may be None
                            # and target_instance may remain None after the loop,
                            # making the .get() below raise — confirm callers
                            # guarantee matching vdur entries for healed VNFs
                            for instance in db_vnfr.get("vdur", None):
                                if ( instance["vdu-name"] == vdu_name and instance["count-index"] == vdu_index ):
                                    target_instance = instance
                                    break
                            if vnf_ip_address == target_instance.get("ip-address"):
                                # VNF-level EE: deliberately passes vdu_id=None,
                                # vdu_index=0, vdu_name=None
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the RO/N2VC tasks launched above before concluding
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the statuses read at the start; then mark the
                    # failing side (config vs operational) per pending tasks
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(
                        nslcmop_id, step, exc
                    )
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    # notify subscribers (e.g. NBI) of the final state
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    # notification failure must not fail the operation itself
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7725
7726 async def heal_RO(
7727 self,
7728 logging_text,
7729 nsr_id,
7730 db_nslcmop,
7731 stage,
7732 ):
7733 """
7734 Heal at RO
7735 :param logging_text: preffix text to use at logging
7736 :param nsr_id: nsr identity
7737 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7738 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7739 :return: None or exception
7740 """
7741 def get_vim_account(vim_account_id):
7742 nonlocal db_vims
7743 if vim_account_id in db_vims:
7744 return db_vims[vim_account_id]
7745 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7746 db_vims[vim_account_id] = db_vim
7747 return db_vim
7748
7749 try:
7750 start_heal = time()
7751 ns_params = db_nslcmop.get("operationParams")
7752 if ns_params and ns_params.get("timeout_ns_heal"):
7753 timeout_ns_heal = ns_params["timeout_ns_heal"]
7754 else:
7755 timeout_ns_heal = self.timeout.get(
7756 "ns_heal", self.timeout_ns_heal
7757 )
7758
7759 db_vims = {}
7760
7761 nslcmop_id = db_nslcmop["_id"]
7762 target = {
7763 "action_id": nslcmop_id,
7764 }
7765 self.logger.warning("db_nslcmop={} and timeout_ns_heal={}".format(db_nslcmop,timeout_ns_heal))
7766 target.update(db_nslcmop.get("operationParams", {}))
7767
7768 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7769 desc = await self.RO.recreate(nsr_id, target)
7770 self.logger.debug("RO return > {}".format(desc))
7771 action_id = desc["action_id"]
7772 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7773 await self._wait_ng_ro(
7774 nsr_id, action_id, nslcmop_id, start_heal, timeout_ns_heal, stage,
7775 operation="healing"
7776 )
7777
7778 # Updating NSR
7779 db_nsr_update = {
7780 "_admin.deployed.RO.operational-status": "running",
7781 "detailed-status": " ".join(stage),
7782 }
7783 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7784 self._write_op_status(nslcmop_id, stage)
7785 self.logger.debug(
7786 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7787 )
7788
7789 except Exception as e:
7790 stage[2] = "ERROR healing at VIM"
7791 #self.set_vnfr_at_error(db_vnfrs, str(e))
7792 self.logger.error(
7793 "Error healing at VIM {}".format(e),
7794 exc_info=not isinstance(
7795 e,
7796 (
7797 ROclient.ROClientException,
7798 LcmException,
7799 DbException,
7800 NgRoException,
7801 ),
7802 ),
7803 )
7804 raise
7805
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Schedule one heal_N2VC task per execution environment of this element.

        For every execution environment (charm/helm) declared in
        ``descriptor_config``, locate (or create) its entry in
        ``<nsrs>._admin.deployed.VCA``, then launch ``heal_N2VC`` as an asyncio
        task, register it in ``self.lcm_tasks`` and record it in
        ``task_instantiation_info``.

        :param logging_text: prefix for all log messages
        :param db_nsr: NS record (mutated: VCA entries may be appended)
        :param db_vnfr: VNF record of the element being healed (or None for NS level)
        :param nslcmop_id: current ns lcm operation id
        :param nsr_id: NS record id
        :param nsi_id: network-slice instance id, if any
        :param vnfd_id: VNFD id of the element
        :param vdu_id: VDU id (None for VNF/NS/KDU level)
        :param kdu_name: KDU name (None unless KDU level)
        :param member_vnf_index: member-vnf-index (falsy for NS level)
        :param vdu_index: VDU count index
        :param vdu_name: VDU name
        :param deploy_params: parameters passed to day-1 primitives
        :param descriptor_config: configuration section of the descriptor
        :param base_folder: artifact location info ("folder", "pkg-dir")
        :param task_instantiation_info: dict task -> human-readable task name (mutated)
        :param stage: 3-element list with current stage strings (shared with tasks)
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        # Collect the execution environments to process. Only EE lists and
        # plain juju (NS charm) configurations are supported.
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type/vca_name from the EE descriptor:
            # - juju with charm -> lxc_proxy_charm (k8s cloud overrides to
            #   k8s_proxy_charm; proxy=False overrides to native_charm)
            # - helm-chart -> helm (v2) or helm-v3 (default)
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Find the deployed-VCA entry matching this element + EE. If the
            # for loop breaks, vca_index points at the existing entry; the
            # else branch (no break) creates a new entry at vca_index + 1.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory record consistent with the DB write above
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            # register so the task can be tracked/cancelled per ns operation
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
7958
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-attach/re-configure the VCA of a healed element.

        Heal-time counterpart of instantiate_N2VC: for native charms it waits
        for the (re-created) VM, registers a new execution environment and
        re-installs the configuration software; for proxy/helm types it waits
        for RO healing to finish and re-injects the SSH key. Optionally
        (deploy_params["run-day1"]) re-runs the initial config primitives.
        Progress is written to the configurationStatus entry ``vca_index`` of
        the nsr record.

        :param logging_text: prefix for all log messages
        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param nsi_id: network-slice instance id, if any (used in namespace)
        :param db_nsr: NS record (read; updates go through update_db_2)
        :param db_vnfr: VNF record or None for NS-level configuration
        :param vdu_id: VDU id (None for VNF/NS/KDU level)
        :param kdu_name: KDU name (None unless KDU level)
        :param vdu_index: VDU count index
        :param config_descriptor: configuration section of the descriptor
        :param deploy_params: primitive parameters; also receives "rw_mgmt_ip"
        :param base_folder: artifact location info ("folder", "pkg-dir")
        :param nslcmop_id: current ns lcm operation id
        :param stage: 3-element list of stage strings, mutated for op status
        :param vca_type: one of native_charm/lxc_proxy_charm/k8s_proxy_charm/helm/helm-v3
        :param vca_name: charm or helm-chart name
        :param ee_config_descriptor: execution-environment item of the descriptor
        :raises LcmException: on any failure, wrapping the failing step
        """
        nsr_id = db_nsr["_id"]
        # dot-terminated prefix for partial updates of this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""  # updated before each await; reported on failure
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Narrow element_type/namespace down to VNF -> VDU or KDU level
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                # SOL004-style package without pkg-dir: artifacts under Scripts/
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                # no key injection yet (user/pub_key None); only wait for IP
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                #other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # pass the "config" primitive (if declared) at install time
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id,self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

                # store rw_mgmt_ip in deploy params for later replacement
                deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1",False)
            self.logger.debug(" Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id,vdu_id,runDay1))
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                            )
                            check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                # unexpected exception types get a full traceback in the log
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            # wrap with the step that was in progress so the operator sees where it failed
            raise LcmException("{} {}".format(step, e)) from e
8362
8363 async def _wait_heal_ro(
8364 self,
8365 nsr_id,
8366 timeout=600,
8367 ):
8368 start_time = time()
8369 while time() <= start_time + timeout:
8370 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8371 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"]["operational-status"]
8372 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8373 if operational_status_ro != "healing":
8374 break
8375 await asyncio.sleep(15, loop=self.loop)
8376 else: # timeout_ns_deploy
8377 raise NgRoException("Timeout waiting ns to deploy")
govindarajul4ff4b512022-05-02 20:02:41 +05308378
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        Delegates the whole operation to RO (``self.RO.vertical_scale``) with
        the nslcmop operationParams as target, waits for the RO action to
        finish, then writes the operation result to the nslcmop record and
        notifies kafka topic "ns" / key "verticalscaled".

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation; nothing to do here
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is populated below but never persisted
        # with update_db_2 — looks like an oversight; confirm intended behavior
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id
            )
            step = "Getting nslcmop from database"
            self.logger.debug(step + " after having waited for previous tasks to be completed")
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # RO target is simply the operation parameters as provided
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until RO reports the action finished (or timeout)
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_verticalscale
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            # always clear the "current operation" marker on the ns record
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # best-effort kafka notification; failures are only logged
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")