blob: 33e1d18b054483f9f45b8607a28b4dbe541028fa [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
aticigdffa6212022-04-12 15:27:53 +030020import shutil
David Garcia444bf962021-11-11 16:35:26 +010021from typing import Any, Dict, List
tierno59d22d22018-09-25 18:10:19 +020022import yaml
23import logging
24import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020025import traceback
David Garciad4816682019-12-09 14:57:43 +010026import json
garciadeblas5697b8b2021-03-24 09:17:02 +010027from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33)
tierno59d22d22018-09-25 18:10:19 +020034
tierno77677d92019-08-22 13:46:35 +000035from osm_lcm import ROclient
David Garciab4ebcd02021-10-28 02:00:43 +020036from osm_lcm.data_utils.nsr import (
37 get_deployed_kdu,
38 get_deployed_vca,
39 get_deployed_vca_list,
40 get_nsd,
41)
42from osm_lcm.data_utils.vca import (
43 DeployedComponent,
44 DeployedK8sResource,
45 DeployedVCA,
46 EELevel,
47 Relation,
48 EERelation,
49 safe_get_ee_relation,
50)
tierno69f0d382020-05-07 13:08:09 +000051from osm_lcm.ng_ro import NgRoClient, NgRoException
garciadeblas5697b8b2021-03-24 09:17:02 +010052from osm_lcm.lcm_utils import (
53 LcmException,
54 LcmExceptionNoMgmtIP,
55 LcmBase,
56 deep_get,
57 get_iterable,
58 populate_dict,
aticigdffa6212022-04-12 15:27:53 +030059 check_juju_bundle_existence,
60 get_charm_artifact_path,
garciadeblas5697b8b2021-03-24 09:17:02 +010061)
David Garciab4ebcd02021-10-28 02:00:43 +020062from osm_lcm.data_utils.nsd import (
63 get_ns_configuration_relation_list,
64 get_vnf_profile,
65 get_vnf_profiles,
66)
garciadeblas5697b8b2021-03-24 09:17:02 +010067from osm_lcm.data_utils.vnfd import (
David Garcia78b6e6d2022-04-29 05:50:46 +020068 get_kdu,
69 get_kdu_services,
David Garciab4ebcd02021-10-28 02:00:43 +020070 get_relation_list,
garciadeblas5697b8b2021-03-24 09:17:02 +010071 get_vdu_list,
72 get_vdu_profile,
73 get_ee_sorted_initial_config_primitive_list,
74 get_ee_sorted_terminate_config_primitive_list,
75 get_kdu_list,
76 get_virtual_link_profiles,
77 get_vdu,
78 get_configuration,
79 get_vdu_index,
80 get_scaling_aspect,
81 get_number_of_instances,
82 get_juju_ee_ref,
David Garciab4ebcd02021-10-28 02:00:43 +020083 get_kdu_resource_profile,
aticigdffa6212022-04-12 15:27:53 +030084 find_software_version,
garciadeblas5697b8b2021-03-24 09:17:02 +010085)
bravof922c4172020-11-24 21:21:43 -030086from osm_lcm.data_utils.list_utils import find_in_list
aticig349aa462022-05-19 12:29:35 +030087from osm_lcm.data_utils.vnfr import (
88 get_osm_params,
89 get_vdur_index,
90 get_kdur,
91 get_volumes_from_instantiation_params,
92)
bravof922c4172020-11-24 21:21:43 -030093from osm_lcm.data_utils.dict_utils import parse_yaml_strings
94from osm_lcm.data_utils.database.vim_account import VimAccountDB
David Garciab4ebcd02021-10-28 02:00:43 +020095from n2vc.definitions import RelationEndpoint
calvinosanch9f9c6f22019-11-04 13:37:39 +010096from n2vc.k8s_helm_conn import K8sHelmConnector
lloretgalleg18ebc3a2020-10-22 09:54:51 +000097from n2vc.k8s_helm3_conn import K8sHelm3Connector
Adam Israelbaacc302019-12-01 12:41:39 -050098from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020099
tierno27246d82018-09-27 15:59:09 +0200100from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +0200101from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +0200102
bravof922c4172020-11-24 21:21:43 -0300103from osm_lcm.data_utils.database.database import Database
104from osm_lcm.data_utils.filesystem.filesystem import Filesystem
105
quilesj7e13aeb2019-10-08 13:34:55 +0200106from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +0000107from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +0200108
tierno588547c2020-07-01 15:30:20 +0000109from osm_lcm.lcm_helm_conn import LCMHelmConn
David Garcia78b6e6d2022-04-29 05:50:46 +0200110from osm_lcm.osm_config import OsmConfigBuilder
bravof73bac502021-05-11 07:38:47 -0400111from osm_lcm.prometheus import parse_job
tierno588547c2020-07-01 15:30:20 +0000112
tierno27246d82018-09-27 15:59:09 +0200113from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +0200114from time import time
tierno27246d82018-09-27 15:59:09 +0200115from uuid import uuid4
lloretgalleg7c121132020-07-08 07:53:22 +0000116
tiernob996d942020-07-03 14:52:28 +0000117from random import randint
tierno59d22d22018-09-25 18:10:19 +0200118
tierno69f0d382020-05-07 13:08:09 +0000119__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
tierno59d22d22018-09-25 18:10:19 +0200120
121
122class NsLcm(LcmBase):
    # --- Operation timeouts (seconds) and sub-operation status codes ---
    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operate (start/stop) a vnf
    # Sentinel return values used by the sub-operation lookup helpers
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"  # task label used in lcm task bookkeeping
kuuseac3a8882019-10-03 10:48:06 +0200141
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler, passed to LcmBase
        :param lcm_tasks: shared task registry used to track asyncio tasks
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared with the rest of LCM
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # db/fs are process-wide singletons already initialized elsewhere
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local mutations do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (juju-based VCA); db callbacks keep nsrs in sync
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution environments share the n2vc db-update callback
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # k8s connectors: helm v2, helm v3 and juju-bundle flavours
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # dispatch table: kdu/cluster type string -> connector instance
        # note "chart" maps to helm v3 (the current default helm)
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # dispatch table: VCA (execution environment) type -> connector instance
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # RO status polling function per lifecycle operation; healing uses the
        # dedicated recreate_status endpoint
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
        }
228
tierno2357f4e2020-10-19 16:38:59 +0000229 @staticmethod
230 def increment_ip_mac(ip_mac, vm_index=1):
231 if not isinstance(ip_mac, str):
232 return ip_mac
233 try:
234 # try with ipv4 look for last dot
235 i = ip_mac.rfind(".")
236 if i > 0:
237 i += 1
238 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
239 # try with ipv6 or mac look for last colon. Operate in hex
240 i = ip_mac.rfind(":")
241 if i > 0:
242 i += 1
243 # format in hex, len can be 2 for mac or 4 for ipv6
garciadeblas5697b8b2021-03-24 09:17:02 +0100244 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
245 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
246 )
tierno2357f4e2020-10-19 16:38:59 +0000247 except Exception:
248 pass
249 return None
250
quilesj3655ae02019-12-12 16:08:35 +0000251 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200252
quilesj3655ae02019-12-12 16:08:35 +0000253 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
254
255 try:
256 # TODO filter RO descriptor fields...
257
258 # write to database
259 db_dict = dict()
260 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
garciadeblas5697b8b2021-03-24 09:17:02 +0100261 db_dict["deploymentStatus"] = ro_descriptor
quilesj3655ae02019-12-12 16:08:35 +0000262 self.update_db_2("nsrs", nsrs_id, db_dict)
263
264 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100265 self.logger.warn(
266 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
267 )
quilesj3655ae02019-12-12 16:08:35 +0000268
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback fired by N2VC when juju reports a model change.

        Refreshes vcaStatus, configurationStatus and nsState in the "nsrs"
        record identified by filter["_id"].
        :param table: name of the table that changed (not used directly here)
        :param filter: db filter; only its "_id" entry is read as the nsr id
        :param path: dotted path of the changed field; its last numeric
            component is interpreted as the VCA index
        :param updated_data: changed data (not used directly here)
        :param vca_id: optional id to address a specific VCA
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS (all models under namespace ".<nsr_id>")
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # VCA index is the numeric suffix of the changed path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # reconcile: BROKEN<->READY based on the live juju status
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus (e.g. path has no index,
                # or index out of range) -- best effort only
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeout must propagate to the task machinery
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200371
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id; only its "_id" entry is read.
            NOTE(review): despite the None default, filter.get() below requires
            a dict -- confirm callers always pass one
        :param vca_id: optional id to address a specific VCA
        :param cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        nsr_id = filter.get("_id")
        try:
            # query live KDU status through the connector matching cluster_type
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            if cluster_type in ("juju-bundle", "juju"):
                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
                # status in a similar way between Juju Bundles and Helm Charts on this side
                await self.k8sclusterjuju.update_vca_status(
                    db_dict["vcaStatus"],
                    kdu_instance,
                    vca_id=vca_id,
                )

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeout must propagate to the task machinery
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
ksaikiranr656b6dd2021-02-19 10:25:18 +0530420
tierno72ef84f2020-10-06 08:22:07 +0000421 @staticmethod
422 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
423 try:
424 env = Environment(undefined=StrictUndefined)
425 template = env.from_string(cloud_init_text)
426 return template.render(additional_params or {})
427 except UndefinedError as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100428 raise LcmException(
429 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
430 "file, must be provided in the instantiation parameters inside the "
431 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
432 )
tierno72ef84f2020-10-06 08:22:07 +0000433 except (TemplateError, TemplateNotFound) as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100434 raise LcmException(
435 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
436 vnfd_id, vdu_id, e
437 )
438 )
tierno72ef84f2020-10-06 08:22:07 +0000439
bravof922c4172020-11-24 21:21:43 -0300440 def _get_vdu_cloud_init_content(self, vdu, vnfd):
441 cloud_init_content = cloud_init_file = None
tierno72ef84f2020-10-06 08:22:07 +0000442 try:
tierno72ef84f2020-10-06 08:22:07 +0000443 if vdu.get("cloud-init-file"):
444 base_folder = vnfd["_admin"]["storage"]
bravof486707f2021-11-08 17:18:50 -0300445 if base_folder["pkg-dir"]:
446 cloud_init_file = "{}/{}/cloud_init/{}".format(
447 base_folder["folder"],
448 base_folder["pkg-dir"],
449 vdu["cloud-init-file"],
450 )
451 else:
452 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
453 base_folder["folder"],
454 vdu["cloud-init-file"],
455 )
tierno72ef84f2020-10-06 08:22:07 +0000456 with self.fs.file_open(cloud_init_file, "r") as ci_file:
457 cloud_init_content = ci_file.read()
458 elif vdu.get("cloud-init"):
459 cloud_init_content = vdu["cloud-init"]
460
461 return cloud_init_content
462 except FsException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +0100463 raise LcmException(
464 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
465 vnfd["id"], vdu["id"], cloud_init_file, e
466 )
467 )
tierno72ef84f2020-10-06 08:22:07 +0000468
tierno72ef84f2020-10-06 08:22:07 +0000469 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
garciadeblas5697b8b2021-03-24 09:17:02 +0100470 vdur = next(
aticig349aa462022-05-19 12:29:35 +0300471 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
garciadeblas5697b8b2021-03-24 09:17:02 +0100472 )
tierno72ef84f2020-10-06 08:22:07 +0000473 additional_params = vdur.get("additionalParams")
bravof922c4172020-11-24 21:21:43 -0300474 return parse_yaml_strings(additional_params)
tierno72ef84f2020-10-06 08:22:07 +0000475
gcalvino35be9152018-12-20 09:33:12 +0100476 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
tierno59d22d22018-09-25 18:10:19 +0200477 """
478 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
479 :param vnfd: input vnfd
480 :param new_id: overrides vnf id if provided
tierno8a518872018-12-21 13:42:14 +0000481 :param additionalParams: Instantiation params for VNFs provided
gcalvino35be9152018-12-20 09:33:12 +0100482 :param nsrId: Id of the NSR
tierno59d22d22018-09-25 18:10:19 +0200483 :return: copy of vnfd
484 """
tierno72ef84f2020-10-06 08:22:07 +0000485 vnfd_RO = deepcopy(vnfd)
486 # remove unused by RO configuration, monitoring, scaling and internal keys
487 vnfd_RO.pop("_id", None)
488 vnfd_RO.pop("_admin", None)
tierno72ef84f2020-10-06 08:22:07 +0000489 vnfd_RO.pop("monitoring-param", None)
490 vnfd_RO.pop("scaling-group-descriptor", None)
491 vnfd_RO.pop("kdu", None)
492 vnfd_RO.pop("k8s-cluster", None)
493 if new_id:
494 vnfd_RO["id"] = new_id
tierno8a518872018-12-21 13:42:14 +0000495
tierno72ef84f2020-10-06 08:22:07 +0000496 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
497 for vdu in get_iterable(vnfd_RO, "vdu"):
498 vdu.pop("cloud-init-file", None)
499 vdu.pop("cloud-init", None)
500 return vnfd_RO
tierno59d22d22018-09-25 18:10:19 +0200501
tierno2357f4e2020-10-19 16:38:59 +0000502 @staticmethod
503 def ip_profile_2_RO(ip_profile):
504 RO_ip_profile = deepcopy(ip_profile)
505 if "dns-server" in RO_ip_profile:
506 if isinstance(RO_ip_profile["dns-server"], list):
507 RO_ip_profile["dns-address"] = []
508 for ds in RO_ip_profile.pop("dns-server"):
garciadeblas5697b8b2021-03-24 09:17:02 +0100509 RO_ip_profile["dns-address"].append(ds["address"])
tierno2357f4e2020-10-19 16:38:59 +0000510 else:
511 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
512 if RO_ip_profile.get("ip-version") == "ipv4":
513 RO_ip_profile["ip-version"] = "IPv4"
514 if RO_ip_profile.get("ip-version") == "ipv6":
515 RO_ip_profile["ip-version"] = "IPv6"
516 if "dhcp-params" in RO_ip_profile:
517 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
518 return RO_ip_profile
519
bravof922c4172020-11-24 21:21:43 -0300520 def _get_ro_vim_id_for_vim_account(self, vim_account):
521 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
522 if db_vim["_admin"]["operationalState"] != "ENABLED":
garciadeblas5697b8b2021-03-24 09:17:02 +0100523 raise LcmException(
524 "VIM={} is not available. operationalState={}".format(
525 vim_account, db_vim["_admin"]["operationalState"]
526 )
527 )
bravof922c4172020-11-24 21:21:43 -0300528 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
529 return RO_vim_id
tierno59d22d22018-09-25 18:10:19 +0200530
bravof922c4172020-11-24 21:21:43 -0300531 def get_ro_wim_id_for_wim_account(self, wim_account):
532 if isinstance(wim_account, str):
533 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
534 if db_wim["_admin"]["operationalState"] != "ENABLED":
garciadeblas5697b8b2021-03-24 09:17:02 +0100535 raise LcmException(
536 "WIM={} is not available. operationalState={}".format(
537 wim_account, db_wim["_admin"]["operationalState"]
538 )
539 )
bravof922c4172020-11-24 21:21:43 -0300540 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
541 return RO_wim_id
542 else:
543 return wim_account
tierno59d22d22018-09-25 18:10:19 +0200544
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Scale the vdur list of a vnfr record in the database (and in the
        passed db_vnfr dict, which is refreshed from the db at the end).

        :param db_vnfr: vnfr content; modified in place at the end
        :param vdu_create: dict vdu-id -> number of instances to add
        :param vdu_delete: dict vdu-id -> number of instances to remove
        :param mark_delete: if True, only mark vdurs as DELETING instead of
            pulling them from the database
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the last existing vdur of this vdu as the clone source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db (scale from 0 case):
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # clone the source vdur and reset per-instance fields
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per instance;
                        # dynamic ones are cleared so the VIM assigns them
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be management of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances: keep the last vdur as a
                # template so a later scale-out can clone from it
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the newest vdu_count vdurs of this vdu as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        # assemble the push_list for the single final db write
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
tierno27246d82018-09-27 15:59:09 +0200656
tiernof578e552018-11-08 19:07:20 +0100657 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
658 """
659 Updates database nsr with the RO info for the created vld
660 :param ns_update_nsr: dictionary to be filled with the updated info
661 :param db_nsr: content of db_nsr. This is also modified
662 :param nsr_desc_RO: nsr descriptor from RO
663 :return: Nothing, LcmException is raised on errors
664 """
665
666 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
667 for net_RO in get_iterable(nsr_desc_RO, "nets"):
668 if vld["id"] != net_RO.get("ns_net_osm_id"):
669 continue
670 vld["vim-id"] = net_RO.get("vim_net_id")
671 vld["name"] = net_RO.get("vim_name")
672 vld["status"] = net_RO.get("status")
673 vld["status-detailed"] = net_RO.get("error_msg")
674 ns_update_nsr["vld.{}".format(vld_index)] = vld
675 break
676 else:
garciadeblas5697b8b2021-03-24 09:17:02 +0100677 raise LcmException(
678 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
679 )
tiernof578e552018-11-08 19:07:20 +0100680
tiernoe876f672020-02-13 14:34:48 +0000681 def set_vnfr_at_error(self, db_vnfrs, error_text):
682 try:
683 for db_vnfr in db_vnfrs.values():
684 vnfr_update = {"status": "ERROR"}
685 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
686 if "status" not in vdur:
687 vdur["status"] = "ERROR"
688 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
689 if error_text:
690 vdur["status-detailed"] = str(error_text)
garciadeblas5697b8b2021-03-24 09:17:02 +0100691 vnfr_update[
692 "vdur.{}.status-detailed".format(vdu_index)
693 ] = "ERROR"
tiernoe876f672020-02-13 14:34:48 +0000694 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
695 except DbException as e:
696 self.logger.error("Cannot update vnf. {}".format(e))
697
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing. LcmException is raised when an expected element is missing at
            the RO info; LcmExceptionNoMgmtIP when a vnf with VDUs has no IP address
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # for-else below: raise if the RO descriptor has no vnf for this member index
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may return several addresses separated by ";"; keep the first one
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by RO, nothing to update
                        continue
                    # for-else below: raise if no RO vm matches this vdu-id/count-index
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        # skip as many same-vdu RO vms as this vdur's replica index
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy ip/mac of every interface from the matching RO interface
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    # for-else below: raise if the RO nets do not contain this internal vld
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
tierno59d22d22018-09-25 18:10:19 +0200794
tierno5ee02052019-12-05 19:55:02 +0000795 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000796 """
797 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000798 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000799 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
800 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
801 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
802 """
tierno5ee02052019-12-05 19:55:02 +0000803 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
804 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000805 mapping = {}
806 ns_config_info = {"osm-config-mapping": mapping}
807 for vca in vca_deployed_list:
808 if not vca["member-vnf-index"]:
809 continue
810 if not vca["vdu_id"]:
811 mapping[vca["member-vnf-index"]] = vca["application"]
812 else:
garciadeblas5697b8b2021-03-24 09:17:02 +0100813 mapping[
814 "{}.{}.{}".format(
815 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
816 )
817 ] = vca["application"]
tiernoc3f2a822019-11-05 13:45:04 +0000818 return ns_config_info
819
    async def _instantiate_ng_ro(
        self,
        logging_text,
        nsr_id,
        nsd,
        db_nsr,
        db_nslcmop,
        db_vnfrs,
        db_vnfds,
        n2vc_key_list,
        stage,
        start_deploy,
        timeout_ns_deploy,
    ):
        """
        Build the NG-RO "target" deployment descriptor from the NS/VNF records and the
        instantiation parameters, send it to RO and wait until it is deployed at VIM.

        :param logging_text: prefix for log messages
        :param nsr_id: id of the ns record
        :param nsd: ns descriptor content
        :param db_nsr: ns record content (vlds, images, flavors... are copied from it)
        :param db_nslcmop: current operation record; its operationParams (or those of the
            original instantiate operation) override descriptor defaults
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param db_vnfds: list of vnf descriptor contents
        :param n2vc_key_list: ssh public keys to be injected at mgmt vdus
        :param stage: 3-item list of progress strings, updated while waiting
        :param start_deploy: epoch seconds when deployment started
        :param timeout_ns_deploy: maximum seconds to wait for RO
        :return: None. NgRoException is raised on RO failure or timeout
        """

        # per-call cache of vim_accounts database records, keyed by vim account id
        db_vims = {}

        def get_vim_account(vim_account_id):
            # return the vim_accounts record, reading the database only once per id
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        # modify target_vld info with instantiation parameters
        def parse_vld_instantiation_params(
            target_vim, target_vld, vld_params, target_sdn
        ):
            if vld_params.get("ip-profile"):
                target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
                    "ip-profile"
                ]
            if vld_params.get("provider-network"):
                target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
                    "provider-network"
                ]
                if "sdn-ports" in vld_params["provider-network"] and target_sdn:
                    target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                        "provider-network"
                    ]["sdn-ports"]
            if vld_params.get("wimAccountId"):
                target_wim = "wim:{}".format(vld_params["wimAccountId"])
                target_vld["vim_info"][target_wim] = {}
            for param in ("vim-network-name", "vim-network-id"):
                if vld_params.get(param):
                    if isinstance(vld_params[param], dict):
                        # dict form: one network name/id per vim account
                        for vim, vim_net in vld_params[param].items():
                            other_target_vim = "vim:" + vim
                            populate_dict(
                                target_vld["vim_info"],
                                (other_target_vim, param.replace("-", "_")),
                                vim_net,
                            )
                    else:  # isinstance str
                        target_vld["vim_info"][target_vim][
                            param.replace("-", "_")
                        ] = vld_params[param]
            if vld_params.get("common_id"):
                target_vld["common_id"] = vld_params.get("common_id")

        # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
        def update_ns_vld_target(target, ns_params):
            for vnf_params in ns_params.get("vnf", ()):
                if vnf_params.get("vimAccountId"):
                    target_vnf = next(
                        (
                            vnfr
                            for vnfr in db_vnfrs.values()
                            if vnf_params["member-vnf-index"]
                            == vnfr["member-vnf-index-ref"]
                        ),
                        None,
                    )
                    # NOTE(review): only the first vdur of the vnf is inspected for
                    # ns-vld connections -- confirm multi-vdu vnfs are covered elsewhere
                    vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
                    for a_index, a_vld in enumerate(target["ns"]["vld"]):
                        target_vld = find_in_list(
                            get_iterable(vdur, "interfaces"),
                            lambda iface: iface.get("ns-vld-id") == a_vld["name"],
                        )

                        vld_params = find_in_list(
                            get_iterable(ns_params, "vld"),
                            lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
                        )
                        if target_vld:

                            if vnf_params.get("vimAccountId") not in a_vld.get(
                                "vim_info", {}
                            ):
                                # reuse the network name already resolved for another vim
                                target_vim_network_list = [
                                    v for _, v in a_vld.get("vim_info").items()
                                ]
                                target_vim_network_name = next(
                                    (
                                        item.get("vim_network_name", "")
                                        for item in target_vim_network_list
                                    ),
                                    "",
                                )

                                target["ns"]["vld"][a_index].get("vim_info").update(
                                    {
                                        "vim:{}".format(vnf_params["vimAccountId"]): {
                                            "vim_network_name": target_vim_network_name,
                                        }
                                    }
                                )

                                if vld_params:
                                    # per-vim network overrides from instantiation params
                                    for param in ("vim-network-name", "vim-network-id"):
                                        if vld_params.get(param) and isinstance(
                                            vld_params[param], dict
                                        ):
                                            for vim, vim_net in vld_params[
                                                param
                                            ].items():
                                                other_target_vim = "vim:" + vim
                                                populate_dict(
                                                    target["ns"]["vld"][a_index].get(
                                                        "vim_info"
                                                    ),
                                                    (
                                                        other_target_vim,
                                                        param.replace("-", "_"),
                                                    ),
                                                    vim_net,
                                                )

        nslcmop_id = db_nslcmop["_id"]
        # skeleton of the descriptor sent to NG-RO
        target = {
            "name": db_nsr["name"],
            "ns": {"vld": []},
            "vnf": [],
            "image": deepcopy(db_nsr["image"]),
            "flavor": deepcopy(db_nsr["flavor"]),
            "action_id": nslcmop_id,
            "cloud_init_content": {},
        }
        for image in target["image"]:
            image["vim_info"] = {}
        for flavor in target["flavor"]:
            flavor["vim_info"] = {}
        if db_nsr.get("affinity-or-anti-affinity-group"):
            target["affinity-or-anti-affinity-group"] = deepcopy(
                db_nsr["affinity-or-anti-affinity-group"]
            )
            for affinity_or_anti_affinity_group in target[
                "affinity-or-anti-affinity-group"
            ]:
                affinity_or_anti_affinity_group["vim_info"] = {}

        if db_nslcmop.get("lcmOperationType") != "instantiate":
            # get parameters of instantiation:
            db_nslcmop_instantiate = self.db.get_list(
                "nslcmops",
                {
                    "nsInstanceId": db_nslcmop["nsInstanceId"],
                    "lcmOperationType": "instantiate",
                },
            )[-1]
            ns_params = db_nslcmop_instantiate.get("operationParams")
        else:
            ns_params = db_nslcmop.get("operationParams")
        ssh_keys_instantiation = ns_params.get("ssh_keys") or []
        ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])

        # maps "member_vnf:<index>.<cpd-id>" to the ns vld it connects to
        cp2target = {}
        for vld_index, vld in enumerate(db_nsr.get("vld")):
            target_vim = "vim:{}".format(ns_params["vimAccountId"])
            target_vld = {
                "id": vld["id"],
                "name": vld["name"],
                "mgmt-network": vld.get("mgmt-network", False),
                "type": vld.get("type"),
                "vim_info": {
                    target_vim: {
                        "vim_network_name": vld.get("vim-network-name"),
                        "vim_account_id": ns_params["vimAccountId"],
                    }
                },
            }
            # check if this network needs SDN assist
            if vld.get("pci-interfaces"):
                db_vim = get_vim_account(ns_params["vimAccountId"])
                sdnc_id = db_vim["config"].get("sdn-controller")
                if sdnc_id:
                    sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
                    target_sdn = "sdn:{}".format(sdnc_id)
                    target_vld["vim_info"][target_sdn] = {
                        "sdn": True,
                        "target_vim": target_vim,
                        "vlds": [sdn_vld],
                        "type": vld.get("type"),
                    }

            nsd_vnf_profiles = get_vnf_profiles(nsd)
            for nsd_vnf_profile in nsd_vnf_profiles:
                for cp in nsd_vnf_profile["virtual-link-connectivity"]:
                    if cp["virtual-link-profile-id"] == vld["id"]:
                        cp2target[
                            "member_vnf:{}.{}".format(
                                cp["constituent-cpd-id"][0][
                                    "constituent-base-element-id"
                                ],
                                cp["constituent-cpd-id"][0]["constituent-cpd-id"],
                            )
                        ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)

            # check at nsd descriptor, if there is an ip-profile
            vld_params = {}
            nsd_vlp = find_in_list(
                get_virtual_link_profiles(nsd),
                lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
                == vld["id"],
            )
            if (
                nsd_vlp
                and nsd_vlp.get("virtual-link-protocol-data")
                and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
            ):
                # translate descriptor l3-protocol-data keys to RO ip-profile keys
                ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
                    "l3-protocol-data"
                ]
                ip_profile_dest_data = {}
                if "ip-version" in ip_profile_source_data:
                    ip_profile_dest_data["ip-version"] = ip_profile_source_data[
                        "ip-version"
                    ]
                if "cidr" in ip_profile_source_data:
                    ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
                        "cidr"
                    ]
                if "gateway-ip" in ip_profile_source_data:
                    ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
                        "gateway-ip"
                    ]
                if "dhcp-enabled" in ip_profile_source_data:
                    ip_profile_dest_data["dhcp-params"] = {
                        "enabled": ip_profile_source_data["dhcp-enabled"]
                    }
                vld_params["ip-profile"] = ip_profile_dest_data

            # update vld_params with instantiation params
            vld_instantiation_params = find_in_list(
                get_iterable(ns_params, "vld"),
                lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
            )
            if vld_instantiation_params:
                vld_params.update(vld_instantiation_params)
            parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
            target["ns"]["vld"].append(target_vld)
        # Update the target ns_vld if vnf vim_account is overriden by instantiation params
        update_ns_vld_target(target, ns_params)

        for vnfr in db_vnfrs.values():
            vnfd = find_in_list(
                db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
            )
            vnf_params = find_in_list(
                get_iterable(ns_params, "vnf"),
                lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
            )
            target_vnf = deepcopy(vnfr)
            target_vim = "vim:{}".format(vnfr["vim-account-id"])
            for vld in target_vnf.get("vld", ()):
                # check if connected to a ns.vld, to fill target
                vnf_cp = find_in_list(
                    vnfd.get("int-virtual-link-desc", ()),
                    lambda cpd: cpd.get("id") == vld["id"],
                )
                if vnf_cp:
                    ns_cp = "member_vnf:{}.{}".format(
                        vnfr["member-vnf-index-ref"], vnf_cp["id"]
                    )
                    if cp2target.get(ns_cp):
                        vld["target"] = cp2target[ns_cp]

                vld["vim_info"] = {
                    target_vim: {"vim_network_name": vld.get("vim-network-name")}
                }
                # check if this network needs SDN assist
                target_sdn = None
                if vld.get("pci-interfaces"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    sdnc_id = db_vim["config"].get("sdn-controller")
                    if sdnc_id:
                        sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
                        target_sdn = "sdn:{}".format(sdnc_id)
                        vld["vim_info"][target_sdn] = {
                            "sdn": True,
                            "target_vim": target_vim,
                            "vlds": [sdn_vld],
                            "type": vld.get("type"),
                        }

                # check at vnfd descriptor, if there is an ip-profile
                vld_params = {}
                vnfd_vlp = find_in_list(
                    get_virtual_link_profiles(vnfd),
                    lambda a_link_profile: a_link_profile["id"] == vld["id"],
                )
                if (
                    vnfd_vlp
                    and vnfd_vlp.get("virtual-link-protocol-data")
                    and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
                ):
                    # translate descriptor l3-protocol-data keys to RO ip-profile keys
                    ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
                        "l3-protocol-data"
                    ]
                    ip_profile_dest_data = {}
                    if "ip-version" in ip_profile_source_data:
                        ip_profile_dest_data["ip-version"] = ip_profile_source_data[
                            "ip-version"
                        ]
                    if "cidr" in ip_profile_source_data:
                        ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
                            "cidr"
                        ]
                    if "gateway-ip" in ip_profile_source_data:
                        ip_profile_dest_data[
                            "gateway-address"
                        ] = ip_profile_source_data["gateway-ip"]
                    if "dhcp-enabled" in ip_profile_source_data:
                        ip_profile_dest_data["dhcp-params"] = {
                            "enabled": ip_profile_source_data["dhcp-enabled"]
                        }

                    vld_params["ip-profile"] = ip_profile_dest_data
                # update vld_params with instantiation params
                if vnf_params:
                    vld_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "internal-vld"),
                        lambda i_vld: i_vld["name"] == vld["id"],
                    )
                    if vld_instantiation_params:
                        vld_params.update(vld_instantiation_params)
                parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)

            vdur_list = []
            for vdur in target_vnf.get("vdur", ()):
                if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
                    continue  # This vdu must not be created
                # NOTE(review): this vim_info shape is replaced below by
                # {target_vim: {}} before appending -- confirm intermediate value is unused
                vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}

                # ssh_keys_all contains public keys only (instantiation + n2vc)
                self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))

                if ssh_keys_all:
                    # inject keys at vdu level if it has ssh-access config, else at vnf
                    # level when the vdu owns a mgmt interface
                    vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
                    vnf_configuration = get_configuration(vnfd, vnfd["id"])
                    if (
                        vdu_configuration
                        and vdu_configuration.get("config-access")
                        and vdu_configuration.get("config-access").get("ssh-access")
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vdu_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif (
                        vnf_configuration
                        and vnf_configuration.get("config-access")
                        and vnf_configuration.get("config-access").get("ssh-access")
                        and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vnf_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif ssh_keys_instantiation and find_in_list(
                        vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
                    ):
                        vdur["ssh-keys"] = ssh_keys_instantiation

                self.logger.debug("NS > vdur > {}".format(vdur))

                vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
                # cloud-init
                if vdud.get("cloud-init-file"):
                    vdur["cloud-init"] = "{}:file:{}".format(
                        vnfd["_id"], vdud.get("cloud-init-file")
                    )
                    # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
                    if vdur["cloud-init"] not in target["cloud_init_content"]:
                        base_folder = vnfd["_admin"]["storage"]
                        if base_folder["pkg-dir"]:
                            cloud_init_file = "{}/{}/cloud_init/{}".format(
                                base_folder["folder"],
                                base_folder["pkg-dir"],
                                vdud.get("cloud-init-file"),
                            )
                        else:
                            cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                                base_folder["folder"],
                                vdud.get("cloud-init-file"),
                            )
                        with self.fs.file_open(cloud_init_file, "r") as ci_file:
                            target["cloud_init_content"][
                                vdur["cloud-init"]
                            ] = ci_file.read()
                elif vdud.get("cloud-init"):
                    vdur["cloud-init"] = "{}:vdu:{}".format(
                        vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
                    )
                    # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
                    target["cloud_init_content"][vdur["cloud-init"]] = vdud[
                        "cloud-init"
                    ]
                vdur["additionalParams"] = vdur.get("additionalParams") or {}
                deploy_params_vdu = self._format_additional_params(
                    vdur.get("additionalParams") or {}
                )
                deploy_params_vdu["OSM"] = get_osm_params(
                    vnfr, vdur["vdu-id-ref"], vdur["count-index"]
                )
                vdur["additionalParams"] = deploy_params_vdu

                # flavor
                ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
                if target_vim not in ns_flavor["vim_info"]:
                    ns_flavor["vim_info"][target_vim] = {}

                # deal with images
                # in case alternative images are provided we must check if they should be applied
                # for the vim_type, modify the vim_type taking into account
                ns_image_id = int(vdur["ns-image-id"])
                if vdur.get("alt-image-ids"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    vim_type = db_vim["vim_type"]
                    for alt_image_id in vdur.get("alt-image-ids"):
                        ns_alt_image = target["image"][int(alt_image_id)]
                        if vim_type == ns_alt_image.get("vim-type"):
                            # must use alternative image
                            self.logger.debug(
                                "use alternative image id: {}".format(alt_image_id)
                            )
                            ns_image_id = alt_image_id
                            vdur["ns-image-id"] = ns_image_id
                            break
                ns_image = target["image"][int(ns_image_id)]
                if target_vim not in ns_image["vim_info"]:
                    ns_image["vim_info"][target_vim] = {}

                # Affinity groups
                if vdur.get("affinity-or-anti-affinity-group-id"):
                    for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
                        ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
                        if target_vim not in ns_ags["vim_info"]:
                            ns_ags["vim_info"][target_vim] = {}

                vdur["vim_info"] = {target_vim: {}}
                # instantiation parameters
                if vnf_params:
                    vdu_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "vdu"),
                        lambda i_vdu: i_vdu["id"] == vdud["id"],
                    )
                    if vdu_instantiation_params:
                        # Parse the vdu_volumes from the instantiation params
                        vdu_volumes = get_volumes_from_instantiation_params(
                            vdu_instantiation_params, vdud
                        )
                        vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
                vdur_list.append(vdur)
            target_vnf["vdur"] = vdur_list
            target["vnf"].append(target_vnf)

        self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
        desc = await self.RO.deploy(nsr_id, target)
        self.logger.debug("RO return > {}".format(desc))
        action_id = desc["action_id"]
        await self._wait_ng_ro(
            nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage,
            operation="instantiation"
        )

        # Updating NSR
        db_nsr_update = {
            "_admin.deployed.RO.operational-status": "running",
            "detailed-status": " ".join(stage),
        }
        # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)
        self.logger.debug(
            logging_text + "ns deployed at RO. RO_id={}".format(action_id)
        )
        return
1308
garciadeblas5697b8b2021-03-24 09:17:02 +01001309 async def _wait_ng_ro(
1310 self,
1311 nsr_id,
1312 action_id,
1313 nslcmop_id=None,
1314 start_time=None,
1315 timeout=600,
1316 stage=None,
garciadeblas07f4e4c2022-06-09 09:42:58 +02001317 operation=None,
garciadeblas5697b8b2021-03-24 09:17:02 +01001318 ):
tierno69f0d382020-05-07 13:08:09 +00001319 detailed_status_old = None
1320 db_nsr_update = {}
tierno2357f4e2020-10-19 16:38:59 +00001321 start_time = start_time or time()
tierno69f0d382020-05-07 13:08:09 +00001322 while time() <= start_time + timeout:
garciadeblas07f4e4c2022-06-09 09:42:58 +02001323 desc_status = await self.op_status_map[operation](nsr_id, action_id)
bravof922c4172020-11-24 21:21:43 -03001324 self.logger.debug("Wait NG RO > {}".format(desc_status))
tierno69f0d382020-05-07 13:08:09 +00001325 if desc_status["status"] == "FAILED":
1326 raise NgRoException(desc_status["details"])
1327 elif desc_status["status"] == "BUILD":
tierno2357f4e2020-10-19 16:38:59 +00001328 if stage:
1329 stage[2] = "VIM: ({})".format(desc_status["details"])
tierno69f0d382020-05-07 13:08:09 +00001330 elif desc_status["status"] == "DONE":
tierno2357f4e2020-10-19 16:38:59 +00001331 if stage:
1332 stage[2] = "Deployed at VIM"
tierno69f0d382020-05-07 13:08:09 +00001333 break
1334 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001335 assert False, "ROclient.check_ns_status returns unknown {}".format(
1336 desc_status["status"]
1337 )
tierno2357f4e2020-10-19 16:38:59 +00001338 if stage and nslcmop_id and stage[2] != detailed_status_old:
tierno69f0d382020-05-07 13:08:09 +00001339 detailed_status_old = stage[2]
1340 db_nsr_update["detailed-status"] = " ".join(stage)
1341 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1342 self._write_op_status(nslcmop_id, stage)
bravof922c4172020-11-24 21:21:43 -03001343 await asyncio.sleep(15, loop=self.loop)
tierno69f0d382020-05-07 13:08:09 +00001344 else: # timeout_ns_deploy
1345 raise NgRoException("Timeout waiting ns to deploy")
1346
garciadeblas5697b8b2021-03-24 09:17:02 +01001347 async def _terminate_ng_ro(
1348 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1349 ):
tierno69f0d382020-05-07 13:08:09 +00001350 db_nsr_update = {}
1351 failed_detail = []
1352 action_id = None
1353 start_deploy = time()
1354 try:
1355 target = {
1356 "ns": {"vld": []},
1357 "vnf": [],
1358 "image": [],
1359 "flavor": [],
garciadeblas5697b8b2021-03-24 09:17:02 +01001360 "action_id": nslcmop_id,
tierno69f0d382020-05-07 13:08:09 +00001361 }
1362 desc = await self.RO.deploy(nsr_id, target)
1363 action_id = desc["action_id"]
1364 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1365 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
garciadeblas5697b8b2021-03-24 09:17:02 +01001366 self.logger.debug(
1367 logging_text
1368 + "ns terminate action at RO. action_id={}".format(action_id)
1369 )
tierno69f0d382020-05-07 13:08:09 +00001370
1371 # wait until done
1372 delete_timeout = 20 * 60 # 20 minutes
garciadeblas5697b8b2021-03-24 09:17:02 +01001373 await self._wait_ng_ro(
garciadeblas07f4e4c2022-06-09 09:42:58 +02001374 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage,
1375 operation="termination"
garciadeblas5697b8b2021-03-24 09:17:02 +01001376 )
tierno69f0d382020-05-07 13:08:09 +00001377
1378 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1379 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1380 # delete all nsr
1381 await self.RO.delete(nsr_id)
1382 except Exception as e:
1383 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1384 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1385 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1386 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
garciadeblas5697b8b2021-03-24 09:17:02 +01001387 self.logger.debug(
1388 logging_text + "RO_action_id={} already deleted".format(action_id)
1389 )
tierno69f0d382020-05-07 13:08:09 +00001390 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1391 failed_detail.append("delete conflict: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01001392 self.logger.debug(
1393 logging_text
1394 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1395 )
tierno69f0d382020-05-07 13:08:09 +00001396 else:
1397 failed_detail.append("delete error: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01001398 self.logger.error(
1399 logging_text
1400 + "RO_action_id={} delete error: {}".format(action_id, e)
1401 )
tierno69f0d382020-05-07 13:08:09 +00001402
1403 if failed_detail:
1404 stage[2] = "Error deleting from VIM"
1405 else:
1406 stage[2] = "Deleted from VIM"
1407 db_nsr_update["detailed-status"] = " ".join(stage)
1408 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1409 self._write_op_status(nslcmop_id, stage)
1410
1411 if failed_detail:
1412 raise LcmException("; ".join(failed_detail))
1413 return
1414
garciadeblas5697b8b2021-03-24 09:17:02 +01001415 async def instantiate_RO(
1416 self,
1417 logging_text,
1418 nsr_id,
1419 nsd,
1420 db_nsr,
1421 db_nslcmop,
1422 db_vnfrs,
1423 db_vnfds,
1424 n2vc_key_list,
1425 stage,
1426 ):
tiernoe95ed362020-04-23 08:24:57 +00001427 """
1428 Instantiate at RO
1429 :param logging_text: preffix text to use at logging
1430 :param nsr_id: nsr identity
1431 :param nsd: database content of ns descriptor
1432 :param db_nsr: database content of ns record
1433 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1434 :param db_vnfrs:
bravof922c4172020-11-24 21:21:43 -03001435 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
tiernoe95ed362020-04-23 08:24:57 +00001436 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1437 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1438 :return: None or exception
1439 """
tiernoe876f672020-02-13 14:34:48 +00001440 try:
tiernoe876f672020-02-13 14:34:48 +00001441 start_deploy = time()
1442 ns_params = db_nslcmop.get("operationParams")
1443 if ns_params and ns_params.get("timeout_ns_deploy"):
1444 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1445 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001446 timeout_ns_deploy = self.timeout.get(
1447 "ns_deploy", self.timeout_ns_deploy
1448 )
quilesj7e13aeb2019-10-08 13:34:55 +02001449
tiernoe876f672020-02-13 14:34:48 +00001450 # Check for and optionally request placement optimization. Database will be updated if placement activated
1451 stage[2] = "Waiting for Placement."
tierno8790a3d2020-04-23 22:49:52 +00001452 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1453 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1454 for vnfr in db_vnfrs.values():
1455 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1456 break
1457 else:
1458 ns_params["vimAccountId"] == vnfr["vim-account-id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001459
garciadeblas5697b8b2021-03-24 09:17:02 +01001460 return await self._instantiate_ng_ro(
1461 logging_text,
1462 nsr_id,
1463 nsd,
1464 db_nsr,
1465 db_nslcmop,
1466 db_vnfrs,
1467 db_vnfds,
1468 n2vc_key_list,
1469 stage,
1470 start_deploy,
1471 timeout_ns_deploy,
1472 )
tierno2357f4e2020-10-19 16:38:59 +00001473 except Exception as e:
tierno067e04a2020-03-31 12:53:13 +00001474 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +00001475 self.set_vnfr_at_error(db_vnfrs, str(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01001476 self.logger.error(
1477 "Error deploying at VIM {}".format(e),
1478 exc_info=not isinstance(
1479 e,
1480 (
1481 ROclient.ROClientException,
1482 LcmException,
1483 DbException,
1484 NgRoException,
1485 ),
1486 ),
1487 )
tiernoe876f672020-02-13 14:34:48 +00001488 raise
quilesj7e13aeb2019-10-08 13:34:55 +02001489
tierno7ecbc342020-09-21 14:05:39 +00001490 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1491 """
1492 Wait for kdu to be up, get ip address
1493 :param logging_text: prefix use for logging
1494 :param nsr_id:
1495 :param vnfr_id:
1496 :param kdu_name:
David Garcia78b6e6d2022-04-29 05:50:46 +02001497 :return: IP address, K8s services
tierno7ecbc342020-09-21 14:05:39 +00001498 """
1499
1500 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1501 nb_tries = 0
1502
1503 while nb_tries < 360:
1504 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
garciadeblas5697b8b2021-03-24 09:17:02 +01001505 kdur = next(
1506 (
1507 x
1508 for x in get_iterable(db_vnfr, "kdur")
1509 if x.get("kdu-name") == kdu_name
1510 ),
1511 None,
1512 )
tierno7ecbc342020-09-21 14:05:39 +00001513 if not kdur:
garciadeblas5697b8b2021-03-24 09:17:02 +01001514 raise LcmException(
1515 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1516 )
tierno7ecbc342020-09-21 14:05:39 +00001517 if kdur.get("status"):
1518 if kdur["status"] in ("READY", "ENABLED"):
David Garcia78b6e6d2022-04-29 05:50:46 +02001519 return kdur.get("ip-address"), kdur.get("services")
tierno7ecbc342020-09-21 14:05:39 +00001520 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001521 raise LcmException(
1522 "target KDU={} is in error state".format(kdu_name)
1523 )
tierno7ecbc342020-09-21 14:05:39 +00001524
1525 await asyncio.sleep(10, loop=self.loop)
1526 nb_tries += 1
1527 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1528
garciadeblas5697b8b2021-03-24 09:17:02 +01001529 async def wait_vm_up_insert_key_ro(
1530 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1531 ):
tiernoa5088192019-11-26 16:12:53 +00001532 """
1533 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1534 :param logging_text: prefix use for logging
1535 :param nsr_id:
1536 :param vnfr_id:
1537 :param vdu_id:
1538 :param vdu_index:
1539 :param pub_key: public ssh key to inject, None to skip
1540 :param user: user to apply the public ssh key
1541 :return: IP address
1542 """
quilesj7e13aeb2019-10-08 13:34:55 +02001543
tierno2357f4e2020-10-19 16:38:59 +00001544 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
tiernod8323042019-08-09 11:32:23 +00001545 ro_nsr_id = None
1546 ip_address = None
1547 nb_tries = 0
1548 target_vdu_id = None
quilesj3149f262019-12-03 10:58:10 +00001549 ro_retries = 0
quilesj7e13aeb2019-10-08 13:34:55 +02001550
tiernod8323042019-08-09 11:32:23 +00001551 while True:
quilesj7e13aeb2019-10-08 13:34:55 +02001552
quilesj3149f262019-12-03 10:58:10 +00001553 ro_retries += 1
1554 if ro_retries >= 360: # 1 hour
garciadeblas5697b8b2021-03-24 09:17:02 +01001555 raise LcmException(
1556 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1557 )
quilesj3149f262019-12-03 10:58:10 +00001558
tiernod8323042019-08-09 11:32:23 +00001559 await asyncio.sleep(10, loop=self.loop)
quilesj7e13aeb2019-10-08 13:34:55 +02001560
1561 # get ip address
tiernod8323042019-08-09 11:32:23 +00001562 if not target_vdu_id:
1563 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
quilesj3149f262019-12-03 10:58:10 +00001564
1565 if not vdu_id: # for the VNF case
tiernoe876f672020-02-13 14:34:48 +00001566 if db_vnfr.get("status") == "ERROR":
garciadeblas5697b8b2021-03-24 09:17:02 +01001567 raise LcmException(
1568 "Cannot inject ssh-key because target VNF is in error state"
1569 )
tiernod8323042019-08-09 11:32:23 +00001570 ip_address = db_vnfr.get("ip-address")
1571 if not ip_address:
1572 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01001573 vdur = next(
1574 (
1575 x
1576 for x in get_iterable(db_vnfr, "vdur")
1577 if x.get("ip-address") == ip_address
1578 ),
1579 None,
1580 )
quilesj3149f262019-12-03 10:58:10 +00001581 else: # VDU case
garciadeblas5697b8b2021-03-24 09:17:02 +01001582 vdur = next(
1583 (
1584 x
1585 for x in get_iterable(db_vnfr, "vdur")
1586 if x.get("vdu-id-ref") == vdu_id
1587 and x.get("count-index") == vdu_index
1588 ),
1589 None,
1590 )
quilesj3149f262019-12-03 10:58:10 +00001591
garciadeblas5697b8b2021-03-24 09:17:02 +01001592 if (
1593 not vdur and len(db_vnfr.get("vdur", ())) == 1
1594 ): # If only one, this should be the target vdu
tierno0e8c3f02020-03-12 17:18:21 +00001595 vdur = db_vnfr["vdur"][0]
quilesj3149f262019-12-03 10:58:10 +00001596 if not vdur:
garciadeblas5697b8b2021-03-24 09:17:02 +01001597 raise LcmException(
1598 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1599 vnfr_id, vdu_id, vdu_index
1600 )
1601 )
tierno2357f4e2020-10-19 16:38:59 +00001602 # New generation RO stores information at "vim_info"
1603 ng_ro_status = None
David Garciaa8bbe672020-11-19 13:06:54 +01001604 target_vim = None
tierno2357f4e2020-10-19 16:38:59 +00001605 if vdur.get("vim_info"):
garciadeblas5697b8b2021-03-24 09:17:02 +01001606 target_vim = next(
1607 t for t in vdur["vim_info"]
1608 ) # there should be only one key
tierno2357f4e2020-10-19 16:38:59 +00001609 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
garciadeblas5697b8b2021-03-24 09:17:02 +01001610 if (
1611 vdur.get("pdu-type")
1612 or vdur.get("status") == "ACTIVE"
1613 or ng_ro_status == "ACTIVE"
1614 ):
quilesj3149f262019-12-03 10:58:10 +00001615 ip_address = vdur.get("ip-address")
1616 if not ip_address:
1617 continue
1618 target_vdu_id = vdur["vdu-id-ref"]
bravof922c4172020-11-24 21:21:43 -03001619 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
garciadeblas5697b8b2021-03-24 09:17:02 +01001620 raise LcmException(
1621 "Cannot inject ssh-key because target VM is in error state"
1622 )
quilesj3149f262019-12-03 10:58:10 +00001623
tiernod8323042019-08-09 11:32:23 +00001624 if not target_vdu_id:
1625 continue
tiernod8323042019-08-09 11:32:23 +00001626
quilesj7e13aeb2019-10-08 13:34:55 +02001627 # inject public key into machine
1628 if pub_key and user:
tierno2357f4e2020-10-19 16:38:59 +00001629 self.logger.debug(logging_text + "Inserting RO key")
bravof922c4172020-11-24 21:21:43 -03001630 self.logger.debug("SSH > PubKey > {}".format(pub_key))
tierno0e8c3f02020-03-12 17:18:21 +00001631 if vdur.get("pdu-type"):
1632 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1633 return ip_address
quilesj7e13aeb2019-10-08 13:34:55 +02001634 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01001635 ro_vm_id = "{}-{}".format(
1636 db_vnfr["member-vnf-index-ref"], target_vdu_id
1637 ) # TODO add vdu_index
tierno69f0d382020-05-07 13:08:09 +00001638 if self.ng_ro:
garciadeblas5697b8b2021-03-24 09:17:02 +01001639 target = {
1640 "action": {
1641 "action": "inject_ssh_key",
1642 "key": pub_key,
1643 "user": user,
1644 },
1645 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1646 }
tierno2357f4e2020-10-19 16:38:59 +00001647 desc = await self.RO.deploy(nsr_id, target)
1648 action_id = desc["action_id"]
garciadeblas07f4e4c2022-06-09 09:42:58 +02001649 await self._wait_ng_ro(nsr_id, action_id, timeout=600, operation="instantiation")
tierno2357f4e2020-10-19 16:38:59 +00001650 break
tierno69f0d382020-05-07 13:08:09 +00001651 else:
tierno2357f4e2020-10-19 16:38:59 +00001652 # wait until NS is deployed at RO
1653 if not ro_nsr_id:
1654 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
garciadeblas5697b8b2021-03-24 09:17:02 +01001655 ro_nsr_id = deep_get(
1656 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1657 )
tierno2357f4e2020-10-19 16:38:59 +00001658 if not ro_nsr_id:
1659 continue
tierno69f0d382020-05-07 13:08:09 +00001660 result_dict = await self.RO.create_action(
1661 item="ns",
1662 item_id_name=ro_nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01001663 descriptor={
1664 "add_public_key": pub_key,
1665 "vms": [ro_vm_id],
1666 "user": user,
1667 },
tierno69f0d382020-05-07 13:08:09 +00001668 )
1669 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1670 if not result_dict or not isinstance(result_dict, dict):
garciadeblas5697b8b2021-03-24 09:17:02 +01001671 raise LcmException(
1672 "Unknown response from RO when injecting key"
1673 )
tierno69f0d382020-05-07 13:08:09 +00001674 for result in result_dict.values():
1675 if result.get("vim_result") == 200:
1676 break
1677 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01001678 raise ROclient.ROClientException(
1679 "error injecting key: {}".format(
1680 result.get("description")
1681 )
1682 )
tierno69f0d382020-05-07 13:08:09 +00001683 break
1684 except NgRoException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01001685 raise LcmException(
1686 "Reaching max tries injecting key. Error: {}".format(e)
1687 )
quilesj7e13aeb2019-10-08 13:34:55 +02001688 except ROclient.ROClientException as e:
tiernoa5088192019-11-26 16:12:53 +00001689 if not nb_tries:
garciadeblas5697b8b2021-03-24 09:17:02 +01001690 self.logger.debug(
1691 logging_text
1692 + "error injecting key: {}. Retrying until {} seconds".format(
1693 e, 20 * 10
1694 )
1695 )
quilesj7e13aeb2019-10-08 13:34:55 +02001696 nb_tries += 1
tiernoa5088192019-11-26 16:12:53 +00001697 if nb_tries >= 20:
garciadeblas5697b8b2021-03-24 09:17:02 +01001698 raise LcmException(
1699 "Reaching max tries injecting key. Error: {}".format(e)
1700 )
quilesj7e13aeb2019-10-08 13:34:55 +02001701 else:
quilesj7e13aeb2019-10-08 13:34:55 +02001702 break
1703
1704 return ip_address
1705
tierno5ee02052019-12-05 19:55:02 +00001706 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1707 """
1708 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1709 """
1710 my_vca = vca_deployed_list[vca_index]
1711 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
quilesj3655ae02019-12-12 16:08:35 +00001712 # vdu or kdu: no dependencies
tierno5ee02052019-12-05 19:55:02 +00001713 return
1714 timeout = 300
1715 while timeout >= 0:
quilesj3655ae02019-12-12 16:08:35 +00001716 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1717 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1718 configuration_status_list = db_nsr["configurationStatus"]
1719 for index, vca_deployed in enumerate(configuration_status_list):
tierno5ee02052019-12-05 19:55:02 +00001720 if index == vca_index:
quilesj3655ae02019-12-12 16:08:35 +00001721 # myself
tierno5ee02052019-12-05 19:55:02 +00001722 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01001723 if not my_vca.get("member-vnf-index") or (
1724 vca_deployed.get("member-vnf-index")
1725 == my_vca.get("member-vnf-index")
1726 ):
quilesj3655ae02019-12-12 16:08:35 +00001727 internal_status = configuration_status_list[index].get("status")
garciadeblas5697b8b2021-03-24 09:17:02 +01001728 if internal_status == "READY":
quilesj3655ae02019-12-12 16:08:35 +00001729 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01001730 elif internal_status == "BROKEN":
1731 raise LcmException(
1732 "Configuration aborted because dependent charm/s has failed"
1733 )
quilesj3655ae02019-12-12 16:08:35 +00001734 else:
1735 break
tierno5ee02052019-12-05 19:55:02 +00001736 else:
quilesj3655ae02019-12-12 16:08:35 +00001737 # no dependencies, return
tierno5ee02052019-12-05 19:55:02 +00001738 return
1739 await asyncio.sleep(10)
1740 timeout -= 1
tierno5ee02052019-12-05 19:55:02 +00001741
1742 raise LcmException("Configuration aborted because dependent charm/s timeout")
1743
David Garciac1fe90a2021-03-31 19:12:02 +02001744 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
David Garcia5506c182021-10-21 17:03:48 +02001745 vca_id = None
1746 if db_vnfr:
1747 vca_id = deep_get(db_vnfr, ("vca-id",))
1748 elif db_nsr:
1749 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1750 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1751 return vca_id
David Garciac1fe90a2021-03-31 19:12:02 +02001752
garciadeblas5697b8b2021-03-24 09:17:02 +01001753 async def instantiate_N2VC(
1754 self,
1755 logging_text,
1756 vca_index,
1757 nsi_id,
1758 db_nsr,
1759 db_vnfr,
1760 vdu_id,
1761 kdu_name,
1762 vdu_index,
1763 config_descriptor,
1764 deploy_params,
1765 base_folder,
1766 nslcmop_id,
1767 stage,
1768 vca_type,
1769 vca_name,
1770 ee_config_descriptor,
1771 ):
tiernod8323042019-08-09 11:32:23 +00001772 nsr_id = db_nsr["_id"]
1773 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
tiernoda6fb102019-11-23 00:36:52 +00001774 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernod8323042019-08-09 11:32:23 +00001775 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
tiernob996d942020-07-03 14:52:28 +00001776 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
quilesj7e13aeb2019-10-08 13:34:55 +02001777 db_dict = {
garciadeblas5697b8b2021-03-24 09:17:02 +01001778 "collection": "nsrs",
1779 "filter": {"_id": nsr_id},
1780 "path": db_update_entry,
quilesj7e13aeb2019-10-08 13:34:55 +02001781 }
tiernod8323042019-08-09 11:32:23 +00001782 step = ""
1783 try:
quilesj3655ae02019-12-12 16:08:35 +00001784
garciadeblas5697b8b2021-03-24 09:17:02 +01001785 element_type = "NS"
quilesj3655ae02019-12-12 16:08:35 +00001786 element_under_configuration = nsr_id
1787
tiernod8323042019-08-09 11:32:23 +00001788 vnfr_id = None
1789 if db_vnfr:
1790 vnfr_id = db_vnfr["_id"]
tiernob996d942020-07-03 14:52:28 +00001791 osm_config["osm"]["vnf_id"] = vnfr_id
tiernod8323042019-08-09 11:32:23 +00001792
garciadeblas5697b8b2021-03-24 09:17:02 +01001793 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001794
aktas98488ed2021-07-29 17:42:49 +03001795 if vca_type == "native_charm":
1796 index_number = 0
1797 else:
1798 index_number = vdu_index or 0
1799
tiernod8323042019-08-09 11:32:23 +00001800 if vnfr_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01001801 element_type = "VNF"
quilesj3655ae02019-12-12 16:08:35 +00001802 element_under_configuration = vnfr_id
aktas98488ed2021-07-29 17:42:49 +03001803 namespace += ".{}-{}".format(vnfr_id, index_number)
tiernod8323042019-08-09 11:32:23 +00001804 if vdu_id:
aktas98488ed2021-07-29 17:42:49 +03001805 namespace += ".{}-{}".format(vdu_id, index_number)
garciadeblas5697b8b2021-03-24 09:17:02 +01001806 element_type = "VDU"
aktas98488ed2021-07-29 17:42:49 +03001807 element_under_configuration = "{}-{}".format(vdu_id, index_number)
tiernob996d942020-07-03 14:52:28 +00001808 osm_config["osm"]["vdu_id"] = vdu_id
tierno51183952020-04-03 15:48:18 +00001809 elif kdu_name:
aktas98488ed2021-07-29 17:42:49 +03001810 namespace += ".{}".format(kdu_name)
garciadeblas5697b8b2021-03-24 09:17:02 +01001811 element_type = "KDU"
tierno51183952020-04-03 15:48:18 +00001812 element_under_configuration = kdu_name
tiernob996d942020-07-03 14:52:28 +00001813 osm_config["osm"]["kdu_name"] = kdu_name
tiernod8323042019-08-09 11:32:23 +00001814
1815 # Get artifact path
bravof486707f2021-11-08 17:18:50 -03001816 if base_folder["pkg-dir"]:
1817 artifact_path = "{}/{}/{}/{}".format(
1818 base_folder["folder"],
1819 base_folder["pkg-dir"],
1820 "charms"
aticig15db6142022-01-24 12:51:26 +03001821 if vca_type
1822 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
bravof486707f2021-11-08 17:18:50 -03001823 else "helm-charts",
1824 vca_name,
1825 )
1826 else:
1827 artifact_path = "{}/Scripts/{}/{}/".format(
1828 base_folder["folder"],
1829 "charms"
aticig15db6142022-01-24 12:51:26 +03001830 if vca_type
1831 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
bravof486707f2021-11-08 17:18:50 -03001832 else "helm-charts",
1833 vca_name,
1834 )
bravof922c4172020-11-24 21:21:43 -03001835
1836 self.logger.debug("Artifact path > {}".format(artifact_path))
1837
tiernoa278b842020-07-08 15:33:55 +00001838 # get initial_config_primitive_list that applies to this element
garciadeblas5697b8b2021-03-24 09:17:02 +01001839 initial_config_primitive_list = config_descriptor.get(
1840 "initial-config-primitive"
1841 )
tiernoa278b842020-07-08 15:33:55 +00001842
garciadeblas5697b8b2021-03-24 09:17:02 +01001843 self.logger.debug(
1844 "Initial config primitive list > {}".format(
1845 initial_config_primitive_list
1846 )
1847 )
bravof922c4172020-11-24 21:21:43 -03001848
tiernoa278b842020-07-08 15:33:55 +00001849 # add config if not present for NS charm
1850 ee_descriptor_id = ee_config_descriptor.get("id")
bravof922c4172020-11-24 21:21:43 -03001851 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
garciadeblas5697b8b2021-03-24 09:17:02 +01001852 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1853 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1854 )
tiernod8323042019-08-09 11:32:23 +00001855
garciadeblas5697b8b2021-03-24 09:17:02 +01001856 self.logger.debug(
1857 "Initial config primitive list #2 > {}".format(
1858 initial_config_primitive_list
1859 )
1860 )
tierno588547c2020-07-01 15:30:20 +00001861 # n2vc_redesign STEP 3.1
tierno588547c2020-07-01 15:30:20 +00001862 # find old ee_id if exists
1863 ee_id = vca_deployed.get("ee_id")
tiernod8323042019-08-09 11:32:23 +00001864
David Garciac1fe90a2021-03-31 19:12:02 +02001865 vca_id = self.get_vca_id(db_vnfr, db_nsr)
tierno588547c2020-07-01 15:30:20 +00001866 # create or register execution environment in VCA
lloretgalleg18ebc3a2020-10-22 09:54:51 +00001867 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
quilesj7e13aeb2019-10-08 13:34:55 +02001868
tierno588547c2020-07-01 15:30:20 +00001869 self._write_configuration_status(
1870 nsr_id=nsr_id,
1871 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01001872 status="CREATING",
tierno588547c2020-07-01 15:30:20 +00001873 element_under_configuration=element_under_configuration,
garciadeblas5697b8b2021-03-24 09:17:02 +01001874 element_type=element_type,
tierno588547c2020-07-01 15:30:20 +00001875 )
tiernod8323042019-08-09 11:32:23 +00001876
tierno588547c2020-07-01 15:30:20 +00001877 step = "create execution environment"
garciadeblas5697b8b2021-03-24 09:17:02 +01001878 self.logger.debug(logging_text + step)
David Garciaaae391f2020-11-09 11:12:54 +01001879
1880 ee_id = None
1881 credentials = None
1882 if vca_type == "k8s_proxy_charm":
1883 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
garciadeblas5697b8b2021-03-24 09:17:02 +01001884 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
David Garciaaae391f2020-11-09 11:12:54 +01001885 namespace=namespace,
1886 artifact_path=artifact_path,
1887 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02001888 vca_id=vca_id,
David Garciaaae391f2020-11-09 11:12:54 +01001889 )
garciadeblas5697b8b2021-03-24 09:17:02 +01001890 elif vca_type == "helm" or vca_type == "helm-v3":
1891 ee_id, credentials = await self.vca_map[
1892 vca_type
1893 ].create_execution_environment(
bravof922c4172020-11-24 21:21:43 -03001894 namespace=namespace,
1895 reuse_ee_id=ee_id,
1896 db_dict=db_dict,
lloretgalleg18cb3cb2020-12-10 14:21:10 +00001897 config=osm_config,
1898 artifact_path=artifact_path,
garciadeblas5697b8b2021-03-24 09:17:02 +01001899 vca_type=vca_type,
bravof922c4172020-11-24 21:21:43 -03001900 )
garciadeblas5697b8b2021-03-24 09:17:02 +01001901 else:
1902 ee_id, credentials = await self.vca_map[
1903 vca_type
1904 ].create_execution_environment(
David Garciaaae391f2020-11-09 11:12:54 +01001905 namespace=namespace,
1906 reuse_ee_id=ee_id,
1907 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02001908 vca_id=vca_id,
David Garciaaae391f2020-11-09 11:12:54 +01001909 )
quilesj3655ae02019-12-12 16:08:35 +00001910
tierno588547c2020-07-01 15:30:20 +00001911 elif vca_type == "native_charm":
1912 step = "Waiting to VM being up and getting IP address"
1913 self.logger.debug(logging_text + step)
garciadeblas5697b8b2021-03-24 09:17:02 +01001914 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1915 logging_text,
1916 nsr_id,
1917 vnfr_id,
1918 vdu_id,
1919 vdu_index,
1920 user=None,
1921 pub_key=None,
1922 )
tierno588547c2020-07-01 15:30:20 +00001923 credentials = {"hostname": rw_mgmt_ip}
1924 # get username
garciadeblas5697b8b2021-03-24 09:17:02 +01001925 username = deep_get(
1926 config_descriptor, ("config-access", "ssh-access", "default-user")
1927 )
tierno588547c2020-07-01 15:30:20 +00001928 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1929 # merged. Meanwhile let's get username from initial-config-primitive
tiernoa278b842020-07-08 15:33:55 +00001930 if not username and initial_config_primitive_list:
1931 for config_primitive in initial_config_primitive_list:
tierno588547c2020-07-01 15:30:20 +00001932 for param in config_primitive.get("parameter", ()):
1933 if param["name"] == "ssh-username":
1934 username = param["value"]
1935 break
1936 if not username:
garciadeblas5697b8b2021-03-24 09:17:02 +01001937 raise LcmException(
1938 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1939 "'config-access.ssh-access.default-user'"
1940 )
tierno588547c2020-07-01 15:30:20 +00001941 credentials["username"] = username
1942 # n2vc_redesign STEP 3.2
quilesj3655ae02019-12-12 16:08:35 +00001943
tierno588547c2020-07-01 15:30:20 +00001944 self._write_configuration_status(
1945 nsr_id=nsr_id,
1946 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01001947 status="REGISTERING",
tierno588547c2020-07-01 15:30:20 +00001948 element_under_configuration=element_under_configuration,
garciadeblas5697b8b2021-03-24 09:17:02 +01001949 element_type=element_type,
tierno588547c2020-07-01 15:30:20 +00001950 )
quilesj3655ae02019-12-12 16:08:35 +00001951
tierno588547c2020-07-01 15:30:20 +00001952 step = "register execution environment {}".format(credentials)
1953 self.logger.debug(logging_text + step)
1954 ee_id = await self.vca_map[vca_type].register_execution_environment(
David Garciaaae391f2020-11-09 11:12:54 +01001955 credentials=credentials,
1956 namespace=namespace,
1957 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02001958 vca_id=vca_id,
David Garciaaae391f2020-11-09 11:12:54 +01001959 )
tierno3bedc9b2019-11-27 15:46:57 +00001960
tierno588547c2020-07-01 15:30:20 +00001961 # for compatibility with MON/POL modules, the need model and application name at database
1962 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
garciadeblas5697b8b2021-03-24 09:17:02 +01001963 ee_id_parts = ee_id.split(".")
tierno588547c2020-07-01 15:30:20 +00001964 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1965 if len(ee_id_parts) >= 2:
1966 model_name = ee_id_parts[0]
1967 application_name = ee_id_parts[1]
1968 db_nsr_update[db_update_entry + "model"] = model_name
1969 db_nsr_update[db_update_entry + "application"] = application_name
tiernod8323042019-08-09 11:32:23 +00001970
1971 # n2vc_redesign STEP 3.3
tiernod8323042019-08-09 11:32:23 +00001972 step = "Install configuration Software"
quilesj3655ae02019-12-12 16:08:35 +00001973
tiernoc231a872020-01-21 08:49:05 +00001974 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001975 nsr_id=nsr_id,
1976 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01001977 status="INSTALLING SW",
quilesj3655ae02019-12-12 16:08:35 +00001978 element_under_configuration=element_under_configuration,
tierno51183952020-04-03 15:48:18 +00001979 element_type=element_type,
garciadeblas5697b8b2021-03-24 09:17:02 +01001980 other_update=db_nsr_update,
quilesj3655ae02019-12-12 16:08:35 +00001981 )
1982
tierno3bedc9b2019-11-27 15:46:57 +00001983 # TODO check if already done
quilesj7e13aeb2019-10-08 13:34:55 +02001984 self.logger.debug(logging_text + step)
David Garcia18a63322020-04-01 16:14:59 +02001985 config = None
tierno588547c2020-07-01 15:30:20 +00001986 if vca_type == "native_charm":
garciadeblas5697b8b2021-03-24 09:17:02 +01001987 config_primitive = next(
1988 (p for p in initial_config_primitive_list if p["name"] == "config"),
1989 None,
1990 )
tiernoa278b842020-07-08 15:33:55 +00001991 if config_primitive:
1992 config = self._map_primitive_params(
garciadeblas5697b8b2021-03-24 09:17:02 +01001993 config_primitive, {}, deploy_params
tiernoa278b842020-07-08 15:33:55 +00001994 )
tierno588547c2020-07-01 15:30:20 +00001995 num_units = 1
1996 if vca_type == "lxc_proxy_charm":
1997 if element_type == "NS":
1998 num_units = db_nsr.get("config-units") or 1
1999 elif element_type == "VNF":
2000 num_units = db_vnfr.get("config-units") or 1
2001 elif element_type == "VDU":
2002 for v in db_vnfr["vdur"]:
2003 if vdu_id == v["vdu-id-ref"]:
2004 num_units = v.get("config-units") or 1
2005 break
David Garciaaae391f2020-11-09 11:12:54 +01002006 if vca_type != "k8s_proxy_charm":
2007 await self.vca_map[vca_type].install_configuration_sw(
2008 ee_id=ee_id,
2009 artifact_path=artifact_path,
2010 db_dict=db_dict,
2011 config=config,
2012 num_units=num_units,
David Garciac1fe90a2021-03-31 19:12:02 +02002013 vca_id=vca_id,
aktas98488ed2021-07-29 17:42:49 +03002014 vca_type=vca_type,
David Garciaaae391f2020-11-09 11:12:54 +01002015 )
quilesj7e13aeb2019-10-08 13:34:55 +02002016
quilesj63f90042020-01-17 09:53:55 +00002017 # write in db flag of configuration_sw already installed
garciadeblas5697b8b2021-03-24 09:17:02 +01002018 self.update_db_2(
2019 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2020 )
quilesj63f90042020-01-17 09:53:55 +00002021
2022 # add relations for this VCA (wait for other peers related with this VCA)
garciadeblas5697b8b2021-03-24 09:17:02 +01002023 await self._add_vca_relations(
2024 logging_text=logging_text,
2025 nsr_id=nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01002026 vca_type=vca_type,
David Garciab4ebcd02021-10-28 02:00:43 +02002027 vca_index=vca_index,
garciadeblas5697b8b2021-03-24 09:17:02 +01002028 )
quilesj63f90042020-01-17 09:53:55 +00002029
quilesj7e13aeb2019-10-08 13:34:55 +02002030 # if SSH access is required, then get execution environment SSH public
David Garciaa27e20a2020-07-10 13:12:44 +02002031 # if native charm we have waited already to VM be UP
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002032 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
tierno3bedc9b2019-11-27 15:46:57 +00002033 pub_key = None
2034 user = None
tierno588547c2020-07-01 15:30:20 +00002035 # self.logger.debug("get ssh key block")
garciadeblas5697b8b2021-03-24 09:17:02 +01002036 if deep_get(
2037 config_descriptor, ("config-access", "ssh-access", "required")
2038 ):
tierno588547c2020-07-01 15:30:20 +00002039 # self.logger.debug("ssh key needed")
tierno3bedc9b2019-11-27 15:46:57 +00002040 # Needed to inject a ssh key
garciadeblas5697b8b2021-03-24 09:17:02 +01002041 user = deep_get(
2042 config_descriptor,
2043 ("config-access", "ssh-access", "default-user"),
2044 )
tierno3bedc9b2019-11-27 15:46:57 +00002045 step = "Install configuration Software, getting public ssh key"
David Garciac1fe90a2021-03-31 19:12:02 +02002046 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
garciadeblas5697b8b2021-03-24 09:17:02 +01002047 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
David Garciac1fe90a2021-03-31 19:12:02 +02002048 )
quilesj7e13aeb2019-10-08 13:34:55 +02002049
garciadeblas5697b8b2021-03-24 09:17:02 +01002050 step = "Insert public key into VM user={} ssh_key={}".format(
2051 user, pub_key
2052 )
tierno3bedc9b2019-11-27 15:46:57 +00002053 else:
tierno588547c2020-07-01 15:30:20 +00002054 # self.logger.debug("no need to get ssh key")
tierno3bedc9b2019-11-27 15:46:57 +00002055 step = "Waiting to VM being up and getting IP address"
2056 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02002057
Pedro Escaleira1e9c3e32022-05-30 15:37:01 +01002058 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2059 rw_mgmt_ip = None
2060
tierno3bedc9b2019-11-27 15:46:57 +00002061 # n2vc_redesign STEP 5.1
2062 # wait for RO (ip-address) Insert pub_key into VM
tierno5ee02052019-12-05 19:55:02 +00002063 if vnfr_id:
tierno7ecbc342020-09-21 14:05:39 +00002064 if kdu_name:
David Garcia78b6e6d2022-04-29 05:50:46 +02002065 rw_mgmt_ip, services = await self.wait_kdu_up(
garciadeblas5697b8b2021-03-24 09:17:02 +01002066 logging_text, nsr_id, vnfr_id, kdu_name
2067 )
David Garcia78b6e6d2022-04-29 05:50:46 +02002068 vnfd = self.db.get_one(
2069 "vnfds_revisions",
2070 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2071 )
2072 kdu = get_kdu(vnfd, kdu_name)
2073 kdu_services = [
2074 service["name"] for service in get_kdu_services(kdu)
2075 ]
2076 exposed_services = []
2077 for service in services:
2078 if any(s in service["name"] for s in kdu_services):
2079 exposed_services.append(service)
2080 await self.vca_map[vca_type].exec_primitive(
2081 ee_id=ee_id,
2082 primitive_name="config",
2083 params_dict={
2084 "osm-config": json.dumps(
2085 OsmConfigBuilder(
2086 k8s={"services": exposed_services}
2087 ).build()
2088 )
2089 },
2090 vca_id=vca_id,
2091 )
Pedro Escaleira1e9c3e32022-05-30 15:37:01 +01002092
2093 # This verification is needed in order to avoid trying to add a public key
2094 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2095 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2096 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2097 # or it is a KNF)
2098 elif db_vnfr.get('vdur'):
garciadeblas5697b8b2021-03-24 09:17:02 +01002099 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2100 logging_text,
2101 nsr_id,
2102 vnfr_id,
2103 vdu_id,
2104 vdu_index,
2105 user=user,
2106 pub_key=pub_key,
2107 )
David Garcia78b6e6d2022-04-29 05:50:46 +02002108
garciadeblas5697b8b2021-03-24 09:17:02 +01002109 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
quilesj7e13aeb2019-10-08 13:34:55 +02002110
tiernoa5088192019-11-26 16:12:53 +00002111 # store rw_mgmt_ip in deploy params for later replacement
quilesj7e13aeb2019-10-08 13:34:55 +02002112 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
tiernod8323042019-08-09 11:32:23 +00002113
2114 # n2vc_redesign STEP 6 Execute initial config primitive
garciadeblas5697b8b2021-03-24 09:17:02 +01002115 step = "execute initial config primitive"
quilesj3655ae02019-12-12 16:08:35 +00002116
2117 # wait for dependent primitives execution (NS -> VNF -> VDU)
tierno5ee02052019-12-05 19:55:02 +00002118 if initial_config_primitive_list:
2119 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
quilesj3655ae02019-12-12 16:08:35 +00002120
2121 # stage, in function of element type: vdu, kdu, vnf or ns
2122 my_vca = vca_deployed_list[vca_index]
2123 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2124 # VDU or KDU
garciadeblas5697b8b2021-03-24 09:17:02 +01002125 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
quilesj3655ae02019-12-12 16:08:35 +00002126 elif my_vca.get("member-vnf-index"):
2127 # VNF
garciadeblas5697b8b2021-03-24 09:17:02 +01002128 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
quilesj3655ae02019-12-12 16:08:35 +00002129 else:
2130 # NS
garciadeblas5697b8b2021-03-24 09:17:02 +01002131 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
quilesj3655ae02019-12-12 16:08:35 +00002132
tiernoc231a872020-01-21 08:49:05 +00002133 self._write_configuration_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01002134 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
quilesj3655ae02019-12-12 16:08:35 +00002135 )
2136
garciadeblas5697b8b2021-03-24 09:17:02 +01002137 self._write_op_status(op_id=nslcmop_id, stage=stage)
quilesj3655ae02019-12-12 16:08:35 +00002138
tiernoe876f672020-02-13 14:34:48 +00002139 check_if_terminated_needed = True
tiernod8323042019-08-09 11:32:23 +00002140 for initial_config_primitive in initial_config_primitive_list:
tiernoda6fb102019-11-23 00:36:52 +00002141 # adding information on the vca_deployed if it is a NS execution environment
2142 if not vca_deployed["member-vnf-index"]:
garciadeblas5697b8b2021-03-24 09:17:02 +01002143 deploy_params["ns_config_info"] = json.dumps(
2144 self._get_ns_config_info(nsr_id)
2145 )
tiernod8323042019-08-09 11:32:23 +00002146 # TODO check if already done
garciadeblas5697b8b2021-03-24 09:17:02 +01002147 primitive_params_ = self._map_primitive_params(
2148 initial_config_primitive, {}, deploy_params
2149 )
tierno3bedc9b2019-11-27 15:46:57 +00002150
garciadeblas5697b8b2021-03-24 09:17:02 +01002151 step = "execute primitive '{}' params '{}'".format(
2152 initial_config_primitive["name"], primitive_params_
2153 )
tiernod8323042019-08-09 11:32:23 +00002154 self.logger.debug(logging_text + step)
tierno588547c2020-07-01 15:30:20 +00002155 await self.vca_map[vca_type].exec_primitive(
quilesj7e13aeb2019-10-08 13:34:55 +02002156 ee_id=ee_id,
2157 primitive_name=initial_config_primitive["name"],
2158 params_dict=primitive_params_,
David Garciac1fe90a2021-03-31 19:12:02 +02002159 db_dict=db_dict,
2160 vca_id=vca_id,
aktas98488ed2021-07-29 17:42:49 +03002161 vca_type=vca_type,
quilesj7e13aeb2019-10-08 13:34:55 +02002162 )
tiernoe876f672020-02-13 14:34:48 +00002163 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2164 if check_if_terminated_needed:
garciadeblas5697b8b2021-03-24 09:17:02 +01002165 if config_descriptor.get("terminate-config-primitive"):
2166 self.update_db_2(
2167 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2168 )
tiernoe876f672020-02-13 14:34:48 +00002169 check_if_terminated_needed = False
quilesj3655ae02019-12-12 16:08:35 +00002170
tiernod8323042019-08-09 11:32:23 +00002171 # TODO register in database that primitive is done
quilesj7e13aeb2019-10-08 13:34:55 +02002172
tiernob996d942020-07-03 14:52:28 +00002173 # STEP 7 Configure metrics
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002174 if vca_type == "helm" or vca_type == "helm-v3":
bravof73bac502021-05-11 07:38:47 -04002175 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
tiernob996d942020-07-03 14:52:28 +00002176 ee_id=ee_id,
2177 artifact_path=artifact_path,
2178 ee_config_descriptor=ee_config_descriptor,
2179 vnfr_id=vnfr_id,
2180 nsr_id=nsr_id,
2181 target_ip=rw_mgmt_ip,
2182 )
2183 if prometheus_jobs:
garciadeblas5697b8b2021-03-24 09:17:02 +01002184 self.update_db_2(
2185 "nsrs",
2186 nsr_id,
2187 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2188 )
tiernob996d942020-07-03 14:52:28 +00002189
bravof73bac502021-05-11 07:38:47 -04002190 for job in prometheus_jobs:
2191 self.db.set_one(
2192 "prometheus_jobs",
aticig15db6142022-01-24 12:51:26 +03002193 {"job_name": job["job_name"]},
bravof73bac502021-05-11 07:38:47 -04002194 job,
2195 upsert=True,
aticig15db6142022-01-24 12:51:26 +03002196 fail_on_empty=False,
bravof73bac502021-05-11 07:38:47 -04002197 )
2198
quilesj7e13aeb2019-10-08 13:34:55 +02002199 step = "instantiated at VCA"
2200 self.logger.debug(logging_text + step)
2201
tiernoc231a872020-01-21 08:49:05 +00002202 self._write_configuration_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01002203 nsr_id=nsr_id, vca_index=vca_index, status="READY"
quilesj3655ae02019-12-12 16:08:35 +00002204 )
2205
tiernod8323042019-08-09 11:32:23 +00002206 except Exception as e: # TODO not use Exception but N2VC exception
quilesj3655ae02019-12-12 16:08:35 +00002207 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
garciadeblas5697b8b2021-03-24 09:17:02 +01002208 if not isinstance(
2209 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2210 ):
2211 self.logger.error(
2212 "Exception while {} : {}".format(step, e), exc_info=True
2213 )
tiernoc231a872020-01-21 08:49:05 +00002214 self._write_configuration_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01002215 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
quilesj3655ae02019-12-12 16:08:35 +00002216 )
tiernoe876f672020-02-13 14:34:48 +00002217 raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00002218
garciadeblas5697b8b2021-03-24 09:17:02 +01002219 def _write_ns_status(
2220 self,
2221 nsr_id: str,
2222 ns_state: str,
2223 current_operation: str,
2224 current_operation_id: str,
2225 error_description: str = None,
2226 error_detail: str = None,
2227 other_update: dict = None,
2228 ):
tiernoe876f672020-02-13 14:34:48 +00002229 """
2230 Update db_nsr fields.
2231 :param nsr_id:
2232 :param ns_state:
2233 :param current_operation:
2234 :param current_operation_id:
2235 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00002236 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00002237 :param other_update: Other required changes at database if provided, will be cleared
2238 :return:
2239 """
quilesj4cda56b2019-12-05 10:02:20 +00002240 try:
tiernoe876f672020-02-13 14:34:48 +00002241 db_dict = other_update or {}
garciadeblas5697b8b2021-03-24 09:17:02 +01002242 db_dict[
2243 "_admin.nslcmop"
2244 ] = current_operation_id # for backward compatibility
tiernoe876f672020-02-13 14:34:48 +00002245 db_dict["_admin.current-operation"] = current_operation_id
garciadeblas5697b8b2021-03-24 09:17:02 +01002246 db_dict["_admin.operation-type"] = (
2247 current_operation if current_operation != "IDLE" else None
2248 )
quilesj4cda56b2019-12-05 10:02:20 +00002249 db_dict["currentOperation"] = current_operation
2250 db_dict["currentOperationID"] = current_operation_id
2251 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00002252 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00002253
2254 if ns_state:
2255 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00002256 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00002257 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002258 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
quilesj3655ae02019-12-12 16:08:35 +00002259
garciadeblas5697b8b2021-03-24 09:17:02 +01002260 def _write_op_status(
2261 self,
2262 op_id: str,
2263 stage: list = None,
2264 error_message: str = None,
2265 queuePosition: int = 0,
2266 operation_state: str = None,
2267 other_update: dict = None,
2268 ):
quilesj3655ae02019-12-12 16:08:35 +00002269 try:
tiernoe876f672020-02-13 14:34:48 +00002270 db_dict = other_update or {}
garciadeblas5697b8b2021-03-24 09:17:02 +01002271 db_dict["queuePosition"] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00002272 if isinstance(stage, list):
garciadeblas5697b8b2021-03-24 09:17:02 +01002273 db_dict["stage"] = stage[0]
2274 db_dict["detailed-status"] = " ".join(stage)
tiernoe876f672020-02-13 14:34:48 +00002275 elif stage is not None:
garciadeblas5697b8b2021-03-24 09:17:02 +01002276 db_dict["stage"] = str(stage)
tiernoe876f672020-02-13 14:34:48 +00002277
2278 if error_message is not None:
garciadeblas5697b8b2021-03-24 09:17:02 +01002279 db_dict["errorMessage"] = error_message
tiernoe876f672020-02-13 14:34:48 +00002280 if operation_state is not None:
garciadeblas5697b8b2021-03-24 09:17:02 +01002281 db_dict["operationState"] = operation_state
tiernoe876f672020-02-13 14:34:48 +00002282 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00002283 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00002284 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002285 self.logger.warn(
2286 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2287 )
quilesj3655ae02019-12-12 16:08:35 +00002288
tierno51183952020-04-03 15:48:18 +00002289 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00002290 try:
tierno51183952020-04-03 15:48:18 +00002291 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00002292 # configurationStatus
garciadeblas5697b8b2021-03-24 09:17:02 +01002293 config_status = db_nsr.get("configurationStatus")
quilesj3655ae02019-12-12 16:08:35 +00002294 if config_status:
garciadeblas5697b8b2021-03-24 09:17:02 +01002295 db_nsr_update = {
2296 "configurationStatus.{}.status".format(index): status
2297 for index, v in enumerate(config_status)
2298 if v
2299 }
quilesj3655ae02019-12-12 16:08:35 +00002300 # update status
tierno51183952020-04-03 15:48:18 +00002301 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00002302
tiernoe876f672020-02-13 14:34:48 +00002303 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002304 self.logger.warn(
2305 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2306 )
quilesj3655ae02019-12-12 16:08:35 +00002307
garciadeblas5697b8b2021-03-24 09:17:02 +01002308 def _write_configuration_status(
2309 self,
2310 nsr_id: str,
2311 vca_index: int,
2312 status: str = None,
2313 element_under_configuration: str = None,
2314 element_type: str = None,
2315 other_update: dict = None,
2316 ):
quilesj3655ae02019-12-12 16:08:35 +00002317
2318 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2319 # .format(vca_index, status))
2320
2321 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01002322 db_path = "configurationStatus.{}.".format(vca_index)
tierno51183952020-04-03 15:48:18 +00002323 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00002324 if status:
garciadeblas5697b8b2021-03-24 09:17:02 +01002325 db_dict[db_path + "status"] = status
quilesj3655ae02019-12-12 16:08:35 +00002326 if element_under_configuration:
garciadeblas5697b8b2021-03-24 09:17:02 +01002327 db_dict[
2328 db_path + "elementUnderConfiguration"
2329 ] = element_under_configuration
quilesj3655ae02019-12-12 16:08:35 +00002330 if element_type:
garciadeblas5697b8b2021-03-24 09:17:02 +01002331 db_dict[db_path + "elementType"] = element_type
quilesj3655ae02019-12-12 16:08:35 +00002332 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00002333 except DbException as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002334 self.logger.warn(
2335 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2336 status, nsr_id, vca_index, e
2337 )
2338 )
quilesj4cda56b2019-12-05 10:02:20 +00002339
tierno38089af2020-04-16 07:56:58 +00002340 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2341 """
2342 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2343 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2344 Database is used because the result can be obtained from a different LCM worker in case of HA.
2345 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2346 :param db_nslcmop: database content of nslcmop
2347 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
tierno8790a3d2020-04-23 22:49:52 +00002348 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2349 computed 'vim-account-id'
tierno38089af2020-04-16 07:56:58 +00002350 """
tierno8790a3d2020-04-23 22:49:52 +00002351 modified = False
garciadeblas5697b8b2021-03-24 09:17:02 +01002352 nslcmop_id = db_nslcmop["_id"]
2353 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
magnussonle9198bb2020-01-21 13:00:51 +01002354 if placement_engine == "PLA":
garciadeblas5697b8b2021-03-24 09:17:02 +01002355 self.logger.debug(
2356 logging_text + "Invoke and wait for placement optimization"
2357 )
2358 await self.msg.aiowrite(
2359 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2360 )
magnussonle9198bb2020-01-21 13:00:51 +01002361 db_poll_interval = 5
tierno38089af2020-04-16 07:56:58 +00002362 wait = db_poll_interval * 10
magnussonle9198bb2020-01-21 13:00:51 +01002363 pla_result = None
2364 while not pla_result and wait >= 0:
2365 await asyncio.sleep(db_poll_interval)
2366 wait -= db_poll_interval
tierno38089af2020-04-16 07:56:58 +00002367 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
garciadeblas5697b8b2021-03-24 09:17:02 +01002368 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
magnussonle9198bb2020-01-21 13:00:51 +01002369
2370 if not pla_result:
garciadeblas5697b8b2021-03-24 09:17:02 +01002371 raise LcmException(
2372 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2373 )
magnussonle9198bb2020-01-21 13:00:51 +01002374
garciadeblas5697b8b2021-03-24 09:17:02 +01002375 for pla_vnf in pla_result["vnf"]:
2376 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2377 if not pla_vnf.get("vimAccountId") or not vnfr:
magnussonle9198bb2020-01-21 13:00:51 +01002378 continue
tierno8790a3d2020-04-23 22:49:52 +00002379 modified = True
garciadeblas5697b8b2021-03-24 09:17:02 +01002380 self.db.set_one(
2381 "vnfrs",
2382 {"_id": vnfr["_id"]},
2383 {"vim-account-id": pla_vnf["vimAccountId"]},
2384 )
tierno38089af2020-04-16 07:56:58 +00002385 # Modifies db_vnfrs
garciadeblas5697b8b2021-03-24 09:17:02 +01002386 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
tierno8790a3d2020-04-23 22:49:52 +00002387 return modified
magnussonle9198bb2020-01-21 13:00:51 +01002388
2389 def update_nsrs_with_pla_result(self, params):
2390 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01002391 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2392 self.update_db_2(
2393 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2394 )
magnussonle9198bb2020-01-21 13:00:51 +01002395 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002396 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
magnussonle9198bb2020-01-21 13:00:51 +01002397
tierno59d22d22018-09-25 18:10:19 +02002398 async def instantiate(self, nsr_id, nslcmop_id):
quilesj7e13aeb2019-10-08 13:34:55 +02002399 """
2400
2401 :param nsr_id: ns instance to deploy
2402 :param nslcmop_id: operation to run
2403 :return:
2404 """
kuused124bfe2019-06-18 12:09:24 +02002405
2406 # Try to lock HA task here
garciadeblas5697b8b2021-03-24 09:17:02 +01002407 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02002408 if not task_is_locked_by_me:
garciadeblas5697b8b2021-03-24 09:17:02 +01002409 self.logger.debug(
2410 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2411 )
kuused124bfe2019-06-18 12:09:24 +02002412 return
2413
tierno59d22d22018-09-25 18:10:19 +02002414 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2415 self.logger.debug(logging_text + "Enter")
quilesj7e13aeb2019-10-08 13:34:55 +02002416
tierno59d22d22018-09-25 18:10:19 +02002417 # get all needed from database
quilesj7e13aeb2019-10-08 13:34:55 +02002418
2419 # database nsrs record
tierno59d22d22018-09-25 18:10:19 +02002420 db_nsr = None
quilesj7e13aeb2019-10-08 13:34:55 +02002421
2422 # database nslcmops record
tierno59d22d22018-09-25 18:10:19 +02002423 db_nslcmop = None
quilesj7e13aeb2019-10-08 13:34:55 +02002424
2425 # update operation on nsrs
tiernoe876f672020-02-13 14:34:48 +00002426 db_nsr_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02002427 # update operation on nslcmops
tierno59d22d22018-09-25 18:10:19 +02002428 db_nslcmop_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02002429
tierno59d22d22018-09-25 18:10:19 +02002430 nslcmop_operation_state = None
garciadeblas5697b8b2021-03-24 09:17:02 +01002431 db_vnfrs = {} # vnf's info indexed by member-index
quilesj7e13aeb2019-10-08 13:34:55 +02002432 # n2vc_info = {}
tiernoe876f672020-02-13 14:34:48 +00002433 tasks_dict_info = {} # from task to info text
tierno59d22d22018-09-25 18:10:19 +02002434 exc = None
tiernoe876f672020-02-13 14:34:48 +00002435 error_list = []
garciadeblas5697b8b2021-03-24 09:17:02 +01002436 stage = [
2437 "Stage 1/5: preparation of the environment.",
2438 "Waiting for previous operations to terminate.",
2439 "",
2440 ]
tiernoe876f672020-02-13 14:34:48 +00002441 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02002442 try:
kuused124bfe2019-06-18 12:09:24 +02002443 # wait for any previous tasks in process
garciadeblas5697b8b2021-03-24 09:17:02 +01002444 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02002445
quilesj7e13aeb2019-10-08 13:34:55 +02002446 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
tiernob5203912020-08-11 11:20:13 +00002447 stage[1] = "Reading from database."
quilesj4cda56b2019-12-05 10:02:20 +00002448 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
tiernoe876f672020-02-13 14:34:48 +00002449 db_nsr_update["detailed-status"] = "creating"
2450 db_nsr_update["operational-status"] = "init"
quilesj4cda56b2019-12-05 10:02:20 +00002451 self._write_ns_status(
2452 nsr_id=nsr_id,
2453 ns_state="BUILDING",
2454 current_operation="INSTANTIATING",
tiernoe876f672020-02-13 14:34:48 +00002455 current_operation_id=nslcmop_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01002456 other_update=db_nsr_update,
tiernoe876f672020-02-13 14:34:48 +00002457 )
garciadeblas5697b8b2021-03-24 09:17:02 +01002458 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
quilesj4cda56b2019-12-05 10:02:20 +00002459
quilesj7e13aeb2019-10-08 13:34:55 +02002460 # read from db: operation
tiernob5203912020-08-11 11:20:13 +00002461 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
tierno59d22d22018-09-25 18:10:19 +02002462 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
Guillermo Calvino57c68152022-01-26 17:40:31 +01002463 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2464 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2465 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2466 )
tierno744303e2020-01-13 16:46:31 +00002467 ns_params = db_nslcmop.get("operationParams")
2468 if ns_params and ns_params.get("timeout_ns_deploy"):
2469 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2470 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01002471 timeout_ns_deploy = self.timeout.get(
2472 "ns_deploy", self.timeout_ns_deploy
2473 )
quilesj7e13aeb2019-10-08 13:34:55 +02002474
2475 # read from db: ns
tiernob5203912020-08-11 11:20:13 +00002476 stage[1] = "Getting nsr={} from db.".format(nsr_id)
garciadeblascd509f52021-11-23 10:04:12 +01002477 self.logger.debug(logging_text + stage[1])
tierno59d22d22018-09-25 18:10:19 +02002478 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernob5203912020-08-11 11:20:13 +00002479 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
garciadeblascd509f52021-11-23 10:04:12 +01002480 self.logger.debug(logging_text + stage[1])
tiernod732fb82020-05-21 13:18:23 +00002481 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
bravof021e70d2021-03-11 12:03:30 -03002482 self.fs.sync(db_nsr["nsd-id"])
tiernod732fb82020-05-21 13:18:23 +00002483 db_nsr["nsd"] = nsd
tiernod8323042019-08-09 11:32:23 +00002484 # nsr_name = db_nsr["name"] # TODO short-name??
tierno47e86b52018-10-10 14:05:55 +02002485
quilesj7e13aeb2019-10-08 13:34:55 +02002486 # read from db: vnf's of this ns
tiernob5203912020-08-11 11:20:13 +00002487 stage[1] = "Getting vnfrs from db."
tiernoe876f672020-02-13 14:34:48 +00002488 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02002489 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
tierno27246d82018-09-27 15:59:09 +02002490
quilesj7e13aeb2019-10-08 13:34:55 +02002491 # read from db: vnfd's for every vnf
garciadeblas5697b8b2021-03-24 09:17:02 +01002492 db_vnfds = [] # every vnfd data
quilesj7e13aeb2019-10-08 13:34:55 +02002493
2494 # for each vnf in ns, read vnfd
tierno27246d82018-09-27 15:59:09 +02002495 for vnfr in db_vnfrs_list:
Guillermo Calvino57c68152022-01-26 17:40:31 +01002496 if vnfr.get("kdur"):
2497 kdur_list = []
2498 for kdur in vnfr["kdur"]:
2499 if kdur.get("additionalParams"):
Pedro Escaleirab9a7c4d2022-03-31 00:08:05 +01002500 kdur["additionalParams"] = json.loads(
2501 kdur["additionalParams"]
2502 )
Guillermo Calvino57c68152022-01-26 17:40:31 +01002503 kdur_list.append(kdur)
2504 vnfr["kdur"] = kdur_list
2505
bravof922c4172020-11-24 21:21:43 -03002506 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2507 vnfd_id = vnfr["vnfd-id"]
2508 vnfd_ref = vnfr["vnfd-ref"]
bravof021e70d2021-03-11 12:03:30 -03002509 self.fs.sync(vnfd_id)
lloretgalleg6d488782020-07-22 10:13:46 +00002510
quilesj7e13aeb2019-10-08 13:34:55 +02002511 # if we haven't this vnfd, read it from db
tierno27246d82018-09-27 15:59:09 +02002512 if vnfd_id not in db_vnfds:
quilesj63f90042020-01-17 09:53:55 +00002513 # read from db
garciadeblas5697b8b2021-03-24 09:17:02 +01002514 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2515 vnfd_id, vnfd_ref
2516 )
tiernoe876f672020-02-13 14:34:48 +00002517 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02002518 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
tierno27246d82018-09-27 15:59:09 +02002519
quilesj7e13aeb2019-10-08 13:34:55 +02002520 # store vnfd
David Garciad41dbd62020-12-10 12:52:52 +01002521 db_vnfds.append(vnfd)
quilesj7e13aeb2019-10-08 13:34:55 +02002522
2523 # Get or generates the _admin.deployed.VCA list
tiernoe4f7e6c2018-11-27 14:55:30 +00002524 vca_deployed_list = None
2525 if db_nsr["_admin"].get("deployed"):
2526 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2527 if vca_deployed_list is None:
2528 vca_deployed_list = []
quilesj3655ae02019-12-12 16:08:35 +00002529 configuration_status_list = []
tiernoe4f7e6c2018-11-27 14:55:30 +00002530 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
quilesj3655ae02019-12-12 16:08:35 +00002531 db_nsr_update["configurationStatus"] = configuration_status_list
quilesj7e13aeb2019-10-08 13:34:55 +02002532 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00002533 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00002534 elif isinstance(vca_deployed_list, dict):
2535 # maintain backward compatibility. Change a dict to list at database
2536 vca_deployed_list = list(vca_deployed_list.values())
2537 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00002538 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00002539
garciadeblas5697b8b2021-03-24 09:17:02 +01002540 if not isinstance(
2541 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2542 ):
tiernoa009e552019-01-30 16:45:44 +00002543 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2544 db_nsr_update["_admin.deployed.RO.vnfd"] = []
tierno59d22d22018-09-25 18:10:19 +02002545
tiernobaa51102018-12-14 13:16:18 +00002546 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2547 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2548 self.update_db_2("nsrs", nsr_id, db_nsr_update)
garciadeblas5697b8b2021-03-24 09:17:02 +01002549 self.db.set_list(
2550 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2551 )
quilesj3655ae02019-12-12 16:08:35 +00002552
2553 # n2vc_redesign STEP 2 Deploy Network Scenario
garciadeblas5697b8b2021-03-24 09:17:02 +01002554 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2555 self._write_op_status(op_id=nslcmop_id, stage=stage)
quilesj3655ae02019-12-12 16:08:35 +00002556
tiernob5203912020-08-11 11:20:13 +00002557 stage[1] = "Deploying KDUs."
tiernoe876f672020-02-13 14:34:48 +00002558 # self.logger.debug(logging_text + "Before deploy_kdus")
calvinosanch9f9c6f22019-11-04 13:37:39 +01002559 # Call to deploy_kdus in case exists the "vdu:kdu" param
tiernoe876f672020-02-13 14:34:48 +00002560 await self.deploy_kdus(
2561 logging_text=logging_text,
2562 nsr_id=nsr_id,
2563 nslcmop_id=nslcmop_id,
2564 db_vnfrs=db_vnfrs,
2565 db_vnfds=db_vnfds,
2566 task_instantiation_info=tasks_dict_info,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002567 )
tiernoe876f672020-02-13 14:34:48 +00002568
2569 stage[1] = "Getting VCA public key."
tiernod8323042019-08-09 11:32:23 +00002570 # n2vc_redesign STEP 1 Get VCA public ssh-key
2571 # feature 1429. Add n2vc public key to needed VMs
tierno3bedc9b2019-11-27 15:46:57 +00002572 n2vc_key = self.n2vc.get_public_key()
tiernoa5088192019-11-26 16:12:53 +00002573 n2vc_key_list = [n2vc_key]
2574 if self.vca_config.get("public_key"):
2575 n2vc_key_list.append(self.vca_config["public_key"])
tierno98ad6ea2019-05-30 17:16:28 +00002576
tiernoe876f672020-02-13 14:34:48 +00002577 stage[1] = "Deploying NS at VIM."
tiernod8323042019-08-09 11:32:23 +00002578 task_ro = asyncio.ensure_future(
quilesj7e13aeb2019-10-08 13:34:55 +02002579 self.instantiate_RO(
2580 logging_text=logging_text,
2581 nsr_id=nsr_id,
2582 nsd=nsd,
2583 db_nsr=db_nsr,
2584 db_nslcmop=db_nslcmop,
2585 db_vnfrs=db_vnfrs,
bravof922c4172020-11-24 21:21:43 -03002586 db_vnfds=db_vnfds,
tiernoe876f672020-02-13 14:34:48 +00002587 n2vc_key_list=n2vc_key_list,
garciadeblas5697b8b2021-03-24 09:17:02 +01002588 stage=stage,
tierno98ad6ea2019-05-30 17:16:28 +00002589 )
tiernod8323042019-08-09 11:32:23 +00002590 )
2591 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
tiernoa2143262020-03-27 16:20:40 +00002592 tasks_dict_info[task_ro] = "Deploying at VIM"
tierno98ad6ea2019-05-30 17:16:28 +00002593
tiernod8323042019-08-09 11:32:23 +00002594 # n2vc_redesign STEP 3 to 6 Deploy N2VC
tiernoe876f672020-02-13 14:34:48 +00002595 stage[1] = "Deploying Execution Environments."
2596 self.logger.debug(logging_text + stage[1])
tierno98ad6ea2019-05-30 17:16:28 +00002597
tiernod8323042019-08-09 11:32:23 +00002598 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
bravof922c4172020-11-24 21:21:43 -03002599 for vnf_profile in get_vnf_profiles(nsd):
2600 vnfd_id = vnf_profile["vnfd-id"]
2601 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2602 member_vnf_index = str(vnf_profile["id"])
tiernod8323042019-08-09 11:32:23 +00002603 db_vnfr = db_vnfrs[member_vnf_index]
2604 base_folder = vnfd["_admin"]["storage"]
2605 vdu_id = None
2606 vdu_index = 0
tierno98ad6ea2019-05-30 17:16:28 +00002607 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002608 kdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002609
tierno8a518872018-12-21 13:42:14 +00002610 # Get additional parameters
bravof922c4172020-11-24 21:21:43 -03002611 deploy_params = {"OSM": get_osm_params(db_vnfr)}
tiernod8323042019-08-09 11:32:23 +00002612 if db_vnfr.get("additionalParamsForVnf"):
garciadeblas5697b8b2021-03-24 09:17:02 +01002613 deploy_params.update(
2614 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2615 )
tierno8a518872018-12-21 13:42:14 +00002616
bravofe5a31bc2021-02-17 19:09:12 -03002617 descriptor_config = get_configuration(vnfd, vnfd["id"])
tierno588547c2020-07-01 15:30:20 +00002618 if descriptor_config:
quilesj7e13aeb2019-10-08 13:34:55 +02002619 self._deploy_n2vc(
garciadeblas5697b8b2021-03-24 09:17:02 +01002620 logging_text=logging_text
2621 + "member_vnf_index={} ".format(member_vnf_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002622 db_nsr=db_nsr,
2623 db_vnfr=db_vnfr,
2624 nslcmop_id=nslcmop_id,
2625 nsr_id=nsr_id,
2626 nsi_id=nsi_id,
2627 vnfd_id=vnfd_id,
2628 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002629 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002630 member_vnf_index=member_vnf_index,
2631 vdu_index=vdu_index,
2632 vdu_name=vdu_name,
2633 deploy_params=deploy_params,
2634 descriptor_config=descriptor_config,
2635 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00002636 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002637 stage=stage,
quilesj7e13aeb2019-10-08 13:34:55 +02002638 )
tierno59d22d22018-09-25 18:10:19 +02002639
2640 # Deploy charms for each VDU that supports one.
bravof922c4172020-11-24 21:21:43 -03002641 for vdud in get_vdu_list(vnfd):
tiernod8323042019-08-09 11:32:23 +00002642 vdu_id = vdud["id"]
bravofe5a31bc2021-02-17 19:09:12 -03002643 descriptor_config = get_configuration(vnfd, vdu_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01002644 vdur = find_in_list(
2645 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2646 )
bravof922c4172020-11-24 21:21:43 -03002647
tierno626e0152019-11-29 14:16:16 +00002648 if vdur.get("additionalParams"):
bravof922c4172020-11-24 21:21:43 -03002649 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
tierno626e0152019-11-29 14:16:16 +00002650 else:
2651 deploy_params_vdu = deploy_params
garciadeblas5697b8b2021-03-24 09:17:02 +01002652 deploy_params_vdu["OSM"] = get_osm_params(
2653 db_vnfr, vdu_id, vdu_count_index=0
2654 )
endika76ba9232021-06-21 18:55:07 +02002655 vdud_count = get_number_of_instances(vnfd, vdu_id)
bravof922c4172020-11-24 21:21:43 -03002656
2657 self.logger.debug("VDUD > {}".format(vdud))
garciadeblas5697b8b2021-03-24 09:17:02 +01002658 self.logger.debug(
2659 "Descriptor config > {}".format(descriptor_config)
2660 )
tierno588547c2020-07-01 15:30:20 +00002661 if descriptor_config:
tiernod8323042019-08-09 11:32:23 +00002662 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002663 kdu_name = None
bravof922c4172020-11-24 21:21:43 -03002664 for vdu_index in range(vdud_count):
tiernod8323042019-08-09 11:32:23 +00002665 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
quilesj7e13aeb2019-10-08 13:34:55 +02002666 self._deploy_n2vc(
garciadeblas5697b8b2021-03-24 09:17:02 +01002667 logging_text=logging_text
2668 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2669 member_vnf_index, vdu_id, vdu_index
2670 ),
quilesj7e13aeb2019-10-08 13:34:55 +02002671 db_nsr=db_nsr,
2672 db_vnfr=db_vnfr,
2673 nslcmop_id=nslcmop_id,
2674 nsr_id=nsr_id,
2675 nsi_id=nsi_id,
2676 vnfd_id=vnfd_id,
2677 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002678 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002679 member_vnf_index=member_vnf_index,
2680 vdu_index=vdu_index,
2681 vdu_name=vdu_name,
tierno626e0152019-11-29 14:16:16 +00002682 deploy_params=deploy_params_vdu,
quilesj7e13aeb2019-10-08 13:34:55 +02002683 descriptor_config=descriptor_config,
2684 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002685 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002686 stage=stage,
quilesj7e13aeb2019-10-08 13:34:55 +02002687 )
bravof922c4172020-11-24 21:21:43 -03002688 for kdud in get_kdu_list(vnfd):
calvinosanch9f9c6f22019-11-04 13:37:39 +01002689 kdu_name = kdud["name"]
bravofe5a31bc2021-02-17 19:09:12 -03002690 descriptor_config = get_configuration(vnfd, kdu_name)
tierno588547c2020-07-01 15:30:20 +00002691 if descriptor_config:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002692 vdu_id = None
2693 vdu_index = 0
2694 vdu_name = None
garciadeblas5697b8b2021-03-24 09:17:02 +01002695 kdur = next(
2696 x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
2697 )
bravof922c4172020-11-24 21:21:43 -03002698 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
tierno72ef84f2020-10-06 08:22:07 +00002699 if kdur.get("additionalParams"):
Pedro Escaleirab9a7c4d2022-03-31 00:08:05 +01002700 deploy_params_kdu.update(
2701 parse_yaml_strings(kdur["additionalParams"].copy())
garciadeblas5697b8b2021-03-24 09:17:02 +01002702 )
tierno59d22d22018-09-25 18:10:19 +02002703
calvinosanch9f9c6f22019-11-04 13:37:39 +01002704 self._deploy_n2vc(
2705 logging_text=logging_text,
2706 db_nsr=db_nsr,
2707 db_vnfr=db_vnfr,
2708 nslcmop_id=nslcmop_id,
2709 nsr_id=nsr_id,
2710 nsi_id=nsi_id,
2711 vnfd_id=vnfd_id,
2712 vdu_id=vdu_id,
2713 kdu_name=kdu_name,
2714 member_vnf_index=member_vnf_index,
2715 vdu_index=vdu_index,
2716 vdu_name=vdu_name,
tierno72ef84f2020-10-06 08:22:07 +00002717 deploy_params=deploy_params_kdu,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002718 descriptor_config=descriptor_config,
2719 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002720 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002721 stage=stage,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002722 )
tierno59d22d22018-09-25 18:10:19 +02002723
tierno1b633412019-02-25 16:48:23 +00002724 # Check if this NS has a charm configuration
tiernod8323042019-08-09 11:32:23 +00002725 descriptor_config = nsd.get("ns-configuration")
2726 if descriptor_config and descriptor_config.get("juju"):
2727 vnfd_id = None
2728 db_vnfr = None
2729 member_vnf_index = None
2730 vdu_id = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002731 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002732 vdu_index = 0
2733 vdu_name = None
tierno1b633412019-02-25 16:48:23 +00002734
tiernod8323042019-08-09 11:32:23 +00002735 # Get additional parameters
David Garcia40603572020-12-10 20:10:53 +01002736 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
tiernod8323042019-08-09 11:32:23 +00002737 if db_nsr.get("additionalParamsForNs"):
garciadeblas5697b8b2021-03-24 09:17:02 +01002738 deploy_params.update(
2739 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2740 )
tiernod8323042019-08-09 11:32:23 +00002741 base_folder = nsd["_admin"]["storage"]
quilesj7e13aeb2019-10-08 13:34:55 +02002742 self._deploy_n2vc(
2743 logging_text=logging_text,
2744 db_nsr=db_nsr,
2745 db_vnfr=db_vnfr,
2746 nslcmop_id=nslcmop_id,
2747 nsr_id=nsr_id,
2748 nsi_id=nsi_id,
2749 vnfd_id=vnfd_id,
2750 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002751 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002752 member_vnf_index=member_vnf_index,
2753 vdu_index=vdu_index,
2754 vdu_name=vdu_name,
2755 deploy_params=deploy_params,
2756 descriptor_config=descriptor_config,
2757 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002758 task_instantiation_info=tasks_dict_info,
garciadeblas5697b8b2021-03-24 09:17:02 +01002759 stage=stage,
quilesj7e13aeb2019-10-08 13:34:55 +02002760 )
tierno1b633412019-02-25 16:48:23 +00002761
tiernoe876f672020-02-13 14:34:48 +00002762 # rest of staff will be done at finally
tierno1b633412019-02-25 16:48:23 +00002763
garciadeblas5697b8b2021-03-24 09:17:02 +01002764 except (
2765 ROclient.ROClientException,
2766 DbException,
2767 LcmException,
2768 N2VCException,
2769 ) as e:
2770 self.logger.error(
2771 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2772 )
tierno59d22d22018-09-25 18:10:19 +02002773 exc = e
2774 except asyncio.CancelledError:
garciadeblas5697b8b2021-03-24 09:17:02 +01002775 self.logger.error(
2776 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2777 )
tierno59d22d22018-09-25 18:10:19 +02002778 exc = "Operation was cancelled"
2779 except Exception as e:
2780 exc = traceback.format_exc()
garciadeblas5697b8b2021-03-24 09:17:02 +01002781 self.logger.critical(
2782 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2783 exc_info=True,
2784 )
tierno59d22d22018-09-25 18:10:19 +02002785 finally:
2786 if exc:
tiernoe876f672020-02-13 14:34:48 +00002787 error_list.append(str(exc))
tiernobaa51102018-12-14 13:16:18 +00002788 try:
tiernoe876f672020-02-13 14:34:48 +00002789 # wait for pending tasks
2790 if tasks_dict_info:
2791 stage[1] = "Waiting for instantiate pending tasks."
2792 self.logger.debug(logging_text + stage[1])
garciadeblas5697b8b2021-03-24 09:17:02 +01002793 error_list += await self._wait_for_tasks(
2794 logging_text,
2795 tasks_dict_info,
2796 timeout_ns_deploy,
2797 stage,
2798 nslcmop_id,
2799 nsr_id=nsr_id,
2800 )
tiernoe876f672020-02-13 14:34:48 +00002801 stage[1] = stage[2] = ""
2802 except asyncio.CancelledError:
2803 error_list.append("Cancelled")
2804 # TODO cancel all tasks
2805 except Exception as exc:
2806 error_list.append(str(exc))
quilesj4cda56b2019-12-05 10:02:20 +00002807
tiernoe876f672020-02-13 14:34:48 +00002808 # update operation-status
2809 db_nsr_update["operational-status"] = "running"
2810 # let's begin with VCA 'configured' status (later we can change it)
2811 db_nsr_update["config-status"] = "configured"
2812 for task, task_name in tasks_dict_info.items():
2813 if not task.done() or task.cancelled() or task.exception():
2814 if task_name.startswith(self.task_name_deploy_vca):
2815 # A N2VC task is pending
2816 db_nsr_update["config-status"] = "failed"
quilesj4cda56b2019-12-05 10:02:20 +00002817 else:
tiernoe876f672020-02-13 14:34:48 +00002818 # RO or KDU task is pending
2819 db_nsr_update["operational-status"] = "failed"
quilesj3655ae02019-12-12 16:08:35 +00002820
tiernoe876f672020-02-13 14:34:48 +00002821 # update status at database
2822 if error_list:
tiernoa2143262020-03-27 16:20:40 +00002823 error_detail = ". ".join(error_list)
tiernoe876f672020-02-13 14:34:48 +00002824 self.logger.error(logging_text + error_detail)
garciadeblas5697b8b2021-03-24 09:17:02 +01002825 error_description_nslcmop = "{} Detail: {}".format(
2826 stage[0], error_detail
2827 )
2828 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2829 nslcmop_id, stage[0]
2830 )
quilesj3655ae02019-12-12 16:08:35 +00002831
garciadeblas5697b8b2021-03-24 09:17:02 +01002832 db_nsr_update["detailed-status"] = (
2833 error_description_nsr + " Detail: " + error_detail
2834 )
tiernoe876f672020-02-13 14:34:48 +00002835 db_nslcmop_update["detailed-status"] = error_detail
2836 nslcmop_operation_state = "FAILED"
2837 ns_state = "BROKEN"
2838 else:
tiernoa2143262020-03-27 16:20:40 +00002839 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00002840 error_description_nsr = error_description_nslcmop = None
2841 ns_state = "READY"
2842 db_nsr_update["detailed-status"] = "Done"
2843 db_nslcmop_update["detailed-status"] = "Done"
2844 nslcmop_operation_state = "COMPLETED"
quilesj4cda56b2019-12-05 10:02:20 +00002845
tiernoe876f672020-02-13 14:34:48 +00002846 if db_nsr:
2847 self._write_ns_status(
2848 nsr_id=nsr_id,
2849 ns_state=ns_state,
2850 current_operation="IDLE",
2851 current_operation_id=None,
2852 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00002853 error_detail=error_detail,
garciadeblas5697b8b2021-03-24 09:17:02 +01002854 other_update=db_nsr_update,
tiernoe876f672020-02-13 14:34:48 +00002855 )
tiernoa17d4f42020-04-28 09:59:23 +00002856 self._write_op_status(
2857 op_id=nslcmop_id,
2858 stage="",
2859 error_message=error_description_nslcmop,
2860 operation_state=nslcmop_operation_state,
2861 other_update=db_nslcmop_update,
2862 )
quilesj3655ae02019-12-12 16:08:35 +00002863
tierno59d22d22018-09-25 18:10:19 +02002864 if nslcmop_operation_state:
2865 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01002866 await self.msg.aiowrite(
2867 "ns",
2868 "instantiated",
2869 {
2870 "nsr_id": nsr_id,
2871 "nslcmop_id": nslcmop_id,
2872 "operationState": nslcmop_operation_state,
2873 },
2874 loop=self.loop,
2875 )
tierno59d22d22018-09-25 18:10:19 +02002876 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01002877 self.logger.error(
2878 logging_text + "kafka_write notification Exception {}".format(e)
2879 )
tierno59d22d22018-09-25 18:10:19 +02002880
2881 self.logger.debug(logging_text + "Exit")
2882 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2883
David Garciab4ebcd02021-10-28 02:00:43 +02002884 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2885 if vnfd_id not in cached_vnfds:
2886 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2887 return cached_vnfds[vnfd_id]
2888
2889 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2890 if vnf_profile_id not in cached_vnfrs:
2891 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2892 "vnfrs",
2893 {
2894 "member-vnf-index-ref": vnf_profile_id,
2895 "nsr-id-ref": nsr_id,
2896 },
2897 )
2898 return cached_vnfrs[vnf_profile_id]
2899
2900 def _is_deployed_vca_in_relation(
2901 self, vca: DeployedVCA, relation: Relation
2902 ) -> bool:
2903 found = False
2904 for endpoint in (relation.provider, relation.requirer):
2905 if endpoint["kdu-resource-profile-id"]:
2906 continue
2907 found = (
2908 vca.vnf_profile_id == endpoint.vnf_profile_id
2909 and vca.vdu_profile_id == endpoint.vdu_profile_id
2910 and vca.execution_environment_ref == endpoint.execution_environment_ref
2911 )
2912 if found:
2913 break
2914 return found
2915
2916 def _update_ee_relation_data_with_implicit_data(
2917 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2918 ):
2919 ee_relation_data = safe_get_ee_relation(
2920 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2921 )
2922 ee_relation_level = EELevel.get_level(ee_relation_data)
2923 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2924 "execution-environment-ref"
2925 ]:
2926 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2927 vnfd_id = vnf_profile["vnfd-id"]
2928 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2929 entity_id = (
2930 vnfd_id
2931 if ee_relation_level == EELevel.VNF
2932 else ee_relation_data["vdu-profile-id"]
2933 )
2934 ee = get_juju_ee_ref(db_vnfd, entity_id)
2935 if not ee:
2936 raise Exception(
2937 f"not execution environments found for ee_relation {ee_relation_data}"
2938 )
2939 ee_relation_data["execution-environment-ref"] = ee["id"]
2940 return ee_relation_data
2941
2942 def _get_ns_relations(
2943 self,
2944 nsr_id: str,
2945 nsd: Dict[str, Any],
2946 vca: DeployedVCA,
2947 cached_vnfds: Dict[str, Any],
David Garcia444bf962021-11-11 16:35:26 +01002948 ) -> List[Relation]:
David Garciab4ebcd02021-10-28 02:00:43 +02002949 relations = []
2950 db_ns_relations = get_ns_configuration_relation_list(nsd)
2951 for r in db_ns_relations:
David Garcia444bf962021-11-11 16:35:26 +01002952 provider_dict = None
2953 requirer_dict = None
2954 if all(key in r for key in ("provider", "requirer")):
2955 provider_dict = r["provider"]
2956 requirer_dict = r["requirer"]
2957 elif "entities" in r:
2958 provider_id = r["entities"][0]["id"]
2959 provider_dict = {
2960 "nsr-id": nsr_id,
2961 "endpoint": r["entities"][0]["endpoint"],
2962 }
2963 if provider_id != nsd["id"]:
2964 provider_dict["vnf-profile-id"] = provider_id
2965 requirer_id = r["entities"][1]["id"]
2966 requirer_dict = {
2967 "nsr-id": nsr_id,
2968 "endpoint": r["entities"][1]["endpoint"],
2969 }
2970 if requirer_id != nsd["id"]:
2971 requirer_dict["vnf-profile-id"] = requirer_id
2972 else:
aticig15db6142022-01-24 12:51:26 +03002973 raise Exception(
2974 "provider/requirer or entities must be included in the relation."
2975 )
David Garciab4ebcd02021-10-28 02:00:43 +02002976 relation_provider = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01002977 nsr_id, nsd, provider_dict, cached_vnfds
David Garciab4ebcd02021-10-28 02:00:43 +02002978 )
2979 relation_requirer = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01002980 nsr_id, nsd, requirer_dict, cached_vnfds
David Garciab4ebcd02021-10-28 02:00:43 +02002981 )
2982 provider = EERelation(relation_provider)
2983 requirer = EERelation(relation_requirer)
2984 relation = Relation(r["name"], provider, requirer)
2985 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2986 if vca_in_relation:
2987 relations.append(relation)
2988 return relations
2989
2990 def _get_vnf_relations(
2991 self,
2992 nsr_id: str,
2993 nsd: Dict[str, Any],
2994 vca: DeployedVCA,
2995 cached_vnfds: Dict[str, Any],
David Garcia444bf962021-11-11 16:35:26 +01002996 ) -> List[Relation]:
David Garciab4ebcd02021-10-28 02:00:43 +02002997 relations = []
2998 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2999 vnf_profile_id = vnf_profile["id"]
3000 vnfd_id = vnf_profile["vnfd-id"]
3001 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3002 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3003 for r in db_vnf_relations:
David Garcia444bf962021-11-11 16:35:26 +01003004 provider_dict = None
3005 requirer_dict = None
3006 if all(key in r for key in ("provider", "requirer")):
3007 provider_dict = r["provider"]
3008 requirer_dict = r["requirer"]
3009 elif "entities" in r:
3010 provider_id = r["entities"][0]["id"]
3011 provider_dict = {
3012 "nsr-id": nsr_id,
3013 "vnf-profile-id": vnf_profile_id,
3014 "endpoint": r["entities"][0]["endpoint"],
3015 }
3016 if provider_id != vnfd_id:
3017 provider_dict["vdu-profile-id"] = provider_id
3018 requirer_id = r["entities"][1]["id"]
3019 requirer_dict = {
3020 "nsr-id": nsr_id,
3021 "vnf-profile-id": vnf_profile_id,
3022 "endpoint": r["entities"][1]["endpoint"],
3023 }
3024 if requirer_id != vnfd_id:
3025 requirer_dict["vdu-profile-id"] = requirer_id
3026 else:
aticig15db6142022-01-24 12:51:26 +03003027 raise Exception(
3028 "provider/requirer or entities must be included in the relation."
3029 )
David Garciab4ebcd02021-10-28 02:00:43 +02003030 relation_provider = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01003031 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
David Garciab4ebcd02021-10-28 02:00:43 +02003032 )
3033 relation_requirer = self._update_ee_relation_data_with_implicit_data(
David Garcia444bf962021-11-11 16:35:26 +01003034 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
David Garciab4ebcd02021-10-28 02:00:43 +02003035 )
3036 provider = EERelation(relation_provider)
3037 requirer = EERelation(relation_requirer)
3038 relation = Relation(r["name"], provider, requirer)
3039 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3040 if vca_in_relation:
3041 relations.append(relation)
3042 return relations
3043
3044 def _get_kdu_resource_data(
3045 self,
3046 ee_relation: EERelation,
3047 db_nsr: Dict[str, Any],
3048 cached_vnfds: Dict[str, Any],
3049 ) -> DeployedK8sResource:
3050 nsd = get_nsd(db_nsr)
3051 vnf_profiles = get_vnf_profiles(nsd)
3052 vnfd_id = find_in_list(
3053 vnf_profiles,
3054 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3055 )["vnfd-id"]
3056 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3057 kdu_resource_profile = get_kdu_resource_profile(
3058 db_vnfd, ee_relation.kdu_resource_profile_id
3059 )
3060 kdu_name = kdu_resource_profile["kdu-name"]
3061 deployed_kdu, _ = get_deployed_kdu(
3062 db_nsr.get("_admin", ()).get("deployed", ()),
3063 kdu_name,
3064 ee_relation.vnf_profile_id,
3065 )
3066 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3067 return deployed_kdu
3068
3069 def _get_deployed_component(
3070 self,
3071 ee_relation: EERelation,
3072 db_nsr: Dict[str, Any],
3073 cached_vnfds: Dict[str, Any],
3074 ) -> DeployedComponent:
3075 nsr_id = db_nsr["_id"]
3076 deployed_component = None
3077 ee_level = EELevel.get_level(ee_relation)
3078 if ee_level == EELevel.NS:
3079 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3080 if vca:
3081 deployed_component = DeployedVCA(nsr_id, vca)
3082 elif ee_level == EELevel.VNF:
3083 vca = get_deployed_vca(
3084 db_nsr,
3085 {
3086 "vdu_id": None,
3087 "member-vnf-index": ee_relation.vnf_profile_id,
3088 "ee_descriptor_id": ee_relation.execution_environment_ref,
3089 },
3090 )
3091 if vca:
3092 deployed_component = DeployedVCA(nsr_id, vca)
3093 elif ee_level == EELevel.VDU:
3094 vca = get_deployed_vca(
3095 db_nsr,
3096 {
3097 "vdu_id": ee_relation.vdu_profile_id,
3098 "member-vnf-index": ee_relation.vnf_profile_id,
3099 "ee_descriptor_id": ee_relation.execution_environment_ref,
3100 },
3101 )
3102 if vca:
3103 deployed_component = DeployedVCA(nsr_id, vca)
3104 elif ee_level == EELevel.KDU:
3105 kdu_resource_data = self._get_kdu_resource_data(
3106 ee_relation, db_nsr, cached_vnfds
3107 )
3108 if kdu_resource_data:
3109 deployed_component = DeployedK8sResource(kdu_resource_data)
3110 return deployed_component
3111
3112 async def _add_relation(
3113 self,
3114 relation: Relation,
3115 vca_type: str,
3116 db_nsr: Dict[str, Any],
3117 cached_vnfds: Dict[str, Any],
3118 cached_vnfrs: Dict[str, Any],
3119 ) -> bool:
3120 deployed_provider = self._get_deployed_component(
3121 relation.provider, db_nsr, cached_vnfds
3122 )
3123 deployed_requirer = self._get_deployed_component(
3124 relation.requirer, db_nsr, cached_vnfds
3125 )
3126 if (
3127 deployed_provider
3128 and deployed_requirer
3129 and deployed_provider.config_sw_installed
3130 and deployed_requirer.config_sw_installed
3131 ):
3132 provider_db_vnfr = (
3133 self._get_vnfr(
3134 relation.provider.nsr_id,
3135 relation.provider.vnf_profile_id,
3136 cached_vnfrs,
3137 )
3138 if relation.provider.vnf_profile_id
3139 else None
3140 )
3141 requirer_db_vnfr = (
3142 self._get_vnfr(
3143 relation.requirer.nsr_id,
3144 relation.requirer.vnf_profile_id,
3145 cached_vnfrs,
3146 )
3147 if relation.requirer.vnf_profile_id
3148 else None
3149 )
3150 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3151 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3152 provider_relation_endpoint = RelationEndpoint(
3153 deployed_provider.ee_id,
3154 provider_vca_id,
3155 relation.provider.endpoint,
3156 )
3157 requirer_relation_endpoint = RelationEndpoint(
3158 deployed_requirer.ee_id,
3159 requirer_vca_id,
3160 relation.requirer.endpoint,
3161 )
3162 await self.vca_map[vca_type].add_relation(
3163 provider=provider_relation_endpoint,
3164 requirer=requirer_relation_endpoint,
3165 )
3166 # remove entry from relations list
3167 return True
3168 return False
3169
David Garciac1fe90a2021-03-31 19:12:02 +02003170 async def _add_vca_relations(
3171 self,
3172 logging_text,
3173 nsr_id,
David Garciab4ebcd02021-10-28 02:00:43 +02003174 vca_type: str,
David Garciac1fe90a2021-03-31 19:12:02 +02003175 vca_index: int,
3176 timeout: int = 3600,
David Garciac1fe90a2021-03-31 19:12:02 +02003177 ) -> bool:
quilesj63f90042020-01-17 09:53:55 +00003178
3179 # steps:
3180 # 1. find all relations for this VCA
3181 # 2. wait for other peers related
3182 # 3. add relations
3183
3184 try:
quilesj63f90042020-01-17 09:53:55 +00003185 # STEP 1: find all relations for this VCA
3186
3187 # read nsr record
3188 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garciab4ebcd02021-10-28 02:00:43 +02003189 nsd = get_nsd(db_nsr)
quilesj63f90042020-01-17 09:53:55 +00003190
3191 # this VCA data
David Garciab4ebcd02021-10-28 02:00:43 +02003192 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3193 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
quilesj63f90042020-01-17 09:53:55 +00003194
David Garciab4ebcd02021-10-28 02:00:43 +02003195 cached_vnfds = {}
3196 cached_vnfrs = {}
3197 relations = []
3198 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3199 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
quilesj63f90042020-01-17 09:53:55 +00003200
3201 # if no relations, terminate
David Garciab4ebcd02021-10-28 02:00:43 +02003202 if not relations:
garciadeblas5697b8b2021-03-24 09:17:02 +01003203 self.logger.debug(logging_text + " No relations")
quilesj63f90042020-01-17 09:53:55 +00003204 return True
3205
David Garciab4ebcd02021-10-28 02:00:43 +02003206 self.logger.debug(logging_text + " adding relations {}".format(relations))
quilesj63f90042020-01-17 09:53:55 +00003207
3208 # add all relations
3209 start = time()
3210 while True:
3211 # check timeout
3212 now = time()
3213 if now - start >= timeout:
garciadeblas5697b8b2021-03-24 09:17:02 +01003214 self.logger.error(logging_text + " : timeout adding relations")
quilesj63f90042020-01-17 09:53:55 +00003215 return False
3216
David Garciab4ebcd02021-10-28 02:00:43 +02003217 # reload nsr from database (we need to update record: _admin.deployed.VCA)
quilesj63f90042020-01-17 09:53:55 +00003218 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3219
David Garciab4ebcd02021-10-28 02:00:43 +02003220 # for each relation, find the VCA's related
3221 for relation in relations.copy():
3222 added = await self._add_relation(
3223 relation,
3224 vca_type,
3225 db_nsr,
3226 cached_vnfds,
3227 cached_vnfrs,
3228 )
3229 if added:
3230 relations.remove(relation)
quilesj63f90042020-01-17 09:53:55 +00003231
David Garciab4ebcd02021-10-28 02:00:43 +02003232 if not relations:
garciadeblas5697b8b2021-03-24 09:17:02 +01003233 self.logger.debug("Relations added")
quilesj63f90042020-01-17 09:53:55 +00003234 break
David Garciab4ebcd02021-10-28 02:00:43 +02003235 await asyncio.sleep(5.0)
quilesj63f90042020-01-17 09:53:55 +00003236
3237 return True
3238
3239 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01003240 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
quilesj63f90042020-01-17 09:53:55 +00003241 return False
3242
garciadeblas5697b8b2021-03-24 09:17:02 +01003243 async def _install_kdu(
3244 self,
3245 nsr_id: str,
3246 nsr_db_path: str,
3247 vnfr_data: dict,
3248 kdu_index: int,
3249 kdud: dict,
3250 vnfd: dict,
3251 k8s_instance_info: dict,
3252 k8params: dict = None,
3253 timeout: int = 600,
3254 vca_id: str = None,
3255 ):
lloretgalleg7c121132020-07-08 07:53:22 +00003256
tiernob9018152020-04-16 14:18:24 +00003257 try:
lloretgalleg7c121132020-07-08 07:53:22 +00003258 k8sclustertype = k8s_instance_info["k8scluster-type"]
3259 # Instantiate kdu
garciadeblas5697b8b2021-03-24 09:17:02 +01003260 db_dict_install = {
3261 "collection": "nsrs",
3262 "filter": {"_id": nsr_id},
3263 "path": nsr_db_path,
3264 }
lloretgalleg7c121132020-07-08 07:53:22 +00003265
romeromonser4554a702021-05-28 12:00:08 +02003266 if k8s_instance_info.get("kdu-deployment-name"):
3267 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3268 else:
3269 kdu_instance = self.k8scluster_map[
3270 k8sclustertype
3271 ].generate_kdu_instance_name(
3272 db_dict=db_dict_install,
3273 kdu_model=k8s_instance_info["kdu-model"],
3274 kdu_name=k8s_instance_info["kdu-name"],
3275 )
Pedro Escaleirada21d262022-04-21 16:31:06 +01003276
3277 # Update the nsrs table with the kdu-instance value
garciadeblas5697b8b2021-03-24 09:17:02 +01003278 self.update_db_2(
Pedro Escaleirada21d262022-04-21 16:31:06 +01003279 item="nsrs",
3280 _id=nsr_id,
3281 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
garciadeblas5697b8b2021-03-24 09:17:02 +01003282 )
Pedro Escaleirada21d262022-04-21 16:31:06 +01003283
3284 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3285 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3286 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3287 # namespace, this first verification could be removed, and the next step would be done for any kind
3288 # of KNF.
3289 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3290 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3291 if k8sclustertype in ("juju", "juju-bundle"):
3292 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3293 # that the user passed a namespace which he wants its KDU to be deployed in)
3294 if (
3295 self.db.count(
3296 table="nsrs",
3297 q_filter={
3298 "_id": nsr_id,
3299 "_admin.projects_write": k8s_instance_info["namespace"],
3300 "_admin.projects_read": k8s_instance_info["namespace"],
3301 },
3302 )
3303 > 0
3304 ):
3305 self.logger.debug(
3306 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3307 )
3308 self.update_db_2(
3309 item="nsrs",
3310 _id=nsr_id,
3311 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3312 )
3313 k8s_instance_info["namespace"] = kdu_instance
3314
David Garciad64e2742021-02-25 20:19:18 +01003315 await self.k8scluster_map[k8sclustertype].install(
lloretgalleg7c121132020-07-08 07:53:22 +00003316 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3317 kdu_model=k8s_instance_info["kdu-model"],
3318 atomic=True,
3319 params=k8params,
3320 db_dict=db_dict_install,
3321 timeout=timeout,
3322 kdu_name=k8s_instance_info["kdu-name"],
David Garciad64e2742021-02-25 20:19:18 +01003323 namespace=k8s_instance_info["namespace"],
3324 kdu_instance=kdu_instance,
David Garciac1fe90a2021-03-31 19:12:02 +02003325 vca_id=vca_id,
David Garciad64e2742021-02-25 20:19:18 +01003326 )
lloretgalleg7c121132020-07-08 07:53:22 +00003327
3328 # Obtain services to obtain management service ip
3329 services = await self.k8scluster_map[k8sclustertype].get_services(
3330 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3331 kdu_instance=kdu_instance,
garciadeblas5697b8b2021-03-24 09:17:02 +01003332 namespace=k8s_instance_info["namespace"],
3333 )
lloretgalleg7c121132020-07-08 07:53:22 +00003334
3335 # Obtain management service info (if exists)
tierno7ecbc342020-09-21 14:05:39 +00003336 vnfr_update_dict = {}
bravof6ec62b72021-02-25 17:20:35 -03003337 kdu_config = get_configuration(vnfd, kdud["name"])
3338 if kdu_config:
3339 target_ee_list = kdu_config.get("execution-environment-list", [])
3340 else:
3341 target_ee_list = []
3342
lloretgalleg7c121132020-07-08 07:53:22 +00003343 if services:
tierno7ecbc342020-09-21 14:05:39 +00003344 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
garciadeblas5697b8b2021-03-24 09:17:02 +01003345 mgmt_services = [
3346 service
3347 for service in kdud.get("service", [])
3348 if service.get("mgmt-service")
3349 ]
lloretgalleg7c121132020-07-08 07:53:22 +00003350 for mgmt_service in mgmt_services:
3351 for service in services:
3352 if service["name"].startswith(mgmt_service["name"]):
3353 # Mgmt service found, Obtain service ip
3354 ip = service.get("external_ip", service.get("cluster_ip"))
3355 if isinstance(ip, list) and len(ip) == 1:
3356 ip = ip[0]
3357
garciadeblas5697b8b2021-03-24 09:17:02 +01003358 vnfr_update_dict[
3359 "kdur.{}.ip-address".format(kdu_index)
3360 ] = ip
lloretgalleg7c121132020-07-08 07:53:22 +00003361
3362 # Check if must update also mgmt ip at the vnf
garciadeblas5697b8b2021-03-24 09:17:02 +01003363 service_external_cp = mgmt_service.get(
3364 "external-connection-point-ref"
3365 )
lloretgalleg7c121132020-07-08 07:53:22 +00003366 if service_external_cp:
garciadeblas5697b8b2021-03-24 09:17:02 +01003367 if (
3368 deep_get(vnfd, ("mgmt-interface", "cp"))
3369 == service_external_cp
3370 ):
lloretgalleg7c121132020-07-08 07:53:22 +00003371 vnfr_update_dict["ip-address"] = ip
3372
bravof6ec62b72021-02-25 17:20:35 -03003373 if find_in_list(
3374 target_ee_list,
garciadeblas5697b8b2021-03-24 09:17:02 +01003375 lambda ee: ee.get(
3376 "external-connection-point-ref", ""
3377 )
3378 == service_external_cp,
bravof6ec62b72021-02-25 17:20:35 -03003379 ):
garciadeblas5697b8b2021-03-24 09:17:02 +01003380 vnfr_update_dict[
3381 "kdur.{}.ip-address".format(kdu_index)
3382 ] = ip
lloretgalleg7c121132020-07-08 07:53:22 +00003383 break
3384 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01003385 self.logger.warn(
3386 "Mgmt service name: {} not found".format(
3387 mgmt_service["name"]
3388 )
3389 )
lloretgalleg7c121132020-07-08 07:53:22 +00003390
tierno7ecbc342020-09-21 14:05:39 +00003391 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3392 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
lloretgalleg7c121132020-07-08 07:53:22 +00003393
bravof9a256db2021-02-22 18:02:07 -03003394 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
garciadeblas5697b8b2021-03-24 09:17:02 +01003395 if (
3396 kdu_config
3397 and kdu_config.get("initial-config-primitive")
3398 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3399 ):
3400 initial_config_primitive_list = kdu_config.get(
3401 "initial-config-primitive"
3402 )
Dominik Fleischmannc1975dd2020-08-19 12:17:51 +02003403 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3404
3405 for initial_config_primitive in initial_config_primitive_list:
garciadeblas5697b8b2021-03-24 09:17:02 +01003406 primitive_params_ = self._map_primitive_params(
3407 initial_config_primitive, {}, {}
3408 )
Dominik Fleischmannc1975dd2020-08-19 12:17:51 +02003409
3410 await asyncio.wait_for(
3411 self.k8scluster_map[k8sclustertype].exec_primitive(
3412 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3413 kdu_instance=kdu_instance,
3414 primitive_name=initial_config_primitive["name"],
garciadeblas5697b8b2021-03-24 09:17:02 +01003415 params=primitive_params_,
3416 db_dict=db_dict_install,
David Garciac1fe90a2021-03-31 19:12:02 +02003417 vca_id=vca_id,
3418 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01003419 timeout=timeout,
David Garciac1fe90a2021-03-31 19:12:02 +02003420 )
Dominik Fleischmannc1975dd2020-08-19 12:17:51 +02003421
tiernob9018152020-04-16 14:18:24 +00003422 except Exception as e:
lloretgalleg7c121132020-07-08 07:53:22 +00003423 # Prepare update db with error and raise exception
tiernob9018152020-04-16 14:18:24 +00003424 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01003425 self.update_db_2(
3426 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3427 )
3428 self.update_db_2(
3429 "vnfrs",
3430 vnfr_data.get("_id"),
3431 {"kdur.{}.status".format(kdu_index): "ERROR"},
3432 )
tiernob9018152020-04-16 14:18:24 +00003433 except Exception:
lloretgalleg7c121132020-07-08 07:53:22 +00003434 # ignore to keep original exception
tiernob9018152020-04-16 14:18:24 +00003435 pass
lloretgalleg7c121132020-07-08 07:53:22 +00003436 # reraise original error
3437 raise
3438
3439 return kdu_instance
tiernob9018152020-04-16 14:18:24 +00003440
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the deployment of every KDU declared in the VNF records.

        For each "kdur" entry of each VNFR this method resolves the target k8s
        cluster (initializing helm-v3 metadata for old clusters if needed),
        synchronizes the cluster's helm repos once per cluster, pre-creates the
        ``_admin.deployed.K8s.<index>`` entry in the nsrs record and spawns one
        asynchronous ``_install_kdu`` task per KDU, registering each task both
        in ``self.lcm_tasks`` and in ``task_instantiation_info`` so the caller
        can await the whole set.

        :param logging_text: prefix used on every log message
        :param nsr_id: nsrs database record _id
        :param nslcmop_id: nslcmops database record _id (used to register tasks)
        :param db_vnfrs: dict of vnfr contents keyed by member-vnf-index
        :param db_vnfds: list of vnfd contents
        :param task_instantiation_info: dict where each launched task is annotated
            with a human-readable description
        :raises LcmException: on malformed descriptors or any deployment error
        """
        # Launch kdus if present in the descriptor

        # Cache of cluster-id -> internal cluster uuid, per cluster type.
        # Filled lazily by _get_cluster_id to avoid repeated DB lookups.
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the internal uuid of a k8s cluster for a given type."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        # persist the freshly created helm-v3 environment info
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    # juju-bundle / helm-v2 clusters must already be initialized
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            # reset the deployed-K8s list in the NS record before deploying
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            # clusters whose helm repos were already synchronized in this call
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    # descriptor counterpart of this kdur; raises StopIteration
                    # if the vnfd does not declare the kdu (malformed DB data)
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            # if the artifact exists in the package, use the
                            # filesystem path instead of the raw model name
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (only once per helm cluster per call;
                    # NOTE(review): the cluster is only appended to the
                    # updated_*_list when something changed, so a no-op sync
                    # may be repeated for subsequent kdus — confirm intended)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    # pre-create the deployed.K8s entry so _install_kdu can
                    # update it by path while it progresses
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # fire-and-register the installation task; awaited by caller
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # flush any pending NS record changes even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00003712
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment of a descriptor.

        Inspects *descriptor_config* for execution environments (juju charms or
        helm charts), reuses or creates the corresponding entry in
        ``db_nsr._admin.deployed.VCA`` and spawns an ``instantiate_N2VC`` task
        for each one, registering it in ``self.lcm_tasks`` and annotating it in
        *task_instantiation_info*. Mutates *db_nsr* in memory and the nsrs
        record in the database.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # derive the VCA type/name from the execution-environment item
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an already-deployed VCA record matching this target;
            # vca_index stays -1 when the VCA list is empty so a new entry
            # gets index 0 in the for-else branch below
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found (loop ended without break): create a new entry
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory copy in sync with the database
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
tiernobaa51102018-12-14 13:16:18 +00003865
tiernoc9556972019-07-05 15:25:25 +00003866 @staticmethod
kuuse0ca67472019-05-13 15:59:27 +02003867 def _create_nslcmop(nsr_id, operation, params):
3868 """
3869 Creates a ns-lcm-opp content to be stored at database.
3870 :param nsr_id: internal id of the instance
3871 :param operation: instantiate, terminate, scale, action, ...
3872 :param params: user parameters for the operation
3873 :return: dictionary following SOL005 format
3874 """
3875 # Raise exception if invalid arguments
3876 if not (nsr_id and operation and params):
3877 raise LcmException(
garciadeblas5697b8b2021-03-24 09:17:02 +01003878 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3879 )
kuuse0ca67472019-05-13 15:59:27 +02003880 now = time()
3881 _id = str(uuid4())
3882 nslcmop = {
3883 "id": _id,
3884 "_id": _id,
3885 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3886 "operationState": "PROCESSING",
3887 "statusEnteredTime": now,
3888 "nsInstanceId": nsr_id,
3889 "lcmOperationType": operation,
3890 "startTime": now,
3891 "isAutomaticInvocation": False,
3892 "operationParams": params,
3893 "isCancelPending": False,
3894 "links": {
3895 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3896 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01003897 },
kuuse0ca67472019-05-13 15:59:27 +02003898 }
3899 return nslcmop
3900
calvinosanch9f9c6f22019-11-04 13:37:39 +01003901 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00003902 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01003903 for key, value in params.items():
3904 if str(value).startswith("!!yaml "):
3905 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01003906 return params
3907
kuuse8b998e42019-07-30 15:22:16 +02003908 def _get_terminate_primitive_params(self, seq, vnf_index):
garciadeblas5697b8b2021-03-24 09:17:02 +01003909 primitive = seq.get("name")
kuuse8b998e42019-07-30 15:22:16 +02003910 primitive_params = {}
3911 params = {
3912 "member_vnf_index": vnf_index,
3913 "primitive": primitive,
3914 "primitive_params": primitive_params,
3915 }
3916 desc_params = {}
3917 return self._map_primitive_params(seq, params, desc_params)
3918
kuuseac3a8882019-10-03 10:48:06 +02003919 # sub-operations
3920
tierno51183952020-04-03 15:48:18 +00003921 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
garciadeblas5697b8b2021-03-24 09:17:02 +01003922 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3923 if op.get("operationState") == "COMPLETED":
kuuseac3a8882019-10-03 10:48:06 +02003924 # b. Skip sub-operation
3925 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3926 return self.SUBOPERATION_STATUS_SKIP
3927 else:
tierno7c4e24c2020-05-13 08:41:35 +00003928 # c. retry executing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02003929 # The sub-operation exists, and operationState != 'COMPLETED'
tierno7c4e24c2020-05-13 08:41:35 +00003930 # Update operationState = 'PROCESSING' to indicate a retry.
garciadeblas5697b8b2021-03-24 09:17:02 +01003931 operationState = "PROCESSING"
3932 detailed_status = "In progress"
kuuseac3a8882019-10-03 10:48:06 +02003933 self._update_suboperation_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01003934 db_nslcmop, op_index, operationState, detailed_status
3935 )
kuuseac3a8882019-10-03 10:48:06 +02003936 # Return the sub-operation index
3937 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3938 # with arguments extracted from the sub-operation
3939 return op_index
3940
3941 # Find a sub-operation where all keys in a matching dictionary must match
3942 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3943 def _find_suboperation(self, db_nslcmop, match):
tierno7c4e24c2020-05-13 08:41:35 +00003944 if db_nslcmop and match:
garciadeblas5697b8b2021-03-24 09:17:02 +01003945 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
kuuseac3a8882019-10-03 10:48:06 +02003946 for i, op in enumerate(op_list):
3947 if all(op.get(k) == match[k] for k in match):
3948 return i
3949 return self.SUBOPERATION_STATUS_NOT_FOUND
3950
3951 # Update status for a sub-operation given its index
garciadeblas5697b8b2021-03-24 09:17:02 +01003952 def _update_suboperation_status(
3953 self, db_nslcmop, op_index, operationState, detailed_status
3954 ):
kuuseac3a8882019-10-03 10:48:06 +02003955 # Update DB for HA tasks
garciadeblas5697b8b2021-03-24 09:17:02 +01003956 q_filter = {"_id": db_nslcmop["_id"]}
3957 update_dict = {
3958 "_admin.operations.{}.operationState".format(op_index): operationState,
3959 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3960 }
3961 self.db.set_one(
3962 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3963 )
kuuseac3a8882019-10-03 10:48:06 +02003964
3965 # Add sub-operation, return the index of the added sub-operation
3966 # Optionally, set operationState, detailed-status, and operationType
3967 # Status and type are currently set for 'scale' sub-operations:
3968 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3969 # 'detailed-status' : status message
3970 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3971 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
garciadeblas5697b8b2021-03-24 09:17:02 +01003972 def _add_suboperation(
3973 self,
3974 db_nslcmop,
3975 vnf_index,
3976 vdu_id,
3977 vdu_count_index,
3978 vdu_name,
3979 primitive,
3980 mapped_primitive_params,
3981 operationState=None,
3982 detailed_status=None,
3983 operationType=None,
3984 RO_nsr_id=None,
3985 RO_scaling_info=None,
3986 ):
tiernoe876f672020-02-13 14:34:48 +00003987 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02003988 return self.SUBOPERATION_STATUS_NOT_FOUND
3989 # Get the "_admin.operations" list, if it exists
garciadeblas5697b8b2021-03-24 09:17:02 +01003990 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3991 op_list = db_nslcmop_admin.get("operations")
kuuseac3a8882019-10-03 10:48:06 +02003992 # Create or append to the "_admin.operations" list
garciadeblas5697b8b2021-03-24 09:17:02 +01003993 new_op = {
3994 "member_vnf_index": vnf_index,
3995 "vdu_id": vdu_id,
3996 "vdu_count_index": vdu_count_index,
3997 "primitive": primitive,
3998 "primitive_params": mapped_primitive_params,
3999 }
kuuseac3a8882019-10-03 10:48:06 +02004000 if operationState:
garciadeblas5697b8b2021-03-24 09:17:02 +01004001 new_op["operationState"] = operationState
kuuseac3a8882019-10-03 10:48:06 +02004002 if detailed_status:
garciadeblas5697b8b2021-03-24 09:17:02 +01004003 new_op["detailed-status"] = detailed_status
kuuseac3a8882019-10-03 10:48:06 +02004004 if operationType:
garciadeblas5697b8b2021-03-24 09:17:02 +01004005 new_op["lcmOperationType"] = operationType
kuuseac3a8882019-10-03 10:48:06 +02004006 if RO_nsr_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01004007 new_op["RO_nsr_id"] = RO_nsr_id
kuuseac3a8882019-10-03 10:48:06 +02004008 if RO_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01004009 new_op["RO_scaling_info"] = RO_scaling_info
kuuseac3a8882019-10-03 10:48:06 +02004010 if not op_list:
4011 # No existing operations, create key 'operations' with current operation as first list element
garciadeblas5697b8b2021-03-24 09:17:02 +01004012 db_nslcmop_admin.update({"operations": [new_op]})
4013 op_list = db_nslcmop_admin.get("operations")
kuuseac3a8882019-10-03 10:48:06 +02004014 else:
4015 # Existing operations, append operation to list
4016 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02004017
garciadeblas5697b8b2021-03-24 09:17:02 +01004018 db_nslcmop_update = {"_admin.operations": op_list}
4019 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
kuuseac3a8882019-10-03 10:48:06 +02004020 op_index = len(op_list) - 1
4021 return op_index
4022
4023 # Helper methods for scale() sub-operations
4024
4025 # pre-scale/post-scale:
4026 # Check for 3 different cases:
4027 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4028 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
tierno7c4e24c2020-05-13 08:41:35 +00004029 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
garciadeblas5697b8b2021-03-24 09:17:02 +01004030 def _check_or_add_scale_suboperation(
4031 self,
4032 db_nslcmop,
4033 vnf_index,
4034 vnf_config_primitive,
4035 primitive_params,
4036 operationType,
4037 RO_nsr_id=None,
4038 RO_scaling_info=None,
4039 ):
kuuseac3a8882019-10-03 10:48:06 +02004040 # Find this sub-operation
tierno7c4e24c2020-05-13 08:41:35 +00004041 if RO_nsr_id and RO_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01004042 operationType = "SCALE-RO"
kuuseac3a8882019-10-03 10:48:06 +02004043 match = {
garciadeblas5697b8b2021-03-24 09:17:02 +01004044 "member_vnf_index": vnf_index,
4045 "RO_nsr_id": RO_nsr_id,
4046 "RO_scaling_info": RO_scaling_info,
kuuseac3a8882019-10-03 10:48:06 +02004047 }
4048 else:
4049 match = {
garciadeblas5697b8b2021-03-24 09:17:02 +01004050 "member_vnf_index": vnf_index,
4051 "primitive": vnf_config_primitive,
4052 "primitive_params": primitive_params,
4053 "lcmOperationType": operationType,
kuuseac3a8882019-10-03 10:48:06 +02004054 }
4055 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00004056 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02004057 # a. New sub-operation
4058 # The sub-operation does not exist, add it.
4059 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4060 # The following parameters are set to None for all kind of scaling:
4061 vdu_id = None
4062 vdu_count_index = None
4063 vdu_name = None
tierno51183952020-04-03 15:48:18 +00004064 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02004065 vnf_config_primitive = None
4066 primitive_params = None
4067 else:
4068 RO_nsr_id = None
4069 RO_scaling_info = None
4070 # Initial status for sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01004071 operationState = "PROCESSING"
4072 detailed_status = "In progress"
kuuseac3a8882019-10-03 10:48:06 +02004073 # Add sub-operation for pre/post-scaling (zero or more operations)
garciadeblas5697b8b2021-03-24 09:17:02 +01004074 self._add_suboperation(
4075 db_nslcmop,
4076 vnf_index,
4077 vdu_id,
4078 vdu_count_index,
4079 vdu_name,
4080 vnf_config_primitive,
4081 primitive_params,
4082 operationState,
4083 detailed_status,
4084 operationType,
4085 RO_nsr_id,
4086 RO_scaling_info,
4087 )
kuuseac3a8882019-10-03 10:48:06 +02004088 return self.SUBOPERATION_STATUS_NEW
4089 else:
4090 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4091 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00004092 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02004093
preethika.pdf7d8e02019-12-10 13:10:48 +00004094 # Function to return execution_environment id
4095
4096 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00004097 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00004098 for vca in vca_deployed_list:
4099 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4100 return vca["ee_id"]
4101
David Garciac1fe90a2021-03-31 19:12:02 +02004102 async def destroy_N2VC(
4103 self,
4104 logging_text,
4105 db_nslcmop,
4106 vca_deployed,
4107 config_descriptor,
4108 vca_index,
4109 destroy_ee=True,
4110 exec_primitives=True,
4111 scaling_in=False,
4112 vca_id: str = None,
4113 ):
tiernoe876f672020-02-13 14:34:48 +00004114 """
4115 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
4116 :param logging_text:
4117 :param db_nslcmop:
4118 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
4119 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4120 :param vca_index: index in the database _admin.deployed.VCA
4121 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
tierno588547c2020-07-01 15:30:20 +00004122 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
4123 not executed properly
aktas13251562021-02-12 22:19:10 +03004124 :param scaling_in: True destroys the application, False destroys the model
tiernoe876f672020-02-13 14:34:48 +00004125 :return: None or exception
4126 """
tiernoe876f672020-02-13 14:34:48 +00004127
tierno588547c2020-07-01 15:30:20 +00004128 self.logger.debug(
garciadeblas5697b8b2021-03-24 09:17:02 +01004129 logging_text
4130 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
tierno588547c2020-07-01 15:30:20 +00004131 vca_index, vca_deployed, config_descriptor, destroy_ee
4132 )
4133 )
4134
4135 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
4136
4137 # execute terminate_primitives
4138 if exec_primitives:
bravof922c4172020-11-24 21:21:43 -03004139 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
garciadeblas5697b8b2021-03-24 09:17:02 +01004140 config_descriptor.get("terminate-config-primitive"),
4141 vca_deployed.get("ee_descriptor_id"),
4142 )
tierno588547c2020-07-01 15:30:20 +00004143 vdu_id = vca_deployed.get("vdu_id")
4144 vdu_count_index = vca_deployed.get("vdu_count_index")
4145 vdu_name = vca_deployed.get("vdu_name")
4146 vnf_index = vca_deployed.get("member-vnf-index")
4147 if terminate_primitives and vca_deployed.get("needed_terminate"):
tierno588547c2020-07-01 15:30:20 +00004148 for seq in terminate_primitives:
4149 # For each sequence in list, get primitive and call _ns_execute_primitive()
4150 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
garciadeblas5697b8b2021-03-24 09:17:02 +01004151 vnf_index, seq.get("name")
4152 )
tierno588547c2020-07-01 15:30:20 +00004153 self.logger.debug(logging_text + step)
4154 # Create the primitive for each sequence, i.e. "primitive": "touch"
garciadeblas5697b8b2021-03-24 09:17:02 +01004155 primitive = seq.get("name")
4156 mapped_primitive_params = self._get_terminate_primitive_params(
4157 seq, vnf_index
4158 )
tierno588547c2020-07-01 15:30:20 +00004159
4160 # Add sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01004161 self._add_suboperation(
4162 db_nslcmop,
4163 vnf_index,
4164 vdu_id,
4165 vdu_count_index,
4166 vdu_name,
4167 primitive,
4168 mapped_primitive_params,
4169 )
tierno588547c2020-07-01 15:30:20 +00004170 # Sub-operations: Call _ns_execute_primitive() instead of action()
4171 try:
David Garciac1fe90a2021-03-31 19:12:02 +02004172 result, result_detail = await self._ns_execute_primitive(
garciadeblas5697b8b2021-03-24 09:17:02 +01004173 vca_deployed["ee_id"],
4174 primitive,
David Garciac1fe90a2021-03-31 19:12:02 +02004175 mapped_primitive_params,
4176 vca_type=vca_type,
4177 vca_id=vca_id,
4178 )
tierno588547c2020-07-01 15:30:20 +00004179 except LcmException:
4180 # this happens when VCA is not deployed. In this case it is not needed to terminate
4181 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01004182 result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
tierno588547c2020-07-01 15:30:20 +00004183 if result not in result_ok:
garciadeblas5697b8b2021-03-24 09:17:02 +01004184 raise LcmException(
4185 "terminate_primitive {} for vnf_member_index={} fails with "
4186 "error {}".format(seq.get("name"), vnf_index, result_detail)
4187 )
tierno588547c2020-07-01 15:30:20 +00004188 # set that this VCA do not need terminated
garciadeblas5697b8b2021-03-24 09:17:02 +01004189 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
4190 vca_index
4191 )
4192 self.update_db_2(
4193 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
4194 )
tiernoe876f672020-02-13 14:34:48 +00004195
bravof73bac502021-05-11 07:38:47 -04004196 # Delete Prometheus Jobs if any
4197 # This uses NSR_ID, so it will destroy any jobs under this index
4198 self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
tiernob996d942020-07-03 14:52:28 +00004199
tiernoe876f672020-02-13 14:34:48 +00004200 if destroy_ee:
David Garciac1fe90a2021-03-31 19:12:02 +02004201 await self.vca_map[vca_type].delete_execution_environment(
4202 vca_deployed["ee_id"],
4203 scaling_in=scaling_in,
aktas98488ed2021-07-29 17:42:49 +03004204 vca_type=vca_type,
David Garciac1fe90a2021-03-31 19:12:02 +02004205 vca_id=vca_id,
4206 )
kuuse0ca67472019-05-13 15:59:27 +02004207
David Garciac1fe90a2021-03-31 19:12:02 +02004208 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
garciadeblas5697b8b2021-03-24 09:17:02 +01004209 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
tierno51183952020-04-03 15:48:18 +00004210 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00004211 try:
David Garciac1fe90a2021-03-31 19:12:02 +02004212 await self.n2vc.delete_namespace(
4213 namespace=namespace,
4214 total_timeout=self.timeout_charm_delete,
4215 vca_id=vca_id,
4216 )
tiernof59ad6c2020-04-08 12:50:52 +00004217 except N2VCNotFound: # already deleted. Skip
4218 pass
garciadeblas5697b8b2021-03-24 09:17:02 +01004219 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
quilesj3655ae02019-12-12 16:08:35 +00004220
garciadeblas5697b8b2021-03-24 09:17:02 +01004221 async def _terminate_RO(
4222 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4223 ):
tiernoe876f672020-02-13 14:34:48 +00004224 """
4225 Terminates a deployment from RO
4226 :param logging_text:
4227 :param nsr_deployed: db_nsr._admin.deployed
4228 :param nsr_id:
4229 :param nslcmop_id:
4230 :param stage: list of string with the content to write on db_nslcmop.detailed-status.
4231 this method will update only the index 2, but it will write on database the concatenated content of the list
4232 :return:
4233 """
4234 db_nsr_update = {}
4235 failed_detail = []
4236 ro_nsr_id = ro_delete_action = None
4237 if nsr_deployed and nsr_deployed.get("RO"):
4238 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
4239 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
4240 try:
4241 if ro_nsr_id:
4242 stage[2] = "Deleting ns from VIM."
4243 db_nsr_update["detailed-status"] = " ".join(stage)
4244 self._write_op_status(nslcmop_id, stage)
4245 self.logger.debug(logging_text + stage[2])
4246 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4247 self._write_op_status(nslcmop_id, stage)
4248 desc = await self.RO.delete("ns", ro_nsr_id)
4249 ro_delete_action = desc["action_id"]
garciadeblas5697b8b2021-03-24 09:17:02 +01004250 db_nsr_update[
4251 "_admin.deployed.RO.nsr_delete_action_id"
4252 ] = ro_delete_action
tiernoe876f672020-02-13 14:34:48 +00004253 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4254 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4255 if ro_delete_action:
4256 # wait until NS is deleted from VIM
4257 stage[2] = "Waiting ns deleted from VIM."
4258 detailed_status_old = None
garciadeblas5697b8b2021-03-24 09:17:02 +01004259 self.logger.debug(
4260 logging_text
4261 + stage[2]
4262 + " RO_id={} ro_delete_action={}".format(
4263 ro_nsr_id, ro_delete_action
4264 )
4265 )
tiernoe876f672020-02-13 14:34:48 +00004266 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4267 self._write_op_status(nslcmop_id, stage)
kuused124bfe2019-06-18 12:09:24 +02004268
tiernoe876f672020-02-13 14:34:48 +00004269 delete_timeout = 20 * 60 # 20 minutes
4270 while delete_timeout > 0:
4271 desc = await self.RO.show(
4272 "ns",
4273 item_id_name=ro_nsr_id,
4274 extra_item="action",
garciadeblas5697b8b2021-03-24 09:17:02 +01004275 extra_item_id=ro_delete_action,
4276 )
tiernoe876f672020-02-13 14:34:48 +00004277
4278 # deploymentStatus
4279 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4280
4281 ns_status, ns_status_info = self.RO.check_action_status(desc)
4282 if ns_status == "ERROR":
4283 raise ROclient.ROClientException(ns_status_info)
4284 elif ns_status == "BUILD":
4285 stage[2] = "Deleting from VIM {}".format(ns_status_info)
4286 elif ns_status == "ACTIVE":
4287 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
4288 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4289 break
4290 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004291 assert (
4292 False
4293 ), "ROclient.check_action_status returns unknown {}".format(
4294 ns_status
4295 )
tiernoe876f672020-02-13 14:34:48 +00004296 if stage[2] != detailed_status_old:
4297 detailed_status_old = stage[2]
4298 db_nsr_update["detailed-status"] = " ".join(stage)
4299 self._write_op_status(nslcmop_id, stage)
4300 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4301 await asyncio.sleep(5, loop=self.loop)
4302 delete_timeout -= 5
4303 else: # delete_timeout <= 0:
garciadeblas5697b8b2021-03-24 09:17:02 +01004304 raise ROclient.ROClientException(
4305 "Timeout waiting ns deleted from VIM"
4306 )
tiernoe876f672020-02-13 14:34:48 +00004307
4308 except Exception as e:
4309 self.update_db_2("nsrs", nsr_id, db_nsr_update)
garciadeblas5697b8b2021-03-24 09:17:02 +01004310 if (
4311 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4312 ): # not found
tiernoe876f672020-02-13 14:34:48 +00004313 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4314 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4315 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
garciadeblas5697b8b2021-03-24 09:17:02 +01004316 self.logger.debug(
4317 logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
4318 )
4319 elif (
4320 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4321 ): # conflict
tiernoa2143262020-03-27 16:20:40 +00004322 failed_detail.append("delete conflict: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01004323 self.logger.debug(
4324 logging_text
4325 + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
4326 )
tiernoe876f672020-02-13 14:34:48 +00004327 else:
tiernoa2143262020-03-27 16:20:40 +00004328 failed_detail.append("delete error: {}".format(e))
garciadeblas5697b8b2021-03-24 09:17:02 +01004329 self.logger.error(
4330 logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
4331 )
tiernoe876f672020-02-13 14:34:48 +00004332
4333 # Delete nsd
4334 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
4335 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
4336 try:
4337 stage[2] = "Deleting nsd from RO."
4338 db_nsr_update["detailed-status"] = " ".join(stage)
4339 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4340 self._write_op_status(nslcmop_id, stage)
4341 await self.RO.delete("nsd", ro_nsd_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01004342 self.logger.debug(
4343 logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
4344 )
tiernoe876f672020-02-13 14:34:48 +00004345 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
4346 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01004347 if (
4348 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4349 ): # not found
tiernoe876f672020-02-13 14:34:48 +00004350 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
garciadeblas5697b8b2021-03-24 09:17:02 +01004351 self.logger.debug(
4352 logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
4353 )
4354 elif (
4355 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4356 ): # conflict
4357 failed_detail.append(
4358 "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
4359 )
tiernoe876f672020-02-13 14:34:48 +00004360 self.logger.debug(logging_text + failed_detail[-1])
4361 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004362 failed_detail.append(
4363 "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
4364 )
tiernoe876f672020-02-13 14:34:48 +00004365 self.logger.error(logging_text + failed_detail[-1])
4366
4367 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
4368 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
4369 if not vnf_deployed or not vnf_deployed["id"]:
4370 continue
4371 try:
4372 ro_vnfd_id = vnf_deployed["id"]
garciadeblas5697b8b2021-03-24 09:17:02 +01004373 stage[
4374 2
4375 ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
4376 vnf_deployed["member-vnf-index"], ro_vnfd_id
4377 )
tiernoe876f672020-02-13 14:34:48 +00004378 db_nsr_update["detailed-status"] = " ".join(stage)
4379 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4380 self._write_op_status(nslcmop_id, stage)
4381 await self.RO.delete("vnfd", ro_vnfd_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01004382 self.logger.debug(
4383 logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
4384 )
tiernoe876f672020-02-13 14:34:48 +00004385 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
4386 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01004387 if (
4388 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4389 ): # not found
4390 db_nsr_update[
4391 "_admin.deployed.RO.vnfd.{}.id".format(index)
4392 ] = None
4393 self.logger.debug(
4394 logging_text
4395 + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
4396 )
4397 elif (
4398 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4399 ): # conflict
4400 failed_detail.append(
4401 "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
4402 )
tiernoe876f672020-02-13 14:34:48 +00004403 self.logger.debug(logging_text + failed_detail[-1])
4404 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004405 failed_detail.append(
4406 "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
4407 )
tiernoe876f672020-02-13 14:34:48 +00004408 self.logger.error(logging_text + failed_detail[-1])
4409
tiernoa2143262020-03-27 16:20:40 +00004410 if failed_detail:
4411 stage[2] = "Error deleting from VIM"
4412 else:
4413 stage[2] = "Deleted from VIM"
tiernoe876f672020-02-13 14:34:48 +00004414 db_nsr_update["detailed-status"] = " ".join(stage)
4415 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4416 self._write_op_status(nslcmop_id, stage)
4417
4418 if failed_detail:
tiernoa2143262020-03-27 16:20:40 +00004419 raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00004420
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a NS instance.

        Runs in three stages (mirrored in ``stage``): 1) preparation, 2) execute
        terminate primitives per VCA, 3) delete all execution environments, KDUs
        and the RO/VIM deployment. The final status is always written to the
        database in the ``finally`` block, and a kafka "terminated" message is
        sent when an operation state was reached.

        :param nsr_id: NS record id in the "nsrs" collection
        :param nslcmop_id: operation id in the "nslcmops" collection
        :return: None; results are reported via database updates and kafka
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # task -> human-readable description, awaited below
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            # Nothing deployed yet: skip straight to the finally block, which
            # still records the (successful) operation result.
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            # Cache vnfds by id so each descriptor is fetched from db only once.
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # Pick the configuration descriptor matching the VCA scope:
                # NS-level, VDU-level, KDU-level or VNF-level (in that order).
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                # Abort stage 3 on primitive errors; the finally block reports
                # the failure collected in error_list.
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                # NOTE(review): this rebinds the outer "exc"; harmless here since
                # the outer value was already appended to error_list above.
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            # Propagate the NOT_INSTANTIATED state to all VNFRs of this NS.
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4749
garciadeblas5697b8b2021-03-24 09:17:02 +01004750 async def _wait_for_tasks(
4751 self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4752 ):
tiernoe876f672020-02-13 14:34:48 +00004753 time_start = time()
tiernoa2143262020-03-27 16:20:40 +00004754 error_detail_list = []
tiernoe876f672020-02-13 14:34:48 +00004755 error_list = []
4756 pending_tasks = list(created_tasks_info.keys())
4757 num_tasks = len(pending_tasks)
4758 num_done = 0
4759 stage[1] = "{}/{}.".format(num_done, num_tasks)
4760 self._write_op_status(nslcmop_id, stage)
tiernoe876f672020-02-13 14:34:48 +00004761 while pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00004762 new_error = None
tiernoe876f672020-02-13 14:34:48 +00004763 _timeout = timeout + time_start - time()
garciadeblas5697b8b2021-03-24 09:17:02 +01004764 done, pending_tasks = await asyncio.wait(
4765 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4766 )
tiernoe876f672020-02-13 14:34:48 +00004767 num_done += len(done)
garciadeblas5697b8b2021-03-24 09:17:02 +01004768 if not done: # Timeout
tiernoe876f672020-02-13 14:34:48 +00004769 for task in pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00004770 new_error = created_tasks_info[task] + ": Timeout"
4771 error_detail_list.append(new_error)
4772 error_list.append(new_error)
tiernoe876f672020-02-13 14:34:48 +00004773 break
4774 for task in done:
4775 if task.cancelled():
tierno067e04a2020-03-31 12:53:13 +00004776 exc = "Cancelled"
tiernoe876f672020-02-13 14:34:48 +00004777 else:
4778 exc = task.exception()
tierno067e04a2020-03-31 12:53:13 +00004779 if exc:
4780 if isinstance(exc, asyncio.TimeoutError):
4781 exc = "Timeout"
4782 new_error = created_tasks_info[task] + ": {}".format(exc)
4783 error_list.append(created_tasks_info[task])
4784 error_detail_list.append(new_error)
garciadeblas5697b8b2021-03-24 09:17:02 +01004785 if isinstance(
4786 exc,
4787 (
4788 str,
4789 DbException,
4790 N2VCException,
4791 ROclient.ROClientException,
4792 LcmException,
4793 K8sException,
4794 NgRoException,
4795 ),
4796 ):
tierno067e04a2020-03-31 12:53:13 +00004797 self.logger.error(logging_text + new_error)
tiernoe876f672020-02-13 14:34:48 +00004798 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004799 exc_traceback = "".join(
4800 traceback.format_exception(None, exc, exc.__traceback__)
4801 )
4802 self.logger.error(
4803 logging_text
4804 + created_tasks_info[task]
4805 + " "
4806 + exc_traceback
4807 )
tierno067e04a2020-03-31 12:53:13 +00004808 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004809 self.logger.debug(
4810 logging_text + created_tasks_info[task] + ": Done"
4811 )
tiernoe876f672020-02-13 14:34:48 +00004812 stage[1] = "{}/{}.".format(num_done, num_tasks)
4813 if new_error:
tiernoa2143262020-03-27 16:20:40 +00004814 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
tiernoe876f672020-02-13 14:34:48 +00004815 if nsr_id: # update also nsr
garciadeblas5697b8b2021-03-24 09:17:02 +01004816 self.update_db_2(
4817 "nsrs",
4818 nsr_id,
4819 {
4820 "errorDescription": "Error at: " + ", ".join(error_list),
4821 "errorDetail": ". ".join(error_detail_list),
4822 },
4823 )
tiernoe876f672020-02-13 14:34:48 +00004824 self._write_op_status(nslcmop_id, stage)
tiernoa2143262020-03-27 16:20:40 +00004825 return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00004826
tiernoda1ff8c2020-10-22 14:12:46 +00004827 @staticmethod
4828 def _map_primitive_params(primitive_desc, params, instantiation_params):
tiernoda964822019-01-14 15:53:47 +00004829 """
4830 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4831 The default-value is used. If it is between < > it look for a value at instantiation_params
4832 :param primitive_desc: portion of VNFD/NSD that describes primitive
4833 :param params: Params provided by user
4834 :param instantiation_params: Instantiation params provided by user
4835 :return: a dictionary with the calculated params
4836 """
4837 calculated_params = {}
4838 for parameter in primitive_desc.get("parameter", ()):
4839 param_name = parameter["name"]
4840 if param_name in params:
4841 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00004842 elif "default-value" in parameter or "value" in parameter:
4843 if "value" in parameter:
4844 calculated_params[param_name] = parameter["value"]
4845 else:
4846 calculated_params[param_name] = parameter["default-value"]
garciadeblas5697b8b2021-03-24 09:17:02 +01004847 if (
4848 isinstance(calculated_params[param_name], str)
4849 and calculated_params[param_name].startswith("<")
4850 and calculated_params[param_name].endswith(">")
4851 ):
tierno98ad6ea2019-05-30 17:16:28 +00004852 if calculated_params[param_name][1:-1] in instantiation_params:
garciadeblas5697b8b2021-03-24 09:17:02 +01004853 calculated_params[param_name] = instantiation_params[
4854 calculated_params[param_name][1:-1]
4855 ]
tiernoda964822019-01-14 15:53:47 +00004856 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004857 raise LcmException(
4858 "Parameter {} needed to execute primitive {} not provided".format(
4859 calculated_params[param_name], primitive_desc["name"]
4860 )
4861 )
tiernoda964822019-01-14 15:53:47 +00004862 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01004863 raise LcmException(
4864 "Parameter {} needed to execute primitive {} not provided".format(
4865 param_name, primitive_desc["name"]
4866 )
4867 )
tierno59d22d22018-09-25 18:10:19 +02004868
tiernoda964822019-01-14 15:53:47 +00004869 if isinstance(calculated_params[param_name], (dict, list, tuple)):
garciadeblas5697b8b2021-03-24 09:17:02 +01004870 calculated_params[param_name] = yaml.safe_dump(
4871 calculated_params[param_name], default_flow_style=True, width=256
4872 )
4873 elif isinstance(calculated_params[param_name], str) and calculated_params[
4874 param_name
4875 ].startswith("!!yaml "):
tiernoda964822019-01-14 15:53:47 +00004876 calculated_params[param_name] = calculated_params[param_name][7:]
tiernofa40e692020-10-14 14:59:36 +00004877 if parameter.get("data-type") == "INTEGER":
4878 try:
4879 calculated_params[param_name] = int(calculated_params[param_name])
4880 except ValueError: # error converting string to int
4881 raise LcmException(
garciadeblas5697b8b2021-03-24 09:17:02 +01004882 "Parameter {} of primitive {} must be integer".format(
4883 param_name, primitive_desc["name"]
4884 )
4885 )
tiernofa40e692020-10-14 14:59:36 +00004886 elif parameter.get("data-type") == "BOOLEAN":
garciadeblas5697b8b2021-03-24 09:17:02 +01004887 calculated_params[param_name] = not (
4888 (str(calculated_params[param_name])).lower() == "false"
4889 )
tiernoc3f2a822019-11-05 13:45:04 +00004890
4891 # add always ns_config_info if primitive name is config
4892 if primitive_desc["name"] == "config":
4893 if "ns_config_info" in instantiation_params:
garciadeblas5697b8b2021-03-24 09:17:02 +01004894 calculated_params["ns_config_info"] = instantiation_params[
4895 "ns_config_info"
4896 ]
tiernoda964822019-01-14 15:53:47 +00004897 return calculated_params
4898
garciadeblas5697b8b2021-03-24 09:17:02 +01004899 def _look_for_deployed_vca(
4900 self,
4901 deployed_vca,
4902 member_vnf_index,
4903 vdu_id,
4904 vdu_count_index,
4905 kdu_name=None,
4906 ee_descriptor_id=None,
4907 ):
tiernoe876f672020-02-13 14:34:48 +00004908 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4909 for vca in deployed_vca:
4910 if not vca:
4911 continue
4912 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4913 continue
garciadeblas5697b8b2021-03-24 09:17:02 +01004914 if (
4915 vdu_count_index is not None
4916 and vdu_count_index != vca["vdu_count_index"]
4917 ):
tiernoe876f672020-02-13 14:34:48 +00004918 continue
4919 if kdu_name and kdu_name != vca["kdu_name"]:
4920 continue
tiernoa278b842020-07-08 15:33:55 +00004921 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4922 continue
tiernoe876f672020-02-13 14:34:48 +00004923 break
4924 else:
4925 # vca_deployed not found
garciadeblas5697b8b2021-03-24 09:17:02 +01004926 raise LcmException(
4927 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4928 " is not deployed".format(
4929 member_vnf_index,
4930 vdu_id,
4931 vdu_count_index,
4932 kdu_name,
4933 ee_descriptor_id,
4934 )
4935 )
tiernoe876f672020-02-13 14:34:48 +00004936 # get ee_id
4937 ee_id = vca.get("ee_id")
garciadeblas5697b8b2021-03-24 09:17:02 +01004938 vca_type = vca.get(
4939 "type", "lxc_proxy_charm"
4940 ) # default value for backward compatibility - proxy charm
tiernoe876f672020-02-13 14:34:48 +00004941 if not ee_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01004942 raise LcmException(
4943 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4944 "execution environment".format(
4945 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4946 )
4947 )
tierno588547c2020-07-01 15:30:20 +00004948 return ee_id, vca_type
tiernoe876f672020-02-13 14:34:48 +00004949
David Garciac1fe90a2021-03-31 19:12:02 +02004950 async def _ns_execute_primitive(
4951 self,
4952 ee_id,
4953 primitive,
4954 primitive_params,
4955 retries=0,
4956 retries_interval=30,
4957 timeout=None,
4958 vca_type=None,
4959 db_dict=None,
4960 vca_id: str = None,
4961 ) -> (str, str):
tiernoda964822019-01-14 15:53:47 +00004962 try:
tierno98ad6ea2019-05-30 17:16:28 +00004963 if primitive == "config":
4964 primitive_params = {"params": primitive_params}
tierno2fc7ce52019-06-11 22:50:01 +00004965
tierno588547c2020-07-01 15:30:20 +00004966 vca_type = vca_type or "lxc_proxy_charm"
4967
quilesj7e13aeb2019-10-08 13:34:55 +02004968 while retries >= 0:
4969 try:
tierno067e04a2020-03-31 12:53:13 +00004970 output = await asyncio.wait_for(
tierno588547c2020-07-01 15:30:20 +00004971 self.vca_map[vca_type].exec_primitive(
tierno067e04a2020-03-31 12:53:13 +00004972 ee_id=ee_id,
4973 primitive_name=primitive,
4974 params_dict=primitive_params,
4975 progress_timeout=self.timeout_progress_primitive,
tierno588547c2020-07-01 15:30:20 +00004976 total_timeout=self.timeout_primitive,
David Garciac1fe90a2021-03-31 19:12:02 +02004977 db_dict=db_dict,
4978 vca_id=vca_id,
aktas98488ed2021-07-29 17:42:49 +03004979 vca_type=vca_type,
David Garciac1fe90a2021-03-31 19:12:02 +02004980 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01004981 timeout=timeout or self.timeout_primitive,
4982 )
quilesj7e13aeb2019-10-08 13:34:55 +02004983 # execution was OK
4984 break
tierno067e04a2020-03-31 12:53:13 +00004985 except asyncio.CancelledError:
4986 raise
4987 except Exception as e: # asyncio.TimeoutError
4988 if isinstance(e, asyncio.TimeoutError):
4989 e = "Timeout"
quilesj7e13aeb2019-10-08 13:34:55 +02004990 retries -= 1
4991 if retries >= 0:
garciadeblas5697b8b2021-03-24 09:17:02 +01004992 self.logger.debug(
4993 "Error executing action {} on {} -> {}".format(
4994 primitive, ee_id, e
4995 )
4996 )
quilesj7e13aeb2019-10-08 13:34:55 +02004997 # wait and retry
4998 await asyncio.sleep(retries_interval, loop=self.loop)
tierno73d8bd02019-11-18 17:33:27 +00004999 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005000 return "FAILED", str(e)
quilesj7e13aeb2019-10-08 13:34:55 +02005001
garciadeblas5697b8b2021-03-24 09:17:02 +01005002 return "COMPLETED", output
quilesj7e13aeb2019-10-08 13:34:55 +02005003
tierno067e04a2020-03-31 12:53:13 +00005004 except (LcmException, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00005005 raise
quilesj7e13aeb2019-10-08 13:34:55 +02005006 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01005007 return "FAIL", "Error executing action {}: {}".format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02005008
ksaikiranr3fde2c72021-03-15 10:39:06 +05305009 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5010 """
5011 Updating the vca_status with latest juju information in nsrs record
5012 :param: nsr_id: Id of the nsr
5013 :param: nslcmop_id: Id of the nslcmop
5014 :return: None
5015 """
5016
5017 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5018 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garciac1fe90a2021-03-31 19:12:02 +02005019 vca_id = self.get_vca_id({}, db_nsr)
garciadeblas5697b8b2021-03-24 09:17:02 +01005020 if db_nsr["_admin"]["deployed"]["K8s"]:
Pedro Escaleira75b620d2022-04-01 01:49:22 +01005021 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5022 cluster_uuid, kdu_instance, cluster_type = (
5023 k8s["k8scluster-uuid"],
5024 k8s["kdu-instance"],
5025 k8s["k8scluster-type"],
5026 )
garciadeblas5697b8b2021-03-24 09:17:02 +01005027 await self._on_update_k8s_db(
Pedro Escaleira75b620d2022-04-01 01:49:22 +01005028 cluster_uuid=cluster_uuid,
5029 kdu_instance=kdu_instance,
5030 filter={"_id": nsr_id},
5031 vca_id=vca_id,
5032 cluster_type=cluster_type,
garciadeblas5697b8b2021-03-24 09:17:02 +01005033 )
ksaikiranr656b6dd2021-02-19 10:25:18 +05305034 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005035 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
ksaikiranr656b6dd2021-02-19 10:25:18 +05305036 table, filter = "nsrs", {"_id": nsr_id}
5037 path = "_admin.deployed.VCA.{}.".format(vca_index)
5038 await self._on_update_n2vc_db(table, filter, path, {})
ksaikiranr3fde2c72021-03-15 10:39:06 +05305039
5040 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5041 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5042
tierno59d22d22018-09-25 18:10:19 +02005043 async def action(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02005044 # Try to lock HA task here
garciadeblas5697b8b2021-03-24 09:17:02 +01005045 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02005046 if not task_is_locked_by_me:
5047 return
5048
tierno59d22d22018-09-25 18:10:19 +02005049 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
5050 self.logger.debug(logging_text + "Enter")
5051 # get all needed from database
5052 db_nsr = None
5053 db_nslcmop = None
tiernoe876f672020-02-13 14:34:48 +00005054 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02005055 db_nslcmop_update = {}
5056 nslcmop_operation_state = None
tierno067e04a2020-03-31 12:53:13 +00005057 error_description_nslcmop = None
tierno59d22d22018-09-25 18:10:19 +02005058 exc = None
5059 try:
kuused124bfe2019-06-18 12:09:24 +02005060 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00005061 step = "Waiting for previous operations to terminate"
garciadeblas5697b8b2021-03-24 09:17:02 +01005062 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02005063
quilesj4cda56b2019-12-05 10:02:20 +00005064 self._write_ns_status(
5065 nsr_id=nsr_id,
5066 ns_state=None,
5067 current_operation="RUNNING ACTION",
garciadeblas5697b8b2021-03-24 09:17:02 +01005068 current_operation_id=nslcmop_id,
quilesj4cda56b2019-12-05 10:02:20 +00005069 )
5070
tierno59d22d22018-09-25 18:10:19 +02005071 step = "Getting information from database"
5072 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5073 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
Guillermo Calvino57c68152022-01-26 17:40:31 +01005074 if db_nslcmop["operationParams"].get("primitive_params"):
5075 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
5076 db_nslcmop["operationParams"]["primitive_params"]
5077 )
tiernoda964822019-01-14 15:53:47 +00005078
tiernoe4f7e6c2018-11-27 14:55:30 +00005079 nsr_deployed = db_nsr["_admin"].get("deployed")
tierno1b633412019-02-25 16:48:23 +00005080 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tierno59d22d22018-09-25 18:10:19 +02005081 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
calvinosanch9f9c6f22019-11-04 13:37:39 +01005082 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
tiernoe4f7e6c2018-11-27 14:55:30 +00005083 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
tierno067e04a2020-03-31 12:53:13 +00005084 primitive = db_nslcmop["operationParams"]["primitive"]
5085 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
garciadeblas5697b8b2021-03-24 09:17:02 +01005086 timeout_ns_action = db_nslcmop["operationParams"].get(
5087 "timeout_ns_action", self.timeout_primitive
5088 )
tierno59d22d22018-09-25 18:10:19 +02005089
tierno1b633412019-02-25 16:48:23 +00005090 if vnf_index:
5091 step = "Getting vnfr from database"
garciadeblas5697b8b2021-03-24 09:17:02 +01005092 db_vnfr = self.db.get_one(
5093 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5094 )
Guillermo Calvino48aee4c2022-02-01 18:59:50 +01005095 if db_vnfr.get("kdur"):
5096 kdur_list = []
5097 for kdur in db_vnfr["kdur"]:
5098 if kdur.get("additionalParams"):
Pedro Escaleirab9a7c4d2022-03-31 00:08:05 +01005099 kdur["additionalParams"] = json.loads(
5100 kdur["additionalParams"]
5101 )
Guillermo Calvino48aee4c2022-02-01 18:59:50 +01005102 kdur_list.append(kdur)
5103 db_vnfr["kdur"] = kdur_list
tierno1b633412019-02-25 16:48:23 +00005104 step = "Getting vnfd from database"
5105 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
bravofa96dd9c2021-10-13 17:37:36 -03005106
5107 # Sync filesystem before running a primitive
5108 self.fs.sync(db_vnfr["vnfd-id"])
tierno1b633412019-02-25 16:48:23 +00005109 else:
tierno067e04a2020-03-31 12:53:13 +00005110 step = "Getting nsd from database"
5111 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
tiernoda964822019-01-14 15:53:47 +00005112
David Garciac1fe90a2021-03-31 19:12:02 +02005113 vca_id = self.get_vca_id(db_vnfr, db_nsr)
tierno82974b22018-11-27 21:55:36 +00005114 # for backward compatibility
5115 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5116 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5117 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5118 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5119
tiernoda964822019-01-14 15:53:47 +00005120 # look for primitive
tiernoa278b842020-07-08 15:33:55 +00005121 config_primitive_desc = descriptor_configuration = None
tiernoda964822019-01-14 15:53:47 +00005122 if vdu_id:
bravofe5a31bc2021-02-17 19:09:12 -03005123 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
calvinosanch9f9c6f22019-11-04 13:37:39 +01005124 elif kdu_name:
bravofe5a31bc2021-02-17 19:09:12 -03005125 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
tierno1b633412019-02-25 16:48:23 +00005126 elif vnf_index:
bravofe5a31bc2021-02-17 19:09:12 -03005127 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
tierno1b633412019-02-25 16:48:23 +00005128 else:
tiernoa278b842020-07-08 15:33:55 +00005129 descriptor_configuration = db_nsd.get("ns-configuration")
5130
garciadeblas5697b8b2021-03-24 09:17:02 +01005131 if descriptor_configuration and descriptor_configuration.get(
5132 "config-primitive"
5133 ):
tiernoa278b842020-07-08 15:33:55 +00005134 for config_primitive in descriptor_configuration["config-primitive"]:
tierno1b633412019-02-25 16:48:23 +00005135 if config_primitive["name"] == primitive:
5136 config_primitive_desc = config_primitive
5137 break
tiernoda964822019-01-14 15:53:47 +00005138
garciadeblas6bed6b32020-07-20 11:05:42 +00005139 if not config_primitive_desc:
5140 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
garciadeblas5697b8b2021-03-24 09:17:02 +01005141 raise LcmException(
5142 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5143 primitive
5144 )
5145 )
garciadeblas6bed6b32020-07-20 11:05:42 +00005146 primitive_name = primitive
5147 ee_descriptor_id = None
5148 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005149 primitive_name = config_primitive_desc.get(
5150 "execution-environment-primitive", primitive
5151 )
5152 ee_descriptor_id = config_primitive_desc.get(
5153 "execution-environment-ref"
5154 )
tierno1b633412019-02-25 16:48:23 +00005155
tierno1b633412019-02-25 16:48:23 +00005156 if vnf_index:
tierno626e0152019-11-29 14:16:16 +00005157 if vdu_id:
garciadeblas5697b8b2021-03-24 09:17:02 +01005158 vdur = next(
5159 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
5160 )
bravof922c4172020-11-24 21:21:43 -03005161 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
tierno067e04a2020-03-31 12:53:13 +00005162 elif kdu_name:
garciadeblas5697b8b2021-03-24 09:17:02 +01005163 kdur = next(
5164 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5165 )
bravof922c4172020-11-24 21:21:43 -03005166 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
tierno067e04a2020-03-31 12:53:13 +00005167 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005168 desc_params = parse_yaml_strings(
5169 db_vnfr.get("additionalParamsForVnf")
5170 )
tierno1b633412019-02-25 16:48:23 +00005171 else:
bravof922c4172020-11-24 21:21:43 -03005172 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
bravofe5a31bc2021-02-17 19:09:12 -03005173 if kdu_name and get_configuration(db_vnfd, kdu_name):
5174 kdu_configuration = get_configuration(db_vnfd, kdu_name)
David Garciad41dbd62020-12-10 12:52:52 +01005175 actions = set()
David Garciaa1003662021-02-16 21:07:58 +01005176 for primitive in kdu_configuration.get("initial-config-primitive", []):
David Garciad41dbd62020-12-10 12:52:52 +01005177 actions.add(primitive["name"])
David Garciaa1003662021-02-16 21:07:58 +01005178 for primitive in kdu_configuration.get("config-primitive", []):
David Garciad41dbd62020-12-10 12:52:52 +01005179 actions.add(primitive["name"])
David Garciaae230232022-05-10 14:07:12 +02005180 kdu = find_in_list(
5181 nsr_deployed["K8s"],
5182 lambda kdu: kdu_name == kdu["kdu-name"]
5183 and kdu["member-vnf-index"] == vnf_index,
5184 )
5185 kdu_action = (
5186 True
5187 if primitive_name in actions
5188 and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
5189 else False
5190 )
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02005191
tiernoda964822019-01-14 15:53:47 +00005192 # TODO check if ns is in a proper status
garciadeblas5697b8b2021-03-24 09:17:02 +01005193 if kdu_name and (
5194 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5195 ):
tierno067e04a2020-03-31 12:53:13 +00005196 # kdur and desc_params already set from before
5197 if primitive_params:
5198 desc_params.update(primitive_params)
5199 # TODO Check if we will need something at vnf level
5200 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
garciadeblas5697b8b2021-03-24 09:17:02 +01005201 if (
5202 kdu_name == kdu["kdu-name"]
5203 and kdu["member-vnf-index"] == vnf_index
5204 ):
tierno067e04a2020-03-31 12:53:13 +00005205 break
5206 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005207 raise LcmException(
5208 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5209 )
quilesj7e13aeb2019-10-08 13:34:55 +02005210
tierno067e04a2020-03-31 12:53:13 +00005211 if kdu.get("k8scluster-type") not in self.k8scluster_map:
garciadeblas5697b8b2021-03-24 09:17:02 +01005212 msg = "unknown k8scluster-type '{}'".format(
5213 kdu.get("k8scluster-type")
5214 )
tierno067e04a2020-03-31 12:53:13 +00005215 raise LcmException(msg)
5216
garciadeblas5697b8b2021-03-24 09:17:02 +01005217 db_dict = {
5218 "collection": "nsrs",
5219 "filter": {"_id": nsr_id},
5220 "path": "_admin.deployed.K8s.{}".format(index),
5221 }
5222 self.logger.debug(
5223 logging_text
5224 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5225 )
tiernoa278b842020-07-08 15:33:55 +00005226 step = "Executing kdu {}".format(primitive_name)
5227 if primitive_name == "upgrade":
tierno067e04a2020-03-31 12:53:13 +00005228 if desc_params.get("kdu_model"):
5229 kdu_model = desc_params.get("kdu_model")
5230 del desc_params["kdu_model"]
5231 else:
5232 kdu_model = kdu.get("kdu-model")
5233 parts = kdu_model.split(sep=":")
5234 if len(parts) == 2:
5235 kdu_model = parts[0]
5236
5237 detailed_status = await asyncio.wait_for(
5238 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5239 cluster_uuid=kdu.get("k8scluster-uuid"),
5240 kdu_instance=kdu.get("kdu-instance"),
garciadeblas5697b8b2021-03-24 09:17:02 +01005241 atomic=True,
5242 kdu_model=kdu_model,
5243 params=desc_params,
5244 db_dict=db_dict,
5245 timeout=timeout_ns_action,
5246 ),
5247 timeout=timeout_ns_action + 10,
5248 )
5249 self.logger.debug(
5250 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5251 )
tiernoa278b842020-07-08 15:33:55 +00005252 elif primitive_name == "rollback":
tierno067e04a2020-03-31 12:53:13 +00005253 detailed_status = await asyncio.wait_for(
5254 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5255 cluster_uuid=kdu.get("k8scluster-uuid"),
5256 kdu_instance=kdu.get("kdu-instance"),
garciadeblas5697b8b2021-03-24 09:17:02 +01005257 db_dict=db_dict,
5258 ),
5259 timeout=timeout_ns_action,
5260 )
tiernoa278b842020-07-08 15:33:55 +00005261 elif primitive_name == "status":
tierno067e04a2020-03-31 12:53:13 +00005262 detailed_status = await asyncio.wait_for(
5263 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5264 cluster_uuid=kdu.get("k8scluster-uuid"),
David Garciac1fe90a2021-03-31 19:12:02 +02005265 kdu_instance=kdu.get("kdu-instance"),
5266 vca_id=vca_id,
5267 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01005268 timeout=timeout_ns_action,
David Garciac1fe90a2021-03-31 19:12:02 +02005269 )
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02005270 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005271 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5272 kdu["kdu-name"], nsr_id
5273 )
5274 params = self._map_primitive_params(
5275 config_primitive_desc, primitive_params, desc_params
5276 )
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02005277
5278 detailed_status = await asyncio.wait_for(
5279 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5280 cluster_uuid=kdu.get("k8scluster-uuid"),
5281 kdu_instance=kdu_instance,
tiernoa278b842020-07-08 15:33:55 +00005282 primitive_name=primitive_name,
garciadeblas5697b8b2021-03-24 09:17:02 +01005283 params=params,
5284 db_dict=db_dict,
David Garciac1fe90a2021-03-31 19:12:02 +02005285 timeout=timeout_ns_action,
5286 vca_id=vca_id,
5287 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01005288 timeout=timeout_ns_action,
David Garciac1fe90a2021-03-31 19:12:02 +02005289 )
tierno067e04a2020-03-31 12:53:13 +00005290
5291 if detailed_status:
garciadeblas5697b8b2021-03-24 09:17:02 +01005292 nslcmop_operation_state = "COMPLETED"
tierno067e04a2020-03-31 12:53:13 +00005293 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005294 detailed_status = ""
5295 nslcmop_operation_state = "FAILED"
tierno067e04a2020-03-31 12:53:13 +00005296 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01005297 ee_id, vca_type = self._look_for_deployed_vca(
5298 nsr_deployed["VCA"],
5299 member_vnf_index=vnf_index,
5300 vdu_id=vdu_id,
5301 vdu_count_index=vdu_count_index,
5302 ee_descriptor_id=ee_descriptor_id,
5303 )
5304 for vca_index, vca_deployed in enumerate(
5305 db_nsr["_admin"]["deployed"]["VCA"]
5306 ):
ksaikiranrb1c9f372021-03-15 11:07:29 +05305307 if vca_deployed.get("member-vnf-index") == vnf_index:
garciadeblas5697b8b2021-03-24 09:17:02 +01005308 db_dict = {
5309 "collection": "nsrs",
5310 "filter": {"_id": nsr_id},
5311 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5312 }
ksaikiranrb1c9f372021-03-15 11:07:29 +05305313 break
garciadeblas5697b8b2021-03-24 09:17:02 +01005314 (
5315 nslcmop_operation_state,
5316 detailed_status,
5317 ) = await self._ns_execute_primitive(
tierno588547c2020-07-01 15:30:20 +00005318 ee_id,
tiernoa278b842020-07-08 15:33:55 +00005319 primitive=primitive_name,
garciadeblas5697b8b2021-03-24 09:17:02 +01005320 primitive_params=self._map_primitive_params(
5321 config_primitive_desc, primitive_params, desc_params
5322 ),
tierno588547c2020-07-01 15:30:20 +00005323 timeout=timeout_ns_action,
5324 vca_type=vca_type,
David Garciac1fe90a2021-03-31 19:12:02 +02005325 db_dict=db_dict,
5326 vca_id=vca_id,
5327 )
tierno067e04a2020-03-31 12:53:13 +00005328
5329 db_nslcmop_update["detailed-status"] = detailed_status
garciadeblas5697b8b2021-03-24 09:17:02 +01005330 error_description_nslcmop = (
5331 detailed_status if nslcmop_operation_state == "FAILED" else ""
5332 )
5333 self.logger.debug(
5334 logging_text
5335 + " task Done with result {} {}".format(
5336 nslcmop_operation_state, detailed_status
5337 )
5338 )
tierno59d22d22018-09-25 18:10:19 +02005339 return # database update is called inside finally
5340
tiernof59ad6c2020-04-08 12:50:52 +00005341 except (DbException, LcmException, N2VCException, K8sException) as e:
tierno59d22d22018-09-25 18:10:19 +02005342 self.logger.error(logging_text + "Exit Exception {}".format(e))
5343 exc = e
5344 except asyncio.CancelledError:
garciadeblas5697b8b2021-03-24 09:17:02 +01005345 self.logger.error(
5346 logging_text + "Cancelled Exception while '{}'".format(step)
5347 )
tierno59d22d22018-09-25 18:10:19 +02005348 exc = "Operation was cancelled"
tierno067e04a2020-03-31 12:53:13 +00005349 except asyncio.TimeoutError:
5350 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5351 exc = "Timeout"
tierno59d22d22018-09-25 18:10:19 +02005352 except Exception as e:
5353 exc = traceback.format_exc()
garciadeblas5697b8b2021-03-24 09:17:02 +01005354 self.logger.critical(
5355 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5356 exc_info=True,
5357 )
tierno59d22d22018-09-25 18:10:19 +02005358 finally:
tierno067e04a2020-03-31 12:53:13 +00005359 if exc:
garciadeblas5697b8b2021-03-24 09:17:02 +01005360 db_nslcmop_update[
5361 "detailed-status"
5362 ] = (
5363 detailed_status
5364 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
tierno067e04a2020-03-31 12:53:13 +00005365 nslcmop_operation_state = "FAILED"
5366 if db_nsr:
5367 self._write_ns_status(
5368 nsr_id=nsr_id,
garciadeblas5697b8b2021-03-24 09:17:02 +01005369 ns_state=db_nsr[
5370 "nsState"
5371 ], # TODO check if degraded. For the moment use previous status
tierno067e04a2020-03-31 12:53:13 +00005372 current_operation="IDLE",
5373 current_operation_id=None,
5374 # error_description=error_description_nsr,
5375 # error_detail=error_detail,
garciadeblas5697b8b2021-03-24 09:17:02 +01005376 other_update=db_nsr_update,
tierno067e04a2020-03-31 12:53:13 +00005377 )
5378
garciadeblas5697b8b2021-03-24 09:17:02 +01005379 self._write_op_status(
5380 op_id=nslcmop_id,
5381 stage="",
5382 error_message=error_description_nslcmop,
5383 operation_state=nslcmop_operation_state,
5384 other_update=db_nslcmop_update,
5385 )
tierno067e04a2020-03-31 12:53:13 +00005386
tierno59d22d22018-09-25 18:10:19 +02005387 if nslcmop_operation_state:
5388 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01005389 await self.msg.aiowrite(
5390 "ns",
5391 "actioned",
5392 {
5393 "nsr_id": nsr_id,
5394 "nslcmop_id": nslcmop_id,
5395 "operationState": nslcmop_operation_state,
5396 },
5397 loop=self.loop,
5398 )
tierno59d22d22018-09-25 18:10:19 +02005399 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01005400 self.logger.error(
5401 logging_text + "kafka_write notification Exception {}".format(e)
5402 )
tierno59d22d22018-09-25 18:10:19 +02005403 self.logger.debug(logging_text + "Exit")
5404 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
tierno067e04a2020-03-31 12:53:13 +00005405 return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02005406
elumalaica7ece02022-04-12 12:47:32 +05305407 async def terminate_vdus(
5408 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5409 ):
5410 """This method terminates VDUs
5411
5412 Args:
5413 db_vnfr: VNF instance record
5414 member_vnf_index: VNF index to identify the VDUs to be removed
5415 db_nsr: NS instance record
5416 update_db_nslcmops: Nslcmop update record
5417 """
5418 vca_scaling_info = []
5419 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5420 scaling_info["scaling_direction"] = "IN"
5421 scaling_info["vdu-delete"] = {}
5422 scaling_info["kdu-delete"] = {}
5423 db_vdur = db_vnfr.get("vdur")
5424 vdur_list = copy(db_vdur)
5425 count_index = 0
5426 for index, vdu in enumerate(vdur_list):
5427 vca_scaling_info.append(
5428 {
5429 "osm_vdu_id": vdu["vdu-id-ref"],
5430 "member-vnf-index": member_vnf_index,
5431 "type": "delete",
5432 "vdu_index": count_index,
5433 })
5434 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5435 scaling_info["vdu"].append(
5436 {
5437 "name": vdu.get("name") or vdu.get("vdu-name"),
5438 "vdu_id": vdu["vdu-id-ref"],
5439 "interface": [],
5440 })
5441 for interface in vdu["interfaces"]:
5442 scaling_info["vdu"][index]["interface"].append(
5443 {
5444 "name": interface["name"],
5445 "ip_address": interface["ip-address"],
5446 "mac_address": interface.get("mac-address"),
5447 })
5448 self.logger.info("NS update scaling info{}".format(scaling_info))
5449 stage[2] = "Terminating VDUs"
5450 if scaling_info.get("vdu-delete"):
5451 # scale_process = "RO"
5452 if self.ro_config.get("ng"):
5453 await self._scale_ng_ro(
5454 logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
5455 )
5456
5457 async def remove_vnf(
5458 self, nsr_id, nslcmop_id, vnf_instance_id
5459 ):
5460 """This method is to Remove VNF instances from NS.
5461
5462 Args:
5463 nsr_id: NS instance id
5464 nslcmop_id: nslcmop id of update
5465 vnf_instance_id: id of the VNF instance to be removed
5466
5467 Returns:
5468 result: (str, str) COMPLETED/FAILED, details
5469 """
5470 try:
5471 db_nsr_update = {}
5472 logging_text = "Task ns={} update ".format(nsr_id)
5473 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5474 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5475 if check_vnfr_count > 1:
5476 stage = ["", "", ""]
5477 step = "Getting nslcmop from database"
5478 self.logger.debug(step + " after having waited for previous tasks to be completed")
5479 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5480 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5481 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5482 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5483 """ db_vnfr = self.db.get_one(
5484 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5485
5486 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5487 await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
5488
5489 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5490 constituent_vnfr.remove(db_vnfr.get("_id"))
5491 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
5492 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5493 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5494 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5495 return "COMPLETED", "Done"
5496 else:
5497 step = "Terminate VNF Failed with"
5498 raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
5499 vnf_instance_id))
5500 except (LcmException, asyncio.CancelledError):
5501 raise
5502 except Exception as e:
5503 self.logger.debug("Error removing VNF {}".format(e))
5504 return "FAILED", "Error removing VNF {}".format(e)
5505
    async def _ns_redeploy_vnf(
        self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDUs, rewrites the VNF record
        (connection points, vdur, revision) from the latest descriptor and the
        "newVdur" carried in the nslcmop operationParams, then asks NG-RO to
        instantiate the new resources.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor (latest revision)
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            # NOTE(review): count_index is never incremented below, so every
            # VDU is registered in "vdu-create" with index 0 — confirm intent.
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the VNF connection points from the descriptor's ext-cpd.
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # The new vdur list is provided by the caller via operationParams.
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # Re-read the VNF record so scaling below sees the updated content.
            updated_db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # NOTE(review): vca_scaling_info and cloud_init_list are populated
            # but never consumed in this method — possibly leftover code.
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(
                    vdud, db_vnfd
                )
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                    cloud_init_list = []
                    # NOTE(review): redundant re-check of cloud_init_text
                    # (already guaranteed truthy by the outer condition).
                    if cloud_init_text:
                        # TODO Information of its own ip is not available because db_vnfr is not updated.
                        additional_params["OSM"] = get_osm_params(
                            updated_db_vnfr, vdud["id"], 1
                        )
                        cloud_init_list.append(
                            self._parse_cloud_init(
                                cloud_init_text,
                                additional_params,
                                db_vnfd["id"],
                                vdud["id"],
                            )
                        )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info))
                await self._scale_ng_ro(
                    logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5609
aticigdffa6212022-04-12 15:27:53 +03005610 async def _ns_charm_upgrade(
5611 self,
5612 ee_id,
5613 charm_id,
5614 charm_type,
5615 path,
5616 timeout: float = None,
5617 ) -> (str, str):
5618 """This method upgrade charms in VNF instances
5619
5620 Args:
5621 ee_id: Execution environment id
5622 path: Local path to the charm
5623 charm_id: charm-id
5624 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5625 timeout: (Float) Timeout for the ns update operation
5626
5627 Returns:
5628 result: (str, str) COMPLETED/FAILED, details
5629 """
5630 try:
5631 charm_type = charm_type or "lxc_proxy_charm"
5632 output = await self.vca_map[charm_type].upgrade_charm(
5633 ee_id=ee_id,
5634 path=path,
5635 charm_id=charm_id,
5636 charm_type=charm_type,
5637 timeout=timeout or self.timeout_ns_update,
5638 )
5639
5640 if output:
5641 return "COMPLETED", output
5642
5643 except (LcmException, asyncio.CancelledError):
5644 raise
5645
5646 except Exception as e:
5647
5648 self.logger.debug("Error upgrading charm {}".format(path))
5649
5650 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5651
    async def update(self, nsr_id, nslcmop_id):
        """Update NS according to different update types

        Supported updateType values (read from the nslcmop operationParams):
        CHANGE_VNFPKG (upgrade a VNF to the latest VNFD revision, either by
        charm upgrade or full redeploy), REMOVE_VNF (remove one VNF instance
        from the NS) and OPERATE_VNF (start/stop/rebuild a VNF).

        This method performs upgrade of VNF instances then updates the revision
        number in VNF record

        Args:
            nsr_id: Network service will be updated
            nslcmop_id: ns lcm operation id

        Returns:
             (nslcmop_operation_state, detailed_status) tuple.
             It may raise DbException, LcmException, N2VCException, K8sException

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # Set the required variables to be filled up later
        db_nsr = None
        db_nslcmop_update = {}
        vnfr_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        error_description_nslcmop = ""
        exc = None
        # change_type selects the kafka topic event emitted in the finally
        # block; some branches override it below.
        change_type = "updated"
        detailed_status = ""

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="UPDATING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            db_nslcmop = self.db.get_one(
                "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
            )
            update_type = db_nslcmop["operationParams"]["updateType"]

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # Remember the previous status so it can be restored on success
            # or failure.
            old_operational_status = db_nsr["operational-status"]
            db_nsr_update["operational-status"] = "updating"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            nsr_deployed = db_nsr["_admin"].get("deployed")

            if update_type == "CHANGE_VNFPKG":

                # Get the input parameters given through update request
                vnf_instance_id = db_nslcmop["operationParams"][
                    "changeVnfPackageData"
                ].get("vnfInstanceId")

                vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
                    "vnfdId"
                )
                timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")

                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
                )

                step = "Getting vnfds from database"
                # Latest VNFD
                latest_vnfd = self.db.get_one(
                    "vnfds", {"_id": vnfd_id}, fail_on_empty=False
                )
                latest_vnfd_revision = latest_vnfd["_admin"].get("revision")

                # Current VNFD
                current_vnf_revision = db_vnfr.get("revision", 1)
                current_vnfd = self.db.get_one(
                    "vnfds_revisions",
                    {"_id": vnfd_id + ":" + str(current_vnf_revision)},
                    fail_on_empty=False,
                )
                # Charm artifact paths will be filled up later
                (
                    current_charm_artifact_path,
                    target_charm_artifact_path,
                    charm_artifact_paths,
                ) = ([], [], [])

                step = "Checking if revision has changed in VNFD"
                if current_vnf_revision != latest_vnfd_revision:

                    change_type = "policy_updated"

                    # There is new revision of VNFD, update operation is required
                    current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
                    latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)

                    step = "Removing the VNFD packages if they exist in the local path"
                    shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
                    shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)

                    step = "Get the VNFD packages from FSMongo"
                    self.fs.sync(from_path=latest_vnfd_path)
                    self.fs.sync(from_path=current_vnfd_path)

                    step = (
                        "Get the charm-type, charm-id, ee-id if there is deployed VCA"
                    )
                    base_folder = latest_vnfd["_admin"]["storage"]

                    # NOTE(review): charm_id/charm_type/ee_id are only bound
                    # when a VCA matching this VNF index is found; the charm
                    # upgrade path below assumes one was.
                    for charm_index, charm_deployed in enumerate(
                        get_iterable(nsr_deployed, "VCA")
                    ):
                        vnf_index = db_vnfr.get("member-vnf-index-ref")

                        # Getting charm-id and charm-type
                        if charm_deployed.get("member-vnf-index") == vnf_index:
                            charm_id = self.get_vca_id(db_vnfr, db_nsr)
                            charm_type = charm_deployed.get("type")

                            # Getting ee-id
                            ee_id = charm_deployed.get("ee_id")

                            step = "Getting descriptor config"
                            descriptor_config = get_configuration(
                                current_vnfd, current_vnfd["id"]
                            )

                            if "execution-environment-list" in descriptor_config:
                                ee_list = descriptor_config.get(
                                    "execution-environment-list", []
                                )
                            else:
                                ee_list = []

                            # There could be several charm used in the same VNF
                            for ee_item in ee_list:
                                if ee_item.get("juju"):

                                    step = "Getting charm name"
                                    charm_name = ee_item["juju"].get("charm")

                                    step = "Setting Charm artifact paths"
                                    current_charm_artifact_path.append(
                                        get_charm_artifact_path(
                                            base_folder,
                                            charm_name,
                                            charm_type,
                                            current_vnf_revision,
                                        )
                                    )
                                    target_charm_artifact_path.append(
                                        get_charm_artifact_path(
                                            base_folder,
                                            charm_name,
                                            charm_type,
                                            latest_vnfd_revision,
                                        )
                                    )

                        # Pair each current charm path with its upgrade target.
                        charm_artifact_paths = zip(
                            current_charm_artifact_path, target_charm_artifact_path
                        )

                    step = "Checking if software version has changed in VNFD"
                    if find_software_version(current_vnfd) != find_software_version(
                        latest_vnfd
                    ):

                        step = "Checking if existing VNF has charm"
                        for current_charm_path, target_charm_path in list(
                            charm_artifact_paths
                        ):
                            if current_charm_path:
                                raise LcmException(
                                    "Software version change is not supported as VNF instance {} has charm.".format(
                                        vnf_instance_id
                                    )
                                )

                        # There is no change in the charm package, then redeploy the VNF
                        # based on new descriptor
                        step = "Redeploying VNF"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        (
                            result,
                            detailed_status
                        ) = await self._ns_redeploy_vnf(
                            nsr_id,
                            nslcmop_id,
                            latest_vnfd,
                            db_vnfr,
                            db_nsr
                        )
                        if result == "FAILED":
                            nslcmop_operation_state = result
                            error_description_nslcmop = detailed_status
                        db_nslcmop_update["detailed-status"] = detailed_status
                        self.logger.debug(
                            logging_text
                            + " step {} Done with result {} {}".format(
                                step, nslcmop_operation_state, detailed_status
                            )
                        )

                    else:
                        step = "Checking if any charm package has changed or not"
                        for current_charm_path, target_charm_path in list(
                            charm_artifact_paths
                        ):
                            if (
                                current_charm_path
                                and target_charm_path
                                and self.check_charm_hash_changed(
                                    current_charm_path, target_charm_path
                                )
                            ):

                                step = "Checking whether VNF uses juju bundle"
                                if check_juju_bundle_existence(current_vnfd):

                                    raise LcmException(
                                        "Charm upgrade is not supported for the instance which"
                                        " uses juju-bundle: {}".format(
                                            check_juju_bundle_existence(current_vnfd)
                                        )
                                    )

                                step = "Upgrading Charm"
                                (
                                    result,
                                    detailed_status,
                                ) = await self._ns_charm_upgrade(
                                    ee_id=ee_id,
                                    charm_id=charm_id,
                                    charm_type=charm_type,
                                    path=self.fs.path + target_charm_path,
                                    timeout=timeout_seconds,
                                )

                                if result == "FAILED":
                                    nslcmop_operation_state = result
                                    error_description_nslcmop = detailed_status

                                db_nslcmop_update["detailed-status"] = detailed_status
                                self.logger.debug(
                                    logging_text
                                    + " step {} Done with result {} {}".format(
                                        step, nslcmop_operation_state, detailed_status
                                    )
                                )

                        step = "Updating policies"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        result = "COMPLETED"
                        detailed_status = "Done"
                        db_nslcmop_update["detailed-status"] = "Done"

                    # If nslcmop_operation_state is None, so any operation is not failed.
                    if not nslcmop_operation_state:
                        nslcmop_operation_state = "COMPLETED"

                        # If update CHANGE_VNFPKG nslcmop_operation is successful
                        # vnf revision need to be updated
                        vnfr_update["revision"] = latest_vnfd_revision
                        self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)

                    self.logger.debug(
                        logging_text
                        + " task Done with result {} {}".format(
                            nslcmop_operation_state, detailed_status
                        )
                    )
            elif update_type == "REMOVE_VNF":
                # This part is included in https://osm.etsi.org/gerrit/11876
                vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
                member_vnf_index = db_vnfr["member-vnf-index-ref"]
                step = "Removing VNF"
                (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
                if result == "FAILED":
                    nslcmop_operation_state = result
                    error_description_nslcmop = detailed_status
                db_nslcmop_update["detailed-status"] = detailed_status
                change_type = "vnf_terminated"
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"
                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )

            elif update_type == "OPERATE_VNF":
                vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
                operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
                additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
                (result, detailed_status) = await self.rebuild_start_stop(
                    nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
                )
                if result == "FAILED":
                    nslcmop_operation_state = result
                    error_description_nslcmop = detailed_status
                db_nslcmop_update["detailed-status"] = detailed_status
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"
                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )

            # If nslcmop_operation_state is None, so any operation is not failed.
            # All operations are executed in overall.
            if not nslcmop_operation_state:
                nslcmop_operation_state = "COMPLETED"
                db_nsr_update["operational-status"] = old_operational_status

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                db_nsr_update["operational-status"] = old_operational_status
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr["nsState"],
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # NOTE(review): member_vnf_index is only assigned inside
                    # some branches above; if an exception occurred before the
                    # assignment, the update below raises NameError, which is
                    # swallowed by the except clause of this block.
                    if change_type in ("vnf_terminated", "policy_updated"):
                        msg.update({"vnf_member_index": member_vnf_index})
                    await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
            # Returning from finally guarantees a result tuple is always
            # produced (all exceptions were converted to exc above).
            return nslcmop_operation_state, detailed_status
6039
tierno59d22d22018-09-25 18:10:19 +02006040 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02006041 # Try to lock HA task here
garciadeblas5697b8b2021-03-24 09:17:02 +01006042 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
kuused124bfe2019-06-18 12:09:24 +02006043 if not task_is_locked_by_me:
6044 return
6045
tierno59d22d22018-09-25 18:10:19 +02006046 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
garciadeblas5697b8b2021-03-24 09:17:02 +01006047 stage = ["", "", ""]
aktas13251562021-02-12 22:19:10 +03006048 tasks_dict_info = {}
tierno2357f4e2020-10-19 16:38:59 +00006049 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02006050 self.logger.debug(logging_text + "Enter")
6051 # get all needed from database
6052 db_nsr = None
tierno59d22d22018-09-25 18:10:19 +02006053 db_nslcmop_update = {}
tiernoe876f672020-02-13 14:34:48 +00006054 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02006055 exc = None
tierno9ab95942018-10-10 16:44:22 +02006056 # in case of error, indicates what part of scale was failed to put nsr at error status
6057 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02006058 old_operational_status = ""
6059 old_config_status = ""
aktas13251562021-02-12 22:19:10 +03006060 nsi_id = None
tierno59d22d22018-09-25 18:10:19 +02006061 try:
kuused124bfe2019-06-18 12:09:24 +02006062 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00006063 step = "Waiting for previous operations to terminate"
garciadeblas5697b8b2021-03-24 09:17:02 +01006064 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6065 self._write_ns_status(
6066 nsr_id=nsr_id,
6067 ns_state=None,
6068 current_operation="SCALING",
6069 current_operation_id=nslcmop_id,
6070 )
quilesj4cda56b2019-12-05 10:02:20 +00006071
ikalyvas02d9e7b2019-05-27 18:16:01 +03006072 step = "Getting nslcmop from database"
garciadeblas5697b8b2021-03-24 09:17:02 +01006073 self.logger.debug(
6074 step + " after having waited for previous tasks to be completed"
6075 )
ikalyvas02d9e7b2019-05-27 18:16:01 +03006076 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
bravof922c4172020-11-24 21:21:43 -03006077
ikalyvas02d9e7b2019-05-27 18:16:01 +03006078 step = "Getting nsr from database"
6079 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
ikalyvas02d9e7b2019-05-27 18:16:01 +03006080 old_operational_status = db_nsr["operational-status"]
6081 old_config_status = db_nsr["config-status"]
bravof922c4172020-11-24 21:21:43 -03006082
tierno59d22d22018-09-25 18:10:19 +02006083 step = "Parsing scaling parameters"
6084 db_nsr_update["operational-status"] = "scaling"
6085 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00006086 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01006087
garciadeblas5697b8b2021-03-24 09:17:02 +01006088 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6089 "scaleByStepData"
6090 ]["member-vnf-index"]
6091 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6092 "scaleByStepData"
6093 ]["scaling-group-descriptor"]
tierno59d22d22018-09-25 18:10:19 +02006094 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
tierno82974b22018-11-27 21:55:36 +00006095 # for backward compatibility
6096 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6097 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6098 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6099 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6100
tierno59d22d22018-09-25 18:10:19 +02006101 step = "Getting vnfr from database"
garciadeblas5697b8b2021-03-24 09:17:02 +01006102 db_vnfr = self.db.get_one(
6103 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6104 )
bravof922c4172020-11-24 21:21:43 -03006105
David Garciac1fe90a2021-03-31 19:12:02 +02006106 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6107
tierno59d22d22018-09-25 18:10:19 +02006108 step = "Getting vnfd from database"
6109 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03006110
aktas13251562021-02-12 22:19:10 +03006111 base_folder = db_vnfd["_admin"]["storage"]
6112
tierno59d22d22018-09-25 18:10:19 +02006113 step = "Getting scaling-group-descriptor"
bravof832f8992020-12-07 12:57:31 -03006114 scaling_descriptor = find_in_list(
garciadeblas5697b8b2021-03-24 09:17:02 +01006115 get_scaling_aspect(db_vnfd),
6116 lambda scale_desc: scale_desc["name"] == scaling_group,
bravof832f8992020-12-07 12:57:31 -03006117 )
6118 if not scaling_descriptor:
garciadeblas5697b8b2021-03-24 09:17:02 +01006119 raise LcmException(
6120 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6121 "at vnfd:scaling-group-descriptor".format(scaling_group)
6122 )
ikalyvas02d9e7b2019-05-27 18:16:01 +03006123
tierno15b1cf12019-08-29 13:21:40 +00006124 step = "Sending scale order to VIM"
bravof922c4172020-11-24 21:21:43 -03006125 # TODO check if ns is in a proper status
tierno59d22d22018-09-25 18:10:19 +02006126 nb_scale_op = 0
6127 if not db_nsr["_admin"].get("scaling-group"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006128 self.update_db_2(
6129 "nsrs",
6130 nsr_id,
6131 {
6132 "_admin.scaling-group": [
6133 {"name": scaling_group, "nb-scale-op": 0}
6134 ]
6135 },
6136 )
tierno59d22d22018-09-25 18:10:19 +02006137 admin_scale_index = 0
6138 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01006139 for admin_scale_index, admin_scale_info in enumerate(
6140 db_nsr["_admin"]["scaling-group"]
6141 ):
tierno59d22d22018-09-25 18:10:19 +02006142 if admin_scale_info["name"] == scaling_group:
6143 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6144 break
tierno9ab95942018-10-10 16:44:22 +02006145 else: # not found, set index one plus last element and add new entry with the name
6146 admin_scale_index += 1
garciadeblas5697b8b2021-03-24 09:17:02 +01006147 db_nsr_update[
6148 "_admin.scaling-group.{}.name".format(admin_scale_index)
6149 ] = scaling_group
aktas5f75f102021-03-15 11:26:10 +03006150
6151 vca_scaling_info = []
6152 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
tierno59d22d22018-09-25 18:10:19 +02006153 if scaling_type == "SCALE_OUT":
bravof832f8992020-12-07 12:57:31 -03006154 if "aspect-delta-details" not in scaling_descriptor:
6155 raise LcmException(
6156 "Aspect delta details not fount in scaling descriptor {}".format(
6157 scaling_descriptor["name"]
6158 )
6159 )
tierno59d22d22018-09-25 18:10:19 +02006160 # count if max-instance-count is reached
bravof832f8992020-12-07 12:57:31 -03006161 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
kuuse8b998e42019-07-30 15:22:16 +02006162
aktas5f75f102021-03-15 11:26:10 +03006163 scaling_info["scaling_direction"] = "OUT"
6164 scaling_info["vdu-create"] = {}
6165 scaling_info["kdu-create"] = {}
bravof832f8992020-12-07 12:57:31 -03006166 for delta in deltas:
aktas5f75f102021-03-15 11:26:10 +03006167 for vdu_delta in delta.get("vdu-delta", {}):
bravof832f8992020-12-07 12:57:31 -03006168 vdud = get_vdu(db_vnfd, vdu_delta["id"])
aktas5f75f102021-03-15 11:26:10 +03006169 # vdu_index also provides the number of instance of the targeted vdu
6170 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
garciadeblas5697b8b2021-03-24 09:17:02 +01006171 cloud_init_text = self._get_vdu_cloud_init_content(
6172 vdud, db_vnfd
6173 )
tierno72ef84f2020-10-06 08:22:07 +00006174 if cloud_init_text:
garciadeblas5697b8b2021-03-24 09:17:02 +01006175 additional_params = (
6176 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6177 or {}
6178 )
bravof832f8992020-12-07 12:57:31 -03006179 cloud_init_list = []
6180
6181 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6182 max_instance_count = 10
6183 if vdu_profile and "max-number-of-instances" in vdu_profile:
garciadeblas5697b8b2021-03-24 09:17:02 +01006184 max_instance_count = vdu_profile.get(
6185 "max-number-of-instances", 10
6186 )
6187
6188 default_instance_num = get_number_of_instances(
6189 db_vnfd, vdud["id"]
6190 )
aktas5f75f102021-03-15 11:26:10 +03006191 instances_number = vdu_delta.get("number-of-instances", 1)
6192 nb_scale_op += instances_number
bravof832f8992020-12-07 12:57:31 -03006193
aktas5f75f102021-03-15 11:26:10 +03006194 new_instance_count = nb_scale_op + default_instance_num
6195 # Control if new count is over max and vdu count is less than max.
6196 # Then assign new instance count
6197 if new_instance_count > max_instance_count > vdu_count:
6198 instances_number = new_instance_count - max_instance_count
6199 else:
6200 instances_number = instances_number
bravof832f8992020-12-07 12:57:31 -03006201
aktas5f75f102021-03-15 11:26:10 +03006202 if new_instance_count > max_instance_count:
bravof832f8992020-12-07 12:57:31 -03006203 raise LcmException(
6204 "reached the limit of {} (max-instance-count) "
6205 "scaling-out operations for the "
garciadeblas5697b8b2021-03-24 09:17:02 +01006206 "scaling-group-descriptor '{}'".format(
6207 nb_scale_op, scaling_group
6208 )
bravof922c4172020-11-24 21:21:43 -03006209 )
bravof832f8992020-12-07 12:57:31 -03006210 for x in range(vdu_delta.get("number-of-instances", 1)):
6211 if cloud_init_text:
6212 # TODO Information of its own ip is not available because db_vnfr is not updated.
6213 additional_params["OSM"] = get_osm_params(
garciadeblas5697b8b2021-03-24 09:17:02 +01006214 db_vnfr, vdu_delta["id"], vdu_index + x
bravof922c4172020-11-24 21:21:43 -03006215 )
bravof832f8992020-12-07 12:57:31 -03006216 cloud_init_list.append(
6217 self._parse_cloud_init(
6218 cloud_init_text,
6219 additional_params,
6220 db_vnfd["id"],
garciadeblas5697b8b2021-03-24 09:17:02 +01006221 vdud["id"],
bravof832f8992020-12-07 12:57:31 -03006222 )
6223 )
aktas5f75f102021-03-15 11:26:10 +03006224 vca_scaling_info.append(
aktas13251562021-02-12 22:19:10 +03006225 {
6226 "osm_vdu_id": vdu_delta["id"],
6227 "member-vnf-index": vnf_index,
6228 "type": "create",
garciadeblas5697b8b2021-03-24 09:17:02 +01006229 "vdu_index": vdu_index + x,
aktas13251562021-02-12 22:19:10 +03006230 }
6231 )
aktas5f75f102021-03-15 11:26:10 +03006232 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6233 for kdu_delta in delta.get("kdu-resource-delta", {}):
David Garciab4ebcd02021-10-28 02:00:43 +02006234 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
aktas5f75f102021-03-15 11:26:10 +03006235 kdu_name = kdu_profile["kdu-name"]
aktasc41fe832021-11-29 18:41:42 +03006236 resource_name = kdu_profile.get("resource-name", "")
aktas5f75f102021-03-15 11:26:10 +03006237
6238 # Might have different kdus in the same delta
6239 # Should have list for each kdu
6240 if not scaling_info["kdu-create"].get(kdu_name, None):
6241 scaling_info["kdu-create"][kdu_name] = []
6242
6243 kdur = get_kdur(db_vnfr, kdu_name)
6244 if kdur.get("helm-chart"):
6245 k8s_cluster_type = "helm-chart-v3"
6246 self.logger.debug("kdur: {}".format(kdur))
6247 if (
6248 kdur.get("helm-version")
6249 and kdur.get("helm-version") == "v2"
6250 ):
6251 k8s_cluster_type = "helm-chart"
aktas5f75f102021-03-15 11:26:10 +03006252 elif kdur.get("juju-bundle"):
6253 k8s_cluster_type = "juju-bundle"
6254 else:
6255 raise LcmException(
6256 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6257 "juju-bundle. Maybe an old NBI version is running".format(
6258 db_vnfr["member-vnf-index-ref"], kdu_name
6259 )
6260 )
6261
6262 max_instance_count = 10
6263 if kdu_profile and "max-number-of-instances" in kdu_profile:
6264 max_instance_count = kdu_profile.get(
6265 "max-number-of-instances", 10
6266 )
6267
6268 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6269 deployed_kdu, _ = get_deployed_kdu(
6270 nsr_deployed, kdu_name, vnf_index
bravof832f8992020-12-07 12:57:31 -03006271 )
aktas5f75f102021-03-15 11:26:10 +03006272 if deployed_kdu is None:
6273 raise LcmException(
6274 "KDU '{}' for vnf '{}' not deployed".format(
6275 kdu_name, vnf_index
6276 )
6277 )
6278 kdu_instance = deployed_kdu.get("kdu-instance")
6279 instance_num = await self.k8scluster_map[
6280 k8s_cluster_type
aktasc41fe832021-11-29 18:41:42 +03006281 ].get_scale_count(
6282 resource_name,
6283 kdu_instance,
6284 vca_id=vca_id,
6285 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6286 kdu_model=deployed_kdu.get("kdu-model"),
6287 )
aktas5f75f102021-03-15 11:26:10 +03006288 kdu_replica_count = instance_num + kdu_delta.get(
garciadeblas5697b8b2021-03-24 09:17:02 +01006289 "number-of-instances", 1
6290 )
ikalyvas02d9e7b2019-05-27 18:16:01 +03006291
aktas5f75f102021-03-15 11:26:10 +03006292 # Control if new count is over max and instance_num is less than max.
6293 # Then assign max instance number to kdu replica count
6294 if kdu_replica_count > max_instance_count > instance_num:
6295 kdu_replica_count = max_instance_count
6296 if kdu_replica_count > max_instance_count:
6297 raise LcmException(
6298 "reached the limit of {} (max-instance-count) "
6299 "scaling-out operations for the "
6300 "scaling-group-descriptor '{}'".format(
6301 instance_num, scaling_group
6302 )
6303 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006304
aktas5f75f102021-03-15 11:26:10 +03006305 for x in range(kdu_delta.get("number-of-instances", 1)):
6306 vca_scaling_info.append(
6307 {
6308 "osm_kdu_id": kdu_name,
6309 "member-vnf-index": vnf_index,
6310 "type": "create",
6311 "kdu_index": instance_num + x - 1,
6312 }
6313 )
6314 scaling_info["kdu-create"][kdu_name].append(
6315 {
6316 "member-vnf-index": vnf_index,
6317 "type": "create",
6318 "k8s-cluster-type": k8s_cluster_type,
6319 "resource-name": resource_name,
6320 "scale": kdu_replica_count,
6321 }
6322 )
6323 elif scaling_type == "SCALE_IN":
bravof832f8992020-12-07 12:57:31 -03006324 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
aktas5f75f102021-03-15 11:26:10 +03006325
6326 scaling_info["scaling_direction"] = "IN"
6327 scaling_info["vdu-delete"] = {}
6328 scaling_info["kdu-delete"] = {}
6329
bravof832f8992020-12-07 12:57:31 -03006330 for delta in deltas:
aktas5f75f102021-03-15 11:26:10 +03006331 for vdu_delta in delta.get("vdu-delta", {}):
6332 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
bravof832f8992020-12-07 12:57:31 -03006333 min_instance_count = 0
6334 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6335 if vdu_profile and "min-number-of-instances" in vdu_profile:
6336 min_instance_count = vdu_profile["min-number-of-instances"]
6337
garciadeblas5697b8b2021-03-24 09:17:02 +01006338 default_instance_num = get_number_of_instances(
6339 db_vnfd, vdu_delta["id"]
6340 )
aktas5f75f102021-03-15 11:26:10 +03006341 instance_num = vdu_delta.get("number-of-instances", 1)
6342 nb_scale_op -= instance_num
bravof832f8992020-12-07 12:57:31 -03006343
aktas5f75f102021-03-15 11:26:10 +03006344 new_instance_count = nb_scale_op + default_instance_num
6345
6346 if new_instance_count < min_instance_count < vdu_count:
6347 instances_number = min_instance_count - new_instance_count
6348 else:
6349 instances_number = instance_num
6350
6351 if new_instance_count < min_instance_count:
bravof832f8992020-12-07 12:57:31 -03006352 raise LcmException(
6353 "reached the limit of {} (min-instance-count) scaling-in operations for the "
garciadeblas5697b8b2021-03-24 09:17:02 +01006354 "scaling-group-descriptor '{}'".format(
6355 nb_scale_op, scaling_group
6356 )
bravof832f8992020-12-07 12:57:31 -03006357 )
aktas13251562021-02-12 22:19:10 +03006358 for x in range(vdu_delta.get("number-of-instances", 1)):
aktas5f75f102021-03-15 11:26:10 +03006359 vca_scaling_info.append(
aktas13251562021-02-12 22:19:10 +03006360 {
6361 "osm_vdu_id": vdu_delta["id"],
6362 "member-vnf-index": vnf_index,
6363 "type": "delete",
garciadeblas5697b8b2021-03-24 09:17:02 +01006364 "vdu_index": vdu_index - 1 - x,
aktas13251562021-02-12 22:19:10 +03006365 }
6366 )
aktas5f75f102021-03-15 11:26:10 +03006367 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6368 for kdu_delta in delta.get("kdu-resource-delta", {}):
David Garciab4ebcd02021-10-28 02:00:43 +02006369 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
aktas5f75f102021-03-15 11:26:10 +03006370 kdu_name = kdu_profile["kdu-name"]
aktasc41fe832021-11-29 18:41:42 +03006371 resource_name = kdu_profile.get("resource-name", "")
aktas5f75f102021-03-15 11:26:10 +03006372
6373 if not scaling_info["kdu-delete"].get(kdu_name, None):
6374 scaling_info["kdu-delete"][kdu_name] = []
6375
6376 kdur = get_kdur(db_vnfr, kdu_name)
6377 if kdur.get("helm-chart"):
6378 k8s_cluster_type = "helm-chart-v3"
6379 self.logger.debug("kdur: {}".format(kdur))
6380 if (
6381 kdur.get("helm-version")
6382 and kdur.get("helm-version") == "v2"
6383 ):
6384 k8s_cluster_type = "helm-chart"
aktas5f75f102021-03-15 11:26:10 +03006385 elif kdur.get("juju-bundle"):
6386 k8s_cluster_type = "juju-bundle"
6387 else:
6388 raise LcmException(
6389 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6390 "juju-bundle. Maybe an old NBI version is running".format(
6391 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6392 )
6393 )
6394
6395 min_instance_count = 0
6396 if kdu_profile and "min-number-of-instances" in kdu_profile:
6397 min_instance_count = kdu_profile["min-number-of-instances"]
6398
6399 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6400 deployed_kdu, _ = get_deployed_kdu(
6401 nsr_deployed, kdu_name, vnf_index
6402 )
6403 if deployed_kdu is None:
6404 raise LcmException(
6405 "KDU '{}' for vnf '{}' not deployed".format(
6406 kdu_name, vnf_index
6407 )
6408 )
6409 kdu_instance = deployed_kdu.get("kdu-instance")
6410 instance_num = await self.k8scluster_map[
6411 k8s_cluster_type
aktasc41fe832021-11-29 18:41:42 +03006412 ].get_scale_count(
6413 resource_name,
6414 kdu_instance,
6415 vca_id=vca_id,
6416 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6417 kdu_model=deployed_kdu.get("kdu-model"),
6418 )
aktas5f75f102021-03-15 11:26:10 +03006419 kdu_replica_count = instance_num - kdu_delta.get(
garciadeblas5697b8b2021-03-24 09:17:02 +01006420 "number-of-instances", 1
6421 )
tierno59d22d22018-09-25 18:10:19 +02006422
aktas5f75f102021-03-15 11:26:10 +03006423 if kdu_replica_count < min_instance_count < instance_num:
6424 kdu_replica_count = min_instance_count
6425 if kdu_replica_count < min_instance_count:
6426 raise LcmException(
6427 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6428 "scaling-group-descriptor '{}'".format(
6429 instance_num, scaling_group
6430 )
6431 )
6432
6433 for x in range(kdu_delta.get("number-of-instances", 1)):
6434 vca_scaling_info.append(
6435 {
6436 "osm_kdu_id": kdu_name,
6437 "member-vnf-index": vnf_index,
6438 "type": "delete",
6439 "kdu_index": instance_num - x - 1,
6440 }
6441 )
6442 scaling_info["kdu-delete"][kdu_name].append(
6443 {
6444 "member-vnf-index": vnf_index,
6445 "type": "delete",
6446 "k8s-cluster-type": k8s_cluster_type,
6447 "resource-name": resource_name,
6448 "scale": kdu_replica_count,
6449 }
6450 )
6451
tierno59d22d22018-09-25 18:10:19 +02006452 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
aktas5f75f102021-03-15 11:26:10 +03006453 vdu_delete = copy(scaling_info.get("vdu-delete"))
6454 if scaling_info["scaling_direction"] == "IN":
tierno59d22d22018-09-25 18:10:19 +02006455 for vdur in reversed(db_vnfr["vdur"]):
tierno27246d82018-09-27 15:59:09 +02006456 if vdu_delete.get(vdur["vdu-id-ref"]):
6457 vdu_delete[vdur["vdu-id-ref"]] -= 1
aktas5f75f102021-03-15 11:26:10 +03006458 scaling_info["vdu"].append(
garciadeblas5697b8b2021-03-24 09:17:02 +01006459 {
6460 "name": vdur.get("name") or vdur.get("vdu-name"),
6461 "vdu_id": vdur["vdu-id-ref"],
6462 "interface": [],
6463 }
6464 )
tierno59d22d22018-09-25 18:10:19 +02006465 for interface in vdur["interfaces"]:
aktas5f75f102021-03-15 11:26:10 +03006466 scaling_info["vdu"][-1]["interface"].append(
garciadeblas5697b8b2021-03-24 09:17:02 +01006467 {
6468 "name": interface["name"],
6469 "ip_address": interface["ip-address"],
6470 "mac_address": interface.get("mac-address"),
6471 }
6472 )
tierno2357f4e2020-10-19 16:38:59 +00006473 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
tierno59d22d22018-09-25 18:10:19 +02006474
kuuseac3a8882019-10-03 10:48:06 +02006475 # PRE-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02006476 step = "Executing pre-scale vnf-config-primitive"
6477 if scaling_descriptor.get("scaling-config-action"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006478 for scaling_config_action in scaling_descriptor[
6479 "scaling-config-action"
6480 ]:
6481 if (
6482 scaling_config_action.get("trigger") == "pre-scale-in"
6483 and scaling_type == "SCALE_IN"
6484 ) or (
6485 scaling_config_action.get("trigger") == "pre-scale-out"
6486 and scaling_type == "SCALE_OUT"
6487 ):
6488 vnf_config_primitive = scaling_config_action[
6489 "vnf-config-primitive-name-ref"
6490 ]
6491 step = db_nslcmop_update[
6492 "detailed-status"
6493 ] = "executing pre-scale scaling-config-action '{}'".format(
6494 vnf_config_primitive
6495 )
tiernoda964822019-01-14 15:53:47 +00006496
tierno59d22d22018-09-25 18:10:19 +02006497 # look for primitive
garciadeblas5697b8b2021-03-24 09:17:02 +01006498 for config_primitive in (
6499 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6500 ).get("config-primitive", ()):
tierno59d22d22018-09-25 18:10:19 +02006501 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02006502 break
6503 else:
6504 raise LcmException(
6505 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
tiernoda964822019-01-14 15:53:47 +00006506 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
garciadeblas5697b8b2021-03-24 09:17:02 +01006507 "primitive".format(scaling_group, vnf_config_primitive)
6508 )
tiernoda964822019-01-14 15:53:47 +00006509
aktas5f75f102021-03-15 11:26:10 +03006510 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
tiernoda964822019-01-14 15:53:47 +00006511 if db_vnfr.get("additionalParamsForVnf"):
6512 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
quilesj7e13aeb2019-10-08 13:34:55 +02006513
tierno9ab95942018-10-10 16:44:22 +02006514 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02006515 db_nsr_update["config-status"] = "configuring pre-scaling"
garciadeblas5697b8b2021-03-24 09:17:02 +01006516 primitive_params = self._map_primitive_params(
6517 config_primitive, {}, vnfr_params
6518 )
kuuseac3a8882019-10-03 10:48:06 +02006519
tierno7c4e24c2020-05-13 08:41:35 +00006520 # Pre-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02006521 op_index = self._check_or_add_scale_suboperation(
garciadeblas5697b8b2021-03-24 09:17:02 +01006522 db_nslcmop,
garciadeblas5697b8b2021-03-24 09:17:02 +01006523 vnf_index,
6524 vnf_config_primitive,
6525 primitive_params,
6526 "PRE-SCALE",
6527 )
tierno7c4e24c2020-05-13 08:41:35 +00006528 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02006529 # Skip sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006530 result = "COMPLETED"
6531 result_detail = "Done"
6532 self.logger.debug(
6533 logging_text
6534 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6535 vnf_config_primitive, result, result_detail
6536 )
6537 )
kuuseac3a8882019-10-03 10:48:06 +02006538 else:
tierno7c4e24c2020-05-13 08:41:35 +00006539 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02006540 # New sub-operation: Get index of this sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006541 op_index = (
6542 len(db_nslcmop.get("_admin", {}).get("operations"))
6543 - 1
6544 )
6545 self.logger.debug(
6546 logging_text
6547 + "vnf_config_primitive={} New sub-operation".format(
6548 vnf_config_primitive
6549 )
6550 )
kuuseac3a8882019-10-03 10:48:06 +02006551 else:
tierno7c4e24c2020-05-13 08:41:35 +00006552 # retry: Get registered params for this existing sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006553 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6554 op_index
6555 ]
6556 vnf_index = op.get("member_vnf_index")
6557 vnf_config_primitive = op.get("primitive")
6558 primitive_params = op.get("primitive_params")
6559 self.logger.debug(
6560 logging_text
6561 + "vnf_config_primitive={} Sub-operation retry".format(
6562 vnf_config_primitive
6563 )
6564 )
tierno588547c2020-07-01 15:30:20 +00006565 # Execute the primitive, either with new (first-time) or registered (reintent) args
garciadeblas5697b8b2021-03-24 09:17:02 +01006566 ee_descriptor_id = config_primitive.get(
6567 "execution-environment-ref"
6568 )
6569 primitive_name = config_primitive.get(
6570 "execution-environment-primitive", vnf_config_primitive
6571 )
6572 ee_id, vca_type = self._look_for_deployed_vca(
6573 nsr_deployed["VCA"],
6574 member_vnf_index=vnf_index,
6575 vdu_id=None,
6576 vdu_count_index=None,
6577 ee_descriptor_id=ee_descriptor_id,
6578 )
kuuseac3a8882019-10-03 10:48:06 +02006579 result, result_detail = await self._ns_execute_primitive(
garciadeblas5697b8b2021-03-24 09:17:02 +01006580 ee_id,
6581 primitive_name,
David Garciac1fe90a2021-03-31 19:12:02 +02006582 primitive_params,
6583 vca_type=vca_type,
6584 vca_id=vca_id,
6585 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006586 self.logger.debug(
6587 logging_text
6588 + "vnf_config_primitive={} Done with result {} {}".format(
6589 vnf_config_primitive, result, result_detail
6590 )
6591 )
kuuseac3a8882019-10-03 10:48:06 +02006592 # Update operationState = COMPLETED | FAILED
6593 self._update_suboperation_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01006594 db_nslcmop, op_index, result, result_detail
6595 )
kuuseac3a8882019-10-03 10:48:06 +02006596
tierno59d22d22018-09-25 18:10:19 +02006597 if result == "FAILED":
6598 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02006599 db_nsr_update["config-status"] = old_config_status
6600 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02006601 # PRE-SCALE END
tierno59d22d22018-09-25 18:10:19 +02006602
garciadeblas5697b8b2021-03-24 09:17:02 +01006603 db_nsr_update[
6604 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6605 ] = nb_scale_op
6606 db_nsr_update[
6607 "_admin.scaling-group.{}.time".format(admin_scale_index)
6608 ] = time()
tierno2357f4e2020-10-19 16:38:59 +00006609
aktas13251562021-02-12 22:19:10 +03006610 # SCALE-IN VCA - BEGIN
aktas5f75f102021-03-15 11:26:10 +03006611 if vca_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01006612 step = db_nslcmop_update[
6613 "detailed-status"
6614 ] = "Deleting the execution environments"
aktas13251562021-02-12 22:19:10 +03006615 scale_process = "VCA"
aktas5f75f102021-03-15 11:26:10 +03006616 for vca_info in vca_scaling_info:
Guillermo Calvinoa0c6baf2022-02-02 19:04:50 +01006617 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
aktas5f75f102021-03-15 11:26:10 +03006618 member_vnf_index = str(vca_info["member-vnf-index"])
garciadeblas5697b8b2021-03-24 09:17:02 +01006619 self.logger.debug(
aktas5f75f102021-03-15 11:26:10 +03006620 logging_text + "vdu info: {}".format(vca_info)
garciadeblas5697b8b2021-03-24 09:17:02 +01006621 )
aktas5f75f102021-03-15 11:26:10 +03006622 if vca_info.get("osm_vdu_id"):
6623 vdu_id = vca_info["osm_vdu_id"]
6624 vdu_index = int(vca_info["vdu_index"])
6625 stage[
6626 1
6627 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6628 member_vnf_index, vdu_id, vdu_index
6629 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006630 stage[2] = step = "Scaling in VCA"
6631 self._write_op_status(op_id=nslcmop_id, stage=stage)
aktas13251562021-02-12 22:19:10 +03006632 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6633 config_update = db_nsr["configurationStatus"]
6634 for vca_index, vca in enumerate(vca_update):
garciadeblas5697b8b2021-03-24 09:17:02 +01006635 if (
6636 (vca or vca.get("ee_id"))
6637 and vca["member-vnf-index"] == member_vnf_index
6638 and vca["vdu_count_index"] == vdu_index
6639 ):
aktas13251562021-02-12 22:19:10 +03006640 if vca.get("vdu_id"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006641 config_descriptor = get_configuration(
6642 db_vnfd, vca.get("vdu_id")
6643 )
aktas13251562021-02-12 22:19:10 +03006644 elif vca.get("kdu_name"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006645 config_descriptor = get_configuration(
6646 db_vnfd, vca.get("kdu_name")
6647 )
aktas13251562021-02-12 22:19:10 +03006648 else:
garciadeblas5697b8b2021-03-24 09:17:02 +01006649 config_descriptor = get_configuration(
6650 db_vnfd, db_vnfd["id"]
6651 )
6652 operation_params = (
6653 db_nslcmop.get("operationParams") or {}
6654 )
6655 exec_terminate_primitives = not operation_params.get(
6656 "skip_terminate_primitives"
6657 ) and vca.get("needed_terminate")
David Garciac1fe90a2021-03-31 19:12:02 +02006658 task = asyncio.ensure_future(
6659 asyncio.wait_for(
6660 self.destroy_N2VC(
6661 logging_text,
6662 db_nslcmop,
6663 vca,
6664 config_descriptor,
6665 vca_index,
6666 destroy_ee=True,
6667 exec_primitives=exec_terminate_primitives,
6668 scaling_in=True,
6669 vca_id=vca_id,
6670 ),
garciadeblas5697b8b2021-03-24 09:17:02 +01006671 timeout=self.timeout_charm_delete,
David Garciac1fe90a2021-03-31 19:12:02 +02006672 )
6673 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006674 tasks_dict_info[task] = "Terminating VCA {}".format(
6675 vca.get("ee_id")
6676 )
aktas13251562021-02-12 22:19:10 +03006677 del vca_update[vca_index]
6678 del config_update[vca_index]
6679 # wait for pending tasks of terminate primitives
6680 if tasks_dict_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01006681 self.logger.debug(
6682 logging_text
6683 + "Waiting for tasks {}".format(
6684 list(tasks_dict_info.keys())
6685 )
6686 )
6687 error_list = await self._wait_for_tasks(
6688 logging_text,
6689 tasks_dict_info,
6690 min(
6691 self.timeout_charm_delete, self.timeout_ns_terminate
6692 ),
6693 stage,
6694 nslcmop_id,
6695 )
aktas13251562021-02-12 22:19:10 +03006696 tasks_dict_info.clear()
6697 if error_list:
6698 raise LcmException("; ".join(error_list))
6699
6700 db_vca_and_config_update = {
6701 "_admin.deployed.VCA": vca_update,
garciadeblas5697b8b2021-03-24 09:17:02 +01006702 "configurationStatus": config_update,
aktas13251562021-02-12 22:19:10 +03006703 }
garciadeblas5697b8b2021-03-24 09:17:02 +01006704 self.update_db_2(
6705 "nsrs", db_nsr["_id"], db_vca_and_config_update
6706 )
aktas13251562021-02-12 22:19:10 +03006707 scale_process = None
6708 # SCALE-IN VCA - END
6709
kuuseac3a8882019-10-03 10:48:06 +02006710 # SCALE RO - BEGIN
aktas5f75f102021-03-15 11:26:10 +03006711 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
tierno9ab95942018-10-10 16:44:22 +02006712 scale_process = "RO"
tierno2357f4e2020-10-19 16:38:59 +00006713 if self.ro_config.get("ng"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006714 await self._scale_ng_ro(
aktas5f75f102021-03-15 11:26:10 +03006715 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
garciadeblas5697b8b2021-03-24 09:17:02 +01006716 )
aktas5f75f102021-03-15 11:26:10 +03006717 scaling_info.pop("vdu-create", None)
6718 scaling_info.pop("vdu-delete", None)
tierno59d22d22018-09-25 18:10:19 +02006719
tierno9ab95942018-10-10 16:44:22 +02006720 scale_process = None
aktas13251562021-02-12 22:19:10 +03006721 # SCALE RO - END
6722
aktas5f75f102021-03-15 11:26:10 +03006723 # SCALE KDU - BEGIN
6724 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6725 scale_process = "KDU"
6726 await self._scale_kdu(
6727 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6728 )
6729 scaling_info.pop("kdu-create", None)
6730 scaling_info.pop("kdu-delete", None)
6731
6732 scale_process = None
6733 # SCALE KDU - END
6734
6735 if db_nsr_update:
6736 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6737
aktas13251562021-02-12 22:19:10 +03006738 # SCALE-UP VCA - BEGIN
aktas5f75f102021-03-15 11:26:10 +03006739 if vca_scaling_info:
garciadeblas5697b8b2021-03-24 09:17:02 +01006740 step = db_nslcmop_update[
6741 "detailed-status"
6742 ] = "Creating new execution environments"
aktas13251562021-02-12 22:19:10 +03006743 scale_process = "VCA"
aktas5f75f102021-03-15 11:26:10 +03006744 for vca_info in vca_scaling_info:
Guillermo Calvinoa0c6baf2022-02-02 19:04:50 +01006745 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
aktas5f75f102021-03-15 11:26:10 +03006746 member_vnf_index = str(vca_info["member-vnf-index"])
garciadeblas5697b8b2021-03-24 09:17:02 +01006747 self.logger.debug(
aktas5f75f102021-03-15 11:26:10 +03006748 logging_text + "vdu info: {}".format(vca_info)
garciadeblas5697b8b2021-03-24 09:17:02 +01006749 )
aktas13251562021-02-12 22:19:10 +03006750 vnfd_id = db_vnfr["vnfd-ref"]
aktas5f75f102021-03-15 11:26:10 +03006751 if vca_info.get("osm_vdu_id"):
6752 vdu_index = int(vca_info["vdu_index"])
6753 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6754 if db_vnfr.get("additionalParamsForVnf"):
6755 deploy_params.update(
6756 parse_yaml_strings(
6757 db_vnfr["additionalParamsForVnf"].copy()
6758 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006759 )
aktas5f75f102021-03-15 11:26:10 +03006760 descriptor_config = get_configuration(
6761 db_vnfd, db_vnfd["id"]
garciadeblas5697b8b2021-03-24 09:17:02 +01006762 )
aktas5f75f102021-03-15 11:26:10 +03006763 if descriptor_config:
6764 vdu_id = None
6765 vdu_name = None
6766 kdu_name = None
6767 self._deploy_n2vc(
6768 logging_text=logging_text
6769 + "member_vnf_index={} ".format(member_vnf_index),
6770 db_nsr=db_nsr,
6771 db_vnfr=db_vnfr,
6772 nslcmop_id=nslcmop_id,
6773 nsr_id=nsr_id,
6774 nsi_id=nsi_id,
6775 vnfd_id=vnfd_id,
6776 vdu_id=vdu_id,
6777 kdu_name=kdu_name,
6778 member_vnf_index=member_vnf_index,
6779 vdu_index=vdu_index,
6780 vdu_name=vdu_name,
6781 deploy_params=deploy_params,
6782 descriptor_config=descriptor_config,
6783 base_folder=base_folder,
6784 task_instantiation_info=tasks_dict_info,
6785 stage=stage,
6786 )
6787 vdu_id = vca_info["osm_vdu_id"]
6788 vdur = find_in_list(
6789 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
aktas13251562021-02-12 22:19:10 +03006790 )
aktas5f75f102021-03-15 11:26:10 +03006791 descriptor_config = get_configuration(db_vnfd, vdu_id)
6792 if vdur.get("additionalParams"):
6793 deploy_params_vdu = parse_yaml_strings(
6794 vdur["additionalParams"]
6795 )
6796 else:
6797 deploy_params_vdu = deploy_params
6798 deploy_params_vdu["OSM"] = get_osm_params(
6799 db_vnfr, vdu_id, vdu_count_index=vdu_index
garciadeblas5697b8b2021-03-24 09:17:02 +01006800 )
aktas5f75f102021-03-15 11:26:10 +03006801 if descriptor_config:
6802 vdu_name = None
6803 kdu_name = None
6804 stage[
6805 1
6806 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
garciadeblas5697b8b2021-03-24 09:17:02 +01006807 member_vnf_index, vdu_id, vdu_index
aktas5f75f102021-03-15 11:26:10 +03006808 )
6809 stage[2] = step = "Scaling out VCA"
6810 self._write_op_status(op_id=nslcmop_id, stage=stage)
6811 self._deploy_n2vc(
6812 logging_text=logging_text
6813 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6814 member_vnf_index, vdu_id, vdu_index
6815 ),
6816 db_nsr=db_nsr,
6817 db_vnfr=db_vnfr,
6818 nslcmop_id=nslcmop_id,
6819 nsr_id=nsr_id,
6820 nsi_id=nsi_id,
6821 vnfd_id=vnfd_id,
6822 vdu_id=vdu_id,
6823 kdu_name=kdu_name,
6824 member_vnf_index=member_vnf_index,
6825 vdu_index=vdu_index,
6826 vdu_name=vdu_name,
6827 deploy_params=deploy_params_vdu,
6828 descriptor_config=descriptor_config,
6829 base_folder=base_folder,
6830 task_instantiation_info=tasks_dict_info,
6831 stage=stage,
6832 )
aktas13251562021-02-12 22:19:10 +03006833 # SCALE-UP VCA - END
6834 scale_process = None
tierno59d22d22018-09-25 18:10:19 +02006835
kuuseac3a8882019-10-03 10:48:06 +02006836 # POST-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02006837 # execute primitive service POST-SCALING
6838 step = "Executing post-scale vnf-config-primitive"
6839 if scaling_descriptor.get("scaling-config-action"):
garciadeblas5697b8b2021-03-24 09:17:02 +01006840 for scaling_config_action in scaling_descriptor[
6841 "scaling-config-action"
6842 ]:
6843 if (
6844 scaling_config_action.get("trigger") == "post-scale-in"
6845 and scaling_type == "SCALE_IN"
6846 ) or (
6847 scaling_config_action.get("trigger") == "post-scale-out"
6848 and scaling_type == "SCALE_OUT"
6849 ):
6850 vnf_config_primitive = scaling_config_action[
6851 "vnf-config-primitive-name-ref"
6852 ]
6853 step = db_nslcmop_update[
6854 "detailed-status"
6855 ] = "executing post-scale scaling-config-action '{}'".format(
6856 vnf_config_primitive
6857 )
tiernoda964822019-01-14 15:53:47 +00006858
aktas5f75f102021-03-15 11:26:10 +03006859 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
tiernoda964822019-01-14 15:53:47 +00006860 if db_vnfr.get("additionalParamsForVnf"):
6861 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6862
tierno59d22d22018-09-25 18:10:19 +02006863 # look for primitive
bravof9a256db2021-02-22 18:02:07 -03006864 for config_primitive in (
6865 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6866 ).get("config-primitive", ()):
tierno59d22d22018-09-25 18:10:19 +02006867 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02006868 break
6869 else:
tiernoa278b842020-07-08 15:33:55 +00006870 raise LcmException(
6871 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6872 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
garciadeblas5697b8b2021-03-24 09:17:02 +01006873 "config-primitive".format(
6874 scaling_group, vnf_config_primitive
6875 )
6876 )
tierno9ab95942018-10-10 16:44:22 +02006877 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02006878 db_nsr_update["config-status"] = "configuring post-scaling"
garciadeblas5697b8b2021-03-24 09:17:02 +01006879 primitive_params = self._map_primitive_params(
6880 config_primitive, {}, vnfr_params
6881 )
tiernod6de1992018-10-11 13:05:52 +02006882
tierno7c4e24c2020-05-13 08:41:35 +00006883 # Post-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02006884 op_index = self._check_or_add_scale_suboperation(
garciadeblas5697b8b2021-03-24 09:17:02 +01006885 db_nslcmop,
garciadeblas5697b8b2021-03-24 09:17:02 +01006886 vnf_index,
6887 vnf_config_primitive,
6888 primitive_params,
6889 "POST-SCALE",
6890 )
quilesj4cda56b2019-12-05 10:02:20 +00006891 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02006892 # Skip sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006893 result = "COMPLETED"
6894 result_detail = "Done"
6895 self.logger.debug(
6896 logging_text
6897 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6898 vnf_config_primitive, result, result_detail
6899 )
6900 )
kuuseac3a8882019-10-03 10:48:06 +02006901 else:
quilesj4cda56b2019-12-05 10:02:20 +00006902 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02006903 # New sub-operation: Get index of this sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006904 op_index = (
6905 len(db_nslcmop.get("_admin", {}).get("operations"))
6906 - 1
6907 )
6908 self.logger.debug(
6909 logging_text
6910 + "vnf_config_primitive={} New sub-operation".format(
6911 vnf_config_primitive
6912 )
6913 )
kuuseac3a8882019-10-03 10:48:06 +02006914 else:
tierno7c4e24c2020-05-13 08:41:35 +00006915 # retry: Get registered params for this existing sub-operation
garciadeblas5697b8b2021-03-24 09:17:02 +01006916 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6917 op_index
6918 ]
6919 vnf_index = op.get("member_vnf_index")
6920 vnf_config_primitive = op.get("primitive")
6921 primitive_params = op.get("primitive_params")
6922 self.logger.debug(
6923 logging_text
6924 + "vnf_config_primitive={} Sub-operation retry".format(
6925 vnf_config_primitive
6926 )
6927 )
tierno588547c2020-07-01 15:30:20 +00006928 # Execute the primitive, either with new (first-time) or registered (reintent) args
garciadeblas5697b8b2021-03-24 09:17:02 +01006929 ee_descriptor_id = config_primitive.get(
6930 "execution-environment-ref"
6931 )
6932 primitive_name = config_primitive.get(
6933 "execution-environment-primitive", vnf_config_primitive
6934 )
6935 ee_id, vca_type = self._look_for_deployed_vca(
6936 nsr_deployed["VCA"],
6937 member_vnf_index=vnf_index,
6938 vdu_id=None,
6939 vdu_count_index=None,
6940 ee_descriptor_id=ee_descriptor_id,
6941 )
kuuseac3a8882019-10-03 10:48:06 +02006942 result, result_detail = await self._ns_execute_primitive(
David Garciac1fe90a2021-03-31 19:12:02 +02006943 ee_id,
6944 primitive_name,
6945 primitive_params,
6946 vca_type=vca_type,
6947 vca_id=vca_id,
6948 )
garciadeblas5697b8b2021-03-24 09:17:02 +01006949 self.logger.debug(
6950 logging_text
6951 + "vnf_config_primitive={} Done with result {} {}".format(
6952 vnf_config_primitive, result, result_detail
6953 )
6954 )
kuuseac3a8882019-10-03 10:48:06 +02006955 # Update operationState = COMPLETED | FAILED
6956 self._update_suboperation_status(
garciadeblas5697b8b2021-03-24 09:17:02 +01006957 db_nslcmop, op_index, result, result_detail
6958 )
kuuseac3a8882019-10-03 10:48:06 +02006959
tierno59d22d22018-09-25 18:10:19 +02006960 if result == "FAILED":
6961 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02006962 db_nsr_update["config-status"] = old_config_status
6963 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02006964 # POST-SCALE END
tierno59d22d22018-09-25 18:10:19 +02006965
garciadeblas5697b8b2021-03-24 09:17:02 +01006966 db_nsr_update[
6967 "detailed-status"
6968 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6969 db_nsr_update["operational-status"] = (
6970 "running"
6971 if old_operational_status == "failed"
ikalyvas02d9e7b2019-05-27 18:16:01 +03006972 else old_operational_status
garciadeblas5697b8b2021-03-24 09:17:02 +01006973 )
tiernod6de1992018-10-11 13:05:52 +02006974 db_nsr_update["config-status"] = old_config_status
tierno59d22d22018-09-25 18:10:19 +02006975 return
garciadeblas5697b8b2021-03-24 09:17:02 +01006976 except (
6977 ROclient.ROClientException,
6978 DbException,
6979 LcmException,
6980 NgRoException,
6981 ) as e:
tierno59d22d22018-09-25 18:10:19 +02006982 self.logger.error(logging_text + "Exit Exception {}".format(e))
6983 exc = e
6984 except asyncio.CancelledError:
garciadeblas5697b8b2021-03-24 09:17:02 +01006985 self.logger.error(
6986 logging_text + "Cancelled Exception while '{}'".format(step)
6987 )
tierno59d22d22018-09-25 18:10:19 +02006988 exc = "Operation was cancelled"
6989 except Exception as e:
6990 exc = traceback.format_exc()
garciadeblas5697b8b2021-03-24 09:17:02 +01006991 self.logger.critical(
6992 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6993 exc_info=True,
6994 )
tierno59d22d22018-09-25 18:10:19 +02006995 finally:
garciadeblas5697b8b2021-03-24 09:17:02 +01006996 self._write_ns_status(
6997 nsr_id=nsr_id,
6998 ns_state=None,
6999 current_operation="IDLE",
7000 current_operation_id=None,
7001 )
aktas13251562021-02-12 22:19:10 +03007002 if tasks_dict_info:
7003 stage[1] = "Waiting for instantiate pending tasks."
7004 self.logger.debug(logging_text + stage[1])
garciadeblas5697b8b2021-03-24 09:17:02 +01007005 exc = await self._wait_for_tasks(
7006 logging_text,
7007 tasks_dict_info,
7008 self.timeout_ns_deploy,
7009 stage,
7010 nslcmop_id,
7011 nsr_id=nsr_id,
7012 )
tierno59d22d22018-09-25 18:10:19 +02007013 if exc:
garciadeblas5697b8b2021-03-24 09:17:02 +01007014 db_nslcmop_update[
7015 "detailed-status"
7016 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
tiernoa17d4f42020-04-28 09:59:23 +00007017 nslcmop_operation_state = "FAILED"
tierno59d22d22018-09-25 18:10:19 +02007018 if db_nsr:
tiernod6de1992018-10-11 13:05:52 +02007019 db_nsr_update["operational-status"] = old_operational_status
7020 db_nsr_update["config-status"] = old_config_status
7021 db_nsr_update["detailed-status"] = ""
7022 if scale_process:
7023 if "VCA" in scale_process:
7024 db_nsr_update["config-status"] = "failed"
7025 if "RO" in scale_process:
7026 db_nsr_update["operational-status"] = "failed"
garciadeblas5697b8b2021-03-24 09:17:02 +01007027 db_nsr_update[
7028 "detailed-status"
7029 ] = "FAILED scaling nslcmop={} {}: {}".format(
7030 nslcmop_id, step, exc
7031 )
tiernoa17d4f42020-04-28 09:59:23 +00007032 else:
7033 error_description_nslcmop = None
7034 nslcmop_operation_state = "COMPLETED"
7035 db_nslcmop_update["detailed-status"] = "Done"
quilesj4cda56b2019-12-05 10:02:20 +00007036
garciadeblas5697b8b2021-03-24 09:17:02 +01007037 self._write_op_status(
7038 op_id=nslcmop_id,
7039 stage="",
7040 error_message=error_description_nslcmop,
7041 operation_state=nslcmop_operation_state,
7042 other_update=db_nslcmop_update,
7043 )
tiernoa17d4f42020-04-28 09:59:23 +00007044 if db_nsr:
garciadeblas5697b8b2021-03-24 09:17:02 +01007045 self._write_ns_status(
7046 nsr_id=nsr_id,
7047 ns_state=None,
7048 current_operation="IDLE",
7049 current_operation_id=None,
7050 other_update=db_nsr_update,
7051 )
tiernoa17d4f42020-04-28 09:59:23 +00007052
tierno59d22d22018-09-25 18:10:19 +02007053 if nslcmop_operation_state:
7054 try:
garciadeblas5697b8b2021-03-24 09:17:02 +01007055 msg = {
7056 "nsr_id": nsr_id,
7057 "nslcmop_id": nslcmop_id,
7058 "operationState": nslcmop_operation_state,
7059 }
bravof922c4172020-11-24 21:21:43 -03007060 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02007061 except Exception as e:
garciadeblas5697b8b2021-03-24 09:17:02 +01007062 self.logger.error(
7063 logging_text + "kafka_write notification Exception {}".format(e)
7064 )
tierno59d22d22018-09-25 18:10:19 +02007065 self.logger.debug(logging_text + "Exit")
7066 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
tiernob996d942020-07-03 14:52:28 +00007067
aktas5f75f102021-03-15 11:26:10 +03007068 async def _scale_kdu(
7069 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7070 ):
7071 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7072 for kdu_name in _scaling_info:
7073 for kdu_scaling_info in _scaling_info[kdu_name]:
7074 deployed_kdu, index = get_deployed_kdu(
7075 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7076 )
7077 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7078 kdu_instance = deployed_kdu["kdu-instance"]
aktasc41fe832021-11-29 18:41:42 +03007079 kdu_model = deployed_kdu.get("kdu-model")
aktas5f75f102021-03-15 11:26:10 +03007080 scale = int(kdu_scaling_info["scale"])
7081 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7082
7083 db_dict = {
7084 "collection": "nsrs",
7085 "filter": {"_id": nsr_id},
7086 "path": "_admin.deployed.K8s.{}".format(index),
7087 }
7088
7089 step = "scaling application {}".format(
7090 kdu_scaling_info["resource-name"]
7091 )
7092 self.logger.debug(logging_text + step)
7093
7094 if kdu_scaling_info["type"] == "delete":
7095 kdu_config = get_configuration(db_vnfd, kdu_name)
7096 if (
7097 kdu_config
7098 and kdu_config.get("terminate-config-primitive")
7099 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7100 ):
7101 terminate_config_primitive_list = kdu_config.get(
7102 "terminate-config-primitive"
7103 )
7104 terminate_config_primitive_list.sort(
7105 key=lambda val: int(val["seq"])
7106 )
7107
7108 for (
7109 terminate_config_primitive
7110 ) in terminate_config_primitive_list:
7111 primitive_params_ = self._map_primitive_params(
7112 terminate_config_primitive, {}, {}
7113 )
7114 step = "execute terminate config primitive"
7115 self.logger.debug(logging_text + step)
7116 await asyncio.wait_for(
7117 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7118 cluster_uuid=cluster_uuid,
7119 kdu_instance=kdu_instance,
7120 primitive_name=terminate_config_primitive["name"],
7121 params=primitive_params_,
7122 db_dict=db_dict,
7123 vca_id=vca_id,
7124 ),
7125 timeout=600,
7126 )
7127
7128 await asyncio.wait_for(
7129 self.k8scluster_map[k8s_cluster_type].scale(
7130 kdu_instance,
7131 scale,
7132 kdu_scaling_info["resource-name"],
7133 vca_id=vca_id,
aktasc41fe832021-11-29 18:41:42 +03007134 cluster_uuid=cluster_uuid,
7135 kdu_model=kdu_model,
7136 atomic=True,
7137 db_dict=db_dict,
aktas5f75f102021-03-15 11:26:10 +03007138 ),
7139 timeout=self.timeout_vca_on_error,
7140 )
7141
7142 if kdu_scaling_info["type"] == "create":
7143 kdu_config = get_configuration(db_vnfd, kdu_name)
7144 if (
7145 kdu_config
7146 and kdu_config.get("initial-config-primitive")
7147 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7148 ):
7149 initial_config_primitive_list = kdu_config.get(
7150 "initial-config-primitive"
7151 )
7152 initial_config_primitive_list.sort(
7153 key=lambda val: int(val["seq"])
7154 )
7155
7156 for initial_config_primitive in initial_config_primitive_list:
7157 primitive_params_ = self._map_primitive_params(
7158 initial_config_primitive, {}, {}
7159 )
7160 step = "execute initial config primitive"
7161 self.logger.debug(logging_text + step)
7162 await asyncio.wait_for(
7163 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7164 cluster_uuid=cluster_uuid,
7165 kdu_instance=kdu_instance,
7166 primitive_name=initial_config_primitive["name"],
7167 params=primitive_params_,
7168 db_dict=db_dict,
7169 vca_id=vca_id,
7170 ),
7171 timeout=600,
7172 )
7173
garciadeblas5697b8b2021-03-24 09:17:02 +01007174 async def _scale_ng_ro(
7175 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7176 ):
tierno2357f4e2020-10-19 16:38:59 +00007177 nsr_id = db_nslcmop["nsInstanceId"]
7178 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7179 db_vnfrs = {}
7180
7181 # read from db: vnfd's for every vnf
bravof832f8992020-12-07 12:57:31 -03007182 db_vnfds = []
tierno2357f4e2020-10-19 16:38:59 +00007183
7184 # for each vnf in ns, read vnfd
7185 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7186 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7187 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
tierno2357f4e2020-10-19 16:38:59 +00007188 # if we haven't this vnfd, read it from db
bravof832f8992020-12-07 12:57:31 -03007189 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
tierno2357f4e2020-10-19 16:38:59 +00007190 # read from db
7191 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
bravof832f8992020-12-07 12:57:31 -03007192 db_vnfds.append(vnfd)
tierno2357f4e2020-10-19 16:38:59 +00007193 n2vc_key = self.n2vc.get_public_key()
7194 n2vc_key_list = [n2vc_key]
garciadeblas5697b8b2021-03-24 09:17:02 +01007195 self.scale_vnfr(
7196 db_vnfr,
7197 vdu_scaling_info.get("vdu-create"),
7198 vdu_scaling_info.get("vdu-delete"),
7199 mark_delete=True,
7200 )
tierno2357f4e2020-10-19 16:38:59 +00007201 # db_vnfr has been updated, update db_vnfrs to use it
7202 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
garciadeblas5697b8b2021-03-24 09:17:02 +01007203 await self._instantiate_ng_ro(
7204 logging_text,
7205 nsr_id,
7206 db_nsd,
7207 db_nsr,
7208 db_nslcmop,
7209 db_vnfrs,
7210 db_vnfds,
7211 n2vc_key_list,
7212 stage=stage,
7213 start_deploy=time(),
7214 timeout_ns_deploy=self.timeout_ns_deploy,
7215 )
tierno2357f4e2020-10-19 16:38:59 +00007216 if vdu_scaling_info.get("vdu-delete"):
garciadeblas5697b8b2021-03-24 09:17:02 +01007217 self.scale_vnfr(
7218 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7219 )
tierno2357f4e2020-10-19 16:38:59 +00007220
bravof73bac502021-05-11 07:38:47 -04007221 async def extract_prometheus_scrape_jobs(
aticig15db6142022-01-24 12:51:26 +03007222 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
garciadeblas5697b8b2021-03-24 09:17:02 +01007223 ):
tiernob996d942020-07-03 14:52:28 +00007224 # look if exist a file called 'prometheus*.j2' and
7225 artifact_content = self.fs.dir_ls(artifact_path)
garciadeblas5697b8b2021-03-24 09:17:02 +01007226 job_file = next(
7227 (
7228 f
7229 for f in artifact_content
7230 if f.startswith("prometheus") and f.endswith(".j2")
7231 ),
7232 None,
7233 )
tiernob996d942020-07-03 14:52:28 +00007234 if not job_file:
7235 return
7236 with self.fs.file_open((artifact_path, job_file), "r") as f:
7237 job_data = f.read()
7238
7239 # TODO get_service
garciadeblas5697b8b2021-03-24 09:17:02 +01007240 _, _, service = ee_id.partition(".") # remove prefix "namespace."
tiernob996d942020-07-03 14:52:28 +00007241 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7242 host_port = "80"
7243 vnfr_id = vnfr_id.replace("-", "")
7244 variables = {
7245 "JOB_NAME": vnfr_id,
7246 "TARGET_IP": target_ip,
7247 "EXPORTER_POD_IP": host_name,
7248 "EXPORTER_POD_PORT": host_port,
7249 }
bravof73bac502021-05-11 07:38:47 -04007250 job_list = parse_job(job_data, variables)
tiernob996d942020-07-03 14:52:28 +00007251 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7252 for job in job_list:
garciadeblas5697b8b2021-03-24 09:17:02 +01007253 if (
7254 not isinstance(job.get("job_name"), str)
7255 or vnfr_id not in job["job_name"]
7256 ):
tiernob996d942020-07-03 14:52:28 +00007257 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7258 job["nsr_id"] = nsr_id
bravof73bac502021-05-11 07:38:47 -04007259 job["vnfr_id"] = vnfr_id
7260 return job_list
David Garciaaae391f2020-11-09 11:12:54 +01007261
k4.rahulb827de92022-05-02 16:35:02 +00007262 async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
7263 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7264 self.logger.info(logging_text + "Enter")
7265 stage = ["Preparing the environment", ""]
7266 # database nsrs record
7267 db_nsr_update = {}
7268 vdu_vim_name = None
7269 vim_vm_id = None
7270 # in case of error, indicates what part of scale was failed to put nsr at error status
7271 start_deploy = time()
7272 try:
7273 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7274 vim_account_id = db_vnfr.get("vim-account-id")
7275 vim_info_key = "vim:" + vim_account_id
7276 vdur = find_in_list(
7277 db_vnfr["vdur"], lambda vdu: vdu["count-index"] == additional_param["count-index"]
7278 )
7279 if vdur:
7280 vdu_vim_name = vdur["name"]
7281 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7282 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7283 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7284 # wait for any previous tasks in process
7285 stage[1] = "Waiting for previous operations to terminate"
7286 self.logger.info(stage[1])
7287 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
7288
7289 stage[1] = "Reading from database."
7290 self.logger.info(stage[1])
7291 self._write_ns_status(
7292 nsr_id=nsr_id,
7293 ns_state=None,
7294 current_operation=operation_type.upper(),
7295 current_operation_id=nslcmop_id
7296 )
7297 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7298
7299 # read from db: ns
7300 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7301 db_nsr_update["operational-status"] = operation_type
7302 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7303 # Payload for RO
7304 desc = {
7305 operation_type: {
7306 "vim_vm_id": vim_vm_id,
7307 "vnf_id": vnf_id,
7308 "vdu_index": additional_param["count-index"],
7309 "vdu_id": vdur["id"],
7310 "target_vim": target_vim,
7311 "vim_account_id": vim_account_id
7312 }
7313 }
7314 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7315 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7316 self.logger.info("ro nsr id: {}".format(nsr_id))
7317 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7318 self.logger.info("response from RO: {}".format(result_dict))
7319 action_id = result_dict["action_id"]
7320 await self._wait_ng_ro(
7321 nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_operate
7322 )
7323 return "COMPLETED", "Done"
7324 except (ROclient.ROClientException, DbException, LcmException) as e:
7325 self.logger.error("Exit Exception {}".format(e))
7326 exc = e
7327 except asyncio.CancelledError:
7328 self.logger.error("Cancelled Exception while '{}'".format(stage))
7329 exc = "Operation was cancelled"
7330 except Exception as e:
7331 exc = traceback.format_exc()
7332 self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
7333 return "FAILED", "Error in operate VNF {}".format(exc)
7334
David Garciaaae391f2020-11-09 11:12:54 +01007335 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7336 """
7337 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7338
7339 :param: vim_account_id: VIM Account ID
7340
7341 :return: (cloud_name, cloud_credential)
7342 """
bravof922c4172020-11-24 21:21:43 -03007343 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
David Garciaaae391f2020-11-09 11:12:54 +01007344 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7345
7346 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7347 """
7348 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7349
7350 :param: vim_account_id: VIM Account ID
7351
7352 :return: (cloud_name, cloud_credential)
7353 """
bravof922c4172020-11-24 21:21:43 -03007354 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
David Garciaaae391f2020-11-09 11:12:54 +01007355 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
elumalai80bcf1c2022-04-28 18:05:01 +05307356
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below but never written to the
        # nsrs collection in this method — confirm whether that is intended
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # the RO migrate payload is the operation parameters as-is
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # poll RO until the migrate action finishes or times out
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
                operation="migrate"
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # always leave the ns in IDLE and close the operation record
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            # NOTE(review): error_message is always "" here even on failure
            # (the failure detail only goes into detailed-status) — verify
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    # notify result on the kafka bus; failures here are only logged
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
garciadeblas07f4e4c2022-06-09 09:42:58 +02007455
7456
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # saved so they can be restored on failure in the finally block
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # launch the RO heal in background; awaited via _wait_for_tasks below
            step = "Sending heal order to VIM"
            task_ro = asyncio.ensure_future(
                self.heal_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    db_nslcmop=db_nslcmop,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            tasks_dict_info[task_ro] = "Healing at VIM"

            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    # NOTE(review): iterating .get("vdu", None) raises TypeError
                    # when "vdu" is absent — confirm callers always provide it
                    for target_vdu in target_vnf["additionalParams"].get("vdu", None):
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf["additionalParams"].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf["additionalParams"].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index",0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            # NOTE(review): target_instance stays None when no
                            # vdur matches, and .get below would then raise —
                            # confirm a matching vdur is guaranteed here
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if ( instance["vdu-name"] == vdu_name and instance["count-index"] == vdu_index ):
                                    target_instance = instance
                                    break
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the RO/N2VC tasks launched above to finish (or fail)
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore pre-heal statuses, then mark the failed side
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(
                        nslcmop_id, step, exc
                    )
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    # notify result on the kafka bus; failures here are only logged
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7724
    async def heal_RO(
        self,
        logging_text,
        nsr_id,
        db_nslcmop,
        stage,
    ):
        """
        Heal at RO
        :param logging_text: preffix text to use at logging
        :param nsr_id: nsr identity
        :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
        :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
        :return: None or exception
        """
        # NOTE(review): this helper is defined but never called in this method
        # body — candidate for removal, confirm no external use first
        def get_vim_account(vim_account_id):
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        try:
            start_heal = time()
            # operation-level timeout override, else the configured default
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_heal"):
                timeout_ns_heal = ns_params["timeout_ns_heal"]
            else:
                timeout_ns_heal = self.timeout.get(
                    "ns_heal", self.timeout_ns_heal
                )

            db_vims = {}

            nslcmop_id = db_nslcmop["_id"]
            # RO target payload: the action id plus all operation parameters
            target = {
                "action_id": nslcmop_id,
            }
            self.logger.warning("db_nslcmop={} and timeout_ns_heal={}".format(db_nslcmop,timeout_ns_heal))
            target.update(db_nslcmop.get("operationParams", {}))

            self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
            desc = await self.RO.recreate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_heal, timeout_ns_heal, stage,
                operation="healing"
            )

            # Updating NSR
            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "running",
                "detailed-status": " ".join(stage),
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self._write_op_status(nslcmop_id, stage)
            self.logger.debug(
                logging_text + "ns healed at RO. RO_id={}".format(action_id)
            )

        except Exception as e:
            stage[2] = "ERROR healing at VIM"
            #self.set_vnfr_at_error(db_vnfrs, str(e))
            # log with traceback only for unexpected exception types,
            # then re-raise for the caller (heal) to handle
            self.logger.error(
                "Error healing at VIM {}".format(e),
                exc_info=not isinstance(
                    e,
                    (
                        ROclient.ROClientException,
                        LcmException,
                        DbException,
                        NgRoException,
                    ),
                ),
            )
            raise
7804
7805 def _heal_n2vc(
7806 self,
7807 logging_text,
7808 db_nsr,
7809 db_vnfr,
7810 nslcmop_id,
7811 nsr_id,
7812 nsi_id,
7813 vnfd_id,
7814 vdu_id,
7815 kdu_name,
7816 member_vnf_index,
7817 vdu_index,
7818 vdu_name,
7819 deploy_params,
7820 descriptor_config,
7821 base_folder,
7822 task_instantiation_info,
7823 stage,
7824 ):
7825 # launch instantiate_N2VC in a asyncio task and register task object
7826 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
7827 # if not found, create one entry and update database
7828 # fill db_nsr._admin.deployed.VCA.<index>
7829
7830 self.logger.debug(
7831 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
7832 )
7833 if "execution-environment-list" in descriptor_config:
7834 ee_list = descriptor_config.get("execution-environment-list", [])
7835 elif "juju" in descriptor_config:
7836 ee_list = [descriptor_config] # ns charms
7837 else: # other types as script are not supported
7838 ee_list = []
7839
7840 for ee_item in ee_list:
7841 self.logger.debug(
7842 logging_text
7843 + "_deploy_n2vc ee_item juju={}, helm={}".format(
7844 ee_item.get("juju"), ee_item.get("helm-chart")
7845 )
7846 )
7847 ee_descriptor_id = ee_item.get("id")
7848 if ee_item.get("juju"):
7849 vca_name = ee_item["juju"].get("charm")
7850 vca_type = (
7851 "lxc_proxy_charm"
7852 if ee_item["juju"].get("charm") is not None
7853 else "native_charm"
7854 )
7855 if ee_item["juju"].get("cloud") == "k8s":
7856 vca_type = "k8s_proxy_charm"
7857 elif ee_item["juju"].get("proxy") is False:
7858 vca_type = "native_charm"
7859 elif ee_item.get("helm-chart"):
7860 vca_name = ee_item["helm-chart"]
7861 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
7862 vca_type = "helm"
7863 else:
7864 vca_type = "helm-v3"
7865 else:
7866 self.logger.debug(
7867 logging_text + "skipping non juju neither charm configuration"
7868 )
7869 continue
7870
7871 vca_index = -1
7872 for vca_index, vca_deployed in enumerate(
7873 db_nsr["_admin"]["deployed"]["VCA"]
7874 ):
7875 if not vca_deployed:
7876 continue
7877 if (
7878 vca_deployed.get("member-vnf-index") == member_vnf_index
7879 and vca_deployed.get("vdu_id") == vdu_id
7880 and vca_deployed.get("kdu_name") == kdu_name
7881 and vca_deployed.get("vdu_count_index", 0) == vdu_index
7882 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
7883 ):
7884 break
7885 else:
7886 # not found, create one.
7887 target = (
7888 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
7889 )
7890 if vdu_id:
7891 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
7892 elif kdu_name:
7893 target += "/kdu/{}".format(kdu_name)
7894 vca_deployed = {
7895 "target_element": target,
7896 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
7897 "member-vnf-index": member_vnf_index,
7898 "vdu_id": vdu_id,
7899 "kdu_name": kdu_name,
7900 "vdu_count_index": vdu_index,
7901 "operational-status": "init", # TODO revise
7902 "detailed-status": "", # TODO revise
7903 "step": "initial-deploy", # TODO revise
7904 "vnfd_id": vnfd_id,
7905 "vdu_name": vdu_name,
7906 "type": vca_type,
7907 "ee_descriptor_id": ee_descriptor_id,
7908 }
7909 vca_index += 1
7910
7911 # create VCA and configurationStatus in db
7912 db_dict = {
7913 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
7914 "configurationStatus.{}".format(vca_index): dict(),
7915 }
7916 self.update_db_2("nsrs", nsr_id, db_dict)
7917
7918 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
7919
7920 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
7921 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
7922 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
7923
7924 # Launch task
7925 task_n2vc = asyncio.ensure_future(
7926 self.heal_N2VC(
7927 logging_text=logging_text,
7928 vca_index=vca_index,
7929 nsi_id=nsi_id,
7930 db_nsr=db_nsr,
7931 db_vnfr=db_vnfr,
7932 vdu_id=vdu_id,
7933 kdu_name=kdu_name,
7934 vdu_index=vdu_index,
7935 deploy_params=deploy_params,
7936 config_descriptor=descriptor_config,
7937 base_folder=base_folder,
7938 nslcmop_id=nslcmop_id,
7939 stage=stage,
7940 vca_type=vca_type,
7941 vca_name=vca_name,
7942 ee_config_descriptor=ee_item,
7943 )
7944 )
7945 self.lcm_tasks.register(
7946 "ns",
7947 nsr_id,
7948 nslcmop_id,
7949 "instantiate_N2VC-{}".format(vca_index),
7950 task_n2vc,
7951 )
7952 task_instantiation_info[
7953 task_n2vc
7954 ] = self.task_name_deploy_vca + " {}.{}".format(
7955 member_vnf_index or "", vdu_id or ""
7956 )
7957
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-attach and re-configure one VCA after a heal operation.

        Heal counterpart of the instantiate N2VC flow for the VCA stored at
        position ``vca_index`` of ``db_nsr["_admin"]["deployed"]["VCA"]``:

        * for ``native_charm`` only: waits for the healed VM to be up, builds
          SSH credentials, registers the execution environment again and
          re-installs the configuration software;
        * for proxy charms / helm types: waits for RO to finish the healing
          operation (``_wait_heal_ro``) and re-injects the EE public SSH key
          into the VM when the descriptor requires SSH access;
        * if ``deploy_params.get("run-day1")`` is truthy, re-executes the
          initial config primitives (Day-1) and updates stage/op status.

        Relations, MON/POL model/application fields and prometheus scrape-job
        handling from the instantiate flow are intentionally disabled here
        (kept as triple-quoted blocks) — presumably not needed when healing.

        :param logging_text: prefix for every log message
        :param vca_index: index of this VCA in _admin.deployed.VCA
        :param nsi_id: network slice instance id, or None/empty
        :param db_nsr: nsrs database record (dict)
        :param db_vnfr: vnfrs database record (dict), or falsy for NS charms
        :param vdu_id: VDU id when the charm targets a VDU, else None
        :param kdu_name: KDU name when the charm targets a KDU, else None
        :param vdu_index: VDU count index, may be None
        :param config_descriptor: configuration descriptor of the element
        :param deploy_params: primitive parameters; mutated in place with
            "rw_mgmt_ip" (and "ns_config_info" for NS execution environments)
        :param base_folder: dict with "folder" and "pkg-dir" used to locate
            the charm / helm-chart artifact
        :param nslcmop_id: id of the ongoing heal operation (status reporting)
        :param stage: mutable list of stage strings; stage[0] is updated here
        :param vca_type: "native_charm", "lxc_proxy_charm", "k8s_proxy_charm",
            "helm" or "helm-v3"
        :param vca_name: charm or helm-chart name inside the artifact path
        :param ee_config_descriptor: execution-environment item (used for its
            "id" to filter the initial config primitives)
        :raises LcmException: wrapping any failure, after writing
            configurationStatus "BROKEN" for this VCA
        """
        nsr_id = db_nsr["_id"]
        # dot-terminated path prefix for partial updates of this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            # native charms always use index 0 in the namespace; other types
            # use the vdu count index
            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                # packages without pkg-dir keep artifacts under Scripts/
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                # no key injected yet (user/pub_key None): only wait for the IP
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                #other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive (if present) provides the initial charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    # NOTE(review): the double underscore in
                    # get_ee_ssh_public__key matches the connector API name
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id,self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1",False)
            self.logger.debug(" Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id,vdu_id,runDay1))
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # only log unexpected exception types; known ones carry their own info
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8361
8362 async def _wait_heal_ro(
8363 self,
8364 nsr_id,
8365 timeout=600,
8366 ):
8367 start_time = time()
8368 while time() <= start_time + timeout:
8369 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8370 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"]["operational-status"]
8371 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8372 if operational_status_ro != "healing":
8373 break
8374 await asyncio.sleep(15, loop=self.loop)
8375 else: # timeout_ns_deploy
8376 raise NgRoException("Timeout waiting ns to deploy")