blob: 6af20dd6b0cb50aade83c95af1fac3a66738e312 [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
20import yaml
21import logging
22import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020023import traceback
David Garciad4816682019-12-09 14:57:43 +010024import json
gcalvino35be9152018-12-20 09:33:12 +010025from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError
tierno59d22d22018-09-25 18:10:19 +020026
tierno77677d92019-08-22 13:46:35 +000027from osm_lcm import ROclient
tierno69f0d382020-05-07 13:08:09 +000028from osm_lcm.ng_ro import NgRoClient, NgRoException
tierno744303e2020-01-13 16:46:31 +000029from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
calvinosanch9f9c6f22019-11-04 13:37:39 +010030from n2vc.k8s_helm_conn import K8sHelmConnector
Adam Israelbaacc302019-12-01 12:41:39 -050031from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020032
tierno27246d82018-09-27 15:59:09 +020033from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +020034from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +020035
36from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +000037from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +020038
tierno588547c2020-07-01 15:30:20 +000039from osm_lcm.lcm_helm_conn import LCMHelmConn
40
tierno27246d82018-09-27 15:59:09 +020041from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +020042from http import HTTPStatus
43from time import time
tierno27246d82018-09-27 15:59:09 +020044from uuid import uuid4
tiernob9018152020-04-16 14:18:24 +000045from functools import partial
tierno89f82902020-07-03 14:52:28 +000046from random import randint
tierno59d22d22018-09-25 18:10:19 +020047
tierno69f0d382020-05-07 13:08:09 +000048__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
tierno59d22d22018-09-25 18:10:19 +020049
50
class N2VCJujuConnectorLCM(N2VCJujuConnector):
    """Juju VCA connector specialized for the LCM.

    Extends N2VCJujuConnector with awareness of the "k8s_proxy_charm" vca
    type: those charms are installed through the n2vc library directly and
    skip the regular configuration-software installation step.
    """

    async def create_execution_environment(self, namespace: str, db_dict: dict, reuse_ee_id: str = None,
                                           progress_timeout: float = None, total_timeout: float = None,
                                           config: dict = None, artifact_path: str = None,
                                           vca_type: str = None) -> (str, dict):
        # Signature extends the parent's with two extra parameters:
        # artifact_path and vca_type.
        if vca_type != "k8s_proxy_charm":
            # plain (lxc/native) charms: delegate to the parent implementation
            return await super().create_execution_environment(
                namespace=namespace, db_dict=db_dict, reuse_ee_id=reuse_ee_id,
                progress_timeout=progress_timeout, total_timeout=total_timeout)
        # k8s proxy charm: install it straight through the n2vc library.
        # The charm name is the last path component of the artifact path.
        charm_name = artifact_path[artifact_path.rfind("/") + 1:]
        ee_id = await self.n2vc.install_k8s_proxy_charm(
            charm_name=charm_name,
            namespace=namespace,
            artifact_path=artifact_path,
            db_dict=db_dict)
        return ee_id, None

    async def install_configuration_sw(self, ee_id: str, artifact_path: str, db_dict: dict,
                                       progress_timeout: float = None, total_timeout: float = None,
                                       config: dict = None, num_units: int = 1, vca_type: str = "lxc_proxy_charm"):
        # k8s proxy charms have no separate configuration software to install
        if vca_type == "k8s_proxy_charm":
            return
        return await super().install_configuration_sw(
            ee_id=ee_id, artifact_path=artifact_path, db_dict=db_dict, progress_timeout=progress_timeout,
            total_timeout=total_timeout, config=config, num_units=num_units)
79
class NsLcm(LcmBase):
    # Class-level timeouts (seconds)
    timeout_vca_on_error = 5 * 60  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deploying a ns
    timeout_ns_terminate = 1800  # default global timeout for undeployment of a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_progress_primitive = 10 * 60  # timeout for some progress in a primitive execution

    # Sentinel return values used by sub-operation lookup helpers
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Label used to identify the VCA-deployment task
    task_name_deploy_vca = "Deploying VCA"
kuuseac3a8882019-10-03 10:48:06 +020092
    def __init__(self, db, msg, fs, lcm_tasks, config, loop, prometheus=None):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param db: database connection object
        :param msg: message bus client
        :param fs: filesystem storage client
        :param lcm_tasks: registry of running LCM tasks
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop
        :param prometheus: optional prometheus client (may be None)
        :return: None
        """
        super().__init__(
            db=db,
            msg=msg,
            fs=fs,
            logger=logging.getLogger('lcm.ns')
        )

        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        # "ng" flags usage of the new-generation RO client
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local modifications do not alter the shared config
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (proxy/native charms over Juju)
        self.n2vc = N2VCJujuConnectorLCM(
            db=self.db,
            fs=self.fs,
            log=self.logger,
            loop=self.loop,
            url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
            username=self.vca_config.get('user', None),
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db
        )

        # helm-based execution environment connector
        self.conn_helm_ee = LCMHelmConn(
            db=self.db,
            fs=self.fs,
            log=self.logger,
            loop=self.loop,
            url=None,
            username=None,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db
        )

        # K8s connector for helm charts
        self.k8sclusterhelm = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # K8s connector for juju bundles
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # map kdu deployment type -> K8s connector (both spellings accepted)
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm,
            "chart": self.k8sclusterhelm,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # map vca type -> execution environment connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee
        }

        self.prometheus = prometheus

        # create RO client: new-generation client when "ng" is configured
        if self.ng_ro:
            self.RO = NgRoClient(self.loop, **self.ro_config)
        else:
            self.RO = ROclient.ROClient(self.loop, **self.ro_config)
tierno59d22d22018-09-25 18:10:19 +0200175
quilesj3655ae02019-12-12 16:08:35 +0000176 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200177
quilesj3655ae02019-12-12 16:08:35 +0000178 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
179
180 try:
181 # TODO filter RO descriptor fields...
182
183 # write to database
184 db_dict = dict()
185 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
186 db_dict['deploymentStatus'] = ro_descriptor
187 self.update_db_2("nsrs", nsrs_id, db_dict)
188
189 except Exception as e:
190 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
191
    async def _on_update_n2vc_db(self, table, filter, path, updated_data):
        """Callback invoked by the VCA connectors when juju data changes.

        Reads the current NS record and the juju status, then updates the nsrs
        record: 'vcaStatus', possibly 'configurationStatus' for the affected
        VCA, and 'nsState' (READY <-> DEGRADED) depending on machine and
        application health. Errors (other than cancellation/timeout) are
        logged and swallowed.
        :param table: db table that changed (expected 'nsrs')
        :param filter: db filter; its '_id' identifies the NS record
        :param path: dotted path of the changed data; its last numeric
                     component is the VCA index
        :param updated_data: changed data (currently unused here)
        """
        # remove last dot from path (if exists)
        if path.endswith('.'):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))

        try:

            nsr_id = filter.get('_id')

            # read ns record from database
            nsr = self.db.get_one(table='nsrs', q_filter=filter)
            current_ns_status = nsr.get('nsState')

            # get vca status for NS
            status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)

            # vcaStatus
            db_dict = dict()
            db_dict['vcaStatus'] = status_dict

            # update configurationStatus for this VCA
            try:
                # VCA index is the last dotted component of the path
                vca_index = int(path[path.rfind(".")+1:])

                vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
                vca_status = vca_list[vca_index].get('status')

                configuration_status_list = nsr.get('configurationStatus')
                config_status = configuration_status_list[vca_index].get('status')

                # NOTE(review): db_dict has no 'configurationStatus' key, so both
                # assignments below raise KeyError and are swallowed by the except
                # underneath — likely a dotted key like
                # 'configurationStatus.{}.status'.format(vca_index) was intended; confirm.
                if config_status == 'BROKEN' and vca_status != 'failed':
                    db_dict['configurationStatus'][vca_index] = 'READY'
                elif config_status != 'BROKEN' and vca_status == 'failed':
                    db_dict['configurationStatus'][vca_index] = 'BROKEN'
            except Exception as e:
                # not update configurationStatus
                self.logger.debug('Error updating vca_index (ignore): {}'.format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ('READY', 'DEGRADED'):
                error_description = ''
                # check machines
                if status_dict.get('machines'):
                    for machine_id in status_dict.get('machines'):
                        machine = status_dict.get('machines').get(machine_id)
                        # check machine agent-status
                        if machine.get('agent-status'):
                            s = machine.get('agent-status').get('status')
                            if s != 'started':
                                is_degraded = True
                                error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
                        # check machine instance status
                        if machine.get('instance-status'):
                            s = machine.get('instance-status').get('status')
                            if s != 'running':
                                is_degraded = True
                                error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
                # check applications
                if status_dict.get('applications'):
                    for app_id in status_dict.get('applications'):
                        app = status_dict.get('applications').get(app_id)
                        # check application status
                        if app.get('status'):
                            s = app.get('status').get('status')
                            if s != 'active':
                                is_degraded = True
                                error_description += 'application {} status={} ; '.format(app_id, s)

                if error_description:
                    db_dict['errorDescription'] = error_description
                # transition READY -> DEGRADED or DEGRADED -> READY as needed
                if current_ns_status == 'READY' and is_degraded:
                    db_dict['nsState'] = 'DEGRADED'
                if current_ns_status == 'DEGRADED' and not is_degraded:
                    db_dict['nsState'] = 'READY'

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeout must propagate to the caller
            raise
        except Exception as e:
            self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200280
    def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
        """
        Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
        :param vnfd: input vnfd
        :param new_id: overrides vnf id if provided
        :param additionalParams: Instantiation params for VNFs provided
        :param nsrId: Id of the NSR
        :return: copy of vnfd
        :raises LcmException: on cloud-init file read errors, missing Jinja2
                variables, or Jinja2 template errors
        """
        try:
            vnfd_RO = deepcopy(vnfd)
            # remove unused by RO configuration, monitoring, scaling and internal keys
            vnfd_RO.pop("_id", None)
            vnfd_RO.pop("_admin", None)
            vnfd_RO.pop("vnf-configuration", None)
            vnfd_RO.pop("monitoring-param", None)
            vnfd_RO.pop("scaling-group-descriptor", None)
            vnfd_RO.pop("kdu", None)
            vnfd_RO.pop("k8s-cluster", None)
            if new_id:
                vnfd_RO["id"] = new_id

            # parse cloud-init or cloud-init-file with the provided variables using Jinja2
            for vdu in get_iterable(vnfd_RO, "vdu"):
                cloud_init_file = None
                if vdu.get("cloud-init-file"):
                    # read the cloud-init template from package storage
                    base_folder = vnfd["_admin"]["storage"]
                    cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
                                                                   vdu["cloud-init-file"])
                    with self.fs.file_open(cloud_init_file, "r") as ci_file:
                        cloud_init_content = ci_file.read()
                    # RO receives inline cloud-init only; drop the file reference
                    vdu.pop("cloud-init-file", None)
                elif vdu.get("cloud-init"):
                    cloud_init_content = vdu["cloud-init"]
                else:
                    continue

                # every undeclared Jinja2 variable must come from additionalParams
                env = Environment()
                ast = env.parse(cloud_init_content)
                mandatory_vars = meta.find_undeclared_variables(ast)
                if mandatory_vars:
                    for var in mandatory_vars:
                        if not additionalParams or var not in additionalParams.keys():
                            raise LcmException("Variable '{}' defined at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
                                               "file, must be provided in the instantiation parameters inside the "
                                               "'additionalParamsForVnf' block".format(var, vnfd["id"], vdu["id"]))
                template = Template(cloud_init_content)
                cloud_init_content = template.render(additionalParams or {})
                vdu["cloud-init"] = cloud_init_content

            return vnfd_RO
        # NOTE: both handlers below read the loop variables vdu/cloud_init_file,
        # so these exceptions can only be raised from inside the vdu loop
        except FsException as e:
            raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
                               format(vnfd["id"], vdu["id"], cloud_init_file, e))
        except (TemplateError, TemplateNotFound, TemplateSyntaxError) as e:
            raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
                               format(vnfd["id"], vdu["id"], e))
tierno59d22d22018-09-25 18:10:19 +0200338
    def _ns_params_2_RO(self, ns_params, nsd, vnfd_dict, db_vnfrs, n2vc_key_list):
        """
        Creates a RO ns descriptor from OSM ns_instantiate params
        :param ns_params: OSM instantiate params
        :param nsd: NS descriptor (database content)
        :param vnfd_dict: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index. {member-vnf-index: {vnfr_object}, ...}
        :param n2vc_key_list: list of ssh public keys to inject into vdus that need management access
        :return: The RO ns descriptor
        :raises LcmException: on disabled VIM/WIM accounts or inconsistent instantiate parameters
        """
        # caches of already-translated OSM account ids -> RO ids
        vim_2_RO = {}
        wim_2_RO = {}
        # TODO feature 1417: Check that no instantiation is set over PDU
        # check if PDU forces a concrete vim-network-id and add it
        # check if PDU contains a SDN-assist info (dpid, switch, port) and pass it to RO

        def vim_account_2_RO(vim_account):
            # translate an OSM vim_account id to the RO datacenter id (cached)
            if vim_account in vim_2_RO:
                return vim_2_RO[vim_account]

            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
            if db_vim["_admin"]["operationalState"] != "ENABLED":
                raise LcmException("VIM={} is not available. operationalState={}".format(
                    vim_account, db_vim["_admin"]["operationalState"]))
            RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
            vim_2_RO[vim_account] = RO_vim_id
            return RO_vim_id

        def wim_account_2_RO(wim_account):
            # translate an OSM wim_account id to the RO account id (cached);
            # non-string values are passed through untouched
            if isinstance(wim_account, str):
                if wim_account in wim_2_RO:
                    return wim_2_RO[wim_account]

                db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
                if db_wim["_admin"]["operationalState"] != "ENABLED":
                    raise LcmException("WIM={} is not available. operationalState={}".format(
                        wim_account, db_wim["_admin"]["operationalState"]))
                RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
                wim_2_RO[wim_account] = RO_wim_id
                return RO_wim_id
            else:
                return wim_account

        def ip_profile_2_RO(ip_profile):
            # adapt an OSM IM ip-profile to RO naming conventions
            RO_ip_profile = deepcopy((ip_profile))
            if "dns-server" in RO_ip_profile:
                if isinstance(RO_ip_profile["dns-server"], list):
                    RO_ip_profile["dns-address"] = []
                    for ds in RO_ip_profile.pop("dns-server"):
                        RO_ip_profile["dns-address"].append(ds['address'])
                else:
                    RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
            if RO_ip_profile.get("ip-version") == "ipv4":
                RO_ip_profile["ip-version"] = "IPv4"
            if RO_ip_profile.get("ip-version") == "ipv6":
                RO_ip_profile["ip-version"] = "IPv6"
            if "dhcp-params" in RO_ip_profile:
                RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
            return RO_ip_profile

        if not ns_params:
            return None
        RO_ns_params = {
            # "name": ns_params["nsName"],
            # "description": ns_params.get("nsDescription"),
            "datacenter": vim_account_2_RO(ns_params["vimAccountId"]),
            "wim_account": wim_account_2_RO(ns_params.get("wimAccountId")),
            # "scenario": ns_params["nsdId"],
        }
        # set vim_account of each vnf if different from general vim_account.
        # Get this information from <vnfr> database content, key vim-account-id
        # Vim account can be set by placement_engine and it may be different from
        # the instantiate parameters (vnfs.member-vnf-index.datacenter).
        for vnf_index, vnfr in db_vnfrs.items():
            if vnfr.get("vim-account-id") and vnfr["vim-account-id"] != ns_params["vimAccountId"]:
                populate_dict(RO_ns_params, ("vnfs", vnf_index, "datacenter"), vim_account_2_RO(vnfr["vim-account-id"]))

        # determine which vdus need the n2vc ssh keys injected (mgmt access)
        n2vc_key_list = n2vc_key_list or []
        for vnfd_ref, vnfd in vnfd_dict.items():
            vdu_needed_access = []
            mgmt_cp = None
            if vnfd.get("vnf-configuration"):
                ssh_required = deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required"))
                if ssh_required and vnfd.get("mgmt-interface"):
                    if vnfd["mgmt-interface"].get("vdu-id"):
                        vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
                    elif vnfd["mgmt-interface"].get("cp"):
                        # resolve the connection point to a vdu in the loop below
                        mgmt_cp = vnfd["mgmt-interface"]["cp"]

            for vdu in vnfd.get("vdu", ()):
                if vdu.get("vdu-configuration"):
                    ssh_required = deep_get(vdu, ("vdu-configuration", "config-access", "ssh-access", "required"))
                    if ssh_required:
                        vdu_needed_access.append(vdu["id"])
                elif mgmt_cp:
                    # find the vdu exposing the management connection point
                    for vdu_interface in vdu.get("interface"):
                        if vdu_interface.get("external-connection-point-ref") and \
                                vdu_interface["external-connection-point-ref"] == mgmt_cp:
                            vdu_needed_access.append(vdu["id"])
                            mgmt_cp = None
                            break

            if vdu_needed_access:
                for vnf_member in nsd.get("constituent-vnfd"):
                    if vnf_member["vnfd-id-ref"] != vnfd_ref:
                        continue
                    for vdu in vdu_needed_access:
                        populate_dict(RO_ns_params,
                                      ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
                                      n2vc_key_list)

        if ns_params.get("vduImage"):
            RO_ns_params["vduImage"] = ns_params["vduImage"]

        if ns_params.get("ssh_keys"):
            RO_ns_params["cloud-config"] = {"key-pairs": ns_params["ssh_keys"]}
        # per-vnf instantiation parameters
        for vnf_params in get_iterable(ns_params, "vnf"):
            for constituent_vnfd in nsd["constituent-vnfd"]:
                if constituent_vnfd["member-vnf-index"] == vnf_params["member-vnf-index"]:
                    vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                    break
            else:
                raise LcmException("Invalid instantiate parameter vnf:member-vnf-index={} is not present at nsd:"
                                   "constituent-vnfd".format(vnf_params["member-vnf-index"]))

            for vdu_params in get_iterable(vnf_params, "vdu"):
                # TODO feature 1417: check that this VDU exist and it is not a PDU
                if vdu_params.get("volume"):
                    for volume_params in vdu_params["volume"]:
                        if volume_params.get("vim-volume-id"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "devices", volume_params["name"], "vim_id"),
                                          volume_params["vim-volume-id"])
                if vdu_params.get("interface"):
                    for interface_params in vdu_params["interface"]:
                        if interface_params.get("ip-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "ip_address"),
                                          interface_params["ip-address"])
                        if interface_params.get("mac-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "mac_address"),
                                          interface_params["mac-address"])
                        if interface_params.get("floating-ip-required"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "floating-ip"),
                                          interface_params["floating-ip-required"])

            # per-vnf internal virtual links
            for internal_vld_params in get_iterable(vnf_params, "internal-vld"):
                if internal_vld_params.get("vim-network-name"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-name"),
                                  internal_vld_params["vim-network-name"])
                if internal_vld_params.get("vim-network-id"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-id"),
                                  internal_vld_params["vim-network-id"])
                if internal_vld_params.get("ip-profile"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "ip-profile"),
                                  ip_profile_2_RO(internal_vld_params["ip-profile"]))
                if internal_vld_params.get("provider-network"):

                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "provider-network"),
                                  internal_vld_params["provider-network"].copy())

                for icp_params in get_iterable(internal_vld_params, "internal-connection-point"):
                    # look for interface
                    iface_found = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for vdu_interface in vdu_descriptor["interface"]:
                            if vdu_interface.get("internal-connection-point-ref") == icp_params["id-ref"]:
                                if icp_params.get("ip-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "ip_address"),
                                                  icp_params["ip-address"])

                                if icp_params.get("mac-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "mac_address"),
                                                  icp_params["mac-address"])
                                iface_found = True
                                break
                        if iface_found:
                            break
                    else:
                        raise LcmException("Invalid instantiate parameter vnf:member-vnf-index[{}]:"
                                           "internal-vld:id-ref={} is not present at vnfd:internal-"
                                           "connection-point".format(vnf_params["member-vnf-index"],
                                                                     icp_params["id-ref"]))

        # ns-level virtual links
        for vld_params in get_iterable(ns_params, "vld"):
            if "ip-profile" in vld_params:
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"),
                              ip_profile_2_RO(vld_params["ip-profile"]))

            if vld_params.get("provider-network"):

                populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"),
                              vld_params["provider-network"].copy())

            # NOTE(review): the trailing comma below turns this statement into a
            # one-element tuple; harmless, but looks like a typo — confirm.
            if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None:
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"),
                              wim_account_2_RO(vld_params["wimAccountId"])),
            if vld_params.get("vim-network-name"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-name"], dict):
                    # one site entry per vim_account -> vim network mapping
                    for vim_account, vim_net in vld_params["vim-network-name"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)

            if vld_params.get("vim-network-id"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-id"], dict):
                    for vim_account, vim_net in vld_params["vim-network-id"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-id"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
            if vld_params.get("ns-net"):
                if isinstance(vld_params["ns-net"], dict):
                    # NOTE(review): only the last item of ns-net survives the
                    # loop and gets written to use-network — confirm intended.
                    for vld_id, instance_scenario_id in vld_params["ns-net"].items():
                        RO_vld_ns_net = {"instance_scenario_id": instance_scenario_id, "osm_id": vld_id}
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "use-network"), RO_vld_ns_net)
            if "vnfd-connection-point-ref" in vld_params:
                for cp_params in vld_params["vnfd-connection-point-ref"]:
                    # look for interface
                    for constituent_vnfd in nsd["constituent-vnfd"]:
                        if constituent_vnfd["member-vnf-index"] == cp_params["member-vnf-index-ref"]:
                            vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={} "
                            "is not present at nsd:constituent-vnfd".format(cp_params["member-vnf-index-ref"]))
                    match_cp = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for interface_descriptor in vdu_descriptor["interface"]:
                            if interface_descriptor.get("external-connection-point-ref") == \
                                    cp_params["vnfd-connection-point-ref"]:
                                match_cp = True
                                break
                        if match_cp:
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={}:"
                            "vnfd-connection-point-ref={} is not present at vnfd={}".format(
                                cp_params["member-vnf-index-ref"],
                                cp_params["vnfd-connection-point-ref"],
                                vnf_descriptor["id"]))
                    if cp_params.get("ip-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "ip_address"),
                                      cp_params["ip-address"])
                    if cp_params.get("mac-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "mac_address"),
                                      cp_params["mac-address"])
        return RO_ns_params
615
tierno27246d82018-09-27 15:59:09 +0200616 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None):
617 # make a copy to do not change
618 vdu_create = copy(vdu_create)
619 vdu_delete = copy(vdu_delete)
620
621 vdurs = db_vnfr.get("vdur")
622 if vdurs is None:
623 vdurs = []
624 vdu_index = len(vdurs)
625 while vdu_index:
626 vdu_index -= 1
627 vdur = vdurs[vdu_index]
628 if vdur.get("pdu-type"):
629 continue
630 vdu_id_ref = vdur["vdu-id-ref"]
631 if vdu_create and vdu_create.get(vdu_id_ref):
632 for index in range(0, vdu_create[vdu_id_ref]):
633 vdur = deepcopy(vdur)
634 vdur["_id"] = str(uuid4())
635 vdur["count-index"] += 1
636 vdurs.insert(vdu_index+1+index, vdur)
637 del vdu_create[vdu_id_ref]
638 if vdu_delete and vdu_delete.get(vdu_id_ref):
639 del vdurs[vdu_index]
640 vdu_delete[vdu_id_ref] -= 1
641 if not vdu_delete[vdu_id_ref]:
642 del vdu_delete[vdu_id_ref]
643 # check all operations are done
644 if vdu_create or vdu_delete:
645 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
646 vdu_create))
647 if vdu_delete:
648 raise LcmException("Error scaling IN VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
649 vdu_delete))
650
651 vnfr_update = {"vdur": vdurs}
652 db_vnfr["vdur"] = vdurs
653 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
654
tiernof578e552018-11-08 19:07:20 +0100655 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
656 """
657 Updates database nsr with the RO info for the created vld
658 :param ns_update_nsr: dictionary to be filled with the updated info
659 :param db_nsr: content of db_nsr. This is also modified
660 :param nsr_desc_RO: nsr descriptor from RO
661 :return: Nothing, LcmException is raised on errors
662 """
663
664 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
665 for net_RO in get_iterable(nsr_desc_RO, "nets"):
666 if vld["id"] != net_RO.get("ns_net_osm_id"):
667 continue
668 vld["vim-id"] = net_RO.get("vim_net_id")
669 vld["name"] = net_RO.get("vim_name")
670 vld["status"] = net_RO.get("status")
671 vld["status-detailed"] = net_RO.get("error_msg")
672 ns_update_nsr["vld.{}".format(vld_index)] = vld
673 break
674 else:
675 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
676
tiernoe876f672020-02-13 14:34:48 +0000677 def set_vnfr_at_error(self, db_vnfrs, error_text):
678 try:
679 for db_vnfr in db_vnfrs.values():
680 vnfr_update = {"status": "ERROR"}
681 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
682 if "status" not in vdur:
683 vdur["status"] = "ERROR"
684 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
685 if error_text:
686 vdur["status-detailed"] = str(error_text)
687 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
688 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
689 except DbException as e:
690 self.logger.error("Cannot update vnf. {}".format(e))
691
tierno59d22d22018-09-25 18:10:19 +0200692 def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
693 """
694 Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
tierno27246d82018-09-27 15:59:09 +0200695 :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
696 :param nsr_desc_RO: nsr descriptor from RO
697 :return: Nothing, LcmException is raised on errors
tierno59d22d22018-09-25 18:10:19 +0200698 """
699 for vnf_index, db_vnfr in db_vnfrs.items():
700 for vnf_RO in nsr_desc_RO["vnfs"]:
tierno27246d82018-09-27 15:59:09 +0200701 if vnf_RO["member_vnf_index"] != vnf_index:
702 continue
703 vnfr_update = {}
tiernof578e552018-11-08 19:07:20 +0100704 if vnf_RO.get("ip_address"):
tierno1674de82019-04-09 13:03:14 +0000705 db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
tiernof578e552018-11-08 19:07:20 +0100706 elif not db_vnfr.get("ip-address"):
tierno0ec0c272020-02-19 17:43:01 +0000707 if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address
708 raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))
tierno59d22d22018-09-25 18:10:19 +0200709
tierno27246d82018-09-27 15:59:09 +0200710 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
711 vdur_RO_count_index = 0
712 if vdur.get("pdu-type"):
713 continue
714 for vdur_RO in get_iterable(vnf_RO, "vms"):
715 if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
716 continue
717 if vdur["count-index"] != vdur_RO_count_index:
718 vdur_RO_count_index += 1
719 continue
720 vdur["vim-id"] = vdur_RO.get("vim_vm_id")
tierno1674de82019-04-09 13:03:14 +0000721 if vdur_RO.get("ip_address"):
722 vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
tierno274ed572019-04-04 13:33:27 +0000723 else:
724 vdur["ip-address"] = None
tierno27246d82018-09-27 15:59:09 +0200725 vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
726 vdur["name"] = vdur_RO.get("vim_name")
727 vdur["status"] = vdur_RO.get("status")
728 vdur["status-detailed"] = vdur_RO.get("error_msg")
729 for ifacer in get_iterable(vdur, "interfaces"):
730 for interface_RO in get_iterable(vdur_RO, "interfaces"):
731 if ifacer["name"] == interface_RO.get("internal_name"):
732 ifacer["ip-address"] = interface_RO.get("ip_address")
733 ifacer["mac-address"] = interface_RO.get("mac_address")
734 break
735 else:
736 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
quilesj7e13aeb2019-10-08 13:34:55 +0200737 "from VIM info"
738 .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
tierno27246d82018-09-27 15:59:09 +0200739 vnfr_update["vdur.{}".format(vdu_index)] = vdur
740 break
741 else:
tierno15b1cf12019-08-29 13:21:40 +0000742 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
743 "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))
tiernof578e552018-11-08 19:07:20 +0100744
745 for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
746 for net_RO in get_iterable(nsr_desc_RO, "nets"):
747 if vld["id"] != net_RO.get("vnf_net_osm_id"):
748 continue
749 vld["vim-id"] = net_RO.get("vim_net_id")
750 vld["name"] = net_RO.get("vim_name")
751 vld["status"] = net_RO.get("status")
752 vld["status-detailed"] = net_RO.get("error_msg")
753 vnfr_update["vld.{}".format(vld_index)] = vld
754 break
755 else:
tierno15b1cf12019-08-29 13:21:40 +0000756 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
tiernof578e552018-11-08 19:07:20 +0100757 vnf_index, vld["id"]))
758
tierno27246d82018-09-27 15:59:09 +0200759 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
760 break
tierno59d22d22018-09-25 18:10:19 +0200761
762 else:
tierno15b1cf12019-08-29 13:21:40 +0000763 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
tierno59d22d22018-09-25 18:10:19 +0200764
tierno5ee02052019-12-05 19:55:02 +0000765 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000766 """
767 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000768 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000769 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
770 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
771 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
772 """
tierno5ee02052019-12-05 19:55:02 +0000773 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
774 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000775 mapping = {}
776 ns_config_info = {"osm-config-mapping": mapping}
777 for vca in vca_deployed_list:
778 if not vca["member-vnf-index"]:
779 continue
780 if not vca["vdu_id"]:
781 mapping[vca["member-vnf-index"]] = vca["application"]
782 else:
783 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
784 vca["application"]
785 return ns_config_info
786
787 @staticmethod
tierno4fa7f8e2020-07-08 15:33:55 +0000788 def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed, ee_descriptor_id):
tiernoc3f2a822019-11-05 13:45:04 +0000789 """
790 Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal
791 primitives as verify-ssh-credentials, or config when needed
792 :param desc_primitive_list: information of the descriptor
793 :param vca_deployed: information of the deployed, needed for known if it is related to an NS, VNF, VDU and if
794 this element contains a ssh public key
tierno4fa7f8e2020-07-08 15:33:55 +0000795 :param ee_descriptor_id: execution environment descriptor id. It is the value of
796 XXX_configuration.execution-environment-list.INDEX.id; it can be None
tiernoc3f2a822019-11-05 13:45:04 +0000797 :return: The modified list. Can ba an empty list, but always a list
798 """
tierno4fa7f8e2020-07-08 15:33:55 +0000799
800 primitive_list = desc_primitive_list or []
801
802 # filter primitives by ee_id
803 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
804
805 # sort by 'seq'
806 if primitive_list:
807 primitive_list.sort(key=lambda val: int(val['seq']))
808
tiernoc3f2a822019-11-05 13:45:04 +0000809 # look for primitive config, and get the position. None if not present
810 config_position = None
811 for index, primitive in enumerate(primitive_list):
812 if primitive["name"] == "config":
813 config_position = index
814 break
815
816 # for NS, add always a config primitive if not present (bug 874)
817 if not vca_deployed["member-vnf-index"] and config_position is None:
818 primitive_list.insert(0, {"name": "config", "parameter": []})
819 config_position = 0
tierno4fa7f8e2020-07-08 15:33:55 +0000820 # TODO revise if needed: for VNF/VDU add verify-ssh-credentials after config
tiernoc3f2a822019-11-05 13:45:04 +0000821 if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"):
822 primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []})
823 return primitive_list
824
tierno69f0d382020-05-07 13:08:09 +0000825 async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
826 n2vc_key_list, stage, start_deploy, timeout_ns_deploy):
827 nslcmop_id = db_nslcmop["_id"]
828 target = {
829 "name": db_nsr["name"],
830 "ns": {"vld": []},
831 "vnf": [],
832 "image": deepcopy(db_nsr["image"]),
833 "flavor": deepcopy(db_nsr["flavor"]),
834 "action_id": nslcmop_id,
835 }
836 for image in target["image"]:
837 image["vim_info"] = []
838 for flavor in target["flavor"]:
839 flavor["vim_info"] = []
840
841 ns_params = db_nslcmop.get("operationParams")
842 ssh_keys = []
843 if ns_params.get("ssh_keys"):
844 ssh_keys += ns_params.get("ssh_keys")
845 if n2vc_key_list:
846 ssh_keys += n2vc_key_list
847
848 cp2target = {}
849 for vld_index, vld in enumerate(nsd.get("vld")):
850 target_vld = {"id": vld["id"],
851 "name": vld["name"],
852 "mgmt-network": vld.get("mgmt-network", False),
853 "type": vld.get("type"),
854 "vim_info": [{"vim-network-name": vld.get("vim-network-name"),
855 "vim_account_id": ns_params["vimAccountId"]}],
856 }
857 for cp in vld["vnfd-connection-point-ref"]:
858 cp2target["member_vnf:{}.{}".format(cp["member-vnf-index-ref"], cp["vnfd-connection-point-ref"])] = \
859 "nsrs:{}:vld.{}".format(nsr_id, vld_index)
860 target["ns"]["vld"].append(target_vld)
861 for vnfr in db_vnfrs.values():
862 vnfd = db_vnfds_ref[vnfr["vnfd-ref"]]
863 target_vnf = deepcopy(vnfr)
864 for vld in target_vnf.get("vld", ()):
865 # check if connected to a ns.vld
866 vnf_cp = next((cp for cp in vnfd.get("connection-point", ()) if
867 cp.get("internal-vld-ref") == vld["id"]), None)
868 if vnf_cp:
869 ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"])
870 if cp2target.get(ns_cp):
871 vld["target"] = cp2target[ns_cp]
872 vld["vim_info"] = [{"vim-network-name": vld.get("vim-network-name"),
873 "vim_account_id": vnfr["vim-account-id"]}]
874
875 for vdur in target_vnf.get("vdur", ()):
876 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
877 vdud_index, vdud = next(k for k in enumerate(vnfd["vdu"]) if k[1]["id"] == vdur["vdu-id-ref"])
878 # vdur["additionalParams"] = vnfr.get("additionalParamsForVnf") # TODO additional params for VDU
879
880 if ssh_keys:
881 if deep_get(vdud, ("vdu-configuration", "config-access", "ssh-access", "required")):
882 vdur["ssh-keys"] = ssh_keys
883 vdur["ssh-access-required"] = True
884 elif deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required")) and \
885 any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]):
886 vdur["ssh-keys"] = ssh_keys
887 vdur["ssh-access-required"] = True
888
889 # cloud-init
890 if vdud.get("cloud-init-file"):
891 vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file"))
892 elif vdud.get("cloud-init"):
893 vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], vdud_index)
894
895 # flavor
896 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
897 if not next((vi for vi in ns_flavor["vim_info"] if
898 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
899 ns_flavor["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
900 # image
901 ns_image = target["image"][int(vdur["ns-image-id"])]
902 if not next((vi for vi in ns_image["vim_info"] if
903 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
904 ns_image["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
905
906 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
907 target["vnf"].append(target_vnf)
908
909 desc = await self.RO.deploy(nsr_id, target)
910 action_id = desc["action_id"]
911 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage)
912
913 # Updating NSR
914 db_nsr_update = {
915 "_admin.deployed.RO.operational-status": "running",
916 "detailed-status": " ".join(stage)
917 }
918 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
919 self.update_db_2("nsrs", nsr_id, db_nsr_update)
920 self._write_op_status(nslcmop_id, stage)
921 self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id))
922 return
923
924 async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_time, timeout, stage):
925 detailed_status_old = None
926 db_nsr_update = {}
927 while time() <= start_time + timeout:
928 desc_status = await self.RO.status(nsr_id, action_id)
929 if desc_status["status"] == "FAILED":
930 raise NgRoException(desc_status["details"])
931 elif desc_status["status"] == "BUILD":
932 stage[2] = "VIM: ({})".format(desc_status["details"])
933 elif desc_status["status"] == "DONE":
934 stage[2] = "Deployed at VIM"
935 break
936 else:
937 assert False, "ROclient.check_ns_status returns unknown {}".format(desc_status["status"])
938 if stage[2] != detailed_status_old:
939 detailed_status_old = stage[2]
940 db_nsr_update["detailed-status"] = " ".join(stage)
941 self.update_db_2("nsrs", nsr_id, db_nsr_update)
942 self._write_op_status(nslcmop_id, stage)
943 await asyncio.sleep(5, loop=self.loop)
944 else: # timeout_ns_deploy
945 raise NgRoException("Timeout waiting ns to deploy")
946
947 async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
948 db_nsr_update = {}
949 failed_detail = []
950 action_id = None
951 start_deploy = time()
952 try:
953 target = {
954 "ns": {"vld": []},
955 "vnf": [],
956 "image": [],
957 "flavor": [],
958 }
959 desc = await self.RO.deploy(nsr_id, target)
960 action_id = desc["action_id"]
961 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
962 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
963 self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id))
964
965 # wait until done
966 delete_timeout = 20 * 60 # 20 minutes
967 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage)
968
969 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
970 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
971 # delete all nsr
972 await self.RO.delete(nsr_id)
973 except Exception as e:
974 if isinstance(e, NgRoException) and e.http_code == 404: # not found
975 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
976 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
977 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
978 self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id))
979 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
980 failed_detail.append("delete conflict: {}".format(e))
981 self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e))
982 else:
983 failed_detail.append("delete error: {}".format(e))
984 self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e))
985
986 if failed_detail:
987 stage[2] = "Error deleting from VIM"
988 else:
989 stage[2] = "Deleted from VIM"
990 db_nsr_update["detailed-status"] = " ".join(stage)
991 self.update_db_2("nsrs", nsr_id, db_nsr_update)
992 self._write_op_status(nslcmop_id, stage)
993
994 if failed_detail:
995 raise LcmException("; ".join(failed_detail))
996 return
997
tiernoe876f672020-02-13 14:34:48 +0000998 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
999 n2vc_key_list, stage):
tiernoe95ed362020-04-23 08:24:57 +00001000 """
1001 Instantiate at RO
1002 :param logging_text: preffix text to use at logging
1003 :param nsr_id: nsr identity
1004 :param nsd: database content of ns descriptor
1005 :param db_nsr: database content of ns record
1006 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1007 :param db_vnfrs:
1008 :param db_vnfds_ref: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1009 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1010 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1011 :return: None or exception
1012 """
tiernoe876f672020-02-13 14:34:48 +00001013 try:
1014 db_nsr_update = {}
1015 RO_descriptor_number = 0 # number of descriptors created at RO
1016 vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
1017 nslcmop_id = db_nslcmop["_id"]
1018 start_deploy = time()
1019 ns_params = db_nslcmop.get("operationParams")
1020 if ns_params and ns_params.get("timeout_ns_deploy"):
1021 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1022 else:
1023 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001024
tiernoe876f672020-02-13 14:34:48 +00001025 # Check for and optionally request placement optimization. Database will be updated if placement activated
1026 stage[2] = "Waiting for Placement."
tierno8790a3d2020-04-23 22:49:52 +00001027 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1028 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1029 for vnfr in db_vnfrs.values():
1030 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1031 break
1032 else:
1033 ns_params["vimAccountId"] == vnfr["vim-account-id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001034
tierno69f0d382020-05-07 13:08:09 +00001035 if self.ng_ro:
1036 return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs,
1037 db_vnfds_ref, n2vc_key_list, stage, start_deploy,
1038 timeout_ns_deploy)
tiernoe876f672020-02-13 14:34:48 +00001039 # deploy RO
tiernoe876f672020-02-13 14:34:48 +00001040 # get vnfds, instantiate at RO
1041 for c_vnf in nsd.get("constituent-vnfd", ()):
1042 member_vnf_index = c_vnf["member-vnf-index"]
1043 vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
1044 vnfd_ref = vnfd["id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001045
tiernoe876f672020-02-13 14:34:48 +00001046 stage[2] = "Creating vnfd='{}' member_vnf_index='{}' at RO".format(vnfd_ref, member_vnf_index)
1047 db_nsr_update["detailed-status"] = " ".join(stage)
1048 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1049 self._write_op_status(nslcmop_id, stage)
calvinosanch9f9c6f22019-11-04 13:37:39 +01001050
tiernoe876f672020-02-13 14:34:48 +00001051 # self.logger.debug(logging_text + stage[2])
1052 vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, member_vnf_index[:23])
1053 vnf_index_2_RO_id[member_vnf_index] = vnfd_id_RO
1054 RO_descriptor_number += 1
1055
1056 # look position at deployed.RO.vnfd if not present it will be appended at the end
1057 for index, vnf_deployed in enumerate(db_nsr["_admin"]["deployed"]["RO"]["vnfd"]):
1058 if vnf_deployed["member-vnf-index"] == member_vnf_index:
1059 break
1060 else:
1061 index = len(db_nsr["_admin"]["deployed"]["RO"]["vnfd"])
1062 db_nsr["_admin"]["deployed"]["RO"]["vnfd"].append(None)
1063
1064 # look if present
1065 RO_update = {"member-vnf-index": member_vnf_index}
1066 vnfd_list = await self.RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
1067 if vnfd_list:
1068 RO_update["id"] = vnfd_list[0]["uuid"]
1069 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' exists at RO. Using RO_id={}".
1070 format(vnfd_ref, member_vnf_index, vnfd_list[0]["uuid"]))
1071 else:
1072 vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO, db_vnfrs[c_vnf["member-vnf-index"]].
1073 get("additionalParamsForVnf"), nsr_id)
1074 desc = await self.RO.create("vnfd", descriptor=vnfd_RO)
1075 RO_update["id"] = desc["uuid"]
1076 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' created at RO. RO_id={}".format(
1077 vnfd_ref, member_vnf_index, desc["uuid"]))
1078 db_nsr_update["_admin.deployed.RO.vnfd.{}".format(index)] = RO_update
1079 db_nsr["_admin"]["deployed"]["RO"]["vnfd"][index] = RO_update
1080
1081 # create nsd at RO
1082 nsd_ref = nsd["id"]
1083
1084 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1085 db_nsr_update["detailed-status"] = " ".join(stage)
1086 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1087 self._write_op_status(nslcmop_id, stage)
1088
1089 # self.logger.debug(logging_text + stage[2])
1090 RO_osm_nsd_id = "{}.{}.{}".format(nsr_id, RO_descriptor_number, nsd_ref[:23])
tiernod8323042019-08-09 11:32:23 +00001091 RO_descriptor_number += 1
tiernoe876f672020-02-13 14:34:48 +00001092 nsd_list = await self.RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
1093 if nsd_list:
1094 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = nsd_list[0]["uuid"]
1095 self.logger.debug(logging_text + "nsd={} exists at RO. Using RO_id={}".format(
1096 nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001097 else:
tiernoe876f672020-02-13 14:34:48 +00001098 nsd_RO = deepcopy(nsd)
1099 nsd_RO["id"] = RO_osm_nsd_id
1100 nsd_RO.pop("_id", None)
1101 nsd_RO.pop("_admin", None)
1102 for c_vnf in nsd_RO.get("constituent-vnfd", ()):
1103 member_vnf_index = c_vnf["member-vnf-index"]
1104 c_vnf["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
1105 for c_vld in nsd_RO.get("vld", ()):
1106 for cp in c_vld.get("vnfd-connection-point-ref", ()):
1107 member_vnf_index = cp["member-vnf-index-ref"]
1108 cp["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
tiernod8323042019-08-09 11:32:23 +00001109
tiernoe876f672020-02-13 14:34:48 +00001110 desc = await self.RO.create("nsd", descriptor=nsd_RO)
1111 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1112 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = desc["uuid"]
1113 self.logger.debug(logging_text + "nsd={} created at RO. RO_id={}".format(nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001114 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1115
tiernoe876f672020-02-13 14:34:48 +00001116 # Crate ns at RO
1117 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1118 db_nsr_update["detailed-status"] = " ".join(stage)
1119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1120 self._write_op_status(nslcmop_id, stage)
tiernod8323042019-08-09 11:32:23 +00001121
tiernoe876f672020-02-13 14:34:48 +00001122 # if present use it unless in error status
1123 RO_nsr_id = deep_get(db_nsr, ("_admin", "deployed", "RO", "nsr_id"))
1124 if RO_nsr_id:
1125 try:
1126 stage[2] = "Looking for existing ns at RO"
1127 db_nsr_update["detailed-status"] = " ".join(stage)
1128 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1129 self._write_op_status(nslcmop_id, stage)
1130 # self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1131 desc = await self.RO.show("ns", RO_nsr_id)
tiernod8323042019-08-09 11:32:23 +00001132
tiernoe876f672020-02-13 14:34:48 +00001133 except ROclient.ROClientException as e:
1134 if e.http_code != HTTPStatus.NOT_FOUND:
1135 raise
1136 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1137 if RO_nsr_id:
1138 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1139 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1140 if ns_status == "ERROR":
1141 stage[2] = "Deleting ns at RO. RO_ns_id={}".format(RO_nsr_id)
1142 self.logger.debug(logging_text + stage[2])
1143 await self.RO.delete("ns", RO_nsr_id)
1144 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1145 if not RO_nsr_id:
1146 stage[2] = "Checking dependencies"
1147 db_nsr_update["detailed-status"] = " ".join(stage)
1148 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1149 self._write_op_status(nslcmop_id, stage)
1150 # self.logger.debug(logging_text + stage[2])
tiernod8323042019-08-09 11:32:23 +00001151
tiernoe876f672020-02-13 14:34:48 +00001152 # check if VIM is creating and wait look if previous tasks in process
1153 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account", ns_params["vimAccountId"])
1154 if task_dependency:
1155 stage[2] = "Waiting for related tasks '{}' to be completed".format(task_name)
1156 self.logger.debug(logging_text + stage[2])
1157 await asyncio.wait(task_dependency, timeout=3600)
1158 if ns_params.get("vnf"):
1159 for vnf in ns_params["vnf"]:
1160 if "vimAccountId" in vnf:
1161 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account",
1162 vnf["vimAccountId"])
1163 if task_dependency:
1164 stage[2] = "Waiting for related tasks '{}' to be completed.".format(task_name)
1165 self.logger.debug(logging_text + stage[2])
1166 await asyncio.wait(task_dependency, timeout=3600)
1167
1168 stage[2] = "Checking instantiation parameters."
tiernoe95ed362020-04-23 08:24:57 +00001169 RO_ns_params = self._ns_params_2_RO(ns_params, nsd, db_vnfds_ref, db_vnfrs, n2vc_key_list)
tiernoe876f672020-02-13 14:34:48 +00001170 stage[2] = "Deploying ns at VIM."
1171 db_nsr_update["detailed-status"] = " ".join(stage)
1172 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1173 self._write_op_status(nslcmop_id, stage)
1174
1175 desc = await self.RO.create("ns", descriptor=RO_ns_params, name=db_nsr["name"], scenario=RO_nsd_uuid)
1176 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = desc["uuid"]
1177 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1178 db_nsr_update["_admin.deployed.RO.nsr_status"] = "BUILD"
1179 self.logger.debug(logging_text + "ns created at RO. RO_id={}".format(desc["uuid"]))
1180
1181 # wait until NS is ready
1182 stage[2] = "Waiting VIM to deploy ns."
1183 db_nsr_update["detailed-status"] = " ".join(stage)
1184 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1185 self._write_op_status(nslcmop_id, stage)
1186 detailed_status_old = None
1187 self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1188
1189 old_desc = None
1190 while time() <= start_deploy + timeout_ns_deploy:
tiernod8323042019-08-09 11:32:23 +00001191 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001192
tiernoe876f672020-02-13 14:34:48 +00001193 # deploymentStatus
1194 if desc != old_desc:
1195 # desc has changed => update db
1196 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
1197 old_desc = desc
tiernod8323042019-08-09 11:32:23 +00001198
tiernoe876f672020-02-13 14:34:48 +00001199 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1200 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1201 if ns_status == "ERROR":
1202 raise ROclient.ROClientException(ns_status_info)
1203 elif ns_status == "BUILD":
1204 stage[2] = "VIM: ({})".format(ns_status_info)
1205 elif ns_status == "ACTIVE":
1206 stage[2] = "Waiting for management IP address reported by the VIM. Updating VNFRs."
1207 try:
1208 self.ns_update_vnfr(db_vnfrs, desc)
1209 break
1210 except LcmExceptionNoMgmtIP:
1211 pass
1212 else:
1213 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
1214 if stage[2] != detailed_status_old:
1215 detailed_status_old = stage[2]
1216 db_nsr_update["detailed-status"] = " ".join(stage)
1217 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1218 self._write_op_status(nslcmop_id, stage)
1219 await asyncio.sleep(5, loop=self.loop)
1220 else: # timeout_ns_deploy
1221 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tiernod8323042019-08-09 11:32:23 +00001222
tiernoe876f672020-02-13 14:34:48 +00001223 # Updating NSR
1224 self.ns_update_nsr(db_nsr_update, db_nsr, desc)
tiernod8323042019-08-09 11:32:23 +00001225
tiernoe876f672020-02-13 14:34:48 +00001226 db_nsr_update["_admin.deployed.RO.operational-status"] = "running"
1227 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1228 stage[2] = "Deployed at VIM"
1229 db_nsr_update["detailed-status"] = " ".join(stage)
1230 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1231 self._write_op_status(nslcmop_id, stage)
1232 # await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, "_admin.deployed", db_nsr_update)
1233 # self.logger.debug(logging_text + "Deployed at VIM")
tierno69f0d382020-05-07 13:08:09 +00001234 except (ROclient.ROClientException, LcmException, DbException, NgRoException) as e:
tierno067e04a2020-03-31 12:53:13 +00001235 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +00001236 self.set_vnfr_at_error(db_vnfrs, str(e))
1237 raise
quilesj7e13aeb2019-10-08 13:34:55 +02001238
tiernoa5088192019-11-26 16:12:53 +00001239 async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
1240 """
1241 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1242 :param logging_text: prefix use for logging
1243 :param nsr_id:
1244 :param vnfr_id:
1245 :param vdu_id:
1246 :param vdu_index:
1247 :param pub_key: public ssh key to inject, None to skip
1248 :param user: user to apply the public ssh key
1249 :return: IP address
1250 """
quilesj7e13aeb2019-10-08 13:34:55 +02001251
tiernoa5088192019-11-26 16:12:53 +00001252 # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
tiernod8323042019-08-09 11:32:23 +00001253 ro_nsr_id = None
1254 ip_address = None
1255 nb_tries = 0
1256 target_vdu_id = None
quilesj3149f262019-12-03 10:58:10 +00001257 ro_retries = 0
quilesj7e13aeb2019-10-08 13:34:55 +02001258
tiernod8323042019-08-09 11:32:23 +00001259 while True:
quilesj7e13aeb2019-10-08 13:34:55 +02001260
quilesj3149f262019-12-03 10:58:10 +00001261 ro_retries += 1
1262 if ro_retries >= 360: # 1 hour
1263 raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))
1264
tiernod8323042019-08-09 11:32:23 +00001265 await asyncio.sleep(10, loop=self.loop)
quilesj7e13aeb2019-10-08 13:34:55 +02001266
1267 # get ip address
tiernod8323042019-08-09 11:32:23 +00001268 if not target_vdu_id:
1269 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
quilesj3149f262019-12-03 10:58:10 +00001270
1271 if not vdu_id: # for the VNF case
tiernoe876f672020-02-13 14:34:48 +00001272 if db_vnfr.get("status") == "ERROR":
1273 raise LcmException("Cannot inject ssh-key because target VNF is in error state")
tiernod8323042019-08-09 11:32:23 +00001274 ip_address = db_vnfr.get("ip-address")
1275 if not ip_address:
1276 continue
quilesj3149f262019-12-03 10:58:10 +00001277 vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
1278 else: # VDU case
1279 vdur = next((x for x in get_iterable(db_vnfr, "vdur")
1280 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
1281
tierno0e8c3f02020-03-12 17:18:21 +00001282 if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
1283 vdur = db_vnfr["vdur"][0]
quilesj3149f262019-12-03 10:58:10 +00001284 if not vdur:
tierno0e8c3f02020-03-12 17:18:21 +00001285 raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
1286 vdu_index))
quilesj7e13aeb2019-10-08 13:34:55 +02001287
tierno0e8c3f02020-03-12 17:18:21 +00001288 if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE":
quilesj3149f262019-12-03 10:58:10 +00001289 ip_address = vdur.get("ip-address")
1290 if not ip_address:
1291 continue
1292 target_vdu_id = vdur["vdu-id-ref"]
1293 elif vdur.get("status") == "ERROR":
1294 raise LcmException("Cannot inject ssh-key because target VM is in error state")
1295
tiernod8323042019-08-09 11:32:23 +00001296 if not target_vdu_id:
1297 continue
tiernod8323042019-08-09 11:32:23 +00001298
quilesj7e13aeb2019-10-08 13:34:55 +02001299 # inject public key into machine
1300 if pub_key and user:
tiernoe876f672020-02-13 14:34:48 +00001301 # wait until NS is deployed at RO
1302 if not ro_nsr_id:
1303 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1304 ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
1305 if not ro_nsr_id:
1306 continue
1307
tiernoa5088192019-11-26 16:12:53 +00001308 # self.logger.debug(logging_text + "Inserting RO key")
tierno0e8c3f02020-03-12 17:18:21 +00001309 if vdur.get("pdu-type"):
1310 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1311 return ip_address
quilesj7e13aeb2019-10-08 13:34:55 +02001312 try:
1313 ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
tierno69f0d382020-05-07 13:08:09 +00001314 if self.ng_ro:
1315 target = {"action": "inject_ssh_key", "key": pub_key, "user": user,
1316 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdu_id}]}],
1317 }
1318 await self.RO.deploy(nsr_id, target)
1319 else:
1320 result_dict = await self.RO.create_action(
1321 item="ns",
1322 item_id_name=ro_nsr_id,
1323 descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
1324 )
1325 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1326 if not result_dict or not isinstance(result_dict, dict):
1327 raise LcmException("Unknown response from RO when injecting key")
1328 for result in result_dict.values():
1329 if result.get("vim_result") == 200:
1330 break
1331 else:
1332 raise ROclient.ROClientException("error injecting key: {}".format(
1333 result.get("description")))
1334 break
1335 except NgRoException as e:
1336 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001337 except ROclient.ROClientException as e:
tiernoa5088192019-11-26 16:12:53 +00001338 if not nb_tries:
1339 self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
1340 format(e, 20*10))
quilesj7e13aeb2019-10-08 13:34:55 +02001341 nb_tries += 1
tiernoa5088192019-11-26 16:12:53 +00001342 if nb_tries >= 20:
quilesj7e13aeb2019-10-08 13:34:55 +02001343 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001344 else:
quilesj7e13aeb2019-10-08 13:34:55 +02001345 break
1346
1347 return ip_address
1348
tierno5ee02052019-12-05 19:55:02 +00001349 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1350 """
1351 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1352 """
1353 my_vca = vca_deployed_list[vca_index]
1354 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
quilesj3655ae02019-12-12 16:08:35 +00001355 # vdu or kdu: no dependencies
tierno5ee02052019-12-05 19:55:02 +00001356 return
1357 timeout = 300
1358 while timeout >= 0:
quilesj3655ae02019-12-12 16:08:35 +00001359 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1360 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1361 configuration_status_list = db_nsr["configurationStatus"]
1362 for index, vca_deployed in enumerate(configuration_status_list):
tierno5ee02052019-12-05 19:55:02 +00001363 if index == vca_index:
quilesj3655ae02019-12-12 16:08:35 +00001364 # myself
tierno5ee02052019-12-05 19:55:02 +00001365 continue
1366 if not my_vca.get("member-vnf-index") or \
1367 (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
quilesj3655ae02019-12-12 16:08:35 +00001368 internal_status = configuration_status_list[index].get("status")
1369 if internal_status == 'READY':
1370 continue
1371 elif internal_status == 'BROKEN':
tierno5ee02052019-12-05 19:55:02 +00001372 raise LcmException("Configuration aborted because dependent charm/s has failed")
quilesj3655ae02019-12-12 16:08:35 +00001373 else:
1374 break
tierno5ee02052019-12-05 19:55:02 +00001375 else:
quilesj3655ae02019-12-12 16:08:35 +00001376 # no dependencies, return
tierno5ee02052019-12-05 19:55:02 +00001377 return
1378 await asyncio.sleep(10)
1379 timeout -= 1
tierno5ee02052019-12-05 19:55:02 +00001380
1381 raise LcmException("Configuration aborted because dependent charm/s timeout")
1382
tiernoe876f672020-02-13 14:34:48 +00001383 async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
tierno89f82902020-07-03 14:52:28 +00001384 config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name,
1385 ee_config_descriptor):
tiernod8323042019-08-09 11:32:23 +00001386 nsr_id = db_nsr["_id"]
1387 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
tiernoda6fb102019-11-23 00:36:52 +00001388 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernod8323042019-08-09 11:32:23 +00001389 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
tierno89f82902020-07-03 14:52:28 +00001390 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
quilesj7e13aeb2019-10-08 13:34:55 +02001391 db_dict = {
1392 'collection': 'nsrs',
1393 'filter': {'_id': nsr_id},
1394 'path': db_update_entry
1395 }
tiernod8323042019-08-09 11:32:23 +00001396 step = ""
1397 try:
quilesj3655ae02019-12-12 16:08:35 +00001398
1399 element_type = 'NS'
1400 element_under_configuration = nsr_id
1401
tiernod8323042019-08-09 11:32:23 +00001402 vnfr_id = None
1403 if db_vnfr:
1404 vnfr_id = db_vnfr["_id"]
tierno89f82902020-07-03 14:52:28 +00001405 osm_config["osm"]["vnf_id"] = vnfr_id
tiernod8323042019-08-09 11:32:23 +00001406
1407 namespace = "{nsi}.{ns}".format(
1408 nsi=nsi_id if nsi_id else "",
1409 ns=nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001410
tiernod8323042019-08-09 11:32:23 +00001411 if vnfr_id:
quilesj3655ae02019-12-12 16:08:35 +00001412 element_type = 'VNF'
1413 element_under_configuration = vnfr_id
quilesjb8a35dd2020-01-09 15:10:14 +00001414 namespace += ".{}".format(vnfr_id)
tiernod8323042019-08-09 11:32:23 +00001415 if vdu_id:
1416 namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
quilesj3655ae02019-12-12 16:08:35 +00001417 element_type = 'VDU'
quilesjb8a35dd2020-01-09 15:10:14 +00001418 element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
tierno89f82902020-07-03 14:52:28 +00001419 osm_config["osm"]["vdu_id"] = vdu_id
tierno51183952020-04-03 15:48:18 +00001420 elif kdu_name:
1421 namespace += ".{}".format(kdu_name)
1422 element_type = 'KDU'
1423 element_under_configuration = kdu_name
tierno89f82902020-07-03 14:52:28 +00001424 osm_config["osm"]["kdu_name"] = kdu_name
tiernod8323042019-08-09 11:32:23 +00001425
1426 # Get artifact path
tierno588547c2020-07-01 15:30:20 +00001427 artifact_path = "{}/{}/{}/{}".format(
tiernod8323042019-08-09 11:32:23 +00001428 base_folder["folder"],
1429 base_folder["pkg-dir"],
tierno588547c2020-07-01 15:30:20 +00001430 "charms" if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts",
1431 vca_name
tiernod8323042019-08-09 11:32:23 +00001432 )
tierno4fa7f8e2020-07-08 15:33:55 +00001433 # get initial_config_primitive_list that applies to this element
1434 initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
1435
1436 # add config if not present for NS charm
1437 ee_descriptor_id = ee_config_descriptor.get("id")
1438 initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
1439 vca_deployed, ee_descriptor_id)
tiernod8323042019-08-09 11:32:23 +00001440
tierno588547c2020-07-01 15:30:20 +00001441 # n2vc_redesign STEP 3.1
tierno588547c2020-07-01 15:30:20 +00001442 # find old ee_id if exists
1443 ee_id = vca_deployed.get("ee_id")
tiernod8323042019-08-09 11:32:23 +00001444
tierno588547c2020-07-01 15:30:20 +00001445 # create or register execution environment in VCA
1446 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm"):
quilesj7e13aeb2019-10-08 13:34:55 +02001447
tierno588547c2020-07-01 15:30:20 +00001448 self._write_configuration_status(
1449 nsr_id=nsr_id,
1450 vca_index=vca_index,
1451 status='CREATING',
1452 element_under_configuration=element_under_configuration,
1453 element_type=element_type
1454 )
tiernod8323042019-08-09 11:32:23 +00001455
tierno588547c2020-07-01 15:30:20 +00001456 step = "create execution environment"
1457 self.logger.debug(logging_text + step)
1458 ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
1459 namespace=namespace,
1460 reuse_ee_id=ee_id,
1461 db_dict=db_dict,
tierno89f82902020-07-03 14:52:28 +00001462 config=osm_config,
tierno588547c2020-07-01 15:30:20 +00001463 artifact_path=artifact_path,
1464 vca_type=vca_type)
quilesj3655ae02019-12-12 16:08:35 +00001465
tierno588547c2020-07-01 15:30:20 +00001466 elif vca_type == "native_charm":
1467 step = "Waiting to VM being up and getting IP address"
1468 self.logger.debug(logging_text + step)
1469 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1470 user=None, pub_key=None)
1471 credentials = {"hostname": rw_mgmt_ip}
1472 # get username
1473 username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1474 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1475 # merged. Meanwhile let's get username from initial-config-primitive
tierno4fa7f8e2020-07-08 15:33:55 +00001476 if not username and initial_config_primitive_list:
1477 for config_primitive in initial_config_primitive_list:
tierno588547c2020-07-01 15:30:20 +00001478 for param in config_primitive.get("parameter", ()):
1479 if param["name"] == "ssh-username":
1480 username = param["value"]
1481 break
1482 if not username:
tierno4fa7f8e2020-07-08 15:33:55 +00001483 raise LcmException("Cannot determine the username neither with 'initial-config-primitive' nor with "
tierno588547c2020-07-01 15:30:20 +00001484 "'config-access.ssh-access.default-user'")
1485 credentials["username"] = username
1486 # n2vc_redesign STEP 3.2
quilesj3655ae02019-12-12 16:08:35 +00001487
tierno588547c2020-07-01 15:30:20 +00001488 self._write_configuration_status(
1489 nsr_id=nsr_id,
1490 vca_index=vca_index,
1491 status='REGISTERING',
1492 element_under_configuration=element_under_configuration,
1493 element_type=element_type
1494 )
quilesj3655ae02019-12-12 16:08:35 +00001495
tierno588547c2020-07-01 15:30:20 +00001496 step = "register execution environment {}".format(credentials)
1497 self.logger.debug(logging_text + step)
1498 ee_id = await self.vca_map[vca_type].register_execution_environment(
1499 credentials=credentials, namespace=namespace, db_dict=db_dict)
tierno3bedc9b2019-11-27 15:46:57 +00001500
tierno588547c2020-07-01 15:30:20 +00001501 # for compatibility with MON/POL modules, the need model and application name at database
1502 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1503 ee_id_parts = ee_id.split('.')
1504 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1505 if len(ee_id_parts) >= 2:
1506 model_name = ee_id_parts[0]
1507 application_name = ee_id_parts[1]
1508 db_nsr_update[db_update_entry + "model"] = model_name
1509 db_nsr_update[db_update_entry + "application"] = application_name
tiernod8323042019-08-09 11:32:23 +00001510
1511 # n2vc_redesign STEP 3.3
tiernod8323042019-08-09 11:32:23 +00001512 step = "Install configuration Software"
quilesj3655ae02019-12-12 16:08:35 +00001513
tiernoc231a872020-01-21 08:49:05 +00001514 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001515 nsr_id=nsr_id,
1516 vca_index=vca_index,
1517 status='INSTALLING SW',
1518 element_under_configuration=element_under_configuration,
tierno51183952020-04-03 15:48:18 +00001519 element_type=element_type,
tierno588547c2020-07-01 15:30:20 +00001520 other_update=db_nsr_update
quilesj3655ae02019-12-12 16:08:35 +00001521 )
1522
tierno3bedc9b2019-11-27 15:46:57 +00001523 # TODO check if already done
quilesj7e13aeb2019-10-08 13:34:55 +02001524 self.logger.debug(logging_text + step)
David Garcia18a63322020-04-01 16:14:59 +02001525 config = None
tierno588547c2020-07-01 15:30:20 +00001526 if vca_type == "native_charm":
tierno4fa7f8e2020-07-08 15:33:55 +00001527 config_primitive = next((p for p in initial_config_primitive_list if p["name"] == "config"), None)
1528 if config_primitive:
1529 config = self._map_primitive_params(
1530 config_primitive,
1531 {},
1532 deploy_params
1533 )
tierno588547c2020-07-01 15:30:20 +00001534 num_units = 1
1535 if vca_type == "lxc_proxy_charm":
1536 if element_type == "NS":
1537 num_units = db_nsr.get("config-units") or 1
1538 elif element_type == "VNF":
1539 num_units = db_vnfr.get("config-units") or 1
1540 elif element_type == "VDU":
1541 for v in db_vnfr["vdur"]:
1542 if vdu_id == v["vdu-id-ref"]:
1543 num_units = v.get("config-units") or 1
1544 break
David Garcia06a11f22020-03-25 18:21:37 +01001545
tierno588547c2020-07-01 15:30:20 +00001546 await self.vca_map[vca_type].install_configuration_sw(
1547 ee_id=ee_id,
1548 artifact_path=artifact_path,
1549 db_dict=db_dict,
1550 config=config,
1551 num_units=num_units,
1552 vca_type=vca_type
1553 )
quilesj7e13aeb2019-10-08 13:34:55 +02001554
quilesj63f90042020-01-17 09:53:55 +00001555 # write in db flag of configuration_sw already installed
1556 self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})
1557
1558 # add relations for this VCA (wait for other peers related with this VCA)
tierno588547c2020-07-01 15:30:20 +00001559 await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
1560 vca_index=vca_index, vca_type=vca_type)
quilesj63f90042020-01-17 09:53:55 +00001561
quilesj7e13aeb2019-10-08 13:34:55 +02001562 # if SSH access is required, then get execution environment SSH public
tierno588547c2020-07-01 15:30:20 +00001563 if vca_type in ("lxc_proxy_charm", "helm"): # if native charm we have waited already to VM be UP
tierno3bedc9b2019-11-27 15:46:57 +00001564 pub_key = None
1565 user = None
tierno588547c2020-07-01 15:30:20 +00001566 # self.logger.debug("get ssh key block")
tierno3bedc9b2019-11-27 15:46:57 +00001567 if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
tierno588547c2020-07-01 15:30:20 +00001568 # self.logger.debug("ssh key needed")
tierno3bedc9b2019-11-27 15:46:57 +00001569 # Needed to inject a ssh key
1570 user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1571 step = "Install configuration Software, getting public ssh key"
tierno588547c2020-07-01 15:30:20 +00001572 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02001573
tiernoacc90452019-12-10 11:06:54 +00001574 step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
tierno3bedc9b2019-11-27 15:46:57 +00001575 else:
tierno588547c2020-07-01 15:30:20 +00001576 # self.logger.debug("no need to get ssh key")
tierno3bedc9b2019-11-27 15:46:57 +00001577 step = "Waiting to VM being up and getting IP address"
1578 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02001579
tierno3bedc9b2019-11-27 15:46:57 +00001580 # n2vc_redesign STEP 5.1
1581 # wait for RO (ip-address) Insert pub_key into VM
tierno5ee02052019-12-05 19:55:02 +00001582 if vnfr_id:
1583 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1584 user=user, pub_key=pub_key)
1585 else:
1586 rw_mgmt_ip = None # This is for a NS configuration
tierno3bedc9b2019-11-27 15:46:57 +00001587
1588 self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
quilesj7e13aeb2019-10-08 13:34:55 +02001589
tiernoa5088192019-11-26 16:12:53 +00001590 # store rw_mgmt_ip in deploy params for later replacement
quilesj7e13aeb2019-10-08 13:34:55 +02001591 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
tiernod8323042019-08-09 11:32:23 +00001592
1593 # n2vc_redesign STEP 6 Execute initial config primitive
quilesj7e13aeb2019-10-08 13:34:55 +02001594 step = 'execute initial config primitive'
quilesj3655ae02019-12-12 16:08:35 +00001595
1596 # wait for dependent primitives execution (NS -> VNF -> VDU)
tierno5ee02052019-12-05 19:55:02 +00001597 if initial_config_primitive_list:
1598 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
quilesj3655ae02019-12-12 16:08:35 +00001599
1600 # stage, in function of element type: vdu, kdu, vnf or ns
1601 my_vca = vca_deployed_list[vca_index]
1602 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1603 # VDU or KDU
tiernoe876f672020-02-13 14:34:48 +00001604 stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
quilesj3655ae02019-12-12 16:08:35 +00001605 elif my_vca.get("member-vnf-index"):
1606 # VNF
tiernoe876f672020-02-13 14:34:48 +00001607 stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
quilesj3655ae02019-12-12 16:08:35 +00001608 else:
1609 # NS
tiernoe876f672020-02-13 14:34:48 +00001610 stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'
quilesj3655ae02019-12-12 16:08:35 +00001611
tiernoc231a872020-01-21 08:49:05 +00001612 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001613 nsr_id=nsr_id,
1614 vca_index=vca_index,
1615 status='EXECUTING PRIMITIVE'
1616 )
1617
1618 self._write_op_status(
1619 op_id=nslcmop_id,
1620 stage=stage
1621 )
1622
tiernoe876f672020-02-13 14:34:48 +00001623 check_if_terminated_needed = True
tiernod8323042019-08-09 11:32:23 +00001624 for initial_config_primitive in initial_config_primitive_list:
tiernoda6fb102019-11-23 00:36:52 +00001625 # adding information on the vca_deployed if it is a NS execution environment
1626 if not vca_deployed["member-vnf-index"]:
David Garciad4816682019-12-09 14:57:43 +01001627 deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
tiernod8323042019-08-09 11:32:23 +00001628 # TODO check if already done
1629 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
tierno3bedc9b2019-11-27 15:46:57 +00001630
tiernod8323042019-08-09 11:32:23 +00001631 step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
1632 self.logger.debug(logging_text + step)
tierno588547c2020-07-01 15:30:20 +00001633 await self.vca_map[vca_type].exec_primitive(
quilesj7e13aeb2019-10-08 13:34:55 +02001634 ee_id=ee_id,
1635 primitive_name=initial_config_primitive["name"],
1636 params_dict=primitive_params_,
1637 db_dict=db_dict
1638 )
tiernoe876f672020-02-13 14:34:48 +00001639 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
1640 if check_if_terminated_needed:
1641 if config_descriptor.get('terminate-config-primitive'):
1642 self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
1643 check_if_terminated_needed = False
quilesj3655ae02019-12-12 16:08:35 +00001644
tiernod8323042019-08-09 11:32:23 +00001645 # TODO register in database that primitive is done
quilesj7e13aeb2019-10-08 13:34:55 +02001646
tierno89f82902020-07-03 14:52:28 +00001647 # STEP 7 Configure metrics
1648 if vca_type == "helm":
1649 prometheus_jobs = await self.add_prometheus_metrics(
1650 ee_id=ee_id,
1651 artifact_path=artifact_path,
1652 ee_config_descriptor=ee_config_descriptor,
1653 vnfr_id=vnfr_id,
1654 nsr_id=nsr_id,
1655 target_ip=rw_mgmt_ip,
1656 )
1657 if prometheus_jobs:
1658 self.update_db_2("nsrs", nsr_id, {db_update_entry + "prometheus_jobs": prometheus_jobs})
1659
quilesj7e13aeb2019-10-08 13:34:55 +02001660 step = "instantiated at VCA"
1661 self.logger.debug(logging_text + step)
1662
tiernoc231a872020-01-21 08:49:05 +00001663 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001664 nsr_id=nsr_id,
1665 vca_index=vca_index,
1666 status='READY'
1667 )
1668
tiernod8323042019-08-09 11:32:23 +00001669 except Exception as e: # TODO not use Exception but N2VC exception
quilesj3655ae02019-12-12 16:08:35 +00001670 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
tiernoe876f672020-02-13 14:34:48 +00001671 if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
1672 self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
tiernoc231a872020-01-21 08:49:05 +00001673 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001674 nsr_id=nsr_id,
1675 vca_index=vca_index,
1676 status='BROKEN'
1677 )
tiernoe876f672020-02-13 14:34:48 +00001678 raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00001679
quilesj4cda56b2019-12-05 10:02:20 +00001680 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
tiernoa2143262020-03-27 16:20:40 +00001681 error_description: str = None, error_detail: str = None, other_update: dict = None):
tiernoe876f672020-02-13 14:34:48 +00001682 """
1683 Update db_nsr fields.
1684 :param nsr_id:
1685 :param ns_state:
1686 :param current_operation:
1687 :param current_operation_id:
1688 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00001689 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00001690 :param other_update: Other required changes at database if provided, will be cleared
1691 :return:
1692 """
quilesj4cda56b2019-12-05 10:02:20 +00001693 try:
tiernoe876f672020-02-13 14:34:48 +00001694 db_dict = other_update or {}
1695 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1696 db_dict["_admin.current-operation"] = current_operation_id
1697 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
quilesj4cda56b2019-12-05 10:02:20 +00001698 db_dict["currentOperation"] = current_operation
1699 db_dict["currentOperationID"] = current_operation_id
1700 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00001701 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00001702
1703 if ns_state:
1704 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00001705 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001706 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001707 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1708
tiernoe876f672020-02-13 14:34:48 +00001709 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1710 operation_state: str = None, other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001711 try:
tiernoe876f672020-02-13 14:34:48 +00001712 db_dict = other_update or {}
quilesj3655ae02019-12-12 16:08:35 +00001713 db_dict['queuePosition'] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00001714 if isinstance(stage, list):
1715 db_dict['stage'] = stage[0]
1716 db_dict['detailed-status'] = " ".join(stage)
1717 elif stage is not None:
1718 db_dict['stage'] = str(stage)
1719
1720 if error_message is not None:
quilesj3655ae02019-12-12 16:08:35 +00001721 db_dict['errorMessage'] = error_message
tiernoe876f672020-02-13 14:34:48 +00001722 if operation_state is not None:
1723 db_dict['operationState'] = operation_state
1724 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00001725 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001726 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001727 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1728
tierno51183952020-04-03 15:48:18 +00001729 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00001730 try:
tierno51183952020-04-03 15:48:18 +00001731 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00001732 # configurationStatus
1733 config_status = db_nsr.get('configurationStatus')
1734 if config_status:
tierno51183952020-04-03 15:48:18 +00001735 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1736 enumerate(config_status) if v}
quilesj3655ae02019-12-12 16:08:35 +00001737 # update status
tierno51183952020-04-03 15:48:18 +00001738 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001739
tiernoe876f672020-02-13 14:34:48 +00001740 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001741 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1742
quilesj63f90042020-01-17 09:53:55 +00001743 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
tierno51183952020-04-03 15:48:18 +00001744 element_under_configuration: str = None, element_type: str = None,
1745 other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001746
1747 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1748 # .format(vca_index, status))
1749
1750 try:
1751 db_path = 'configurationStatus.{}.'.format(vca_index)
tierno51183952020-04-03 15:48:18 +00001752 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00001753 if status:
1754 db_dict[db_path + 'status'] = status
quilesj3655ae02019-12-12 16:08:35 +00001755 if element_under_configuration:
1756 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
1757 if element_type:
1758 db_dict[db_path + 'elementType'] = element_type
1759 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001760 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001761 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
1762 .format(status, nsr_id, vca_index, e))
quilesj4cda56b2019-12-05 10:02:20 +00001763
tierno38089af2020-04-16 07:56:58 +00001764 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
1765 """
1766 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
1767 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
1768 Database is used because the result can be obtained from a different LCM worker in case of HA.
1769 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
1770 :param db_nslcmop: database content of nslcmop
1771 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
tierno8790a3d2020-04-23 22:49:52 +00001772 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
1773 computed 'vim-account-id'
tierno38089af2020-04-16 07:56:58 +00001774 """
tierno8790a3d2020-04-23 22:49:52 +00001775 modified = False
tierno38089af2020-04-16 07:56:58 +00001776 nslcmop_id = db_nslcmop['_id']
magnussonle9198bb2020-01-21 13:00:51 +01001777 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
1778 if placement_engine == "PLA":
tierno38089af2020-04-16 07:56:58 +00001779 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
1780 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
magnussonle9198bb2020-01-21 13:00:51 +01001781 db_poll_interval = 5
tierno38089af2020-04-16 07:56:58 +00001782 wait = db_poll_interval * 10
magnussonle9198bb2020-01-21 13:00:51 +01001783 pla_result = None
1784 while not pla_result and wait >= 0:
1785 await asyncio.sleep(db_poll_interval)
1786 wait -= db_poll_interval
tierno38089af2020-04-16 07:56:58 +00001787 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
magnussonle9198bb2020-01-21 13:00:51 +01001788 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
1789
1790 if not pla_result:
tierno38089af2020-04-16 07:56:58 +00001791 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
magnussonle9198bb2020-01-21 13:00:51 +01001792
1793 for pla_vnf in pla_result['vnf']:
1794 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
1795 if not pla_vnf.get('vimAccountId') or not vnfr:
1796 continue
tierno8790a3d2020-04-23 22:49:52 +00001797 modified = True
magnussonle9198bb2020-01-21 13:00:51 +01001798 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
tierno38089af2020-04-16 07:56:58 +00001799 # Modifies db_vnfrs
1800 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
tierno8790a3d2020-04-23 22:49:52 +00001801 return modified
magnussonle9198bb2020-01-21 13:00:51 +01001802
1803 def update_nsrs_with_pla_result(self, params):
1804 try:
1805 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
1806 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
1807 except Exception as e:
1808 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
1809
tierno59d22d22018-09-25 18:10:19 +02001810 async def instantiate(self, nsr_id, nslcmop_id):
quilesj7e13aeb2019-10-08 13:34:55 +02001811 """
1812
1813 :param nsr_id: ns instance to deploy
1814 :param nslcmop_id: operation to run
1815 :return:
1816 """
kuused124bfe2019-06-18 12:09:24 +02001817
1818 # Try to lock HA task here
1819 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
1820 if not task_is_locked_by_me:
quilesj3655ae02019-12-12 16:08:35 +00001821 self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
kuused124bfe2019-06-18 12:09:24 +02001822 return
1823
tierno59d22d22018-09-25 18:10:19 +02001824 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
1825 self.logger.debug(logging_text + "Enter")
quilesj7e13aeb2019-10-08 13:34:55 +02001826
sousaedua0deb2d2020-04-21 12:08:14 +01001827 # Sync from FSMongo
1828 self.fs.sync()
1829
tierno59d22d22018-09-25 18:10:19 +02001830 # get all needed from database
quilesj7e13aeb2019-10-08 13:34:55 +02001831
1832 # database nsrs record
tierno59d22d22018-09-25 18:10:19 +02001833 db_nsr = None
quilesj7e13aeb2019-10-08 13:34:55 +02001834
1835 # database nslcmops record
tierno59d22d22018-09-25 18:10:19 +02001836 db_nslcmop = None
quilesj7e13aeb2019-10-08 13:34:55 +02001837
1838 # update operation on nsrs
tiernoe876f672020-02-13 14:34:48 +00001839 db_nsr_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001840 # update operation on nslcmops
tierno59d22d22018-09-25 18:10:19 +02001841 db_nslcmop_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001842
tierno59d22d22018-09-25 18:10:19 +02001843 nslcmop_operation_state = None
quilesj7e13aeb2019-10-08 13:34:55 +02001844 db_vnfrs = {} # vnf's info indexed by member-index
1845 # n2vc_info = {}
tiernoe876f672020-02-13 14:34:48 +00001846 tasks_dict_info = {} # from task to info text
tierno59d22d22018-09-25 18:10:19 +02001847 exc = None
tiernoe876f672020-02-13 14:34:48 +00001848 error_list = []
1849 stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
1850 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02001851 try:
kuused124bfe2019-06-18 12:09:24 +02001852 # wait for any previous tasks in process
1853 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
1854
quilesj7e13aeb2019-10-08 13:34:55 +02001855 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
tiernoe876f672020-02-13 14:34:48 +00001856 stage[1] = "Reading from database,"
quilesj4cda56b2019-12-05 10:02:20 +00001857 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
tiernoe876f672020-02-13 14:34:48 +00001858 db_nsr_update["detailed-status"] = "creating"
1859 db_nsr_update["operational-status"] = "init"
quilesj4cda56b2019-12-05 10:02:20 +00001860 self._write_ns_status(
1861 nsr_id=nsr_id,
1862 ns_state="BUILDING",
1863 current_operation="INSTANTIATING",
tiernoe876f672020-02-13 14:34:48 +00001864 current_operation_id=nslcmop_id,
1865 other_update=db_nsr_update
1866 )
1867 self._write_op_status(
1868 op_id=nslcmop_id,
1869 stage=stage,
1870 queuePosition=0
quilesj4cda56b2019-12-05 10:02:20 +00001871 )
1872
quilesj7e13aeb2019-10-08 13:34:55 +02001873 # read from db: operation
tiernoe876f672020-02-13 14:34:48 +00001874 stage[1] = "Getting nslcmop={} from db".format(nslcmop_id)
tierno59d22d22018-09-25 18:10:19 +02001875 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
tierno744303e2020-01-13 16:46:31 +00001876 ns_params = db_nslcmop.get("operationParams")
1877 if ns_params and ns_params.get("timeout_ns_deploy"):
1878 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1879 else:
1880 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001881
1882 # read from db: ns
tiernoe876f672020-02-13 14:34:48 +00001883 stage[1] = "Getting nsr={} from db".format(nsr_id)
tierno59d22d22018-09-25 18:10:19 +02001884 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernod732fb82020-05-21 13:18:23 +00001885 stage[1] = "Getting nsd={} from db".format(db_nsr["nsd-id"])
1886 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
1887 db_nsr["nsd"] = nsd
tiernod8323042019-08-09 11:32:23 +00001888 # nsr_name = db_nsr["name"] # TODO short-name??
tierno47e86b52018-10-10 14:05:55 +02001889
quilesj7e13aeb2019-10-08 13:34:55 +02001890 # read from db: vnf's of this ns
tiernoe876f672020-02-13 14:34:48 +00001891 stage[1] = "Getting vnfrs from db"
1892 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001893 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
tierno27246d82018-09-27 15:59:09 +02001894
quilesj7e13aeb2019-10-08 13:34:55 +02001895 # read from db: vnfd's for every vnf
1896 db_vnfds_ref = {} # every vnfd data indexed by vnf name
1897 db_vnfds = {} # every vnfd data indexed by vnf id
1898 db_vnfds_index = {} # every vnfd data indexed by vnf member-index
1899
1900 # for each vnf in ns, read vnfd
tierno27246d82018-09-27 15:59:09 +02001901 for vnfr in db_vnfrs_list:
quilesj7e13aeb2019-10-08 13:34:55 +02001902 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr # vnf's dict indexed by member-index: '1', '2', etc
1903 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
1904 vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
1905 # if we haven't this vnfd, read it from db
tierno27246d82018-09-27 15:59:09 +02001906 if vnfd_id not in db_vnfds:
quilesj63f90042020-01-17 09:53:55 +00001907 # read from db
tiernoe876f672020-02-13 14:34:48 +00001908 stage[1] = "Getting vnfd={} id='{}' from db".format(vnfd_id, vnfd_ref)
1909 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001910 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
tierno27246d82018-09-27 15:59:09 +02001911
quilesj7e13aeb2019-10-08 13:34:55 +02001912 # store vnfd
1913 db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
1914 db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
1915 db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id] # vnfd's indexed by member-index
1916
1917 # Get or generates the _admin.deployed.VCA list
tiernoe4f7e6c2018-11-27 14:55:30 +00001918 vca_deployed_list = None
1919 if db_nsr["_admin"].get("deployed"):
1920 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
1921 if vca_deployed_list is None:
1922 vca_deployed_list = []
quilesj3655ae02019-12-12 16:08:35 +00001923 configuration_status_list = []
tiernoe4f7e6c2018-11-27 14:55:30 +00001924 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
quilesj3655ae02019-12-12 16:08:35 +00001925 db_nsr_update["configurationStatus"] = configuration_status_list
quilesj7e13aeb2019-10-08 13:34:55 +02001926 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00001927 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00001928 elif isinstance(vca_deployed_list, dict):
1929 # maintain backward compatibility. Change a dict to list at database
1930 vca_deployed_list = list(vca_deployed_list.values())
1931 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00001932 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00001933
tierno6cf25f52019-09-12 09:33:40 +00001934 if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
tiernoa009e552019-01-30 16:45:44 +00001935 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
1936 db_nsr_update["_admin.deployed.RO.vnfd"] = []
tierno59d22d22018-09-25 18:10:19 +02001937
tiernobaa51102018-12-14 13:16:18 +00001938 # set state to INSTANTIATED. When instantiated NBI will not delete directly
1939 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1940 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001941
1942 # n2vc_redesign STEP 2 Deploy Network Scenario
tiernoe876f672020-02-13 14:34:48 +00001943 stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
quilesj3655ae02019-12-12 16:08:35 +00001944 self._write_op_status(
1945 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00001946 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00001947 )
1948
tiernoe876f672020-02-13 14:34:48 +00001949 stage[1] = "Deploying KDUs,"
1950 # self.logger.debug(logging_text + "Before deploy_kdus")
calvinosanch9f9c6f22019-11-04 13:37:39 +01001951 # Call to deploy_kdus in case exists the "vdu:kdu" param
tiernoe876f672020-02-13 14:34:48 +00001952 await self.deploy_kdus(
1953 logging_text=logging_text,
1954 nsr_id=nsr_id,
1955 nslcmop_id=nslcmop_id,
1956 db_vnfrs=db_vnfrs,
1957 db_vnfds=db_vnfds,
1958 task_instantiation_info=tasks_dict_info,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001959 )
tiernoe876f672020-02-13 14:34:48 +00001960
1961 stage[1] = "Getting VCA public key."
tiernod8323042019-08-09 11:32:23 +00001962 # n2vc_redesign STEP 1 Get VCA public ssh-key
1963 # feature 1429. Add n2vc public key to needed VMs
tierno3bedc9b2019-11-27 15:46:57 +00001964 n2vc_key = self.n2vc.get_public_key()
tiernoa5088192019-11-26 16:12:53 +00001965 n2vc_key_list = [n2vc_key]
1966 if self.vca_config.get("public_key"):
1967 n2vc_key_list.append(self.vca_config["public_key"])
tierno98ad6ea2019-05-30 17:16:28 +00001968
tiernoe876f672020-02-13 14:34:48 +00001969 stage[1] = "Deploying NS at VIM."
tiernod8323042019-08-09 11:32:23 +00001970 task_ro = asyncio.ensure_future(
quilesj7e13aeb2019-10-08 13:34:55 +02001971 self.instantiate_RO(
1972 logging_text=logging_text,
1973 nsr_id=nsr_id,
1974 nsd=nsd,
1975 db_nsr=db_nsr,
1976 db_nslcmop=db_nslcmop,
1977 db_vnfrs=db_vnfrs,
1978 db_vnfds_ref=db_vnfds_ref,
tiernoe876f672020-02-13 14:34:48 +00001979 n2vc_key_list=n2vc_key_list,
1980 stage=stage
tierno98ad6ea2019-05-30 17:16:28 +00001981 )
tiernod8323042019-08-09 11:32:23 +00001982 )
1983 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
tiernoa2143262020-03-27 16:20:40 +00001984 tasks_dict_info[task_ro] = "Deploying at VIM"
tierno98ad6ea2019-05-30 17:16:28 +00001985
tiernod8323042019-08-09 11:32:23 +00001986 # n2vc_redesign STEP 3 to 6 Deploy N2VC
tiernoe876f672020-02-13 14:34:48 +00001987 stage[1] = "Deploying Execution Environments."
1988 self.logger.debug(logging_text + stage[1])
tierno98ad6ea2019-05-30 17:16:28 +00001989
tiernod8323042019-08-09 11:32:23 +00001990 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
quilesj7e13aeb2019-10-08 13:34:55 +02001991 # get_iterable() returns a value from a dict or empty tuple if key does not exist
tierno98ad6ea2019-05-30 17:16:28 +00001992 for c_vnf in get_iterable(nsd, "constituent-vnfd"):
1993 vnfd_id = c_vnf["vnfd-id-ref"]
tierno98ad6ea2019-05-30 17:16:28 +00001994 vnfd = db_vnfds_ref[vnfd_id]
tiernod8323042019-08-09 11:32:23 +00001995 member_vnf_index = str(c_vnf["member-vnf-index"])
1996 db_vnfr = db_vnfrs[member_vnf_index]
1997 base_folder = vnfd["_admin"]["storage"]
1998 vdu_id = None
1999 vdu_index = 0
tierno98ad6ea2019-05-30 17:16:28 +00002000 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002001 kdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002002
tierno8a518872018-12-21 13:42:14 +00002003 # Get additional parameters
tiernod8323042019-08-09 11:32:23 +00002004 deploy_params = {}
2005 if db_vnfr.get("additionalParamsForVnf"):
tierno626e0152019-11-29 14:16:16 +00002006 deploy_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy())
tierno8a518872018-12-21 13:42:14 +00002007
tiernod8323042019-08-09 11:32:23 +00002008 descriptor_config = vnfd.get("vnf-configuration")
tierno588547c2020-07-01 15:30:20 +00002009 if descriptor_config:
quilesj7e13aeb2019-10-08 13:34:55 +02002010 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00002011 logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002012 db_nsr=db_nsr,
2013 db_vnfr=db_vnfr,
2014 nslcmop_id=nslcmop_id,
2015 nsr_id=nsr_id,
2016 nsi_id=nsi_id,
2017 vnfd_id=vnfd_id,
2018 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002019 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002020 member_vnf_index=member_vnf_index,
2021 vdu_index=vdu_index,
2022 vdu_name=vdu_name,
2023 deploy_params=deploy_params,
2024 descriptor_config=descriptor_config,
2025 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00002026 task_instantiation_info=tasks_dict_info,
2027 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002028 )
tierno59d22d22018-09-25 18:10:19 +02002029
2030 # Deploy charms for each VDU that supports one.
tiernod8323042019-08-09 11:32:23 +00002031 for vdud in get_iterable(vnfd, 'vdu'):
2032 vdu_id = vdud["id"]
2033 descriptor_config = vdud.get('vdu-configuration')
tierno626e0152019-11-29 14:16:16 +00002034 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
2035 if vdur.get("additionalParams"):
2036 deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
2037 else:
2038 deploy_params_vdu = deploy_params
tierno588547c2020-07-01 15:30:20 +00002039 if descriptor_config:
tiernod8323042019-08-09 11:32:23 +00002040 # look for vdu index in the db_vnfr["vdu"] section
2041 # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
2042 # if vdur["vdu-id-ref"] == vdu_id:
2043 # break
2044 # else:
2045 # raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
2046 # "member_vnf_index={}".format(vdu_id, member_vnf_index))
2047 # vdu_name = vdur.get("name")
2048 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002049 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002050 for vdu_index in range(int(vdud.get("count", 1))):
2051 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
quilesj7e13aeb2019-10-08 13:34:55 +02002052 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00002053 logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2054 member_vnf_index, vdu_id, vdu_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002055 db_nsr=db_nsr,
2056 db_vnfr=db_vnfr,
2057 nslcmop_id=nslcmop_id,
2058 nsr_id=nsr_id,
2059 nsi_id=nsi_id,
2060 vnfd_id=vnfd_id,
2061 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002062 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002063 member_vnf_index=member_vnf_index,
2064 vdu_index=vdu_index,
2065 vdu_name=vdu_name,
tierno626e0152019-11-29 14:16:16 +00002066 deploy_params=deploy_params_vdu,
quilesj7e13aeb2019-10-08 13:34:55 +02002067 descriptor_config=descriptor_config,
2068 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002069 task_instantiation_info=tasks_dict_info,
2070 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002071 )
calvinosanch9f9c6f22019-11-04 13:37:39 +01002072 for kdud in get_iterable(vnfd, 'kdu'):
2073 kdu_name = kdud["name"]
2074 descriptor_config = kdud.get('kdu-configuration')
tierno588547c2020-07-01 15:30:20 +00002075 if descriptor_config:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002076 vdu_id = None
2077 vdu_index = 0
2078 vdu_name = None
2079 # look for vdu index in the db_vnfr["vdu"] section
2080 # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
2081 # if vdur["vdu-id-ref"] == vdu_id:
2082 # break
2083 # else:
2084 # raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
2085 # "member_vnf_index={}".format(vdu_id, member_vnf_index))
2086 # vdu_name = vdur.get("name")
2087 # vdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002088
calvinosanch9f9c6f22019-11-04 13:37:39 +01002089 self._deploy_n2vc(
2090 logging_text=logging_text,
2091 db_nsr=db_nsr,
2092 db_vnfr=db_vnfr,
2093 nslcmop_id=nslcmop_id,
2094 nsr_id=nsr_id,
2095 nsi_id=nsi_id,
2096 vnfd_id=vnfd_id,
2097 vdu_id=vdu_id,
2098 kdu_name=kdu_name,
2099 member_vnf_index=member_vnf_index,
2100 vdu_index=vdu_index,
2101 vdu_name=vdu_name,
2102 deploy_params=deploy_params,
2103 descriptor_config=descriptor_config,
2104 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002105 task_instantiation_info=tasks_dict_info,
2106 stage=stage
calvinosanch9f9c6f22019-11-04 13:37:39 +01002107 )
tierno59d22d22018-09-25 18:10:19 +02002108
tierno1b633412019-02-25 16:48:23 +00002109 # Check if this NS has a charm configuration
tiernod8323042019-08-09 11:32:23 +00002110 descriptor_config = nsd.get("ns-configuration")
2111 if descriptor_config and descriptor_config.get("juju"):
2112 vnfd_id = None
2113 db_vnfr = None
2114 member_vnf_index = None
2115 vdu_id = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002116 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002117 vdu_index = 0
2118 vdu_name = None
tierno1b633412019-02-25 16:48:23 +00002119
tiernod8323042019-08-09 11:32:23 +00002120 # Get additional parameters
2121 deploy_params = {}
2122 if db_nsr.get("additionalParamsForNs"):
tierno626e0152019-11-29 14:16:16 +00002123 deploy_params = self._format_additional_params(db_nsr["additionalParamsForNs"].copy())
tiernod8323042019-08-09 11:32:23 +00002124 base_folder = nsd["_admin"]["storage"]
quilesj7e13aeb2019-10-08 13:34:55 +02002125 self._deploy_n2vc(
2126 logging_text=logging_text,
2127 db_nsr=db_nsr,
2128 db_vnfr=db_vnfr,
2129 nslcmop_id=nslcmop_id,
2130 nsr_id=nsr_id,
2131 nsi_id=nsi_id,
2132 vnfd_id=vnfd_id,
2133 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002134 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002135 member_vnf_index=member_vnf_index,
2136 vdu_index=vdu_index,
2137 vdu_name=vdu_name,
2138 deploy_params=deploy_params,
2139 descriptor_config=descriptor_config,
2140 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002141 task_instantiation_info=tasks_dict_info,
2142 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002143 )
tierno1b633412019-02-25 16:48:23 +00002144
tiernoe876f672020-02-13 14:34:48 +00002145 # rest of staff will be done at finally
tierno1b633412019-02-25 16:48:23 +00002146
tiernoe876f672020-02-13 14:34:48 +00002147 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
2148 self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
tierno59d22d22018-09-25 18:10:19 +02002149 exc = e
2150 except asyncio.CancelledError:
tiernoe876f672020-02-13 14:34:48 +00002151 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
tierno59d22d22018-09-25 18:10:19 +02002152 exc = "Operation was cancelled"
2153 except Exception as e:
2154 exc = traceback.format_exc()
tiernoe876f672020-02-13 14:34:48 +00002155 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
tierno59d22d22018-09-25 18:10:19 +02002156 finally:
2157 if exc:
tiernoe876f672020-02-13 14:34:48 +00002158 error_list.append(str(exc))
tiernobaa51102018-12-14 13:16:18 +00002159 try:
tiernoe876f672020-02-13 14:34:48 +00002160 # wait for pending tasks
2161 if tasks_dict_info:
2162 stage[1] = "Waiting for instantiate pending tasks."
2163 self.logger.debug(logging_text + stage[1])
2164 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
2165 stage, nslcmop_id, nsr_id=nsr_id)
2166 stage[1] = stage[2] = ""
2167 except asyncio.CancelledError:
2168 error_list.append("Cancelled")
2169 # TODO cancel all tasks
2170 except Exception as exc:
2171 error_list.append(str(exc))
quilesj4cda56b2019-12-05 10:02:20 +00002172
tiernoe876f672020-02-13 14:34:48 +00002173 # update operation-status
2174 db_nsr_update["operational-status"] = "running"
2175 # let's begin with VCA 'configured' status (later we can change it)
2176 db_nsr_update["config-status"] = "configured"
2177 for task, task_name in tasks_dict_info.items():
2178 if not task.done() or task.cancelled() or task.exception():
2179 if task_name.startswith(self.task_name_deploy_vca):
2180 # A N2VC task is pending
2181 db_nsr_update["config-status"] = "failed"
quilesj4cda56b2019-12-05 10:02:20 +00002182 else:
tiernoe876f672020-02-13 14:34:48 +00002183 # RO or KDU task is pending
2184 db_nsr_update["operational-status"] = "failed"
quilesj3655ae02019-12-12 16:08:35 +00002185
tiernoe876f672020-02-13 14:34:48 +00002186 # update status at database
2187 if error_list:
tiernoa2143262020-03-27 16:20:40 +00002188 error_detail = ". ".join(error_list)
tiernoe876f672020-02-13 14:34:48 +00002189 self.logger.error(logging_text + error_detail)
tiernoa2143262020-03-27 16:20:40 +00002190 error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
2191 error_description_nsr = 'Operation: INSTANTIATING.{}, Stage {}'.format(nslcmop_id, stage[0])
quilesj3655ae02019-12-12 16:08:35 +00002192
tiernoa2143262020-03-27 16:20:40 +00002193 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00002194 db_nslcmop_update["detailed-status"] = error_detail
2195 nslcmop_operation_state = "FAILED"
2196 ns_state = "BROKEN"
2197 else:
tiernoa2143262020-03-27 16:20:40 +00002198 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00002199 error_description_nsr = error_description_nslcmop = None
2200 ns_state = "READY"
2201 db_nsr_update["detailed-status"] = "Done"
2202 db_nslcmop_update["detailed-status"] = "Done"
2203 nslcmop_operation_state = "COMPLETED"
quilesj4cda56b2019-12-05 10:02:20 +00002204
tiernoe876f672020-02-13 14:34:48 +00002205 if db_nsr:
2206 self._write_ns_status(
2207 nsr_id=nsr_id,
2208 ns_state=ns_state,
2209 current_operation="IDLE",
2210 current_operation_id=None,
2211 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00002212 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00002213 other_update=db_nsr_update
2214 )
tiernoa17d4f42020-04-28 09:59:23 +00002215 self._write_op_status(
2216 op_id=nslcmop_id,
2217 stage="",
2218 error_message=error_description_nslcmop,
2219 operation_state=nslcmop_operation_state,
2220 other_update=db_nslcmop_update,
2221 )
quilesj3655ae02019-12-12 16:08:35 +00002222
tierno59d22d22018-09-25 18:10:19 +02002223 if nslcmop_operation_state:
2224 try:
2225 await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00002226 "operationState": nslcmop_operation_state},
2227 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02002228 except Exception as e:
2229 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
2230
2231 self.logger.debug(logging_text + "Exit")
2232 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2233
tierno588547c2020-07-01 15:30:20 +00002234 async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
2235 timeout: int = 3600, vca_type: str = None) -> bool:
quilesj63f90042020-01-17 09:53:55 +00002236
2237 # steps:
2238 # 1. find all relations for this VCA
2239 # 2. wait for other peers related
2240 # 3. add relations
2241
2242 try:
tierno588547c2020-07-01 15:30:20 +00002243 vca_type = vca_type or "lxc_proxy_charm"
quilesj63f90042020-01-17 09:53:55 +00002244
2245 # STEP 1: find all relations for this VCA
2246
2247 # read nsr record
2248 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garcia171f3542020-05-21 16:41:07 +02002249 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
quilesj63f90042020-01-17 09:53:55 +00002250
2251 # this VCA data
2252 my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]
2253
2254 # read all ns-configuration relations
2255 ns_relations = list()
David Garcia171f3542020-05-21 16:41:07 +02002256 db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
quilesj63f90042020-01-17 09:53:55 +00002257 if db_ns_relations:
2258 for r in db_ns_relations:
2259 # check if this VCA is in the relation
2260 if my_vca.get('member-vnf-index') in\
2261 (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2262 ns_relations.append(r)
2263
2264 # read all vnf-configuration relations
2265 vnf_relations = list()
2266 db_vnfd_list = db_nsr.get('vnfd-id')
2267 if db_vnfd_list:
2268 for vnfd in db_vnfd_list:
2269 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
2270 db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
2271 if db_vnf_relations:
2272 for r in db_vnf_relations:
2273 # check if this VCA is in the relation
2274 if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2275 vnf_relations.append(r)
2276
2277 # if no relations, terminate
2278 if not ns_relations and not vnf_relations:
2279 self.logger.debug(logging_text + ' No relations')
2280 return True
2281
2282 self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))
2283
2284 # add all relations
2285 start = time()
2286 while True:
2287 # check timeout
2288 now = time()
2289 if now - start >= timeout:
2290 self.logger.error(logging_text + ' : timeout adding relations')
2291 return False
2292
2293 # reload nsr from database (we need to update record: _admin.deloyed.VCA)
2294 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2295
2296 # for each defined NS relation, find the VCA's related
2297 for r in ns_relations:
2298 from_vca_ee_id = None
2299 to_vca_ee_id = None
2300 from_vca_endpoint = None
2301 to_vca_endpoint = None
2302 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2303 for vca in vca_list:
2304 if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
2305 and vca.get('config_sw_installed'):
2306 from_vca_ee_id = vca.get('ee_id')
2307 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2308 if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
2309 and vca.get('config_sw_installed'):
2310 to_vca_ee_id = vca.get('ee_id')
2311 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2312 if from_vca_ee_id and to_vca_ee_id:
2313 # add relation
tierno588547c2020-07-01 15:30:20 +00002314 await self.vca_map[vca_type].add_relation(
quilesj63f90042020-01-17 09:53:55 +00002315 ee_id_1=from_vca_ee_id,
2316 ee_id_2=to_vca_ee_id,
2317 endpoint_1=from_vca_endpoint,
2318 endpoint_2=to_vca_endpoint)
2319 # remove entry from relations list
2320 ns_relations.remove(r)
2321 else:
2322 # check failed peers
2323 try:
2324 vca_status_list = db_nsr.get('configurationStatus')
2325 if vca_status_list:
2326 for i in range(len(vca_list)):
2327 vca = vca_list[i]
2328 vca_status = vca_status_list[i]
2329 if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
2330 if vca_status.get('status') == 'BROKEN':
2331 # peer broken: remove relation from list
2332 ns_relations.remove(r)
2333 if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
2334 if vca_status.get('status') == 'BROKEN':
2335 # peer broken: remove relation from list
2336 ns_relations.remove(r)
2337 except Exception:
2338 # ignore
2339 pass
2340
2341 # for each defined VNF relation, find the VCA's related
2342 for r in vnf_relations:
2343 from_vca_ee_id = None
2344 to_vca_ee_id = None
2345 from_vca_endpoint = None
2346 to_vca_endpoint = None
2347 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2348 for vca in vca_list:
2349 if vca.get('vdu_id') == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
2350 from_vca_ee_id = vca.get('ee_id')
2351 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2352 if vca.get('vdu_id') == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
2353 to_vca_ee_id = vca.get('ee_id')
2354 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2355 if from_vca_ee_id and to_vca_ee_id:
2356 # add relation
tierno588547c2020-07-01 15:30:20 +00002357 await self.vca_map[vca_type].add_relation(
quilesj63f90042020-01-17 09:53:55 +00002358 ee_id_1=from_vca_ee_id,
2359 ee_id_2=to_vca_ee_id,
2360 endpoint_1=from_vca_endpoint,
2361 endpoint_2=to_vca_endpoint)
2362 # remove entry from relations list
2363 vnf_relations.remove(r)
2364 else:
2365 # check failed peers
2366 try:
2367 vca_status_list = db_nsr.get('configurationStatus')
2368 if vca_status_list:
2369 for i in range(len(vca_list)):
2370 vca = vca_list[i]
2371 vca_status = vca_status_list[i]
2372 if vca.get('vdu_id') == r.get('entities')[0].get('id'):
2373 if vca_status.get('status') == 'BROKEN':
2374 # peer broken: remove relation from list
2375 ns_relations.remove(r)
2376 if vca.get('vdu_id') == r.get('entities')[1].get('id'):
2377 if vca_status.get('status') == 'BROKEN':
2378 # peer broken: remove relation from list
2379 ns_relations.remove(r)
2380 except Exception:
2381 # ignore
2382 pass
2383
2384 # wait for next try
2385 await asyncio.sleep(5.0)
2386
2387 if not ns_relations and not vnf_relations:
2388 self.logger.debug('Relations added')
2389 break
2390
2391 return True
2392
2393 except Exception as e:
2394 self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
2395 return False
2396
tiernob9018152020-04-16 14:18:24 +00002397 def _write_db_callback(self, task, item, _id, on_done=None, on_exc=None):
2398 """
2399 callback for kdu install intended to store the returned kdu_instance at database
2400 :return: None
2401 """
2402 db_update = {}
2403 try:
2404 result = task.result()
2405 if on_done:
2406 db_update[on_done] = str(result)
2407 except Exception as e:
2408 if on_exc:
2409 db_update[on_exc] = str(e)
2410 if db_update:
2411 try:
2412 self.update_db_2(item, _id, db_update)
2413 except Exception:
2414 pass
2415
tiernoe876f672020-02-13 14:34:48 +00002416 async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
calvinosanch9f9c6f22019-11-04 13:37:39 +01002417 # Launch kdus if present in the descriptor
tierno626e0152019-11-29 14:16:16 +00002418
2419 k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}}
2420
2421 def _get_cluster_id(cluster_id, cluster_type):
2422 nonlocal k8scluster_id_2_uuic
2423 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
2424 return k8scluster_id_2_uuic[cluster_type][cluster_id]
2425
2426 db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
2427 if not db_k8scluster:
2428 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
2429 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
2430 if not k8s_id:
2431 raise LcmException("K8s cluster '{}' has not been initilized for '{}'".format(cluster_id, cluster_type))
2432 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
2433 return k8s_id
2434
2435 logging_text += "Deploy kdus: "
tiernoe876f672020-02-13 14:34:48 +00002436 step = ""
calvinosanch9f9c6f22019-11-04 13:37:39 +01002437 try:
tierno626e0152019-11-29 14:16:16 +00002438 db_nsr_update = {"_admin.deployed.K8s": []}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002439 self.update_db_2("nsrs", nsr_id, db_nsr_update)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002440
tierno626e0152019-11-29 14:16:16 +00002441 index = 0
tiernoe876f672020-02-13 14:34:48 +00002442 updated_cluster_list = []
2443
tierno626e0152019-11-29 14:16:16 +00002444 for vnfr_data in db_vnfrs.values():
2445 for kdur in get_iterable(vnfr_data, "kdur"):
2446 desc_params = self._format_additional_params(kdur.get("additionalParams"))
quilesjacde94f2020-01-23 10:07:08 +00002447 vnfd_id = vnfr_data.get('vnfd-id')
tiernode1584f2020-04-07 09:07:33 +00002448 namespace = kdur.get("k8s-namespace")
tierno626e0152019-11-29 14:16:16 +00002449 if kdur.get("helm-chart"):
2450 kdumodel = kdur["helm-chart"]
tiernoe876f672020-02-13 14:34:48 +00002451 k8sclustertype = "helm-chart"
tierno626e0152019-11-29 14:16:16 +00002452 elif kdur.get("juju-bundle"):
2453 kdumodel = kdur["juju-bundle"]
tiernoe876f672020-02-13 14:34:48 +00002454 k8sclustertype = "juju-bundle"
tierno626e0152019-11-29 14:16:16 +00002455 else:
tiernoe876f672020-02-13 14:34:48 +00002456 raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
2457 "juju-bundle. Maybe an old NBI version is running".
2458 format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
quilesjacde94f2020-01-23 10:07:08 +00002459 # check if kdumodel is a file and exists
2460 try:
tierno51183952020-04-03 15:48:18 +00002461 storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
2462 if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts
2463 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
Dominik Fleischmann010c0e72020-05-18 15:19:11 +02002464 filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
tierno51183952020-04-03 15:48:18 +00002465 kdumodel)
2466 if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
2467 kdumodel = self.fs.path + filename
2468 except (asyncio.TimeoutError, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00002469 raise
2470 except Exception: # it is not a file
quilesjacde94f2020-01-23 10:07:08 +00002471 pass
lloretgallegedc5f332020-02-20 11:50:50 +01002472
tiernoe876f672020-02-13 14:34:48 +00002473 k8s_cluster_id = kdur["k8s-cluster"]["id"]
2474 step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
2475 cluster_uuid = _get_cluster_id(k8s_cluster_id, k8sclustertype)
lloretgallegedc5f332020-02-20 11:50:50 +01002476
tiernoe876f672020-02-13 14:34:48 +00002477 if k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list:
2478 del_repo_list, added_repo_dict = await asyncio.ensure_future(
2479 self.k8sclusterhelm.synchronize_repos(cluster_uuid=cluster_uuid))
2480 if del_repo_list or added_repo_dict:
2481 unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
2482 updated = {'_admin.helm_charts_added.' +
2483 item: name for item, name in added_repo_dict.items()}
2484 self.logger.debug(logging_text + "repos synchronized on k8s cluster '{}' to_delete: {}, "
2485 "to_add: {}".format(k8s_cluster_id, del_repo_list,
2486 added_repo_dict))
2487 self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
2488 updated_cluster_list.append(cluster_uuid)
lloretgallegedc5f332020-02-20 11:50:50 +01002489
tiernoe876f672020-02-13 14:34:48 +00002490 step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
2491 kdur["kdu-name"], k8s_cluster_id)
tierno626e0152019-11-29 14:16:16 +00002492
tierno067e04a2020-03-31 12:53:13 +00002493 k8s_instace_info = {"kdu-instance": None,
2494 "k8scluster-uuid": cluster_uuid,
tierno626e0152019-11-29 14:16:16 +00002495 "k8scluster-type": k8sclustertype,
tierno067e04a2020-03-31 12:53:13 +00002496 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
2497 "kdu-name": kdur["kdu-name"],
tiernode1584f2020-04-07 09:07:33 +00002498 "kdu-model": kdumodel,
2499 "namespace": namespace}
tiernob9018152020-04-16 14:18:24 +00002500 db_path = "_admin.deployed.K8s.{}".format(index)
2501 db_nsr_update[db_path] = k8s_instace_info
tierno626e0152019-11-29 14:16:16 +00002502 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tierno626e0152019-11-29 14:16:16 +00002503
tiernoe876f672020-02-13 14:34:48 +00002504 db_dict = {"collection": "nsrs",
2505 "filter": {"_id": nsr_id},
tiernob9018152020-04-16 14:18:24 +00002506 "path": db_path}
lloretgallegedc5f332020-02-20 11:50:50 +01002507
tiernoa2143262020-03-27 16:20:40 +00002508 task = asyncio.ensure_future(
2509 self.k8scluster_map[k8sclustertype].install(cluster_uuid=cluster_uuid, kdu_model=kdumodel,
2510 atomic=True, params=desc_params,
2511 db_dict=db_dict, timeout=600,
tiernode1584f2020-04-07 09:07:33 +00002512 kdu_name=kdur["kdu-name"], namespace=namespace))
Adam Israelbaacc302019-12-01 12:41:39 -05002513
tiernob9018152020-04-16 14:18:24 +00002514 task.add_done_callback(partial(self._write_db_callback, item="nsrs", _id=nsr_id,
2515 on_done=db_path + ".kdu-instance",
2516 on_exc=db_path + ".detailed-status"))
tiernoe876f672020-02-13 14:34:48 +00002517 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
tiernoa2143262020-03-27 16:20:40 +00002518 task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
tiernoe876f672020-02-13 14:34:48 +00002519
tierno626e0152019-11-29 14:16:16 +00002520 index += 1
quilesjdd799ac2020-01-23 16:31:11 +00002521
tiernoe876f672020-02-13 14:34:48 +00002522 except (LcmException, asyncio.CancelledError):
2523 raise
calvinosanch9f9c6f22019-11-04 13:37:39 +01002524 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00002525 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
2526 if isinstance(e, (N2VCException, DbException)):
2527 self.logger.error(logging_text + msg)
2528 else:
2529 self.logger.critical(logging_text + msg, exc_info=True)
quilesjdd799ac2020-01-23 16:31:11 +00002530 raise LcmException(msg)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002531 finally:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002532 if db_nsr_update:
2533 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00002534
quilesj7e13aeb2019-10-08 13:34:55 +02002535 def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002536 kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
tiernoe876f672020-02-13 14:34:48 +00002537 base_folder, task_instantiation_info, stage):
quilesj7e13aeb2019-10-08 13:34:55 +02002538 # launch instantiate_N2VC in a asyncio task and register task object
2539 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
2540 # if not found, create one entry and update database
quilesj7e13aeb2019-10-08 13:34:55 +02002541 # fill db_nsr._admin.deployed.VCA.<index>
tierno588547c2020-07-01 15:30:20 +00002542
2543 self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
2544 if descriptor_config.get("juju"): # There is one execution envioronment of type juju
2545 ee_list = [descriptor_config]
2546 elif descriptor_config.get("execution-environment-list"):
2547 ee_list = descriptor_config.get("execution-environment-list")
2548 else: # other types as script are not supported
2549 ee_list = []
2550
2551 for ee_item in ee_list:
2552 self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'),
2553 ee_item.get("helm-chart")))
tierno4fa7f8e2020-07-08 15:33:55 +00002554 ee_descriptor_id = ee_item.get("id")
tierno588547c2020-07-01 15:30:20 +00002555 if ee_item.get("juju"):
2556 vca_name = ee_item['juju'].get('charm')
2557 vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm"
2558 if ee_item['juju'].get('cloud') == "k8s":
2559 vca_type = "k8s_proxy_charm"
2560 elif ee_item['juju'].get('proxy') is False:
2561 vca_type = "native_charm"
2562 elif ee_item.get("helm-chart"):
2563 vca_name = ee_item['helm-chart']
2564 vca_type = "helm"
2565 else:
2566 self.logger.debug(logging_text + "skipping non juju neither charm configuration")
quilesj7e13aeb2019-10-08 13:34:55 +02002567 continue
quilesj3655ae02019-12-12 16:08:35 +00002568
tierno588547c2020-07-01 15:30:20 +00002569 vca_index = -1
2570 for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
2571 if not vca_deployed:
2572 continue
2573 if vca_deployed.get("member-vnf-index") == member_vnf_index and \
2574 vca_deployed.get("vdu_id") == vdu_id and \
2575 vca_deployed.get("kdu_name") == kdu_name and \
tierno4fa7f8e2020-07-08 15:33:55 +00002576 vca_deployed.get("vdu_count_index", 0) == vdu_index and \
2577 vca_deployed.get("ee_descriptor_id") == ee_descriptor_id:
tierno588547c2020-07-01 15:30:20 +00002578 break
2579 else:
2580 # not found, create one.
tierno4fa7f8e2020-07-08 15:33:55 +00002581 target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
2582 if vdu_id:
2583 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
2584 elif kdu_name:
2585 target += "/kdu/{}".format(kdu_name)
tierno588547c2020-07-01 15:30:20 +00002586 vca_deployed = {
tierno4fa7f8e2020-07-08 15:33:55 +00002587 "target_element": target,
2588 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
tierno588547c2020-07-01 15:30:20 +00002589 "member-vnf-index": member_vnf_index,
2590 "vdu_id": vdu_id,
2591 "kdu_name": kdu_name,
2592 "vdu_count_index": vdu_index,
2593 "operational-status": "init", # TODO revise
2594 "detailed-status": "", # TODO revise
2595 "step": "initial-deploy", # TODO revise
2596 "vnfd_id": vnfd_id,
2597 "vdu_name": vdu_name,
tierno4fa7f8e2020-07-08 15:33:55 +00002598 "type": vca_type,
2599 "ee_descriptor_id": ee_descriptor_id
tierno588547c2020-07-01 15:30:20 +00002600 }
2601 vca_index += 1
quilesj3655ae02019-12-12 16:08:35 +00002602
tierno588547c2020-07-01 15:30:20 +00002603 # create VCA and configurationStatus in db
2604 db_dict = {
2605 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
2606 "configurationStatus.{}".format(vca_index): dict()
2607 }
2608 self.update_db_2("nsrs", nsr_id, db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02002609
tierno588547c2020-07-01 15:30:20 +00002610 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
2611
2612 # Launch task
2613 task_n2vc = asyncio.ensure_future(
2614 self.instantiate_N2VC(
2615 logging_text=logging_text,
2616 vca_index=vca_index,
2617 nsi_id=nsi_id,
2618 db_nsr=db_nsr,
2619 db_vnfr=db_vnfr,
2620 vdu_id=vdu_id,
2621 kdu_name=kdu_name,
2622 vdu_index=vdu_index,
2623 deploy_params=deploy_params,
2624 config_descriptor=descriptor_config,
2625 base_folder=base_folder,
2626 nslcmop_id=nslcmop_id,
2627 stage=stage,
2628 vca_type=vca_type,
tierno89f82902020-07-03 14:52:28 +00002629 vca_name=vca_name,
2630 ee_config_descriptor=ee_item
tierno588547c2020-07-01 15:30:20 +00002631 )
quilesj7e13aeb2019-10-08 13:34:55 +02002632 )
tierno588547c2020-07-01 15:30:20 +00002633 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
2634 task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
2635 member_vnf_index or "", vdu_id or "")
tiernobaa51102018-12-14 13:16:18 +00002636
tiernoc9556972019-07-05 15:25:25 +00002637 @staticmethod
tierno4fa7f8e2020-07-08 15:33:55 +00002638 def _get_terminate_config_primitive(primitive_list, vca_deployed):
2639 """ Get a sorted terminate config primitive list. In case ee_descriptor_id is present at vca_deployed,
2640 it get only those primitives for this execution envirom"""
2641
2642 primitive_list = primitive_list or []
2643 # filter primitives by ee_descriptor_id
2644 ee_descriptor_id = vca_deployed.get("ee_descriptor_id")
2645 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
2646
2647 if primitive_list:
2648 primitive_list.sort(key=lambda val: int(val['seq']))
2649
2650 return primitive_list
kuuse0ca67472019-05-13 15:59:27 +02002651
2652 @staticmethod
2653 def _create_nslcmop(nsr_id, operation, params):
2654 """
2655 Creates a ns-lcm-opp content to be stored at database.
2656 :param nsr_id: internal id of the instance
2657 :param operation: instantiate, terminate, scale, action, ...
2658 :param params: user parameters for the operation
2659 :return: dictionary following SOL005 format
2660 """
2661 # Raise exception if invalid arguments
2662 if not (nsr_id and operation and params):
2663 raise LcmException(
2664 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
2665 now = time()
2666 _id = str(uuid4())
2667 nslcmop = {
2668 "id": _id,
2669 "_id": _id,
2670 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
2671 "operationState": "PROCESSING",
2672 "statusEnteredTime": now,
2673 "nsInstanceId": nsr_id,
2674 "lcmOperationType": operation,
2675 "startTime": now,
2676 "isAutomaticInvocation": False,
2677 "operationParams": params,
2678 "isCancelPending": False,
2679 "links": {
2680 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
2681 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
2682 }
2683 }
2684 return nslcmop
2685
calvinosanch9f9c6f22019-11-04 13:37:39 +01002686 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00002687 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002688 for key, value in params.items():
2689 if str(value).startswith("!!yaml "):
2690 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01002691 return params
2692
kuuse8b998e42019-07-30 15:22:16 +02002693 def _get_terminate_primitive_params(self, seq, vnf_index):
2694 primitive = seq.get('name')
2695 primitive_params = {}
2696 params = {
2697 "member_vnf_index": vnf_index,
2698 "primitive": primitive,
2699 "primitive_params": primitive_params,
2700 }
2701 desc_params = {}
2702 return self._map_primitive_params(seq, params, desc_params)
2703
kuuseac3a8882019-10-03 10:48:06 +02002704 # sub-operations
2705
tierno51183952020-04-03 15:48:18 +00002706 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
2707 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
2708 if op.get('operationState') == 'COMPLETED':
kuuseac3a8882019-10-03 10:48:06 +02002709 # b. Skip sub-operation
2710 # _ns_execute_primitive() or RO.create_action() will NOT be executed
2711 return self.SUBOPERATION_STATUS_SKIP
2712 else:
tierno7c4e24c2020-05-13 08:41:35 +00002713 # c. retry executing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02002714 # The sub-operation exists, and operationState != 'COMPLETED'
tierno7c4e24c2020-05-13 08:41:35 +00002715 # Update operationState = 'PROCESSING' to indicate a retry.
kuuseac3a8882019-10-03 10:48:06 +02002716 operationState = 'PROCESSING'
2717 detailed_status = 'In progress'
2718 self._update_suboperation_status(
2719 db_nslcmop, op_index, operationState, detailed_status)
2720 # Return the sub-operation index
2721 # _ns_execute_primitive() or RO.create_action() will be called from scale()
2722 # with arguments extracted from the sub-operation
2723 return op_index
2724
2725 # Find a sub-operation where all keys in a matching dictionary must match
2726 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
2727 def _find_suboperation(self, db_nslcmop, match):
tierno7c4e24c2020-05-13 08:41:35 +00002728 if db_nslcmop and match:
kuuseac3a8882019-10-03 10:48:06 +02002729 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
2730 for i, op in enumerate(op_list):
2731 if all(op.get(k) == match[k] for k in match):
2732 return i
2733 return self.SUBOPERATION_STATUS_NOT_FOUND
2734
2735 # Update status for a sub-operation given its index
2736 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
2737 # Update DB for HA tasks
2738 q_filter = {'_id': db_nslcmop['_id']}
2739 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
2740 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
2741 self.db.set_one("nslcmops",
2742 q_filter=q_filter,
2743 update_dict=update_dict,
2744 fail_on_empty=False)
2745
2746 # Add sub-operation, return the index of the added sub-operation
2747 # Optionally, set operationState, detailed-status, and operationType
2748 # Status and type are currently set for 'scale' sub-operations:
2749 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
2750 # 'detailed-status' : status message
2751 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
2752 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
quilesj7e13aeb2019-10-08 13:34:55 +02002753 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
2754 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
kuuseac3a8882019-10-03 10:48:06 +02002755 RO_nsr_id=None, RO_scaling_info=None):
tiernoe876f672020-02-13 14:34:48 +00002756 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02002757 return self.SUBOPERATION_STATUS_NOT_FOUND
2758 # Get the "_admin.operations" list, if it exists
2759 db_nslcmop_admin = db_nslcmop.get('_admin', {})
2760 op_list = db_nslcmop_admin.get('operations')
2761 # Create or append to the "_admin.operations" list
kuuse8b998e42019-07-30 15:22:16 +02002762 new_op = {'member_vnf_index': vnf_index,
2763 'vdu_id': vdu_id,
2764 'vdu_count_index': vdu_count_index,
2765 'primitive': primitive,
2766 'primitive_params': mapped_primitive_params}
kuuseac3a8882019-10-03 10:48:06 +02002767 if operationState:
2768 new_op['operationState'] = operationState
2769 if detailed_status:
2770 new_op['detailed-status'] = detailed_status
2771 if operationType:
2772 new_op['lcmOperationType'] = operationType
2773 if RO_nsr_id:
2774 new_op['RO_nsr_id'] = RO_nsr_id
2775 if RO_scaling_info:
2776 new_op['RO_scaling_info'] = RO_scaling_info
2777 if not op_list:
2778 # No existing operations, create key 'operations' with current operation as first list element
2779 db_nslcmop_admin.update({'operations': [new_op]})
2780 op_list = db_nslcmop_admin.get('operations')
2781 else:
2782 # Existing operations, append operation to list
2783 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02002784
kuuseac3a8882019-10-03 10:48:06 +02002785 db_nslcmop_update = {'_admin.operations': op_list}
2786 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
2787 op_index = len(op_list) - 1
2788 return op_index
2789
2790 # Helper methods for scale() sub-operations
2791
2792 # pre-scale/post-scale:
2793 # Check for 3 different cases:
2794 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
2795 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
tierno7c4e24c2020-05-13 08:41:35 +00002796 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
quilesj7e13aeb2019-10-08 13:34:55 +02002797 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
2798 operationType, RO_nsr_id=None, RO_scaling_info=None):
kuuseac3a8882019-10-03 10:48:06 +02002799 # Find this sub-operation
tierno7c4e24c2020-05-13 08:41:35 +00002800 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002801 operationType = 'SCALE-RO'
2802 match = {
2803 'member_vnf_index': vnf_index,
2804 'RO_nsr_id': RO_nsr_id,
2805 'RO_scaling_info': RO_scaling_info,
2806 }
2807 else:
2808 match = {
2809 'member_vnf_index': vnf_index,
2810 'primitive': vnf_config_primitive,
2811 'primitive_params': primitive_params,
2812 'lcmOperationType': operationType
2813 }
2814 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00002815 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02002816 # a. New sub-operation
2817 # The sub-operation does not exist, add it.
2818 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
2819 # The following parameters are set to None for all kind of scaling:
2820 vdu_id = None
2821 vdu_count_index = None
2822 vdu_name = None
tierno51183952020-04-03 15:48:18 +00002823 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002824 vnf_config_primitive = None
2825 primitive_params = None
2826 else:
2827 RO_nsr_id = None
2828 RO_scaling_info = None
2829 # Initial status for sub-operation
2830 operationState = 'PROCESSING'
2831 detailed_status = 'In progress'
2832 # Add sub-operation for pre/post-scaling (zero or more operations)
2833 self._add_suboperation(db_nslcmop,
2834 vnf_index,
2835 vdu_id,
2836 vdu_count_index,
2837 vdu_name,
2838 vnf_config_primitive,
2839 primitive_params,
2840 operationState,
2841 detailed_status,
2842 operationType,
2843 RO_nsr_id,
2844 RO_scaling_info)
2845 return self.SUBOPERATION_STATUS_NEW
2846 else:
2847 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
2848 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00002849 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02002850
preethika.pdf7d8e02019-12-10 13:10:48 +00002851 # Function to return execution_environment id
2852
2853 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00002854 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00002855 for vca in vca_deployed_list:
2856 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
2857 return vca["ee_id"]
2858
tierno588547c2020-07-01 15:30:20 +00002859 async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
2860 vca_index, destroy_ee=True, exec_primitives=True):
tiernoe876f672020-02-13 14:34:48 +00002861 """
2862 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
2863 :param logging_text:
2864 :param db_nslcmop:
2865 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
2866 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
2867 :param vca_index: index in the database _admin.deployed.VCA
2868 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
tierno588547c2020-07-01 15:30:20 +00002869 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
2870 not executed properly
tiernoe876f672020-02-13 14:34:48 +00002871 :return: None or exception
2872 """
tiernoe876f672020-02-13 14:34:48 +00002873
tierno588547c2020-07-01 15:30:20 +00002874 self.logger.debug(
2875 logging_text + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
2876 vca_index, vca_deployed, config_descriptor, destroy_ee
2877 )
2878 )
2879
2880 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
2881
2882 # execute terminate_primitives
2883 if exec_primitives:
tierno4fa7f8e2020-07-08 15:33:55 +00002884 terminate_primitives = self._get_terminate_config_primitive(
2885 config_descriptor.get("terminate-config-primitive"), vca_deployed)
tierno588547c2020-07-01 15:30:20 +00002886 vdu_id = vca_deployed.get("vdu_id")
2887 vdu_count_index = vca_deployed.get("vdu_count_index")
2888 vdu_name = vca_deployed.get("vdu_name")
2889 vnf_index = vca_deployed.get("member-vnf-index")
2890 if terminate_primitives and vca_deployed.get("needed_terminate"):
tierno588547c2020-07-01 15:30:20 +00002891 for seq in terminate_primitives:
2892 # For each sequence in list, get primitive and call _ns_execute_primitive()
2893 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
2894 vnf_index, seq.get("name"))
2895 self.logger.debug(logging_text + step)
2896 # Create the primitive for each sequence, i.e. "primitive": "touch"
2897 primitive = seq.get('name')
2898 mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)
tierno588547c2020-07-01 15:30:20 +00002899
2900 # Add sub-operation
2901 self._add_suboperation(db_nslcmop,
2902 vnf_index,
2903 vdu_id,
2904 vdu_count_index,
2905 vdu_name,
2906 primitive,
2907 mapped_primitive_params)
2908 # Sub-operations: Call _ns_execute_primitive() instead of action()
2909 try:
2910 result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
2911 mapped_primitive_params,
2912 vca_type=vca_type)
2913 except LcmException:
2914 # this happens when VCA is not deployed. In this case it is not needed to terminate
2915 continue
2916 result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
2917 if result not in result_ok:
2918 raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
2919 "error {}".format(seq.get("name"), vnf_index, result_detail))
2920 # set that this VCA do not need terminated
2921 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
2922 self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})
tiernoe876f672020-02-13 14:34:48 +00002923
tierno89f82902020-07-03 14:52:28 +00002924 if vca_deployed.get("prometheus_jobs") and self.prometheus:
2925 await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])
2926
tiernoe876f672020-02-13 14:34:48 +00002927 if destroy_ee:
tierno588547c2020-07-01 15:30:20 +00002928 await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"])
kuuse0ca67472019-05-13 15:59:27 +02002929
tierno51183952020-04-03 15:48:18 +00002930 async def _delete_all_N2VC(self, db_nsr: dict):
2931 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
2932 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00002933 try:
2934 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
2935 except N2VCNotFound: # already deleted. Skip
2936 pass
tierno51183952020-04-03 15:48:18 +00002937 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
quilesj3655ae02019-12-12 16:08:35 +00002938
tiernoe876f672020-02-13 14:34:48 +00002939 async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
2940 """
2941 Terminates a deployment from RO
2942 :param logging_text:
2943 :param nsr_deployed: db_nsr._admin.deployed
2944 :param nsr_id:
2945 :param nslcmop_id:
2946 :param stage: list of string with the content to write on db_nslcmop.detailed-status.
2947 this method will update only the index 2, but it will write on database the concatenated content of the list
2948 :return:
2949 """
2950 db_nsr_update = {}
2951 failed_detail = []
2952 ro_nsr_id = ro_delete_action = None
2953 if nsr_deployed and nsr_deployed.get("RO"):
2954 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
2955 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
2956 try:
2957 if ro_nsr_id:
2958 stage[2] = "Deleting ns from VIM."
2959 db_nsr_update["detailed-status"] = " ".join(stage)
2960 self._write_op_status(nslcmop_id, stage)
2961 self.logger.debug(logging_text + stage[2])
2962 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2963 self._write_op_status(nslcmop_id, stage)
2964 desc = await self.RO.delete("ns", ro_nsr_id)
2965 ro_delete_action = desc["action_id"]
2966 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
2967 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
2968 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2969 if ro_delete_action:
2970 # wait until NS is deleted from VIM
2971 stage[2] = "Waiting ns deleted from VIM."
2972 detailed_status_old = None
2973 self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
2974 ro_delete_action))
2975 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2976 self._write_op_status(nslcmop_id, stage)
kuused124bfe2019-06-18 12:09:24 +02002977
tiernoe876f672020-02-13 14:34:48 +00002978 delete_timeout = 20 * 60 # 20 minutes
2979 while delete_timeout > 0:
2980 desc = await self.RO.show(
2981 "ns",
2982 item_id_name=ro_nsr_id,
2983 extra_item="action",
2984 extra_item_id=ro_delete_action)
2985
2986 # deploymentStatus
2987 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
2988
2989 ns_status, ns_status_info = self.RO.check_action_status(desc)
2990 if ns_status == "ERROR":
2991 raise ROclient.ROClientException(ns_status_info)
2992 elif ns_status == "BUILD":
2993 stage[2] = "Deleting from VIM {}".format(ns_status_info)
2994 elif ns_status == "ACTIVE":
2995 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
2996 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2997 break
2998 else:
2999 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
3000 if stage[2] != detailed_status_old:
3001 detailed_status_old = stage[2]
3002 db_nsr_update["detailed-status"] = " ".join(stage)
3003 self._write_op_status(nslcmop_id, stage)
3004 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3005 await asyncio.sleep(5, loop=self.loop)
3006 delete_timeout -= 5
3007 else: # delete_timeout <= 0:
3008 raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")
3009
3010 except Exception as e:
3011 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3012 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3013 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
3014 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3015 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
3016 self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
3017 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
tiernoa2143262020-03-27 16:20:40 +00003018 failed_detail.append("delete conflict: {}".format(e))
3019 self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00003020 else:
tiernoa2143262020-03-27 16:20:40 +00003021 failed_detail.append("delete error: {}".format(e))
3022 self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00003023
3024 # Delete nsd
3025 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
3026 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
3027 try:
3028 stage[2] = "Deleting nsd from RO."
3029 db_nsr_update["detailed-status"] = " ".join(stage)
3030 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3031 self._write_op_status(nslcmop_id, stage)
3032 await self.RO.delete("nsd", ro_nsd_id)
3033 self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
3034 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
3035 except Exception as e:
3036 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3037 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
3038 self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
3039 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
3040 failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
3041 self.logger.debug(logging_text + failed_detail[-1])
3042 else:
3043 failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
3044 self.logger.error(logging_text + failed_detail[-1])
3045
3046 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
3047 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
3048 if not vnf_deployed or not vnf_deployed["id"]:
3049 continue
3050 try:
3051 ro_vnfd_id = vnf_deployed["id"]
3052 stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
3053 vnf_deployed["member-vnf-index"], ro_vnfd_id)
3054 db_nsr_update["detailed-status"] = " ".join(stage)
3055 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3056 self._write_op_status(nslcmop_id, stage)
3057 await self.RO.delete("vnfd", ro_vnfd_id)
3058 self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
3059 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
3060 except Exception as e:
3061 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3062 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
3063 self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
3064 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
3065 failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
3066 self.logger.debug(logging_text + failed_detail[-1])
3067 else:
3068 failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
3069 self.logger.error(logging_text + failed_detail[-1])
3070
tiernoa2143262020-03-27 16:20:40 +00003071 if failed_detail:
3072 stage[2] = "Error deleting from VIM"
3073 else:
3074 stage[2] = "Deleted from VIM"
tiernoe876f672020-02-13 14:34:48 +00003075 db_nsr_update["detailed-status"] = " ".join(stage)
3076 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3077 self._write_op_status(nslcmop_id, stage)
3078
3079 if failed_detail:
tiernoa2143262020-03-27 16:20:40 +00003080 raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00003081
3082 async def terminate(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003083 # Try to lock HA task here
3084 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3085 if not task_is_locked_by_me:
3086 return
3087
tierno59d22d22018-09-25 18:10:19 +02003088 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
3089 self.logger.debug(logging_text + "Enter")
tiernoe876f672020-02-13 14:34:48 +00003090 timeout_ns_terminate = self.timeout_ns_terminate
tierno59d22d22018-09-25 18:10:19 +02003091 db_nsr = None
3092 db_nslcmop = None
tiernoa17d4f42020-04-28 09:59:23 +00003093 operation_params = None
tierno59d22d22018-09-25 18:10:19 +02003094 exc = None
tiernoe876f672020-02-13 14:34:48 +00003095 error_list = [] # annotates all failed error messages
tierno59d22d22018-09-25 18:10:19 +02003096 db_nslcmop_update = {}
tiernoc2564fe2019-01-28 16:18:56 +00003097 autoremove = False # autoremove after terminated
tiernoe876f672020-02-13 14:34:48 +00003098 tasks_dict_info = {}
3099 db_nsr_update = {}
3100 stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
3101 # ^ contains [stage, step, VIM-status]
tierno59d22d22018-09-25 18:10:19 +02003102 try:
kuused124bfe2019-06-18 12:09:24 +02003103 # wait for any previous tasks in process
3104 await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)
3105
tiernoe876f672020-02-13 14:34:48 +00003106 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
3107 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3108 operation_params = db_nslcmop.get("operationParams") or {}
3109 if operation_params.get("timeout_ns_terminate"):
3110 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
3111 stage[1] = "Getting nsr={} from db.".format(nsr_id)
3112 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3113
3114 db_nsr_update["operational-status"] = "terminating"
3115 db_nsr_update["config-status"] = "terminating"
quilesj4cda56b2019-12-05 10:02:20 +00003116 self._write_ns_status(
3117 nsr_id=nsr_id,
3118 ns_state="TERMINATING",
3119 current_operation="TERMINATING",
tiernoe876f672020-02-13 14:34:48 +00003120 current_operation_id=nslcmop_id,
3121 other_update=db_nsr_update
quilesj4cda56b2019-12-05 10:02:20 +00003122 )
quilesj3655ae02019-12-12 16:08:35 +00003123 self._write_op_status(
3124 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00003125 queuePosition=0,
3126 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00003127 )
tiernoe876f672020-02-13 14:34:48 +00003128 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
tierno59d22d22018-09-25 18:10:19 +02003129 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
3130 return
tierno59d22d22018-09-25 18:10:19 +02003131
tiernoe876f672020-02-13 14:34:48 +00003132 stage[1] = "Getting vnf descriptors from db."
3133 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
3134 db_vnfds_from_id = {}
3135 db_vnfds_from_member_index = {}
3136 # Loop over VNFRs
3137 for vnfr in db_vnfrs_list:
3138 vnfd_id = vnfr["vnfd-id"]
3139 if vnfd_id not in db_vnfds_from_id:
3140 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
3141 db_vnfds_from_id[vnfd_id] = vnfd
3142 db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]
calvinosanch9f9c6f22019-11-04 13:37:39 +01003143
tiernoe876f672020-02-13 14:34:48 +00003144 # Destroy individual execution environments when there are terminating primitives.
3145 # Rest of EE will be deleted at once
tierno588547c2020-07-01 15:30:20 +00003146 # TODO - check before calling _destroy_N2VC
3147 # if not operation_params.get("skip_terminate_primitives"):#
3148 # or not vca.get("needed_terminate"):
3149 stage[0] = "Stage 2/3 execute terminating primitives."
3150 self.logger.debug(logging_text + stage[0])
3151 stage[1] = "Looking execution environment that needs terminate."
3152 self.logger.debug(logging_text + stage[1])
tierno89f82902020-07-03 14:52:28 +00003153 # self.logger.debug("nsr_deployed: {}".format(nsr_deployed))
tierno588547c2020-07-01 15:30:20 +00003154 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
3155 self.logger.debug("vca_index: {}, vca: {}".format(vca_index, vca))
3156 config_descriptor = None
3157 if not vca or not vca.get("ee_id"):
3158 continue
3159 if not vca.get("member-vnf-index"):
3160 # ns
3161 config_descriptor = db_nsr.get("ns-configuration")
3162 elif vca.get("vdu_id"):
3163 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3164 vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
3165 if vdud:
3166 config_descriptor = vdud.get("vdu-configuration")
3167 elif vca.get("kdu_name"):
3168 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3169 kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
3170 if kdud:
3171 config_descriptor = kdud.get("kdu-configuration")
3172 else:
3173 config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
tierno588547c2020-07-01 15:30:20 +00003174 vca_type = vca.get("type")
3175 exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
3176 vca.get("needed_terminate"))
tierno89f82902020-07-03 14:52:28 +00003177 # For helm we must destroy_ee
3178 destroy_ee = "True" if vca_type == "helm" else "False"
3179 task = asyncio.ensure_future(
3180 self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index,
3181 destroy_ee, exec_terminate_primitives))
tierno588547c2020-07-01 15:30:20 +00003182 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
tierno59d22d22018-09-25 18:10:19 +02003183
tierno588547c2020-07-01 15:30:20 +00003184 # wait for pending tasks of terminate primitives
3185 if tasks_dict_info:
3186 self.logger.debug(logging_text + 'Waiting for terminate primitive pending tasks...')
3187 error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
3188 min(self.timeout_charm_delete, timeout_ns_terminate),
3189 stage, nslcmop_id)
3190 if error_list:
3191 return # raise LcmException("; ".join(error_list))
3192 tasks_dict_info.clear()
tierno82974b22018-11-27 21:55:36 +00003193
tiernoe876f672020-02-13 14:34:48 +00003194 # remove All execution environments at once
3195 stage[0] = "Stage 3/3 delete all."
quilesj3655ae02019-12-12 16:08:35 +00003196
tierno49676be2020-04-07 16:34:35 +00003197 if nsr_deployed.get("VCA"):
3198 stage[1] = "Deleting all execution environments."
3199 self.logger.debug(logging_text + stage[1])
3200 task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
3201 timeout=self.timeout_charm_delete))
3202 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
3203 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
tierno59d22d22018-09-25 18:10:19 +02003204
tiernoe876f672020-02-13 14:34:48 +00003205 # Delete from k8scluster
3206 stage[1] = "Deleting KDUs."
3207 self.logger.debug(logging_text + stage[1])
3208 # print(nsr_deployed)
3209 for kdu in get_iterable(nsr_deployed, "K8s"):
3210 if not kdu or not kdu.get("kdu-instance"):
3211 continue
3212 kdu_instance = kdu.get("kdu-instance")
tiernoa2143262020-03-27 16:20:40 +00003213 if kdu.get("k8scluster-type") in self.k8scluster_map:
tiernoe876f672020-02-13 14:34:48 +00003214 task_delete_kdu_instance = asyncio.ensure_future(
tiernoa2143262020-03-27 16:20:40 +00003215 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
3216 cluster_uuid=kdu.get("k8scluster-uuid"),
3217 kdu_instance=kdu_instance))
tiernoe876f672020-02-13 14:34:48 +00003218 else:
3219 self.logger.error(logging_text + "Unknown k8s deployment type {}".
3220 format(kdu.get("k8scluster-type")))
3221 continue
3222 tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
tierno59d22d22018-09-25 18:10:19 +02003223
3224 # remove from RO
tiernoe876f672020-02-13 14:34:48 +00003225 stage[1] = "Deleting ns from VIM."
tierno69f0d382020-05-07 13:08:09 +00003226 if self.ng_ro:
3227 task_delete_ro = asyncio.ensure_future(
3228 self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
3229 else:
3230 task_delete_ro = asyncio.ensure_future(
3231 self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
tiernoe876f672020-02-13 14:34:48 +00003232 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
tierno59d22d22018-09-25 18:10:19 +02003233
tiernoe876f672020-02-13 14:34:48 +00003234 # rest of staff will be done at finally
3235
3236 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
3237 self.logger.error(logging_text + "Exit Exception {}".format(e))
3238 exc = e
3239 except asyncio.CancelledError:
3240 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
3241 exc = "Operation was cancelled"
3242 except Exception as e:
3243 exc = traceback.format_exc()
3244 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
3245 finally:
3246 if exc:
3247 error_list.append(str(exc))
tierno59d22d22018-09-25 18:10:19 +02003248 try:
tiernoe876f672020-02-13 14:34:48 +00003249 # wait for pending tasks
3250 if tasks_dict_info:
3251 stage[1] = "Waiting for terminate pending tasks."
3252 self.logger.debug(logging_text + stage[1])
3253 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
3254 stage, nslcmop_id)
3255 stage[1] = stage[2] = ""
3256 except asyncio.CancelledError:
3257 error_list.append("Cancelled")
3258 # TODO cancell all tasks
3259 except Exception as exc:
3260 error_list.append(str(exc))
3261 # update status at database
3262 if error_list:
3263 error_detail = "; ".join(error_list)
3264 # self.logger.error(logging_text + error_detail)
tiernoa2143262020-03-27 16:20:40 +00003265 error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
3266 error_description_nsr = 'Operation: TERMINATING.{}, Stage {}.'.format(nslcmop_id, stage[0])
tierno59d22d22018-09-25 18:10:19 +02003267
tierno59d22d22018-09-25 18:10:19 +02003268 db_nsr_update["operational-status"] = "failed"
tiernoa2143262020-03-27 16:20:40 +00003269 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00003270 db_nslcmop_update["detailed-status"] = error_detail
3271 nslcmop_operation_state = "FAILED"
3272 ns_state = "BROKEN"
tierno59d22d22018-09-25 18:10:19 +02003273 else:
tiernoa2143262020-03-27 16:20:40 +00003274 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00003275 error_description_nsr = error_description_nslcmop = None
3276 ns_state = "NOT_INSTANTIATED"
tierno59d22d22018-09-25 18:10:19 +02003277 db_nsr_update["operational-status"] = "terminated"
3278 db_nsr_update["detailed-status"] = "Done"
3279 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
3280 db_nslcmop_update["detailed-status"] = "Done"
tiernoe876f672020-02-13 14:34:48 +00003281 nslcmop_operation_state = "COMPLETED"
tierno59d22d22018-09-25 18:10:19 +02003282
tiernoe876f672020-02-13 14:34:48 +00003283 if db_nsr:
3284 self._write_ns_status(
3285 nsr_id=nsr_id,
3286 ns_state=ns_state,
3287 current_operation="IDLE",
3288 current_operation_id=None,
3289 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00003290 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00003291 other_update=db_nsr_update
3292 )
tiernoa17d4f42020-04-28 09:59:23 +00003293 self._write_op_status(
3294 op_id=nslcmop_id,
3295 stage="",
3296 error_message=error_description_nslcmop,
3297 operation_state=nslcmop_operation_state,
3298 other_update=db_nslcmop_update,
3299 )
3300 if operation_params:
tiernoe876f672020-02-13 14:34:48 +00003301 autoremove = operation_params.get("autoremove", False)
tierno59d22d22018-09-25 18:10:19 +02003302 if nslcmop_operation_state:
3303 try:
3304 await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tiernoc2564fe2019-01-28 16:18:56 +00003305 "operationState": nslcmop_operation_state,
3306 "autoremove": autoremove},
tierno8a518872018-12-21 13:42:14 +00003307 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003308 except Exception as e:
3309 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02003310
tierno59d22d22018-09-25 18:10:19 +02003311 self.logger.debug(logging_text + "Exit")
3312 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
3313
tiernoe876f672020-02-13 14:34:48 +00003314 async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
3315 time_start = time()
tiernoa2143262020-03-27 16:20:40 +00003316 error_detail_list = []
tiernoe876f672020-02-13 14:34:48 +00003317 error_list = []
3318 pending_tasks = list(created_tasks_info.keys())
3319 num_tasks = len(pending_tasks)
3320 num_done = 0
3321 stage[1] = "{}/{}.".format(num_done, num_tasks)
3322 self._write_op_status(nslcmop_id, stage)
tiernoe876f672020-02-13 14:34:48 +00003323 while pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00003324 new_error = None
tiernoe876f672020-02-13 14:34:48 +00003325 _timeout = timeout + time_start - time()
3326 done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
3327 return_when=asyncio.FIRST_COMPLETED)
3328 num_done += len(done)
3329 if not done: # Timeout
3330 for task in pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00003331 new_error = created_tasks_info[task] + ": Timeout"
3332 error_detail_list.append(new_error)
3333 error_list.append(new_error)
tiernoe876f672020-02-13 14:34:48 +00003334 break
3335 for task in done:
3336 if task.cancelled():
tierno067e04a2020-03-31 12:53:13 +00003337 exc = "Cancelled"
tiernoe876f672020-02-13 14:34:48 +00003338 else:
3339 exc = task.exception()
tierno067e04a2020-03-31 12:53:13 +00003340 if exc:
3341 if isinstance(exc, asyncio.TimeoutError):
3342 exc = "Timeout"
3343 new_error = created_tasks_info[task] + ": {}".format(exc)
3344 error_list.append(created_tasks_info[task])
3345 error_detail_list.append(new_error)
tierno28c63da2020-04-20 16:28:56 +00003346 if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException,
3347 K8sException)):
tierno067e04a2020-03-31 12:53:13 +00003348 self.logger.error(logging_text + new_error)
tiernoe876f672020-02-13 14:34:48 +00003349 else:
tierno067e04a2020-03-31 12:53:13 +00003350 exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
3351 self.logger.error(logging_text + created_tasks_info[task] + exc_traceback)
3352 else:
3353 self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
tiernoe876f672020-02-13 14:34:48 +00003354 stage[1] = "{}/{}.".format(num_done, num_tasks)
3355 if new_error:
tiernoa2143262020-03-27 16:20:40 +00003356 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
tiernoe876f672020-02-13 14:34:48 +00003357 if nsr_id: # update also nsr
tiernoa2143262020-03-27 16:20:40 +00003358 self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
3359 "errorDetail": ". ".join(error_detail_list)})
tiernoe876f672020-02-13 14:34:48 +00003360 self._write_op_status(nslcmop_id, stage)
tiernoa2143262020-03-27 16:20:40 +00003361 return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00003362
tiernoda964822019-01-14 15:53:47 +00003363 @staticmethod
3364 def _map_primitive_params(primitive_desc, params, instantiation_params):
3365 """
3366 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
3367 The default-value is used. If it is between < > it look for a value at instantiation_params
3368 :param primitive_desc: portion of VNFD/NSD that describes primitive
3369 :param params: Params provided by user
3370 :param instantiation_params: Instantiation params provided by user
3371 :return: a dictionary with the calculated params
3372 """
3373 calculated_params = {}
3374 for parameter in primitive_desc.get("parameter", ()):
3375 param_name = parameter["name"]
3376 if param_name in params:
3377 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00003378 elif "default-value" in parameter or "value" in parameter:
3379 if "value" in parameter:
3380 calculated_params[param_name] = parameter["value"]
3381 else:
3382 calculated_params[param_name] = parameter["default-value"]
3383 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
3384 and calculated_params[param_name].endswith(">"):
3385 if calculated_params[param_name][1:-1] in instantiation_params:
3386 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
tiernoda964822019-01-14 15:53:47 +00003387 else:
3388 raise LcmException("Parameter {} needed to execute primitive {} not provided".
tiernod8323042019-08-09 11:32:23 +00003389 format(calculated_params[param_name], primitive_desc["name"]))
tiernoda964822019-01-14 15:53:47 +00003390 else:
3391 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3392 format(param_name, primitive_desc["name"]))
tierno59d22d22018-09-25 18:10:19 +02003393
tiernoda964822019-01-14 15:53:47 +00003394 if isinstance(calculated_params[param_name], (dict, list, tuple)):
3395 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], default_flow_style=True,
3396 width=256)
3397 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
3398 calculated_params[param_name] = calculated_params[param_name][7:]
tiernoc3f2a822019-11-05 13:45:04 +00003399
3400 # add always ns_config_info if primitive name is config
3401 if primitive_desc["name"] == "config":
3402 if "ns_config_info" in instantiation_params:
3403 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
tiernoda964822019-01-14 15:53:47 +00003404 return calculated_params
3405
tierno4fa7f8e2020-07-08 15:33:55 +00003406 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None,
3407 ee_descriptor_id=None):
tiernoe876f672020-02-13 14:34:48 +00003408 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
3409 for vca in deployed_vca:
3410 if not vca:
3411 continue
3412 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3413 continue
tiernoe876f672020-02-13 14:34:48 +00003414 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3415 continue
3416 if kdu_name and kdu_name != vca["kdu_name"]:
3417 continue
tierno4fa7f8e2020-07-08 15:33:55 +00003418 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
3419 continue
tiernoe876f672020-02-13 14:34:48 +00003420 break
3421 else:
3422 # vca_deployed not found
tierno4fa7f8e2020-07-08 15:33:55 +00003423 raise LcmException("charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
3424 " is not deployed".format(member_vnf_index, vdu_id, vdu_count_index, kdu_name,
3425 ee_descriptor_id))
quilesj7e13aeb2019-10-08 13:34:55 +02003426
tiernoe876f672020-02-13 14:34:48 +00003427 # get ee_id
3428 ee_id = vca.get("ee_id")
tierno588547c2020-07-01 15:30:20 +00003429 vca_type = vca.get("type", "lxc_proxy_charm") # default value for backward compatibility - proxy charm
tiernoe876f672020-02-13 14:34:48 +00003430 if not ee_id:
tierno067e04a2020-03-31 12:53:13 +00003431 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
tiernoe876f672020-02-13 14:34:48 +00003432 "execution environment"
tierno067e04a2020-03-31 12:53:13 +00003433 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
tierno588547c2020-07-01 15:30:20 +00003434 return ee_id, vca_type
tiernoe876f672020-02-13 14:34:48 +00003435
3436 async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0,
tierno588547c2020-07-01 15:30:20 +00003437 retries_interval=30, timeout=None,
3438 vca_type=None, db_dict=None) -> (str, str):
tiernoda964822019-01-14 15:53:47 +00003439 try:
tierno98ad6ea2019-05-30 17:16:28 +00003440 if primitive == "config":
3441 primitive_params = {"params": primitive_params}
tierno2fc7ce52019-06-11 22:50:01 +00003442
tierno588547c2020-07-01 15:30:20 +00003443 vca_type = vca_type or "lxc_proxy_charm"
3444
quilesj7e13aeb2019-10-08 13:34:55 +02003445 while retries >= 0:
3446 try:
tierno067e04a2020-03-31 12:53:13 +00003447 output = await asyncio.wait_for(
tierno588547c2020-07-01 15:30:20 +00003448 self.vca_map[vca_type].exec_primitive(
tierno067e04a2020-03-31 12:53:13 +00003449 ee_id=ee_id,
3450 primitive_name=primitive,
3451 params_dict=primitive_params,
3452 progress_timeout=self.timeout_progress_primitive,
tierno588547c2020-07-01 15:30:20 +00003453 total_timeout=self.timeout_primitive,
3454 db_dict=db_dict),
tierno067e04a2020-03-31 12:53:13 +00003455 timeout=timeout or self.timeout_primitive)
quilesj7e13aeb2019-10-08 13:34:55 +02003456 # execution was OK
3457 break
tierno067e04a2020-03-31 12:53:13 +00003458 except asyncio.CancelledError:
3459 raise
3460 except Exception as e: # asyncio.TimeoutError
3461 if isinstance(e, asyncio.TimeoutError):
3462 e = "Timeout"
quilesj7e13aeb2019-10-08 13:34:55 +02003463 retries -= 1
3464 if retries >= 0:
tierno73d8bd02019-11-18 17:33:27 +00003465 self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +02003466 # wait and retry
3467 await asyncio.sleep(retries_interval, loop=self.loop)
tierno73d8bd02019-11-18 17:33:27 +00003468 else:
tierno067e04a2020-03-31 12:53:13 +00003469 return 'FAILED', str(e)
quilesj7e13aeb2019-10-08 13:34:55 +02003470
tiernoe876f672020-02-13 14:34:48 +00003471 return 'COMPLETED', output
quilesj7e13aeb2019-10-08 13:34:55 +02003472
tierno067e04a2020-03-31 12:53:13 +00003473 except (LcmException, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00003474 raise
quilesj7e13aeb2019-10-08 13:34:55 +02003475 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00003476 return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02003477
3478 async def action(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003479
3480 # Try to lock HA task here
3481 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3482 if not task_is_locked_by_me:
3483 return
3484
tierno59d22d22018-09-25 18:10:19 +02003485 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
3486 self.logger.debug(logging_text + "Enter")
3487 # get all needed from database
3488 db_nsr = None
3489 db_nslcmop = None
tiernoe876f672020-02-13 14:34:48 +00003490 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003491 db_nslcmop_update = {}
3492 nslcmop_operation_state = None
tierno067e04a2020-03-31 12:53:13 +00003493 error_description_nslcmop = None
tierno59d22d22018-09-25 18:10:19 +02003494 exc = None
3495 try:
kuused124bfe2019-06-18 12:09:24 +02003496 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003497 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003498 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
3499
quilesj4cda56b2019-12-05 10:02:20 +00003500 self._write_ns_status(
3501 nsr_id=nsr_id,
3502 ns_state=None,
3503 current_operation="RUNNING ACTION",
3504 current_operation_id=nslcmop_id
3505 )
3506
tierno59d22d22018-09-25 18:10:19 +02003507 step = "Getting information from database"
3508 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3509 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernoda964822019-01-14 15:53:47 +00003510
tiernoe4f7e6c2018-11-27 14:55:30 +00003511 nsr_deployed = db_nsr["_admin"].get("deployed")
tierno1b633412019-02-25 16:48:23 +00003512 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tierno59d22d22018-09-25 18:10:19 +02003513 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003514 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
tiernoe4f7e6c2018-11-27 14:55:30 +00003515 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
tierno067e04a2020-03-31 12:53:13 +00003516 primitive = db_nslcmop["operationParams"]["primitive"]
3517 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
3518 timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)
tierno59d22d22018-09-25 18:10:19 +02003519
tierno1b633412019-02-25 16:48:23 +00003520 if vnf_index:
3521 step = "Getting vnfr from database"
3522 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3523 step = "Getting vnfd from database"
3524 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
3525 else:
tierno067e04a2020-03-31 12:53:13 +00003526 step = "Getting nsd from database"
3527 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
tiernoda964822019-01-14 15:53:47 +00003528
tierno82974b22018-11-27 21:55:36 +00003529 # for backward compatibility
3530 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3531 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3532 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3533 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3534
tiernoda964822019-01-14 15:53:47 +00003535 # look for primitive
tierno4fa7f8e2020-07-08 15:33:55 +00003536 config_primitive_desc = descriptor_configuration = None
tiernoda964822019-01-14 15:53:47 +00003537 if vdu_id:
3538 for vdu in get_iterable(db_vnfd, "vdu"):
3539 if vdu_id == vdu["id"]:
tierno4fa7f8e2020-07-08 15:33:55 +00003540 descriptor_configuration = vdu.get("vdu-configuration")
tierno067e04a2020-03-31 12:53:13 +00003541 break
calvinosanch9f9c6f22019-11-04 13:37:39 +01003542 elif kdu_name:
tierno067e04a2020-03-31 12:53:13 +00003543 for kdu in get_iterable(db_vnfd, "kdu"):
3544 if kdu_name == kdu["name"]:
tierno4fa7f8e2020-07-08 15:33:55 +00003545 descriptor_configuration = kdu.get("kdu-configuration")
tierno067e04a2020-03-31 12:53:13 +00003546 break
tierno1b633412019-02-25 16:48:23 +00003547 elif vnf_index:
tierno4fa7f8e2020-07-08 15:33:55 +00003548 descriptor_configuration = db_vnfd.get("vnf-configuration")
tierno1b633412019-02-25 16:48:23 +00003549 else:
tierno4fa7f8e2020-07-08 15:33:55 +00003550 descriptor_configuration = db_nsd.get("ns-configuration")
3551
3552 if descriptor_configuration and descriptor_configuration.get("config-primitive"):
3553 for config_primitive in descriptor_configuration["config-primitive"]:
tierno1b633412019-02-25 16:48:23 +00003554 if config_primitive["name"] == primitive:
3555 config_primitive_desc = config_primitive
3556 break
tiernoda964822019-01-14 15:53:47 +00003557
tierno067e04a2020-03-31 12:53:13 +00003558 if not config_primitive_desc and not (kdu_name and primitive in ("upgrade", "rollback", "status")):
tierno1b633412019-02-25 16:48:23 +00003559 raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
3560 format(primitive))
tierno4fa7f8e2020-07-08 15:33:55 +00003561 primitive_name = config_primitive_desc.get("execution-environment-primitive", primitive)
3562 ee_descriptor_id = config_primitive_desc.get("execution-environment-ref")
tierno1b633412019-02-25 16:48:23 +00003563
tierno1b633412019-02-25 16:48:23 +00003564 if vnf_index:
tierno626e0152019-11-29 14:16:16 +00003565 if vdu_id:
3566 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
tierno067e04a2020-03-31 12:53:13 +00003567 desc_params = self._format_additional_params(vdur.get("additionalParams"))
3568 elif kdu_name:
3569 kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
3570 desc_params = self._format_additional_params(kdur.get("additionalParams"))
3571 else:
3572 desc_params = self._format_additional_params(db_vnfr.get("additionalParamsForVnf"))
tierno1b633412019-02-25 16:48:23 +00003573 else:
tierno067e04a2020-03-31 12:53:13 +00003574 desc_params = self._format_additional_params(db_nsr.get("additionalParamsForNs"))
tiernoda964822019-01-14 15:53:47 +00003575
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003576 if kdu_name:
3577 kdu_action = True if not deep_get(kdu, ("kdu-configuration", "juju")) else False
3578
tiernoda964822019-01-14 15:53:47 +00003579 # TODO check if ns is in a proper status
tierno4fa7f8e2020-07-08 15:33:55 +00003580 if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action):
tierno067e04a2020-03-31 12:53:13 +00003581 # kdur and desc_params already set from before
3582 if primitive_params:
3583 desc_params.update(primitive_params)
3584 # TODO Check if we will need something at vnf level
3585 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
3586 if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
3587 break
3588 else:
3589 raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))
quilesj7e13aeb2019-10-08 13:34:55 +02003590
tierno067e04a2020-03-31 12:53:13 +00003591 if kdu.get("k8scluster-type") not in self.k8scluster_map:
3592 msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
3593 raise LcmException(msg)
3594
3595 db_dict = {"collection": "nsrs",
3596 "filter": {"_id": nsr_id},
3597 "path": "_admin.deployed.K8s.{}".format(index)}
tierno4fa7f8e2020-07-08 15:33:55 +00003598 self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name))
3599 step = "Executing kdu {}".format(primitive_name)
3600 if primitive_name == "upgrade":
tierno067e04a2020-03-31 12:53:13 +00003601 if desc_params.get("kdu_model"):
3602 kdu_model = desc_params.get("kdu_model")
3603 del desc_params["kdu_model"]
3604 else:
3605 kdu_model = kdu.get("kdu-model")
3606 parts = kdu_model.split(sep=":")
3607 if len(parts) == 2:
3608 kdu_model = parts[0]
3609
3610 detailed_status = await asyncio.wait_for(
3611 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
3612 cluster_uuid=kdu.get("k8scluster-uuid"),
3613 kdu_instance=kdu.get("kdu-instance"),
3614 atomic=True, kdu_model=kdu_model,
3615 params=desc_params, db_dict=db_dict,
3616 timeout=timeout_ns_action),
3617 timeout=timeout_ns_action + 10)
3618 self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
tierno4fa7f8e2020-07-08 15:33:55 +00003619 elif primitive_name == "rollback":
tierno067e04a2020-03-31 12:53:13 +00003620 detailed_status = await asyncio.wait_for(
3621 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
3622 cluster_uuid=kdu.get("k8scluster-uuid"),
3623 kdu_instance=kdu.get("kdu-instance"),
3624 db_dict=db_dict),
3625 timeout=timeout_ns_action)
tierno4fa7f8e2020-07-08 15:33:55 +00003626 elif primitive_name == "status":
tierno067e04a2020-03-31 12:53:13 +00003627 detailed_status = await asyncio.wait_for(
3628 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
3629 cluster_uuid=kdu.get("k8scluster-uuid"),
3630 kdu_instance=kdu.get("kdu-instance")),
3631 timeout=timeout_ns_action)
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003632 else:
3633 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
3634 params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)
3635
3636 detailed_status = await asyncio.wait_for(
3637 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
3638 cluster_uuid=kdu.get("k8scluster-uuid"),
3639 kdu_instance=kdu_instance,
tierno4fa7f8e2020-07-08 15:33:55 +00003640 primitive_name=primitive_name,
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003641 params=params, db_dict=db_dict,
3642 timeout=timeout_ns_action),
3643 timeout=timeout_ns_action)
tierno067e04a2020-03-31 12:53:13 +00003644
3645 if detailed_status:
3646 nslcmop_operation_state = 'COMPLETED'
3647 else:
3648 detailed_status = ''
3649 nslcmop_operation_state = 'FAILED'
tierno067e04a2020-03-31 12:53:13 +00003650 else:
tierno588547c2020-07-01 15:30:20 +00003651 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
3652 member_vnf_index=vnf_index,
3653 vdu_id=vdu_id,
tierno4fa7f8e2020-07-08 15:33:55 +00003654 vdu_count_index=vdu_count_index,
3655 ee_descriptor_id=ee_descriptor_id)
tierno588547c2020-07-01 15:30:20 +00003656 db_nslcmop_notif = {"collection": "nslcmops",
3657 "filter": {"_id": nslcmop_id},
3658 "path": "admin.VCA"}
tierno067e04a2020-03-31 12:53:13 +00003659 nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
tierno588547c2020-07-01 15:30:20 +00003660 ee_id,
tierno4fa7f8e2020-07-08 15:33:55 +00003661 primitive=primitive_name,
tierno067e04a2020-03-31 12:53:13 +00003662 primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
tierno588547c2020-07-01 15:30:20 +00003663 timeout=timeout_ns_action,
3664 vca_type=vca_type,
3665 db_dict=db_nslcmop_notif)
tierno067e04a2020-03-31 12:53:13 +00003666
3667 db_nslcmop_update["detailed-status"] = detailed_status
3668 error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
3669 self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
3670 detailed_status))
tierno59d22d22018-09-25 18:10:19 +02003671 return # database update is called inside finally
3672
tiernof59ad6c2020-04-08 12:50:52 +00003673 except (DbException, LcmException, N2VCException, K8sException) as e:
tierno59d22d22018-09-25 18:10:19 +02003674 self.logger.error(logging_text + "Exit Exception {}".format(e))
3675 exc = e
3676 except asyncio.CancelledError:
3677 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
3678 exc = "Operation was cancelled"
tierno067e04a2020-03-31 12:53:13 +00003679 except asyncio.TimeoutError:
3680 self.logger.error(logging_text + "Timeout while '{}'".format(step))
3681 exc = "Timeout"
tierno59d22d22018-09-25 18:10:19 +02003682 except Exception as e:
3683 exc = traceback.format_exc()
3684 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
3685 finally:
tierno067e04a2020-03-31 12:53:13 +00003686 if exc:
3687 db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
kuuse0ca67472019-05-13 15:59:27 +02003688 "FAILED {}: {}".format(step, exc)
tierno067e04a2020-03-31 12:53:13 +00003689 nslcmop_operation_state = "FAILED"
3690 if db_nsr:
3691 self._write_ns_status(
3692 nsr_id=nsr_id,
3693 ns_state=db_nsr["nsState"], # TODO check if degraded. For the moment use previous status
3694 current_operation="IDLE",
3695 current_operation_id=None,
3696 # error_description=error_description_nsr,
3697 # error_detail=error_detail,
3698 other_update=db_nsr_update
3699 )
3700
tiernoa17d4f42020-04-28 09:59:23 +00003701 self._write_op_status(
3702 op_id=nslcmop_id,
3703 stage="",
3704 error_message=error_description_nslcmop,
3705 operation_state=nslcmop_operation_state,
3706 other_update=db_nslcmop_update,
3707 )
tierno067e04a2020-03-31 12:53:13 +00003708
tierno59d22d22018-09-25 18:10:19 +02003709 if nslcmop_operation_state:
3710 try:
3711 await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00003712 "operationState": nslcmop_operation_state},
3713 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003714 except Exception as e:
3715 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
3716 self.logger.debug(logging_text + "Exit")
3717 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
tierno067e04a2020-03-31 12:53:13 +00003718 return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02003719
3720 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003721
3722 # Try to lock HA task here
3723 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3724 if not task_is_locked_by_me:
3725 return
3726
tierno59d22d22018-09-25 18:10:19 +02003727 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
3728 self.logger.debug(logging_text + "Enter")
3729 # get all needed from database
3730 db_nsr = None
3731 db_nslcmop = None
3732 db_nslcmop_update = {}
3733 nslcmop_operation_state = None
tiernoe876f672020-02-13 14:34:48 +00003734 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003735 exc = None
tierno9ab95942018-10-10 16:44:22 +02003736 # in case of error, indicates what part of scale was failed to put nsr at error status
3737 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02003738 old_operational_status = ""
3739 old_config_status = ""
tiernof578e552018-11-08 19:07:20 +01003740 vnfr_scaled = False
tierno59d22d22018-09-25 18:10:19 +02003741 try:
kuused124bfe2019-06-18 12:09:24 +02003742 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003743 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003744 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
tierno47e86b52018-10-10 14:05:55 +02003745
quilesj4cda56b2019-12-05 10:02:20 +00003746 self._write_ns_status(
3747 nsr_id=nsr_id,
3748 ns_state=None,
3749 current_operation="SCALING",
3750 current_operation_id=nslcmop_id
3751 )
3752
ikalyvas02d9e7b2019-05-27 18:16:01 +03003753 step = "Getting nslcmop from database"
ikalyvas02d9e7b2019-05-27 18:16:01 +03003754 self.logger.debug(step + " after having waited for previous tasks to be completed")
3755 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3756 step = "Getting nsr from database"
3757 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3758
3759 old_operational_status = db_nsr["operational-status"]
3760 old_config_status = db_nsr["config-status"]
tierno59d22d22018-09-25 18:10:19 +02003761 step = "Parsing scaling parameters"
tierno9babfda2019-06-07 12:36:50 +00003762 # self.logger.debug(step)
tierno59d22d22018-09-25 18:10:19 +02003763 db_nsr_update["operational-status"] = "scaling"
3764 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00003765 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003766
3767 #######
3768 nsr_deployed = db_nsr["_admin"].get("deployed")
3769 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tiernoda6fb102019-11-23 00:36:52 +00003770 # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3771 # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3772 # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003773 #######
3774
tiernoe4f7e6c2018-11-27 14:55:30 +00003775 RO_nsr_id = nsr_deployed["RO"]["nsr_id"]
tierno59d22d22018-09-25 18:10:19 +02003776 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
3777 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
3778 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
3779 # scaling_policy = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"].get("scaling-policy")
3780
tierno82974b22018-11-27 21:55:36 +00003781 # for backward compatibility
3782 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3783 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3784 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3785 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3786
tierno59d22d22018-09-25 18:10:19 +02003787 step = "Getting vnfr from database"
3788 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3789 step = "Getting vnfd from database"
3790 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03003791
tierno59d22d22018-09-25 18:10:19 +02003792 step = "Getting scaling-group-descriptor"
3793 for scaling_descriptor in db_vnfd["scaling-group-descriptor"]:
3794 if scaling_descriptor["name"] == scaling_group:
3795 break
3796 else:
3797 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
3798 "at vnfd:scaling-group-descriptor".format(scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003799
tierno59d22d22018-09-25 18:10:19 +02003800 # cooldown_time = 0
3801 # for scaling_policy_descriptor in scaling_descriptor.get("scaling-policy", ()):
3802 # cooldown_time = scaling_policy_descriptor.get("cooldown-time", 0)
3803 # if scaling_policy and scaling_policy == scaling_policy_descriptor.get("name"):
3804 # break
3805
3806 # TODO check if ns is in a proper status
tierno15b1cf12019-08-29 13:21:40 +00003807 step = "Sending scale order to VIM"
tierno59d22d22018-09-25 18:10:19 +02003808 nb_scale_op = 0
3809 if not db_nsr["_admin"].get("scaling-group"):
3810 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
3811 admin_scale_index = 0
3812 else:
3813 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
3814 if admin_scale_info["name"] == scaling_group:
3815 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
3816 break
tierno9ab95942018-10-10 16:44:22 +02003817 else: # not found, set index one plus last element and add new entry with the name
3818 admin_scale_index += 1
3819 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
tierno59d22d22018-09-25 18:10:19 +02003820 RO_scaling_info = []
3821 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
3822 if scaling_type == "SCALE_OUT":
3823 # count if max-instance-count is reached
kuuse818d70c2019-08-07 14:43:44 +02003824 max_instance_count = scaling_descriptor.get("max-instance-count", 10)
3825 # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count))
3826 if nb_scale_op >= max_instance_count:
3827 raise LcmException("reached the limit of {} (max-instance-count) "
3828 "scaling-out operations for the "
3829 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
kuuse8b998e42019-07-30 15:22:16 +02003830
ikalyvas02d9e7b2019-05-27 18:16:01 +03003831 nb_scale_op += 1
tierno59d22d22018-09-25 18:10:19 +02003832 vdu_scaling_info["scaling_direction"] = "OUT"
3833 vdu_scaling_info["vdu-create"] = {}
3834 for vdu_scale_info in scaling_descriptor["vdu"]:
3835 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3836 "type": "create", "count": vdu_scale_info.get("count", 1)})
3837 vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
ikalyvas02d9e7b2019-05-27 18:16:01 +03003838
tierno59d22d22018-09-25 18:10:19 +02003839 elif scaling_type == "SCALE_IN":
3840 # count if min-instance-count is reached
tierno27246d82018-09-27 15:59:09 +02003841 min_instance_count = 0
tierno59d22d22018-09-25 18:10:19 +02003842 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
3843 min_instance_count = int(scaling_descriptor["min-instance-count"])
tierno9babfda2019-06-07 12:36:50 +00003844 if nb_scale_op <= min_instance_count:
3845 raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the "
3846 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003847 nb_scale_op -= 1
tierno59d22d22018-09-25 18:10:19 +02003848 vdu_scaling_info["scaling_direction"] = "IN"
3849 vdu_scaling_info["vdu-delete"] = {}
3850 for vdu_scale_info in scaling_descriptor["vdu"]:
3851 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3852 "type": "delete", "count": vdu_scale_info.get("count", 1)})
3853 vdu_scaling_info["vdu-delete"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
3854
3855 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
tierno27246d82018-09-27 15:59:09 +02003856 vdu_create = vdu_scaling_info.get("vdu-create")
3857 vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
tierno59d22d22018-09-25 18:10:19 +02003858 if vdu_scaling_info["scaling_direction"] == "IN":
3859 for vdur in reversed(db_vnfr["vdur"]):
tierno27246d82018-09-27 15:59:09 +02003860 if vdu_delete.get(vdur["vdu-id-ref"]):
3861 vdu_delete[vdur["vdu-id-ref"]] -= 1
tierno59d22d22018-09-25 18:10:19 +02003862 vdu_scaling_info["vdu"].append({
3863 "name": vdur["name"],
3864 "vdu_id": vdur["vdu-id-ref"],
3865 "interface": []
3866 })
3867 for interface in vdur["interfaces"]:
3868 vdu_scaling_info["vdu"][-1]["interface"].append({
3869 "name": interface["name"],
3870 "ip_address": interface["ip-address"],
3871 "mac_address": interface.get("mac-address"),
3872 })
tierno27246d82018-09-27 15:59:09 +02003873 vdu_delete = vdu_scaling_info.pop("vdu-delete")
tierno59d22d22018-09-25 18:10:19 +02003874
kuuseac3a8882019-10-03 10:48:06 +02003875 # PRE-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02003876 step = "Executing pre-scale vnf-config-primitive"
3877 if scaling_descriptor.get("scaling-config-action"):
3878 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02003879 if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
3880 or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02003881 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
3882 step = db_nslcmop_update["detailed-status"] = \
3883 "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00003884
tierno59d22d22018-09-25 18:10:19 +02003885 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02003886 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
3887 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02003888 break
3889 else:
3890 raise LcmException(
3891 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
tiernoda964822019-01-14 15:53:47 +00003892 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
tierno4fa7f8e2020-07-08 15:33:55 +00003893 "primitive".format(scaling_group, vnf_config_primitive))
tiernoda964822019-01-14 15:53:47 +00003894
tierno16fedf52019-05-24 08:38:26 +00003895 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00003896 if db_vnfr.get("additionalParamsForVnf"):
3897 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
quilesj7e13aeb2019-10-08 13:34:55 +02003898
tierno9ab95942018-10-10 16:44:22 +02003899 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02003900 db_nsr_update["config-status"] = "configuring pre-scaling"
kuuseac3a8882019-10-03 10:48:06 +02003901 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
3902
tierno7c4e24c2020-05-13 08:41:35 +00003903 # Pre-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02003904 op_index = self._check_or_add_scale_suboperation(
3905 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
tierno7c4e24c2020-05-13 08:41:35 +00003906 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02003907 # Skip sub-operation
3908 result = 'COMPLETED'
3909 result_detail = 'Done'
3910 self.logger.debug(logging_text +
3911 "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
3912 vnf_config_primitive, result, result_detail))
3913 else:
tierno7c4e24c2020-05-13 08:41:35 +00003914 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02003915 # New sub-operation: Get index of this sub-operation
3916 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3917 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
3918 format(vnf_config_primitive))
3919 else:
tierno7c4e24c2020-05-13 08:41:35 +00003920 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02003921 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3922 vnf_index = op.get('member_vnf_index')
3923 vnf_config_primitive = op.get('primitive')
3924 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00003925 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02003926 format(vnf_config_primitive))
tierno588547c2020-07-01 15:30:20 +00003927 # Execute the primitive, either with new (first-time) or registered (reintent) args
tierno4fa7f8e2020-07-08 15:33:55 +00003928 ee_descriptor_id = config_primitive.get("execution-environment-ref")
3929 primitive_name = config_primitive.get("execution-environment-primitive",
3930 vnf_config_primitive)
tierno588547c2020-07-01 15:30:20 +00003931 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
3932 member_vnf_index=vnf_index,
3933 vdu_id=None,
tierno4fa7f8e2020-07-08 15:33:55 +00003934 vdu_count_index=None,
3935 ee_descriptor_id=ee_descriptor_id)
kuuseac3a8882019-10-03 10:48:06 +02003936 result, result_detail = await self._ns_execute_primitive(
tierno4fa7f8e2020-07-08 15:33:55 +00003937 ee_id, primitive_name, primitive_params, vca_type)
kuuseac3a8882019-10-03 10:48:06 +02003938 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
3939 vnf_config_primitive, result, result_detail))
3940 # Update operationState = COMPLETED | FAILED
3941 self._update_suboperation_status(
3942 db_nslcmop, op_index, result, result_detail)
3943
tierno59d22d22018-09-25 18:10:19 +02003944 if result == "FAILED":
3945 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02003946 db_nsr_update["config-status"] = old_config_status
3947 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02003948 # PRE-SCALE END
tierno59d22d22018-09-25 18:10:19 +02003949
kuuseac3a8882019-10-03 10:48:06 +02003950 # SCALE RO - BEGIN
3951 # Should this block be skipped if 'RO_nsr_id' == None ?
3952 # if (RO_nsr_id and RO_scaling_info):
tierno59d22d22018-09-25 18:10:19 +02003953 if RO_scaling_info:
tierno9ab95942018-10-10 16:44:22 +02003954 scale_process = "RO"
tierno7c4e24c2020-05-13 08:41:35 +00003955 # Scale RO retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02003956 op_index = self._check_or_add_scale_suboperation(
3957 db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
tierno7c4e24c2020-05-13 08:41:35 +00003958 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02003959 # Skip sub-operation
3960 result = 'COMPLETED'
3961 result_detail = 'Done'
3962 self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(
3963 result, result_detail))
3964 else:
tierno7c4e24c2020-05-13 08:41:35 +00003965 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02003966 # New sub-operation: Get index of this sub-operation
3967 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3968 self.logger.debug(logging_text + "New sub-operation RO")
tierno59d22d22018-09-25 18:10:19 +02003969 else:
tierno7c4e24c2020-05-13 08:41:35 +00003970 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02003971 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3972 RO_nsr_id = op.get('RO_nsr_id')
3973 RO_scaling_info = op.get('RO_scaling_info')
tierno7c4e24c2020-05-13 08:41:35 +00003974 self.logger.debug(logging_text + "Sub-operation RO retry for primitive {}".format(
kuuseac3a8882019-10-03 10:48:06 +02003975 vnf_config_primitive))
3976
3977 RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
3978 db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
3979 db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
3980 # wait until ready
3981 RO_nslcmop_id = RO_desc["instance_action_id"]
3982 db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id
3983
3984 RO_task_done = False
3985 step = detailed_status = "Waiting RO_task_id={} to complete the scale action.".format(RO_nslcmop_id)
3986 detailed_status_old = None
3987 self.logger.debug(logging_text + step)
3988
3989 deployment_timeout = 1 * 3600 # One hour
3990 while deployment_timeout > 0:
3991 if not RO_task_done:
3992 desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
3993 extra_item_id=RO_nslcmop_id)
quilesj3655ae02019-12-12 16:08:35 +00003994
3995 # deploymentStatus
3996 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
3997
kuuseac3a8882019-10-03 10:48:06 +02003998 ns_status, ns_status_info = self.RO.check_action_status(desc)
3999 if ns_status == "ERROR":
4000 raise ROclient.ROClientException(ns_status_info)
4001 elif ns_status == "BUILD":
4002 detailed_status = step + "; {}".format(ns_status_info)
4003 elif ns_status == "ACTIVE":
4004 RO_task_done = True
4005 step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
4006 self.logger.debug(logging_text + step)
4007 else:
4008 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
tierno59d22d22018-09-25 18:10:19 +02004009 else:
quilesj7e13aeb2019-10-08 13:34:55 +02004010
kuuseac3a8882019-10-03 10:48:06 +02004011 if ns_status == "ERROR":
4012 raise ROclient.ROClientException(ns_status_info)
4013 elif ns_status == "BUILD":
4014 detailed_status = step + "; {}".format(ns_status_info)
4015 elif ns_status == "ACTIVE":
4016 step = detailed_status = \
4017 "Waiting for management IP address reported by the VIM. Updating VNFRs"
4018 if not vnfr_scaled:
4019 self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
4020 vnfr_scaled = True
4021 try:
4022 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00004023
4024 # deploymentStatus
4025 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4026
kuuseac3a8882019-10-03 10:48:06 +02004027 # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
4028 self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
4029 break
4030 except LcmExceptionNoMgmtIP:
4031 pass
4032 else:
4033 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
4034 if detailed_status != detailed_status_old:
4035 self._update_suboperation_status(
4036 db_nslcmop, op_index, 'COMPLETED', detailed_status)
4037 detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
4038 self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
tierno59d22d22018-09-25 18:10:19 +02004039
kuuseac3a8882019-10-03 10:48:06 +02004040 await asyncio.sleep(5, loop=self.loop)
4041 deployment_timeout -= 5
4042 if deployment_timeout <= 0:
4043 self._update_suboperation_status(
4044 db_nslcmop, nslcmop_id, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
4045 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tierno59d22d22018-09-25 18:10:19 +02004046
kuuseac3a8882019-10-03 10:48:06 +02004047 # update VDU_SCALING_INFO with the obtained ip_addresses
4048 if vdu_scaling_info["scaling_direction"] == "OUT":
4049 for vdur in reversed(db_vnfr["vdur"]):
4050 if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
4051 vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
4052 vdu_scaling_info["vdu"].append({
4053 "name": vdur["name"],
4054 "vdu_id": vdur["vdu-id-ref"],
4055 "interface": []
tierno59d22d22018-09-25 18:10:19 +02004056 })
kuuseac3a8882019-10-03 10:48:06 +02004057 for interface in vdur["interfaces"]:
4058 vdu_scaling_info["vdu"][-1]["interface"].append({
4059 "name": interface["name"],
4060 "ip_address": interface["ip-address"],
4061 "mac_address": interface.get("mac-address"),
4062 })
4063 del vdu_scaling_info["vdu-create"]
4064
4065 self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
4066 # SCALE RO - END
tierno59d22d22018-09-25 18:10:19 +02004067
tierno9ab95942018-10-10 16:44:22 +02004068 scale_process = None
tierno59d22d22018-09-25 18:10:19 +02004069 if db_nsr_update:
4070 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4071
kuuseac3a8882019-10-03 10:48:06 +02004072 # POST-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02004073 # execute primitive service POST-SCALING
4074 step = "Executing post-scale vnf-config-primitive"
4075 if scaling_descriptor.get("scaling-config-action"):
4076 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02004077 if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
4078 or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02004079 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4080 step = db_nslcmop_update["detailed-status"] = \
4081 "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00004082
tierno589befb2019-05-29 07:06:23 +00004083 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00004084 if db_vnfr.get("additionalParamsForVnf"):
4085 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
4086
tierno59d22d22018-09-25 18:10:19 +02004087 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02004088 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
4089 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02004090 break
4091 else:
tierno4fa7f8e2020-07-08 15:33:55 +00004092 raise LcmException(
4093 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
4094 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
4095 "config-primitive".format(scaling_group, vnf_config_primitive))
tierno9ab95942018-10-10 16:44:22 +02004096 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02004097 db_nsr_update["config-status"] = "configuring post-scaling"
kuuseac3a8882019-10-03 10:48:06 +02004098 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
tiernod6de1992018-10-11 13:05:52 +02004099
tierno7c4e24c2020-05-13 08:41:35 +00004100 # Post-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004101 op_index = self._check_or_add_scale_suboperation(
4102 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
quilesj4cda56b2019-12-05 10:02:20 +00004103 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004104 # Skip sub-operation
4105 result = 'COMPLETED'
4106 result_detail = 'Done'
4107 self.logger.debug(logging_text +
4108 "vnf_config_primitive={} Skipped sub-operation, result {} {}".
4109 format(vnf_config_primitive, result, result_detail))
4110 else:
quilesj4cda56b2019-12-05 10:02:20 +00004111 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004112 # New sub-operation: Get index of this sub-operation
4113 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4114 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4115 format(vnf_config_primitive))
4116 else:
tierno7c4e24c2020-05-13 08:41:35 +00004117 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004118 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4119 vnf_index = op.get('member_vnf_index')
4120 vnf_config_primitive = op.get('primitive')
4121 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00004122 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02004123 format(vnf_config_primitive))
tierno588547c2020-07-01 15:30:20 +00004124 # Execute the primitive, either with new (first-time) or registered (reintent) args
tierno4fa7f8e2020-07-08 15:33:55 +00004125 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4126 primitive_name = config_primitive.get("execution-environment-primitive",
4127 vnf_config_primitive)
tierno588547c2020-07-01 15:30:20 +00004128 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4129 member_vnf_index=vnf_index,
4130 vdu_id=None,
tierno4fa7f8e2020-07-08 15:33:55 +00004131 vdu_count_index=None,
4132 ee_descriptor_id=ee_descriptor_id)
kuuseac3a8882019-10-03 10:48:06 +02004133 result, result_detail = await self._ns_execute_primitive(
tierno4fa7f8e2020-07-08 15:33:55 +00004134 ee_id, primitive_name, primitive_params, vca_type)
kuuseac3a8882019-10-03 10:48:06 +02004135 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4136 vnf_config_primitive, result, result_detail))
4137 # Update operationState = COMPLETED | FAILED
4138 self._update_suboperation_status(
4139 db_nslcmop, op_index, result, result_detail)
4140
tierno59d22d22018-09-25 18:10:19 +02004141 if result == "FAILED":
4142 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02004143 db_nsr_update["config-status"] = old_config_status
4144 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02004145 # POST-SCALE END
tierno59d22d22018-09-25 18:10:19 +02004146
tiernod6de1992018-10-11 13:05:52 +02004147 db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type)
ikalyvas02d9e7b2019-05-27 18:16:01 +03004148 db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
4149 else old_operational_status
tiernod6de1992018-10-11 13:05:52 +02004150 db_nsr_update["config-status"] = old_config_status
tierno59d22d22018-09-25 18:10:19 +02004151 return
4152 except (ROclient.ROClientException, DbException, LcmException) as e:
4153 self.logger.error(logging_text + "Exit Exception {}".format(e))
4154 exc = e
4155 except asyncio.CancelledError:
4156 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
4157 exc = "Operation was cancelled"
4158 except Exception as e:
4159 exc = traceback.format_exc()
4160 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
4161 finally:
quilesj3655ae02019-12-12 16:08:35 +00004162 self._write_ns_status(
4163 nsr_id=nsr_id,
4164 ns_state=None,
4165 current_operation="IDLE",
4166 current_operation_id=None
4167 )
tierno59d22d22018-09-25 18:10:19 +02004168 if exc:
tiernoa17d4f42020-04-28 09:59:23 +00004169 db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
4170 nslcmop_operation_state = "FAILED"
tierno59d22d22018-09-25 18:10:19 +02004171 if db_nsr:
tiernod6de1992018-10-11 13:05:52 +02004172 db_nsr_update["operational-status"] = old_operational_status
4173 db_nsr_update["config-status"] = old_config_status
4174 db_nsr_update["detailed-status"] = ""
4175 if scale_process:
4176 if "VCA" in scale_process:
4177 db_nsr_update["config-status"] = "failed"
4178 if "RO" in scale_process:
4179 db_nsr_update["operational-status"] = "failed"
4180 db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
4181 exc)
tiernoa17d4f42020-04-28 09:59:23 +00004182 else:
4183 error_description_nslcmop = None
4184 nslcmop_operation_state = "COMPLETED"
4185 db_nslcmop_update["detailed-status"] = "Done"
quilesj4cda56b2019-12-05 10:02:20 +00004186
tiernoa17d4f42020-04-28 09:59:23 +00004187 self._write_op_status(
4188 op_id=nslcmop_id,
4189 stage="",
4190 error_message=error_description_nslcmop,
4191 operation_state=nslcmop_operation_state,
4192 other_update=db_nslcmop_update,
4193 )
4194 if db_nsr:
4195 self._write_ns_status(
4196 nsr_id=nsr_id,
4197 ns_state=None,
4198 current_operation="IDLE",
4199 current_operation_id=None,
4200 other_update=db_nsr_update
4201 )
4202
tierno59d22d22018-09-25 18:10:19 +02004203 if nslcmop_operation_state:
4204 try:
4205 await self.msg.aiowrite("ns", "scaled", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00004206 "operationState": nslcmop_operation_state},
4207 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02004208 # if cooldown_time:
tiernod8323042019-08-09 11:32:23 +00004209 # await asyncio.sleep(cooldown_time, loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02004210 # await self.msg.aiowrite("ns","scaled-cooldown-time", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id})
4211 except Exception as e:
4212 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
4213 self.logger.debug(logging_text + "Exit")
4214 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
tierno89f82902020-07-03 14:52:28 +00004215
4216 async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip):
4217 if not self.prometheus:
4218 return
4219 # look if exist a file called 'prometheus*.j2' and
4220 artifact_content = self.fs.dir_ls(artifact_path)
4221 job_file = next((f for f in artifact_content if f.startswith("prometheus") and f.endswith(".j2")), None)
4222 if not job_file:
4223 return
4224 with self.fs.file_open((artifact_path, job_file), "r") as f:
4225 job_data = f.read()
4226
4227 # TODO get_service
4228 _, _, service = ee_id.partition(".") # remove prefix "namespace."
4229 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
4230 host_port = "80"
4231 vnfr_id = vnfr_id.replace("-", "")
4232 variables = {
4233 "JOB_NAME": vnfr_id,
4234 "TARGET_IP": target_ip,
4235 "EXPORTER_POD_IP": host_name,
4236 "EXPORTER_POD_PORT": host_port,
4237 }
4238 job_list = self.prometheus.parse_job(job_data, variables)
4239 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
4240 for job in job_list:
4241 if not isinstance(job.get("job_name"), str) or vnfr_id not in job["job_name"]:
4242 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
4243 job["nsr_id"] = nsr_id
4244 job_dict = {jl["job_name"]: jl for jl in job_list}
4245 if await self.prometheus.update(job_dict):
4246 return list(job_dict.keys())