blob: 2a0e8b40c0a3311c4deed90937e5a9a619330982 [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
20import yaml
21import logging
22import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020023import traceback
David Garciad4816682019-12-09 14:57:43 +010024import json
tiernof7b42112020-10-06 08:22:07 +000025from jinja2 import Environment, TemplateError, TemplateNotFound, StrictUndefined, UndefinedError
tierno59d22d22018-09-25 18:10:19 +020026
tierno77677d92019-08-22 13:46:35 +000027from osm_lcm import ROclient
tierno69f0d382020-05-07 13:08:09 +000028from osm_lcm.ng_ro import NgRoClient, NgRoException
tierno744303e2020-01-13 16:46:31 +000029from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
calvinosanch9f9c6f22019-11-04 13:37:39 +010030from n2vc.k8s_helm_conn import K8sHelmConnector
Adam Israelbaacc302019-12-01 12:41:39 -050031from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020032
tierno27246d82018-09-27 15:59:09 +020033from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +020034from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +020035
36from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +000037from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +020038
tierno588547c2020-07-01 15:30:20 +000039from osm_lcm.lcm_helm_conn import LCMHelmConn
40
tierno27246d82018-09-27 15:59:09 +020041from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +020042from http import HTTPStatus
43from time import time
tierno27246d82018-09-27 15:59:09 +020044from uuid import uuid4
lloretgalleg80ad9212020-07-08 07:53:22 +000045
tierno89f82902020-07-03 14:52:28 +000046from random import randint
tierno59d22d22018-09-25 18:10:19 +020047
tierno69f0d382020-05-07 13:08:09 +000048__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
tierno59d22d22018-09-25 18:10:19 +020049
50
class N2VCJujuConnectorLCM(N2VCJujuConnector):
    """N2VC juju connector specialized for LCM.

    Extends the parent connector admitting two extra keyword parameters,
    artifact_path and vca_type, so that "k8s_proxy_charm" deployments are
    routed to install_k8s_proxy_charm instead of the standard juju flow.
    """

    async def create_execution_environment(self, namespace: str, db_dict: dict, reuse_ee_id: str = None,
                                           progress_timeout: float = None, total_timeout: float = None,
                                           config: dict = None, artifact_path: str = None,
                                           vca_type: str = None) -> (str, dict):
        # Anything that is not a k8s proxy charm follows the parent behavior
        if vca_type != "k8s_proxy_charm":
            return await super().create_execution_environment(
                namespace=namespace, db_dict=db_dict, reuse_ee_id=reuse_ee_id,
                progress_timeout=progress_timeout, total_timeout=total_timeout)

        # k8s proxy charm: the charm name is the last path component of the artifact
        charm_name = artifact_path[artifact_path.rfind("/") + 1:]
        execution_environment_id = await self.install_k8s_proxy_charm(
            charm_name=charm_name,
            namespace=namespace,
            artifact_path=artifact_path,
            db_dict=db_dict)
        # parent returns (ee_id, credentials); there are no credentials here
        return execution_environment_id, None

    async def install_configuration_sw(self, ee_id: str, artifact_path: str, db_dict: dict,
                                       progress_timeout: float = None, total_timeout: float = None,
                                       config: dict = None, num_units: int = 1, vca_type: str = "lxc_proxy_charm"):
        # A k8s proxy charm already carries its software: nothing to install
        if vca_type == "k8s_proxy_charm":
            return
        return await super().install_configuration_sw(
            ee_id=ee_id, artifact_path=artifact_path, db_dict=db_dict, progress_timeout=progress_timeout,
            total_timeout=total_timeout, config=config, num_units=num_units)
78
79
class NsLcm(LcmBase):
    """Network Service lifecycle manager.

    Holds the connectors used to drive a NS through its lifecycle: the RO
    client (classic or NG), the N2VC juju connector, the helm execution
    environment connector, and the k8s (helm/juju) cluster connectors.
    """
    timeout_vca_on_error = 5 * 60   # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600   # default global timeout for deployment a ns
    timeout_ns_terminate = 1800   # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60   # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_progress_primitive = 10 * 60   # timeout for some progress in a primitive execution

    # Negative sentinel codes (cannot collide with list indexes) — presumably
    # returned by suboperation-lookup helpers elsewhere in the class; verify there.
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
kuuseac3a8882019-10-03 10:48:06 +020092
    def __init__(self, db, msg, fs, lcm_tasks, config, loop, prometheus=None):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param db: database connector
        :param msg: message bus connector
        :param fs: filesystem storage connector
        :param lcm_tasks: task registry shared with the LCM core
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
            plus the 'timeout', 'ro_config' and 'VCA' sections read below
        :param loop: asyncio event loop
        :param prometheus: optional prometheus connector (kept as-is, used elsewhere in the class)
        :return: None
        """
        super().__init__(
            db=db,
            msg=msg,
            fs=fs,
            logger=logging.getLogger('lcm.ns')
        )

        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        # "ng" selects the next-generation RO client below
        self.ng_ro = config["ro_config"].get("ng")
        # copy: the VCA config is shared and must not be mutated for other users
        self.vca_config = config["VCA"].copy()

        # create N2VC connector
        self.n2vc = N2VCJujuConnectorLCM(
            db=self.db,
            fs=self.fs,
            log=self.logger,
            loop=self.loop,
            url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
            username=self.vca_config.get('user', None),
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db
        )

        # helm-based execution environment connector (shares the n2vc db callback)
        self.conn_helm_ee = LCMHelmConn(
            db=self.db,
            fs=self.fs,
            log=self.logger,
            loop=self.loop,
            url=None,
            username=None,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db
        )

        # k8s cluster connector for helm charts
        self.k8sclusterhelm = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # k8s cluster connector for juju bundles
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # descriptor kdu-type string -> k8s connector (both long and short spellings)
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm,
            "chart": self.k8sclusterhelm,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # vca type string -> execution environment connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee
        }

        self.prometheus = prometheus

        # create RO client
        if self.ng_ro:
            self.RO = NgRoClient(self.loop, **self.ro_config)
        else:
            self.RO = ROclient.ROClient(self.loop, **self.ro_config)
tierno59d22d22018-09-25 18:10:19 +0200175
quilesj3655ae02019-12-12 16:08:35 +0000176 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200177
quilesj3655ae02019-12-12 16:08:35 +0000178 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
179
180 try:
181 # TODO filter RO descriptor fields...
182
183 # write to database
184 db_dict = dict()
185 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
186 db_dict['deploymentStatus'] = ro_descriptor
187 self.update_db_2("nsrs", nsrs_id, db_dict)
188
189 except Exception as e:
190 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
191
192 async def _on_update_n2vc_db(self, table, filter, path, updated_data):
193
quilesj69a722c2020-01-09 08:30:17 +0000194 # remove last dot from path (if exists)
195 if path.endswith('.'):
196 path = path[:-1]
197
quilesj3655ae02019-12-12 16:08:35 +0000198 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
199 # .format(table, filter, path, updated_data))
200
201 try:
202
203 nsr_id = filter.get('_id')
204
205 # read ns record from database
206 nsr = self.db.get_one(table='nsrs', q_filter=filter)
207 current_ns_status = nsr.get('nsState')
208
209 # get vca status for NS
quilesj69a722c2020-01-09 08:30:17 +0000210 status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
quilesj3655ae02019-12-12 16:08:35 +0000211
212 # vcaStatus
213 db_dict = dict()
214 db_dict['vcaStatus'] = status_dict
215
216 # update configurationStatus for this VCA
217 try:
218 vca_index = int(path[path.rfind(".")+1:])
219
220 vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
221 vca_status = vca_list[vca_index].get('status')
222
223 configuration_status_list = nsr.get('configurationStatus')
224 config_status = configuration_status_list[vca_index].get('status')
225
226 if config_status == 'BROKEN' and vca_status != 'failed':
227 db_dict['configurationStatus'][vca_index] = 'READY'
228 elif config_status != 'BROKEN' and vca_status == 'failed':
229 db_dict['configurationStatus'][vca_index] = 'BROKEN'
230 except Exception as e:
231 # not update configurationStatus
232 self.logger.debug('Error updating vca_index (ignore): {}'.format(e))
233
234 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
235 # if nsState = 'DEGRADED' check if all is OK
236 is_degraded = False
237 if current_ns_status in ('READY', 'DEGRADED'):
238 error_description = ''
239 # check machines
240 if status_dict.get('machines'):
241 for machine_id in status_dict.get('machines'):
242 machine = status_dict.get('machines').get(machine_id)
243 # check machine agent-status
244 if machine.get('agent-status'):
245 s = machine.get('agent-status').get('status')
246 if s != 'started':
247 is_degraded = True
248 error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
249 # check machine instance status
250 if machine.get('instance-status'):
251 s = machine.get('instance-status').get('status')
252 if s != 'running':
253 is_degraded = True
254 error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
255 # check applications
256 if status_dict.get('applications'):
257 for app_id in status_dict.get('applications'):
258 app = status_dict.get('applications').get(app_id)
259 # check application status
260 if app.get('status'):
261 s = app.get('status').get('status')
262 if s != 'active':
263 is_degraded = True
264 error_description += 'application {} status={} ; '.format(app_id, s)
265
266 if error_description:
267 db_dict['errorDescription'] = error_description
268 if current_ns_status == 'READY' and is_degraded:
269 db_dict['nsState'] = 'DEGRADED'
270 if current_ns_status == 'DEGRADED' and not is_degraded:
271 db_dict['nsState'] = 'READY'
272
273 # write to database
274 self.update_db_2("nsrs", nsr_id, db_dict)
275
tierno51183952020-04-03 15:48:18 +0000276 except (asyncio.CancelledError, asyncio.TimeoutError):
277 raise
quilesj3655ae02019-12-12 16:08:35 +0000278 except Exception as e:
279 self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200280
tiernof7b42112020-10-06 08:22:07 +0000281 @staticmethod
282 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
283 try:
284 env = Environment(undefined=StrictUndefined)
285 template = env.from_string(cloud_init_text)
286 return template.render(additional_params or {})
287 except UndefinedError as e:
288 raise LcmException("Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
289 "file, must be provided in the instantiation parameters inside the "
290 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id))
291 except (TemplateError, TemplateNotFound) as e:
292 raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
293 format(vnfd_id, vdu_id, e))
294
295 def _get_cloud_init(self, vdu, vnfd):
296 try:
297 cloud_init_content = cloud_init_file = None
298 if vdu.get("cloud-init-file"):
299 base_folder = vnfd["_admin"]["storage"]
300 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
301 vdu["cloud-init-file"])
302 with self.fs.file_open(cloud_init_file, "r") as ci_file:
303 cloud_init_content = ci_file.read()
304 elif vdu.get("cloud-init"):
305 cloud_init_content = vdu["cloud-init"]
306
307 return cloud_init_content
308 except FsException as e:
309 raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
310 format(vnfd["id"], vdu["id"], cloud_init_file, e))
311
312 def _get_osm_params(self, db_vnfr, vdu_id=None, vdu_count_index=0):
313 osm_params = {x.replace("-", "_"): db_vnfr[x] for x in ("ip-address", "vim-account-id", "vnfd-id", "vnfd-ref")
314 if db_vnfr.get(x) is not None}
315 osm_params["ns_id"] = db_vnfr["nsr-id-ref"]
316 osm_params["vnf_id"] = db_vnfr["_id"]
317 osm_params["member_vnf_index"] = db_vnfr["member-vnf-index-ref"]
318 if db_vnfr.get("vdur"):
319 osm_params["vdu"] = {}
320 for vdur in db_vnfr["vdur"]:
321 vdu = {
322 "count_index": vdur["count-index"],
323 "vdu_id": vdur["vdu-id-ref"],
324 "interfaces": {}
325 }
326 if vdur.get("ip-address"):
327 vdu["ip_address"] = vdur["ip-address"]
328 for iface in vdur["interfaces"]:
329 vdu["interfaces"][iface["name"]] = \
330 {x.replace("-", "_"): iface[x] for x in ("mac-address", "ip-address", "vnf-vld-id", "name")
331 if iface.get(x) is not None}
332 vdu_id_index = "{}-{}".format(vdur["vdu-id-ref"], vdur["count-index"])
333 osm_params["vdu"][vdu_id_index] = vdu
334 if vdu_id:
335 osm_params["vdu_id"] = vdu_id
336 osm_params["count_index"] = vdu_count_index
337 return osm_params
338
339 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
340 vdur = next(vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"])
341 additional_params = vdur.get("additionalParams")
342 return self._format_additional_params(additional_params)
343
gcalvino35be9152018-12-20 09:33:12 +0100344 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
tierno59d22d22018-09-25 18:10:19 +0200345 """
346 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
347 :param vnfd: input vnfd
348 :param new_id: overrides vnf id if provided
tierno8a518872018-12-21 13:42:14 +0000349 :param additionalParams: Instantiation params for VNFs provided
gcalvino35be9152018-12-20 09:33:12 +0100350 :param nsrId: Id of the NSR
tierno59d22d22018-09-25 18:10:19 +0200351 :return: copy of vnfd
352 """
tiernof7b42112020-10-06 08:22:07 +0000353 vnfd_RO = deepcopy(vnfd)
354 # remove unused by RO configuration, monitoring, scaling and internal keys
355 vnfd_RO.pop("_id", None)
356 vnfd_RO.pop("_admin", None)
357 vnfd_RO.pop("vnf-configuration", None)
358 vnfd_RO.pop("monitoring-param", None)
359 vnfd_RO.pop("scaling-group-descriptor", None)
360 vnfd_RO.pop("kdu", None)
361 vnfd_RO.pop("k8s-cluster", None)
362 if new_id:
363 vnfd_RO["id"] = new_id
tierno8a518872018-12-21 13:42:14 +0000364
tiernof7b42112020-10-06 08:22:07 +0000365 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
366 for vdu in get_iterable(vnfd_RO, "vdu"):
367 vdu.pop("cloud-init-file", None)
368 vdu.pop("cloud-init", None)
369 return vnfd_RO
tierno59d22d22018-09-25 18:10:19 +0200370
    def _ns_params_2_RO(self, ns_params, nsd, vnfd_dict, db_vnfrs, n2vc_key_list):
        """
        Creates a RO ns descriptor from OSM ns_instantiate params
        :param ns_params: OSM instantiate params
        :param nsd: nsd database content, used to resolve constituent-vnfd references
        :param vnfd_dict: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index. {member-vnf-index: {vnfr_object}, ...}
        :param n2vc_key_list: ssh public keys to inject on VDUs that need management access
        :return: The RO ns descriptor
        """
        # memoization caches for the nested account translators below
        vim_2_RO = {}
        wim_2_RO = {}
        # TODO feature 1417: Check that no instantiation is set over PDU
        # check if PDU forces a concrete vim-network-id and add it
        # check if PDU contains a SDN-assist info (dpid, switch, port) and pass it to RO

        def vim_account_2_RO(vim_account):
            # translate OSM vim_account id to the RO datacenter id (cached)
            if vim_account in vim_2_RO:
                return vim_2_RO[vim_account]

            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
            if db_vim["_admin"]["operationalState"] != "ENABLED":
                raise LcmException("VIM={} is not available. operationalState={}".format(
                    vim_account, db_vim["_admin"]["operationalState"]))
            RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
            vim_2_RO[vim_account] = RO_vim_id
            return RO_vim_id

        def wim_account_2_RO(wim_account):
            # translate OSM wim_account id to the RO account id (cached);
            # non-string values (None, already-translated) are passed through
            if isinstance(wim_account, str):
                if wim_account in wim_2_RO:
                    return wim_2_RO[wim_account]

                db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
                if db_wim["_admin"]["operationalState"] != "ENABLED":
                    raise LcmException("WIM={} is not available. operationalState={}".format(
                        wim_account, db_wim["_admin"]["operationalState"]))
                RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
                wim_2_RO[wim_account] = RO_wim_id
                return RO_wim_id
            else:
                return wim_account

        def ip_profile_2_RO(ip_profile):
            # adapt OSM IM ip-profile keys/values to the RO naming
            RO_ip_profile = deepcopy((ip_profile))
            if "dns-server" in RO_ip_profile:
                if isinstance(RO_ip_profile["dns-server"], list):
                    RO_ip_profile["dns-address"] = []
                    for ds in RO_ip_profile.pop("dns-server"):
                        RO_ip_profile["dns-address"].append(ds['address'])
                else:
                    RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
            if RO_ip_profile.get("ip-version") == "ipv4":
                RO_ip_profile["ip-version"] = "IPv4"
            if RO_ip_profile.get("ip-version") == "ipv6":
                RO_ip_profile["ip-version"] = "IPv6"
            if "dhcp-params" in RO_ip_profile:
                RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
            return RO_ip_profile

        if not ns_params:
            return None
        RO_ns_params = {
            # "name": ns_params["nsName"],
            # "description": ns_params.get("nsDescription"),
            "datacenter": vim_account_2_RO(ns_params["vimAccountId"]),
            "wim_account": wim_account_2_RO(ns_params.get("wimAccountId")),
            # "scenario": ns_params["nsdId"],
        }
        # set vim_account of each vnf if different from general vim_account.
        # Get this information from <vnfr> database content, key vim-account-id
        # Vim account can be set by placement_engine and it may be different from
        # the instantiate parameters (vnfs.member-vnf-index.datacenter).
        for vnf_index, vnfr in db_vnfrs.items():
            if vnfr.get("vim-account-id") and vnfr["vim-account-id"] != ns_params["vimAccountId"]:
                populate_dict(RO_ns_params, ("vnfs", vnf_index, "datacenter"), vim_account_2_RO(vnfr["vim-account-id"]))

        # collect, per vnfd, the vdus that require ssh access (for n2vc key injection)
        n2vc_key_list = n2vc_key_list or []
        for vnfd_ref, vnfd in vnfd_dict.items():
            vdu_needed_access = []
            mgmt_cp = None
            if vnfd.get("vnf-configuration"):
                ssh_required = deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required"))
                if ssh_required and vnfd.get("mgmt-interface"):
                    if vnfd["mgmt-interface"].get("vdu-id"):
                        vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
                    elif vnfd["mgmt-interface"].get("cp"):
                        # remember the mgmt connection point; resolved to a vdu below
                        mgmt_cp = vnfd["mgmt-interface"]["cp"]

            for vdu in vnfd.get("vdu", ()):
                if vdu.get("vdu-configuration"):
                    ssh_required = deep_get(vdu, ("vdu-configuration", "config-access", "ssh-access", "required"))
                    if ssh_required:
                        vdu_needed_access.append(vdu["id"])
                elif mgmt_cp:
                    for vdu_interface in vdu.get("interface"):
                        if vdu_interface.get("external-connection-point-ref") and \
                                vdu_interface["external-connection-point-ref"] == mgmt_cp:
                            vdu_needed_access.append(vdu["id"])
                            mgmt_cp = None
                            break

            if vdu_needed_access:
                for vnf_member in nsd.get("constituent-vnfd"):
                    if vnf_member["vnfd-id-ref"] != vnfd_ref:
                        continue
                    for vdu in vdu_needed_access:
                        populate_dict(RO_ns_params,
                                      ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
                                      n2vc_key_list)

            # cloud init: render per vdu-instance and attach to the RO descriptor
            for vdu in get_iterable(vnfd, "vdu"):
                cloud_init_text = self._get_cloud_init(vdu, vnfd)
                if not cloud_init_text:
                    continue
                for vnf_member in nsd.get("constituent-vnfd"):
                    if vnf_member["vnfd-id-ref"] != vnfd_ref:
                        continue
                    db_vnfr = db_vnfrs[vnf_member["member-vnf-index"]]
                    additional_params = self._get_vdu_additional_params(db_vnfr, vdu["id"]) or {}

                    # one rendered cloud-init per vdu count-index
                    cloud_init_list = []
                    for vdu_index in range(0, int(vdu.get("count", 1))):
                        additional_params["OSM"] = self._get_osm_params(db_vnfr, vdu["id"], vdu_index)
                        cloud_init_list.append(self._parse_cloud_init(cloud_init_text, additional_params, vnfd["id"],
                                                                      vdu["id"]))
                    populate_dict(RO_ns_params,
                                  ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu["id"], "cloud_init"),
                                  cloud_init_list)

        if ns_params.get("vduImage"):
            RO_ns_params["vduImage"] = ns_params["vduImage"]

        if ns_params.get("ssh_keys"):
            RO_ns_params["cloud-config"] = {"key-pairs": ns_params["ssh_keys"]}
        # per-vnf instantiation parameters
        for vnf_params in get_iterable(ns_params, "vnf"):
            for constituent_vnfd in nsd["constituent-vnfd"]:
                if constituent_vnfd["member-vnf-index"] == vnf_params["member-vnf-index"]:
                    vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                    break
            else:
                raise LcmException("Invalid instantiate parameter vnf:member-vnf-index={} is not present at nsd:"
                                   "constituent-vnfd".format(vnf_params["member-vnf-index"]))

            for vdu_params in get_iterable(vnf_params, "vdu"):
                # TODO feature 1417: check that this VDU exist and it is not a PDU
                if vdu_params.get("volume"):
                    for volume_params in vdu_params["volume"]:
                        if volume_params.get("vim-volume-id"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "devices", volume_params["name"], "vim_id"),
                                          volume_params["vim-volume-id"])
                if vdu_params.get("interface"):
                    for interface_params in vdu_params["interface"]:
                        if interface_params.get("ip-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "ip_address"),
                                          interface_params["ip-address"])
                        if interface_params.get("mac-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "mac_address"),
                                          interface_params["mac-address"])
                        if interface_params.get("floating-ip-required"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "floating-ip"),
                                          interface_params["floating-ip-required"])

            # vnf-internal virtual links
            for internal_vld_params in get_iterable(vnf_params, "internal-vld"):
                if internal_vld_params.get("vim-network-name"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-name"),
                                  internal_vld_params["vim-network-name"])
                if internal_vld_params.get("vim-network-id"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-id"),
                                  internal_vld_params["vim-network-id"])
                if internal_vld_params.get("ip-profile"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "ip-profile"),
                                  ip_profile_2_RO(internal_vld_params["ip-profile"]))
                if internal_vld_params.get("provider-network"):

                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "provider-network"),
                                  internal_vld_params["provider-network"].copy())

                for icp_params in get_iterable(internal_vld_params, "internal-connection-point"):
                    # look for interface
                    iface_found = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for vdu_interface in vdu_descriptor["interface"]:
                            if vdu_interface.get("internal-connection-point-ref") == icp_params["id-ref"]:
                                if icp_params.get("ip-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "ip_address"),
                                                  icp_params["ip-address"])

                                if icp_params.get("mac-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "mac_address"),
                                                  icp_params["mac-address"])
                                iface_found = True
                                break
                        if iface_found:
                            break
                    else:
                        # for/else: no vdu interface matched the icp id-ref
                        raise LcmException("Invalid instantiate parameter vnf:member-vnf-index[{}]:"
                                           "internal-vld:id-ref={} is not present at vnfd:internal-"
                                           "connection-point".format(vnf_params["member-vnf-index"],
                                                                     icp_params["id-ref"]))

        # ns-level virtual links
        for vld_params in get_iterable(ns_params, "vld"):
            if "ip-profile" in vld_params:
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"),
                              ip_profile_2_RO(vld_params["ip-profile"]))

            if vld_params.get("provider-network"):

                populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"),
                              vld_params["provider-network"].copy())

            if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None:
                # NOTE(review): the trailing comma below turns this statement into a
                # harmless 1-tuple; consider removing it
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"),
                              wim_account_2_RO(vld_params["wimAccountId"])),
            if vld_params.get("vim-network-name"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-name"], dict):
                    for vim_account, vim_net in vld_params["vim-network-name"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)

            if vld_params.get("vim-network-id"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-id"], dict):
                    for vim_account, vim_net in vld_params["vim-network-id"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-id"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
            if vld_params.get("ns-net"):
                if isinstance(vld_params["ns-net"], dict):
                    # NOTE(review): only the last entry of the dict survives the loop;
                    # presumably a single-entry dict is expected — confirm
                    for vld_id, instance_scenario_id in vld_params["ns-net"].items():
                        RO_vld_ns_net = {"instance_scenario_id": instance_scenario_id, "osm_id": vld_id}
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "use-network"), RO_vld_ns_net)
            if "vnfd-connection-point-ref" in vld_params:
                for cp_params in vld_params["vnfd-connection-point-ref"]:
                    # look for interface
                    for constituent_vnfd in nsd["constituent-vnfd"]:
                        if constituent_vnfd["member-vnf-index"] == cp_params["member-vnf-index-ref"]:
                            vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={} "
                            "is not present at nsd:constituent-vnfd".format(cp_params["member-vnf-index-ref"]))
                    match_cp = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for interface_descriptor in vdu_descriptor["interface"]:
                            if interface_descriptor.get("external-connection-point-ref") == \
                                    cp_params["vnfd-connection-point-ref"]:
                                match_cp = True
                                break
                        if match_cp:
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={}:"
                            "vnfd-connection-point-ref={} is not present at vnfd={}".format(
                                cp_params["member-vnf-index-ref"],
                                cp_params["vnfd-connection-point-ref"],
                                vnf_descriptor["id"]))
                    if cp_params.get("ip-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "ip_address"),
                                      cp_params["ip-address"])
                    if cp_params.get("mac-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "mac_address"),
                                      cp_params["mac-address"])
        return RO_ns_params
666
tierno27246d82018-09-27 15:59:09 +0200667 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None):
668 # make a copy to do not change
669 vdu_create = copy(vdu_create)
670 vdu_delete = copy(vdu_delete)
671
672 vdurs = db_vnfr.get("vdur")
673 if vdurs is None:
674 vdurs = []
675 vdu_index = len(vdurs)
676 while vdu_index:
677 vdu_index -= 1
678 vdur = vdurs[vdu_index]
679 if vdur.get("pdu-type"):
680 continue
681 vdu_id_ref = vdur["vdu-id-ref"]
682 if vdu_create and vdu_create.get(vdu_id_ref):
tiernodf24ef82020-09-25 12:33:15 +0000683 vdur_copy = deepcopy(vdur)
684 vdur_copy["status"] = "BUILD"
685 vdur_copy["status-detailed"] = None
686 vdur_copy["ip_address"]: None
687 for iface in vdur_copy["interfaces"]:
688 iface["ip-address"] = None
689 iface["mac-address"] = None
690 iface.pop("mgmt_vnf", None) # only first vdu can be managment of vnf # TODO ALF
tierno27246d82018-09-27 15:59:09 +0200691 for index in range(0, vdu_create[vdu_id_ref]):
tiernodf24ef82020-09-25 12:33:15 +0000692 vdur_copy["_id"] = str(uuid4())
693 vdur_copy["count-index"] += 1
694 vdurs.insert(vdu_index+1+index, vdur_copy)
695 self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
696 vdur_copy = deepcopy(vdur_copy)
697
tierno27246d82018-09-27 15:59:09 +0200698 del vdu_create[vdu_id_ref]
699 if vdu_delete and vdu_delete.get(vdu_id_ref):
700 del vdurs[vdu_index]
701 vdu_delete[vdu_id_ref] -= 1
702 if not vdu_delete[vdu_id_ref]:
703 del vdu_delete[vdu_id_ref]
704 # check all operations are done
705 if vdu_create or vdu_delete:
706 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
707 vdu_create))
708 if vdu_delete:
709 raise LcmException("Error scaling IN VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
710 vdu_delete))
711
712 vnfr_update = {"vdur": vdurs}
713 db_vnfr["vdur"] = vdurs
714 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
715
tiernof578e552018-11-08 19:07:20 +0100716 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
717 """
718 Updates database nsr with the RO info for the created vld
719 :param ns_update_nsr: dictionary to be filled with the updated info
720 :param db_nsr: content of db_nsr. This is also modified
721 :param nsr_desc_RO: nsr descriptor from RO
722 :return: Nothing, LcmException is raised on errors
723 """
724
725 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
726 for net_RO in get_iterable(nsr_desc_RO, "nets"):
727 if vld["id"] != net_RO.get("ns_net_osm_id"):
728 continue
729 vld["vim-id"] = net_RO.get("vim_net_id")
730 vld["name"] = net_RO.get("vim_name")
731 vld["status"] = net_RO.get("status")
732 vld["status-detailed"] = net_RO.get("error_msg")
733 ns_update_nsr["vld.{}".format(vld_index)] = vld
734 break
735 else:
736 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
737
tiernoe876f672020-02-13 14:34:48 +0000738 def set_vnfr_at_error(self, db_vnfrs, error_text):
739 try:
740 for db_vnfr in db_vnfrs.values():
741 vnfr_update = {"status": "ERROR"}
742 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
743 if "status" not in vdur:
744 vdur["status"] = "ERROR"
745 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
746 if error_text:
747 vdur["status-detailed"] = str(error_text)
748 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
749 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
750 except DbException as e:
751 self.logger.error("Cannot update vnf. {}".format(e))
752
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        Matching is done by member-vnf-index (vnf level), vdu_osm_id + count-index (vdu level),
        internal interface name (interface level) and vnf_net_osm_id (vld level); every level uses
        a for/else so a missing match raises instead of being silently skipped.
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';': keep only the first one
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed at the VIM, so RO has no info for them
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        # RO lists replicas of the same vdu in order; advance a counter until
                        # reaching the replica with the wanted count-index
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy ip/mac of each interface, matched by its internal name at the VIM
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get("ip_address")
                                    ifacer["mac-address"] = interface_RO.get("mac_address")
                                    break
                            else:
                                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                                   "from VIM info"
                                                   .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                                           "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))

                # update vnf-internal vlds with the vim network info reported by RO
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]))

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
tierno59d22d22018-09-25 18:10:19 +0200825
tierno5ee02052019-12-05 19:55:02 +0000826 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000827 """
828 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000829 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000830 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
831 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
832 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
833 """
tierno5ee02052019-12-05 19:55:02 +0000834 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
835 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000836 mapping = {}
837 ns_config_info = {"osm-config-mapping": mapping}
838 for vca in vca_deployed_list:
839 if not vca["member-vnf-index"]:
840 continue
841 if not vca["vdu_id"]:
842 mapping[vca["member-vnf-index"]] = vca["application"]
843 else:
844 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
845 vca["application"]
846 return ns_config_info
847
848 @staticmethod
tierno4fa7f8e2020-07-08 15:33:55 +0000849 def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed, ee_descriptor_id):
tiernoc3f2a822019-11-05 13:45:04 +0000850 """
851 Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal
852 primitives as verify-ssh-credentials, or config when needed
853 :param desc_primitive_list: information of the descriptor
854 :param vca_deployed: information of the deployed, needed for known if it is related to an NS, VNF, VDU and if
855 this element contains a ssh public key
tierno4fa7f8e2020-07-08 15:33:55 +0000856 :param ee_descriptor_id: execution environment descriptor id. It is the value of
857 XXX_configuration.execution-environment-list.INDEX.id; it can be None
tiernoc3f2a822019-11-05 13:45:04 +0000858 :return: The modified list. Can ba an empty list, but always a list
859 """
tierno4fa7f8e2020-07-08 15:33:55 +0000860
861 primitive_list = desc_primitive_list or []
862
863 # filter primitives by ee_id
864 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
865
866 # sort by 'seq'
867 if primitive_list:
868 primitive_list.sort(key=lambda val: int(val['seq']))
869
tiernoc3f2a822019-11-05 13:45:04 +0000870 # look for primitive config, and get the position. None if not present
871 config_position = None
872 for index, primitive in enumerate(primitive_list):
873 if primitive["name"] == "config":
874 config_position = index
875 break
876
877 # for NS, add always a config primitive if not present (bug 874)
878 if not vca_deployed["member-vnf-index"] and config_position is None:
879 primitive_list.insert(0, {"name": "config", "parameter": []})
880 config_position = 0
tierno4fa7f8e2020-07-08 15:33:55 +0000881 # TODO revise if needed: for VNF/VDU add verify-ssh-credentials after config
tiernoc3f2a822019-11-05 13:45:04 +0000882 if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"):
883 primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []})
884 return primitive_list
885
tierno69f0d382020-05-07 13:08:09 +0000886 async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
887 n2vc_key_list, stage, start_deploy, timeout_ns_deploy):
888 nslcmop_id = db_nslcmop["_id"]
889 target = {
890 "name": db_nsr["name"],
891 "ns": {"vld": []},
892 "vnf": [],
893 "image": deepcopy(db_nsr["image"]),
894 "flavor": deepcopy(db_nsr["flavor"]),
895 "action_id": nslcmop_id,
896 }
897 for image in target["image"]:
898 image["vim_info"] = []
899 for flavor in target["flavor"]:
900 flavor["vim_info"] = []
901
902 ns_params = db_nslcmop.get("operationParams")
903 ssh_keys = []
904 if ns_params.get("ssh_keys"):
905 ssh_keys += ns_params.get("ssh_keys")
906 if n2vc_key_list:
907 ssh_keys += n2vc_key_list
908
909 cp2target = {}
910 for vld_index, vld in enumerate(nsd.get("vld")):
911 target_vld = {"id": vld["id"],
912 "name": vld["name"],
913 "mgmt-network": vld.get("mgmt-network", False),
914 "type": vld.get("type"),
915 "vim_info": [{"vim-network-name": vld.get("vim-network-name"),
916 "vim_account_id": ns_params["vimAccountId"]}],
917 }
918 for cp in vld["vnfd-connection-point-ref"]:
919 cp2target["member_vnf:{}.{}".format(cp["member-vnf-index-ref"], cp["vnfd-connection-point-ref"])] = \
920 "nsrs:{}:vld.{}".format(nsr_id, vld_index)
921 target["ns"]["vld"].append(target_vld)
922 for vnfr in db_vnfrs.values():
923 vnfd = db_vnfds_ref[vnfr["vnfd-ref"]]
924 target_vnf = deepcopy(vnfr)
925 for vld in target_vnf.get("vld", ()):
926 # check if connected to a ns.vld
927 vnf_cp = next((cp for cp in vnfd.get("connection-point", ()) if
928 cp.get("internal-vld-ref") == vld["id"]), None)
929 if vnf_cp:
930 ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"])
931 if cp2target.get(ns_cp):
932 vld["target"] = cp2target[ns_cp]
933 vld["vim_info"] = [{"vim-network-name": vld.get("vim-network-name"),
934 "vim_account_id": vnfr["vim-account-id"]}]
935
936 for vdur in target_vnf.get("vdur", ()):
937 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
938 vdud_index, vdud = next(k for k in enumerate(vnfd["vdu"]) if k[1]["id"] == vdur["vdu-id-ref"])
939 # vdur["additionalParams"] = vnfr.get("additionalParamsForVnf") # TODO additional params for VDU
940
941 if ssh_keys:
942 if deep_get(vdud, ("vdu-configuration", "config-access", "ssh-access", "required")):
943 vdur["ssh-keys"] = ssh_keys
944 vdur["ssh-access-required"] = True
945 elif deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required")) and \
946 any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]):
947 vdur["ssh-keys"] = ssh_keys
948 vdur["ssh-access-required"] = True
949
950 # cloud-init
951 if vdud.get("cloud-init-file"):
952 vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file"))
953 elif vdud.get("cloud-init"):
954 vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], vdud_index)
955
956 # flavor
957 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
958 if not next((vi for vi in ns_flavor["vim_info"] if
959 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
960 ns_flavor["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
961 # image
962 ns_image = target["image"][int(vdur["ns-image-id"])]
963 if not next((vi for vi in ns_image["vim_info"] if
964 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
965 ns_image["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
966
967 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
968 target["vnf"].append(target_vnf)
969
970 desc = await self.RO.deploy(nsr_id, target)
971 action_id = desc["action_id"]
972 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage)
973
974 # Updating NSR
975 db_nsr_update = {
976 "_admin.deployed.RO.operational-status": "running",
977 "detailed-status": " ".join(stage)
978 }
979 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
980 self.update_db_2("nsrs", nsr_id, db_nsr_update)
981 self._write_op_status(nslcmop_id, stage)
982 self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id))
983 return
984
985 async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_time, timeout, stage):
986 detailed_status_old = None
987 db_nsr_update = {}
988 while time() <= start_time + timeout:
989 desc_status = await self.RO.status(nsr_id, action_id)
990 if desc_status["status"] == "FAILED":
991 raise NgRoException(desc_status["details"])
992 elif desc_status["status"] == "BUILD":
993 stage[2] = "VIM: ({})".format(desc_status["details"])
994 elif desc_status["status"] == "DONE":
995 stage[2] = "Deployed at VIM"
996 break
997 else:
998 assert False, "ROclient.check_ns_status returns unknown {}".format(desc_status["status"])
999 if stage[2] != detailed_status_old:
1000 detailed_status_old = stage[2]
1001 db_nsr_update["detailed-status"] = " ".join(stage)
1002 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1003 self._write_op_status(nslcmop_id, stage)
1004 await asyncio.sleep(5, loop=self.loop)
1005 else: # timeout_ns_deploy
1006 raise NgRoException("Timeout waiting ns to deploy")
1007
1008 async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
1009 db_nsr_update = {}
1010 failed_detail = []
1011 action_id = None
1012 start_deploy = time()
1013 try:
1014 target = {
1015 "ns": {"vld": []},
1016 "vnf": [],
1017 "image": [],
1018 "flavor": [],
1019 }
1020 desc = await self.RO.deploy(nsr_id, target)
1021 action_id = desc["action_id"]
1022 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1023 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1024 self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id))
1025
1026 # wait until done
1027 delete_timeout = 20 * 60 # 20 minutes
1028 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage)
1029
1030 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1031 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1032 # delete all nsr
1033 await self.RO.delete(nsr_id)
1034 except Exception as e:
1035 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1036 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1037 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1038 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1039 self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id))
1040 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1041 failed_detail.append("delete conflict: {}".format(e))
1042 self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e))
1043 else:
1044 failed_detail.append("delete error: {}".format(e))
1045 self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e))
1046
1047 if failed_detail:
1048 stage[2] = "Error deleting from VIM"
1049 else:
1050 stage[2] = "Deleted from VIM"
1051 db_nsr_update["detailed-status"] = " ".join(stage)
1052 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1053 self._write_op_status(nslcmop_id, stage)
1054
1055 if failed_detail:
1056 raise LcmException("; ".join(failed_detail))
1057 return
1058
tiernoe876f672020-02-13 14:34:48 +00001059 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
1060 n2vc_key_list, stage):
tiernoe95ed362020-04-23 08:24:57 +00001061 """
1062 Instantiate at RO
1063 :param logging_text: preffix text to use at logging
1064 :param nsr_id: nsr identity
1065 :param nsd: database content of ns descriptor
1066 :param db_nsr: database content of ns record
1067 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1068 :param db_vnfrs:
1069 :param db_vnfds_ref: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1070 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1071 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1072 :return: None or exception
1073 """
tiernoe876f672020-02-13 14:34:48 +00001074 try:
1075 db_nsr_update = {}
1076 RO_descriptor_number = 0 # number of descriptors created at RO
1077 vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
1078 nslcmop_id = db_nslcmop["_id"]
1079 start_deploy = time()
1080 ns_params = db_nslcmop.get("operationParams")
1081 if ns_params and ns_params.get("timeout_ns_deploy"):
1082 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1083 else:
1084 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001085
tiernoe876f672020-02-13 14:34:48 +00001086 # Check for and optionally request placement optimization. Database will be updated if placement activated
1087 stage[2] = "Waiting for Placement."
tierno8790a3d2020-04-23 22:49:52 +00001088 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1089 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1090 for vnfr in db_vnfrs.values():
1091 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1092 break
1093 else:
1094 ns_params["vimAccountId"] == vnfr["vim-account-id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001095
tierno69f0d382020-05-07 13:08:09 +00001096 if self.ng_ro:
1097 return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs,
1098 db_vnfds_ref, n2vc_key_list, stage, start_deploy,
1099 timeout_ns_deploy)
tiernoe876f672020-02-13 14:34:48 +00001100 # deploy RO
tiernoe876f672020-02-13 14:34:48 +00001101 # get vnfds, instantiate at RO
1102 for c_vnf in nsd.get("constituent-vnfd", ()):
1103 member_vnf_index = c_vnf["member-vnf-index"]
1104 vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
1105 vnfd_ref = vnfd["id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001106
tiernoe876f672020-02-13 14:34:48 +00001107 stage[2] = "Creating vnfd='{}' member_vnf_index='{}' at RO".format(vnfd_ref, member_vnf_index)
1108 db_nsr_update["detailed-status"] = " ".join(stage)
1109 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1110 self._write_op_status(nslcmop_id, stage)
calvinosanch9f9c6f22019-11-04 13:37:39 +01001111
tiernoe876f672020-02-13 14:34:48 +00001112 # self.logger.debug(logging_text + stage[2])
1113 vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, member_vnf_index[:23])
1114 vnf_index_2_RO_id[member_vnf_index] = vnfd_id_RO
1115 RO_descriptor_number += 1
1116
1117 # look position at deployed.RO.vnfd if not present it will be appended at the end
1118 for index, vnf_deployed in enumerate(db_nsr["_admin"]["deployed"]["RO"]["vnfd"]):
1119 if vnf_deployed["member-vnf-index"] == member_vnf_index:
1120 break
1121 else:
1122 index = len(db_nsr["_admin"]["deployed"]["RO"]["vnfd"])
1123 db_nsr["_admin"]["deployed"]["RO"]["vnfd"].append(None)
1124
1125 # look if present
1126 RO_update = {"member-vnf-index": member_vnf_index}
1127 vnfd_list = await self.RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
1128 if vnfd_list:
1129 RO_update["id"] = vnfd_list[0]["uuid"]
1130 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' exists at RO. Using RO_id={}".
1131 format(vnfd_ref, member_vnf_index, vnfd_list[0]["uuid"]))
1132 else:
1133 vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO, db_vnfrs[c_vnf["member-vnf-index"]].
1134 get("additionalParamsForVnf"), nsr_id)
1135 desc = await self.RO.create("vnfd", descriptor=vnfd_RO)
1136 RO_update["id"] = desc["uuid"]
1137 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' created at RO. RO_id={}".format(
1138 vnfd_ref, member_vnf_index, desc["uuid"]))
1139 db_nsr_update["_admin.deployed.RO.vnfd.{}".format(index)] = RO_update
1140 db_nsr["_admin"]["deployed"]["RO"]["vnfd"][index] = RO_update
1141
1142 # create nsd at RO
1143 nsd_ref = nsd["id"]
1144
1145 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1146 db_nsr_update["detailed-status"] = " ".join(stage)
1147 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1148 self._write_op_status(nslcmop_id, stage)
1149
1150 # self.logger.debug(logging_text + stage[2])
1151 RO_osm_nsd_id = "{}.{}.{}".format(nsr_id, RO_descriptor_number, nsd_ref[:23])
tiernod8323042019-08-09 11:32:23 +00001152 RO_descriptor_number += 1
tiernoe876f672020-02-13 14:34:48 +00001153 nsd_list = await self.RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
1154 if nsd_list:
1155 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = nsd_list[0]["uuid"]
1156 self.logger.debug(logging_text + "nsd={} exists at RO. Using RO_id={}".format(
1157 nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001158 else:
tiernoe876f672020-02-13 14:34:48 +00001159 nsd_RO = deepcopy(nsd)
1160 nsd_RO["id"] = RO_osm_nsd_id
1161 nsd_RO.pop("_id", None)
1162 nsd_RO.pop("_admin", None)
1163 for c_vnf in nsd_RO.get("constituent-vnfd", ()):
1164 member_vnf_index = c_vnf["member-vnf-index"]
1165 c_vnf["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
1166 for c_vld in nsd_RO.get("vld", ()):
1167 for cp in c_vld.get("vnfd-connection-point-ref", ()):
1168 member_vnf_index = cp["member-vnf-index-ref"]
1169 cp["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
tiernod8323042019-08-09 11:32:23 +00001170
tiernoe876f672020-02-13 14:34:48 +00001171 desc = await self.RO.create("nsd", descriptor=nsd_RO)
1172 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1173 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = desc["uuid"]
1174 self.logger.debug(logging_text + "nsd={} created at RO. RO_id={}".format(nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001175 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1176
tiernoe876f672020-02-13 14:34:48 +00001177 # Crate ns at RO
1178 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1179 db_nsr_update["detailed-status"] = " ".join(stage)
1180 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1181 self._write_op_status(nslcmop_id, stage)
tiernod8323042019-08-09 11:32:23 +00001182
tiernoe876f672020-02-13 14:34:48 +00001183 # if present use it unless in error status
1184 RO_nsr_id = deep_get(db_nsr, ("_admin", "deployed", "RO", "nsr_id"))
1185 if RO_nsr_id:
1186 try:
1187 stage[2] = "Looking for existing ns at RO"
1188 db_nsr_update["detailed-status"] = " ".join(stage)
1189 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1190 self._write_op_status(nslcmop_id, stage)
1191 # self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1192 desc = await self.RO.show("ns", RO_nsr_id)
tiernod8323042019-08-09 11:32:23 +00001193
tiernoe876f672020-02-13 14:34:48 +00001194 except ROclient.ROClientException as e:
1195 if e.http_code != HTTPStatus.NOT_FOUND:
1196 raise
1197 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1198 if RO_nsr_id:
1199 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1200 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1201 if ns_status == "ERROR":
1202 stage[2] = "Deleting ns at RO. RO_ns_id={}".format(RO_nsr_id)
1203 self.logger.debug(logging_text + stage[2])
1204 await self.RO.delete("ns", RO_nsr_id)
1205 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1206 if not RO_nsr_id:
1207 stage[2] = "Checking dependencies"
1208 db_nsr_update["detailed-status"] = " ".join(stage)
1209 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1210 self._write_op_status(nslcmop_id, stage)
1211 # self.logger.debug(logging_text + stage[2])
tiernod8323042019-08-09 11:32:23 +00001212
tiernoe876f672020-02-13 14:34:48 +00001213 # check if VIM is creating and wait look if previous tasks in process
1214 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account", ns_params["vimAccountId"])
1215 if task_dependency:
1216 stage[2] = "Waiting for related tasks '{}' to be completed".format(task_name)
1217 self.logger.debug(logging_text + stage[2])
1218 await asyncio.wait(task_dependency, timeout=3600)
1219 if ns_params.get("vnf"):
1220 for vnf in ns_params["vnf"]:
1221 if "vimAccountId" in vnf:
1222 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account",
1223 vnf["vimAccountId"])
1224 if task_dependency:
1225 stage[2] = "Waiting for related tasks '{}' to be completed.".format(task_name)
1226 self.logger.debug(logging_text + stage[2])
1227 await asyncio.wait(task_dependency, timeout=3600)
1228
1229 stage[2] = "Checking instantiation parameters."
tiernoe95ed362020-04-23 08:24:57 +00001230 RO_ns_params = self._ns_params_2_RO(ns_params, nsd, db_vnfds_ref, db_vnfrs, n2vc_key_list)
tiernoe876f672020-02-13 14:34:48 +00001231 stage[2] = "Deploying ns at VIM."
1232 db_nsr_update["detailed-status"] = " ".join(stage)
1233 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1234 self._write_op_status(nslcmop_id, stage)
1235
1236 desc = await self.RO.create("ns", descriptor=RO_ns_params, name=db_nsr["name"], scenario=RO_nsd_uuid)
1237 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = desc["uuid"]
1238 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1239 db_nsr_update["_admin.deployed.RO.nsr_status"] = "BUILD"
1240 self.logger.debug(logging_text + "ns created at RO. RO_id={}".format(desc["uuid"]))
1241
1242 # wait until NS is ready
1243 stage[2] = "Waiting VIM to deploy ns."
1244 db_nsr_update["detailed-status"] = " ".join(stage)
1245 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1246 self._write_op_status(nslcmop_id, stage)
1247 detailed_status_old = None
1248 self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1249
1250 old_desc = None
1251 while time() <= start_deploy + timeout_ns_deploy:
tiernod8323042019-08-09 11:32:23 +00001252 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001253
tiernoe876f672020-02-13 14:34:48 +00001254 # deploymentStatus
1255 if desc != old_desc:
1256 # desc has changed => update db
1257 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
1258 old_desc = desc
tiernod8323042019-08-09 11:32:23 +00001259
tiernoe876f672020-02-13 14:34:48 +00001260 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1261 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1262 if ns_status == "ERROR":
1263 raise ROclient.ROClientException(ns_status_info)
1264 elif ns_status == "BUILD":
1265 stage[2] = "VIM: ({})".format(ns_status_info)
1266 elif ns_status == "ACTIVE":
1267 stage[2] = "Waiting for management IP address reported by the VIM. Updating VNFRs."
1268 try:
1269 self.ns_update_vnfr(db_vnfrs, desc)
1270 break
1271 except LcmExceptionNoMgmtIP:
1272 pass
1273 else:
1274 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
1275 if stage[2] != detailed_status_old:
1276 detailed_status_old = stage[2]
1277 db_nsr_update["detailed-status"] = " ".join(stage)
1278 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1279 self._write_op_status(nslcmop_id, stage)
1280 await asyncio.sleep(5, loop=self.loop)
1281 else: # timeout_ns_deploy
1282 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tiernod8323042019-08-09 11:32:23 +00001283
tiernoe876f672020-02-13 14:34:48 +00001284 # Updating NSR
1285 self.ns_update_nsr(db_nsr_update, db_nsr, desc)
tiernod8323042019-08-09 11:32:23 +00001286
tiernoe876f672020-02-13 14:34:48 +00001287 db_nsr_update["_admin.deployed.RO.operational-status"] = "running"
1288 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1289 stage[2] = "Deployed at VIM"
1290 db_nsr_update["detailed-status"] = " ".join(stage)
1291 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1292 self._write_op_status(nslcmop_id, stage)
1293 # await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, "_admin.deployed", db_nsr_update)
1294 # self.logger.debug(logging_text + "Deployed at VIM")
tierno69f0d382020-05-07 13:08:09 +00001295 except (ROclient.ROClientException, LcmException, DbException, NgRoException) as e:
tierno067e04a2020-03-31 12:53:13 +00001296 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +00001297 self.set_vnfr_at_error(db_vnfrs, str(e))
1298 raise
quilesj7e13aeb2019-10-08 13:34:55 +02001299
tiernof24bcdd2020-09-21 14:05:39 +00001300 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1301 """
1302 Wait for kdu to be up, get ip address
1303 :param logging_text: prefix use for logging
1304 :param nsr_id:
1305 :param vnfr_id:
1306 :param kdu_name:
1307 :return: IP address
1308 """
1309
1310 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1311 nb_tries = 0
1312
1313 while nb_tries < 360:
1314 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
tierno61396ee2020-10-09 12:03:24 +00001315 kdur = next((x for x in get_iterable(db_vnfr, "kdur") if x.get("kdu-name") == kdu_name), None)
tiernof24bcdd2020-09-21 14:05:39 +00001316 if not kdur:
1317 raise LcmException("Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name))
1318 if kdur.get("status"):
1319 if kdur["status"] in ("READY", "ENABLED"):
1320 return kdur.get("ip-address")
1321 else:
1322 raise LcmException("target KDU={} is in error state".format(kdu_name))
1323
1324 await asyncio.sleep(10, loop=self.loop)
1325 nb_tries += 1
1326 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1327
tiernoa5088192019-11-26 16:12:53 +00001328 async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
1329 """
1330 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1331 :param logging_text: prefix use for logging
1332 :param nsr_id:
1333 :param vnfr_id:
1334 :param vdu_id:
1335 :param vdu_index:
1336 :param pub_key: public ssh key to inject, None to skip
1337 :param user: user to apply the public ssh key
1338 :return: IP address
1339 """
quilesj7e13aeb2019-10-08 13:34:55 +02001340
tiernoa5088192019-11-26 16:12:53 +00001341 # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
tiernod8323042019-08-09 11:32:23 +00001342 ro_nsr_id = None
1343 ip_address = None
1344 nb_tries = 0
1345 target_vdu_id = None
quilesj3149f262019-12-03 10:58:10 +00001346 ro_retries = 0
quilesj7e13aeb2019-10-08 13:34:55 +02001347
tiernod8323042019-08-09 11:32:23 +00001348 while True:
quilesj7e13aeb2019-10-08 13:34:55 +02001349
quilesj3149f262019-12-03 10:58:10 +00001350 ro_retries += 1
1351 if ro_retries >= 360: # 1 hour
1352 raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))
1353
tiernod8323042019-08-09 11:32:23 +00001354 await asyncio.sleep(10, loop=self.loop)
quilesj7e13aeb2019-10-08 13:34:55 +02001355
1356 # get ip address
tiernod8323042019-08-09 11:32:23 +00001357 if not target_vdu_id:
1358 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
quilesj3149f262019-12-03 10:58:10 +00001359
1360 if not vdu_id: # for the VNF case
tiernoe876f672020-02-13 14:34:48 +00001361 if db_vnfr.get("status") == "ERROR":
1362 raise LcmException("Cannot inject ssh-key because target VNF is in error state")
tiernod8323042019-08-09 11:32:23 +00001363 ip_address = db_vnfr.get("ip-address")
1364 if not ip_address:
1365 continue
quilesj3149f262019-12-03 10:58:10 +00001366 vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
1367 else: # VDU case
1368 vdur = next((x for x in get_iterable(db_vnfr, "vdur")
1369 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
1370
tierno0e8c3f02020-03-12 17:18:21 +00001371 if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
1372 vdur = db_vnfr["vdur"][0]
quilesj3149f262019-12-03 10:58:10 +00001373 if not vdur:
tierno0e8c3f02020-03-12 17:18:21 +00001374 raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
1375 vdu_index))
quilesj7e13aeb2019-10-08 13:34:55 +02001376
tierno0e8c3f02020-03-12 17:18:21 +00001377 if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE":
quilesj3149f262019-12-03 10:58:10 +00001378 ip_address = vdur.get("ip-address")
1379 if not ip_address:
1380 continue
1381 target_vdu_id = vdur["vdu-id-ref"]
1382 elif vdur.get("status") == "ERROR":
1383 raise LcmException("Cannot inject ssh-key because target VM is in error state")
1384
tiernod8323042019-08-09 11:32:23 +00001385 if not target_vdu_id:
1386 continue
tiernod8323042019-08-09 11:32:23 +00001387
quilesj7e13aeb2019-10-08 13:34:55 +02001388 # inject public key into machine
1389 if pub_key and user:
tiernoe876f672020-02-13 14:34:48 +00001390 # wait until NS is deployed at RO
1391 if not ro_nsr_id:
1392 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1393 ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
1394 if not ro_nsr_id:
1395 continue
1396
tiernoa5088192019-11-26 16:12:53 +00001397 # self.logger.debug(logging_text + "Inserting RO key")
tierno0e8c3f02020-03-12 17:18:21 +00001398 if vdur.get("pdu-type"):
1399 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1400 return ip_address
quilesj7e13aeb2019-10-08 13:34:55 +02001401 try:
1402 ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
tierno69f0d382020-05-07 13:08:09 +00001403 if self.ng_ro:
1404 target = {"action": "inject_ssh_key", "key": pub_key, "user": user,
1405 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdu_id}]}],
1406 }
1407 await self.RO.deploy(nsr_id, target)
1408 else:
1409 result_dict = await self.RO.create_action(
1410 item="ns",
1411 item_id_name=ro_nsr_id,
1412 descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
1413 )
1414 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1415 if not result_dict or not isinstance(result_dict, dict):
1416 raise LcmException("Unknown response from RO when injecting key")
1417 for result in result_dict.values():
1418 if result.get("vim_result") == 200:
1419 break
1420 else:
1421 raise ROclient.ROClientException("error injecting key: {}".format(
1422 result.get("description")))
1423 break
1424 except NgRoException as e:
1425 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001426 except ROclient.ROClientException as e:
tiernoa5088192019-11-26 16:12:53 +00001427 if not nb_tries:
1428 self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
1429 format(e, 20*10))
quilesj7e13aeb2019-10-08 13:34:55 +02001430 nb_tries += 1
tiernoa5088192019-11-26 16:12:53 +00001431 if nb_tries >= 20:
quilesj7e13aeb2019-10-08 13:34:55 +02001432 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001433 else:
quilesj7e13aeb2019-10-08 13:34:55 +02001434 break
1435
1436 return ip_address
1437
tierno5ee02052019-12-05 19:55:02 +00001438 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1439 """
1440 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1441 """
1442 my_vca = vca_deployed_list[vca_index]
1443 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
quilesj3655ae02019-12-12 16:08:35 +00001444 # vdu or kdu: no dependencies
tierno5ee02052019-12-05 19:55:02 +00001445 return
1446 timeout = 300
1447 while timeout >= 0:
quilesj3655ae02019-12-12 16:08:35 +00001448 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1449 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1450 configuration_status_list = db_nsr["configurationStatus"]
1451 for index, vca_deployed in enumerate(configuration_status_list):
tierno5ee02052019-12-05 19:55:02 +00001452 if index == vca_index:
quilesj3655ae02019-12-12 16:08:35 +00001453 # myself
tierno5ee02052019-12-05 19:55:02 +00001454 continue
1455 if not my_vca.get("member-vnf-index") or \
1456 (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
quilesj3655ae02019-12-12 16:08:35 +00001457 internal_status = configuration_status_list[index].get("status")
1458 if internal_status == 'READY':
1459 continue
1460 elif internal_status == 'BROKEN':
tierno5ee02052019-12-05 19:55:02 +00001461 raise LcmException("Configuration aborted because dependent charm/s has failed")
quilesj3655ae02019-12-12 16:08:35 +00001462 else:
1463 break
tierno5ee02052019-12-05 19:55:02 +00001464 else:
quilesj3655ae02019-12-12 16:08:35 +00001465 # no dependencies, return
tierno5ee02052019-12-05 19:55:02 +00001466 return
1467 await asyncio.sleep(10)
1468 timeout -= 1
tierno5ee02052019-12-05 19:55:02 +00001469
1470 raise LcmException("Configuration aborted because dependent charm/s timeout")
1471
    async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
                               config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name,
                               ee_config_descriptor):
        """
        Create/register the execution environment for one NS/VNF/VDU/KDU element, install its
        configuration software, and run its Day-1 (initial-config) primitives.
        Progress is continuously written to the nsr record ("configurationStatus.<vca_index>")
        and to the nslcmop record (stage). On any failure the configuration status is set to
        BROKEN and an LcmException is raised.
        :param logging_text: prefix used for logging
        :param vca_index: index of this VCA inside _admin.deployed.VCA of the ns record
        :param nsi_id: network-slice instance id, used as namespace prefix (may be None)
        :param db_nsr: database content of the ns record
        :param db_vnfr: database content of the vnf record; None for NS-level charms
        :param vdu_id: vdu id when configuring a VDU, else None
        :param kdu_name: kdu name when configuring a KDU, else None
        :param vdu_index: count-index of the vdu (defaults to 0 when falsy)
        :param config_descriptor: descriptor section with the configuration (primitives, ssh-access)
        :param deploy_params: parameters for primitive rendering; "rw_mgmt_ip" is added here
        :param base_folder: dict with "folder" and "pkg-dir" to locate charm/helm artifacts
        :param nslcmop_id: operation id, used for writing stage progress
        :param stage: 3-element stage list; stage[0] is mutated with the Day-1 stage text
        :param vca_type: one of "lxc_proxy_charm", "k8s_proxy_charm", "helm", "native_charm"
            (NOTE(review): other values would leave rw_mgmt_ip undefined before the
            deploy_params assignment below -- assumed not to occur; confirm against callers)
        :param vca_name: charm/chart name inside the package
        :param ee_config_descriptor: execution-environment config (id, metrics info for helm)
        :raises LcmException: wrapping any error, after marking the configuration as BROKEN
        """
        nsr_id = db_nsr["_id"]
        # dot-path prefix for all updates of this VCA entry inside the nsr record
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        # "osm" config block passed to the execution environment (ns/vnf/vdu/kdu identity)
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            'collection': 'nsrs',
            'filter': {'_id': nsr_id},
            'path': db_update_entry
        }
        step = ""
        try:

            element_type = 'NS'
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            # namespace identifies the element in VCA: "<nsi>.<ns>[.<vnf>[.<vdu>-<idx> | .<kdu>]]"
            namespace = "{nsi}.{ns}".format(
                nsi=nsi_id if nsi_id else "",
                ns=nsr_id)

            if vnfr_id:
                element_type = 'VNF'
                element_under_configuration = vnfr_id
                namespace += ".{}".format(vnfr_id)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
                    element_type = 'VDU'
                    element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = 'KDU'
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            artifact_path = "{}/{}/{}/{}".format(
                base_folder["folder"],
                base_folder["pkg-dir"],
                "charms" if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts",
                vca_name
            )
            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get('initial-config-primitive')

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
                                                                                    vca_deployed, ee_descriptor_id)

            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm"):

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status='CREATING',
                    element_under_configuration=element_under_configuration,
                    element_type=element_type
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)
                ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
                    namespace=namespace,
                    reuse_ee_id=ee_id,
                    db_dict=db_dict,
                    config=osm_config,
                    artifact_path=artifact_path,
                    vca_type=vca_type)

            elif vca_type == "native_charm":
                # native charm runs inside the VM itself: wait for the VM and register it
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
                                                                 user=None, pub_key=None)
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException("Cannot determine the username neither with 'initial-config-primitive' nor with "
                                       "'config-access.ssh-access.default-user'")
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status='REGISTERING',
                    element_under_configuration=element_under_configuration,
                    element_type=element_type
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials, namespace=namespace, db_dict=db_dict)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split('.')
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='INSTALLING SW',
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive parameters are applied at install time for native charms
                config_primitive = next((p for p in initial_config_primitive_list if p["name"] == "config"), None)
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive,
                        {},
                        deploy_params
                    )
            # number of charm units: taken from "config-units" of the matching element, default 1
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break

            await self.vca_map[vca_type].install_configuration_sw(
                ee_id=ee_id,
                artifact_path=artifact_path,
                db_dict=db_dict,
                config=config,
                num_units=num_units,
                vca_type=vca_type
            )

            # write in db flag of configuration_sw already installed
            self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})

            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
                                         vca_index=vca_index, vca_type=vca_type)

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)

                    step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(logging_text, nsr_id, vnfr_id, kdu_name)
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id,
                                                                         vdu_index, user=user, pub_key=pub_key)
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6 Execute initial config primitive
            step = 'execute initial config primitive'

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
            else:
                # NS
                stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='EXECUTING PRIMITIVE'
            )

            self._write_op_status(
                op_id=nslcmop_id,
                stage=stage
            )

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)

                step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get('terminate-config-primitive'):
                        self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm":
                prometheus_jobs = await self.add_prometheus_metrics(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2("nsrs", nsr_id, {db_update_entry + "prometheus_jobs": prometheus_jobs})

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='READY'
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # unexpected exception types get a full traceback; known ones are summarized by caller
            if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
                self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='BROKEN'
            )
            raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00001772
quilesj4cda56b2019-12-05 10:02:20 +00001773 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
tiernoa2143262020-03-27 16:20:40 +00001774 error_description: str = None, error_detail: str = None, other_update: dict = None):
tiernoe876f672020-02-13 14:34:48 +00001775 """
1776 Update db_nsr fields.
1777 :param nsr_id:
1778 :param ns_state:
1779 :param current_operation:
1780 :param current_operation_id:
1781 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00001782 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00001783 :param other_update: Other required changes at database if provided, will be cleared
1784 :return:
1785 """
quilesj4cda56b2019-12-05 10:02:20 +00001786 try:
tiernoe876f672020-02-13 14:34:48 +00001787 db_dict = other_update or {}
1788 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1789 db_dict["_admin.current-operation"] = current_operation_id
1790 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
quilesj4cda56b2019-12-05 10:02:20 +00001791 db_dict["currentOperation"] = current_operation
1792 db_dict["currentOperationID"] = current_operation_id
1793 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00001794 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00001795
1796 if ns_state:
1797 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00001798 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001799 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001800 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1801
tiernoe876f672020-02-13 14:34:48 +00001802 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1803 operation_state: str = None, other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001804 try:
tiernoe876f672020-02-13 14:34:48 +00001805 db_dict = other_update or {}
quilesj3655ae02019-12-12 16:08:35 +00001806 db_dict['queuePosition'] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00001807 if isinstance(stage, list):
1808 db_dict['stage'] = stage[0]
1809 db_dict['detailed-status'] = " ".join(stage)
1810 elif stage is not None:
1811 db_dict['stage'] = str(stage)
1812
1813 if error_message is not None:
quilesj3655ae02019-12-12 16:08:35 +00001814 db_dict['errorMessage'] = error_message
tiernoe876f672020-02-13 14:34:48 +00001815 if operation_state is not None:
1816 db_dict['operationState'] = operation_state
1817 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00001818 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001819 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001820 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1821
tierno51183952020-04-03 15:48:18 +00001822 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00001823 try:
tierno51183952020-04-03 15:48:18 +00001824 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00001825 # configurationStatus
1826 config_status = db_nsr.get('configurationStatus')
1827 if config_status:
tierno51183952020-04-03 15:48:18 +00001828 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1829 enumerate(config_status) if v}
quilesj3655ae02019-12-12 16:08:35 +00001830 # update status
tierno51183952020-04-03 15:48:18 +00001831 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001832
tiernoe876f672020-02-13 14:34:48 +00001833 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001834 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1835
quilesj63f90042020-01-17 09:53:55 +00001836 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
tierno51183952020-04-03 15:48:18 +00001837 element_under_configuration: str = None, element_type: str = None,
1838 other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001839
1840 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1841 # .format(vca_index, status))
1842
1843 try:
1844 db_path = 'configurationStatus.{}.'.format(vca_index)
tierno51183952020-04-03 15:48:18 +00001845 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00001846 if status:
1847 db_dict[db_path + 'status'] = status
quilesj3655ae02019-12-12 16:08:35 +00001848 if element_under_configuration:
1849 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
1850 if element_type:
1851 db_dict[db_path + 'elementType'] = element_type
1852 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001853 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001854 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
1855 .format(status, nsr_id, vca_index, e))
quilesj4cda56b2019-12-05 10:02:20 +00001856
tierno38089af2020-04-16 07:56:58 +00001857 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
1858 """
1859 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
1860 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
1861 Database is used because the result can be obtained from a different LCM worker in case of HA.
1862 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
1863 :param db_nslcmop: database content of nslcmop
1864 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
tierno8790a3d2020-04-23 22:49:52 +00001865 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
1866 computed 'vim-account-id'
tierno38089af2020-04-16 07:56:58 +00001867 """
tierno8790a3d2020-04-23 22:49:52 +00001868 modified = False
tierno38089af2020-04-16 07:56:58 +00001869 nslcmop_id = db_nslcmop['_id']
magnussonle9198bb2020-01-21 13:00:51 +01001870 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
1871 if placement_engine == "PLA":
tierno38089af2020-04-16 07:56:58 +00001872 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
1873 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
magnussonle9198bb2020-01-21 13:00:51 +01001874 db_poll_interval = 5
tierno38089af2020-04-16 07:56:58 +00001875 wait = db_poll_interval * 10
magnussonle9198bb2020-01-21 13:00:51 +01001876 pla_result = None
1877 while not pla_result and wait >= 0:
1878 await asyncio.sleep(db_poll_interval)
1879 wait -= db_poll_interval
tierno38089af2020-04-16 07:56:58 +00001880 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
magnussonle9198bb2020-01-21 13:00:51 +01001881 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
1882
1883 if not pla_result:
tierno38089af2020-04-16 07:56:58 +00001884 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
magnussonle9198bb2020-01-21 13:00:51 +01001885
1886 for pla_vnf in pla_result['vnf']:
1887 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
1888 if not pla_vnf.get('vimAccountId') or not vnfr:
1889 continue
tierno8790a3d2020-04-23 22:49:52 +00001890 modified = True
magnussonle9198bb2020-01-21 13:00:51 +01001891 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
tierno38089af2020-04-16 07:56:58 +00001892 # Modifies db_vnfrs
1893 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
tierno8790a3d2020-04-23 22:49:52 +00001894 return modified
magnussonle9198bb2020-01-21 13:00:51 +01001895
1896 def update_nsrs_with_pla_result(self, params):
1897 try:
1898 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
1899 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
1900 except Exception as e:
1901 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
1902
tierno59d22d22018-09-25 18:10:19 +02001903 async def instantiate(self, nsr_id, nslcmop_id):
quilesj7e13aeb2019-10-08 13:34:55 +02001904 """
1905
1906 :param nsr_id: ns instance to deploy
1907 :param nslcmop_id: operation to run
1908 :return:
1909 """
kuused124bfe2019-06-18 12:09:24 +02001910
1911 # Try to lock HA task here
1912 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
1913 if not task_is_locked_by_me:
quilesj3655ae02019-12-12 16:08:35 +00001914 self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
kuused124bfe2019-06-18 12:09:24 +02001915 return
1916
tierno59d22d22018-09-25 18:10:19 +02001917 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
1918 self.logger.debug(logging_text + "Enter")
quilesj7e13aeb2019-10-08 13:34:55 +02001919
tierno59d22d22018-09-25 18:10:19 +02001920 # get all needed from database
quilesj7e13aeb2019-10-08 13:34:55 +02001921
1922 # database nsrs record
tierno59d22d22018-09-25 18:10:19 +02001923 db_nsr = None
quilesj7e13aeb2019-10-08 13:34:55 +02001924
1925 # database nslcmops record
tierno59d22d22018-09-25 18:10:19 +02001926 db_nslcmop = None
quilesj7e13aeb2019-10-08 13:34:55 +02001927
1928 # update operation on nsrs
tiernoe876f672020-02-13 14:34:48 +00001929 db_nsr_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001930 # update operation on nslcmops
tierno59d22d22018-09-25 18:10:19 +02001931 db_nslcmop_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001932
tierno59d22d22018-09-25 18:10:19 +02001933 nslcmop_operation_state = None
quilesj7e13aeb2019-10-08 13:34:55 +02001934 db_vnfrs = {} # vnf's info indexed by member-index
1935 # n2vc_info = {}
tiernoe876f672020-02-13 14:34:48 +00001936 tasks_dict_info = {} # from task to info text
tierno59d22d22018-09-25 18:10:19 +02001937 exc = None
tiernoe876f672020-02-13 14:34:48 +00001938 error_list = []
1939 stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
1940 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02001941 try:
kuused124bfe2019-06-18 12:09:24 +02001942 # wait for any previous tasks in process
1943 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
1944
tierno50b41432020-08-11 11:20:13 +00001945 stage[1] = "Sync filesystem from database."
tiernob3edda02020-07-09 13:51:20 +00001946 self.fs.sync() # TODO, make use of partial sync, only for the needed packages
1947
quilesj7e13aeb2019-10-08 13:34:55 +02001948 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
tierno50b41432020-08-11 11:20:13 +00001949 stage[1] = "Reading from database."
quilesj4cda56b2019-12-05 10:02:20 +00001950 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
tiernoe876f672020-02-13 14:34:48 +00001951 db_nsr_update["detailed-status"] = "creating"
1952 db_nsr_update["operational-status"] = "init"
quilesj4cda56b2019-12-05 10:02:20 +00001953 self._write_ns_status(
1954 nsr_id=nsr_id,
1955 ns_state="BUILDING",
1956 current_operation="INSTANTIATING",
tiernoe876f672020-02-13 14:34:48 +00001957 current_operation_id=nslcmop_id,
1958 other_update=db_nsr_update
1959 )
1960 self._write_op_status(
1961 op_id=nslcmop_id,
1962 stage=stage,
1963 queuePosition=0
quilesj4cda56b2019-12-05 10:02:20 +00001964 )
1965
quilesj7e13aeb2019-10-08 13:34:55 +02001966 # read from db: operation
tierno50b41432020-08-11 11:20:13 +00001967 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
tierno59d22d22018-09-25 18:10:19 +02001968 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
tierno744303e2020-01-13 16:46:31 +00001969 ns_params = db_nslcmop.get("operationParams")
1970 if ns_params and ns_params.get("timeout_ns_deploy"):
1971 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1972 else:
1973 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001974
1975 # read from db: ns
tierno50b41432020-08-11 11:20:13 +00001976 stage[1] = "Getting nsr={} from db.".format(nsr_id)
tierno59d22d22018-09-25 18:10:19 +02001977 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tierno50b41432020-08-11 11:20:13 +00001978 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
tiernod732fb82020-05-21 13:18:23 +00001979 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
1980 db_nsr["nsd"] = nsd
tiernod8323042019-08-09 11:32:23 +00001981 # nsr_name = db_nsr["name"] # TODO short-name??
tierno47e86b52018-10-10 14:05:55 +02001982
quilesj7e13aeb2019-10-08 13:34:55 +02001983 # read from db: vnf's of this ns
tierno50b41432020-08-11 11:20:13 +00001984 stage[1] = "Getting vnfrs from db."
tiernoe876f672020-02-13 14:34:48 +00001985 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001986 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
tierno27246d82018-09-27 15:59:09 +02001987
quilesj7e13aeb2019-10-08 13:34:55 +02001988 # read from db: vnfd's for every vnf
1989 db_vnfds_ref = {} # every vnfd data indexed by vnf name
1990 db_vnfds = {} # every vnfd data indexed by vnf id
1991 db_vnfds_index = {} # every vnfd data indexed by vnf member-index
1992
1993 # for each vnf in ns, read vnfd
tierno27246d82018-09-27 15:59:09 +02001994 for vnfr in db_vnfrs_list:
quilesj7e13aeb2019-10-08 13:34:55 +02001995 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr # vnf's dict indexed by member-index: '1', '2', etc
1996 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
1997 vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
lloretgalleg32ead8c2020-07-22 10:13:46 +00001998
quilesj7e13aeb2019-10-08 13:34:55 +02001999 # if we haven't this vnfd, read it from db
tierno27246d82018-09-27 15:59:09 +02002000 if vnfd_id not in db_vnfds:
quilesj63f90042020-01-17 09:53:55 +00002001 # read from db
tierno50b41432020-08-11 11:20:13 +00002002 stage[1] = "Getting vnfd={} id='{}' from db.".format(vnfd_id, vnfd_ref)
tiernoe876f672020-02-13 14:34:48 +00002003 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02002004 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
tierno27246d82018-09-27 15:59:09 +02002005
quilesj7e13aeb2019-10-08 13:34:55 +02002006 # store vnfd
2007 db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
2008 db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
2009 db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id] # vnfd's indexed by member-index
2010
2011 # Get or generates the _admin.deployed.VCA list
tiernoe4f7e6c2018-11-27 14:55:30 +00002012 vca_deployed_list = None
2013 if db_nsr["_admin"].get("deployed"):
2014 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2015 if vca_deployed_list is None:
2016 vca_deployed_list = []
quilesj3655ae02019-12-12 16:08:35 +00002017 configuration_status_list = []
tiernoe4f7e6c2018-11-27 14:55:30 +00002018 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
quilesj3655ae02019-12-12 16:08:35 +00002019 db_nsr_update["configurationStatus"] = configuration_status_list
quilesj7e13aeb2019-10-08 13:34:55 +02002020 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00002021 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00002022 elif isinstance(vca_deployed_list, dict):
2023 # maintain backward compatibility. Change a dict to list at database
2024 vca_deployed_list = list(vca_deployed_list.values())
2025 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00002026 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00002027
tierno6cf25f52019-09-12 09:33:40 +00002028 if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
tiernoa009e552019-01-30 16:45:44 +00002029 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2030 db_nsr_update["_admin.deployed.RO.vnfd"] = []
tierno59d22d22018-09-25 18:10:19 +02002031
tiernobaa51102018-12-14 13:16:18 +00002032 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2033 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2034 self.update_db_2("nsrs", nsr_id, db_nsr_update)
lloretgalleg32ead8c2020-07-22 10:13:46 +00002035 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"})
quilesj3655ae02019-12-12 16:08:35 +00002036
2037 # n2vc_redesign STEP 2 Deploy Network Scenario
tiernoe876f672020-02-13 14:34:48 +00002038 stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
quilesj3655ae02019-12-12 16:08:35 +00002039 self._write_op_status(
2040 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00002041 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00002042 )
2043
tierno50b41432020-08-11 11:20:13 +00002044 stage[1] = "Deploying KDUs."
tiernoe876f672020-02-13 14:34:48 +00002045 # self.logger.debug(logging_text + "Before deploy_kdus")
calvinosanch9f9c6f22019-11-04 13:37:39 +01002046 # Call to deploy_kdus in case exists the "vdu:kdu" param
tiernoe876f672020-02-13 14:34:48 +00002047 await self.deploy_kdus(
2048 logging_text=logging_text,
2049 nsr_id=nsr_id,
2050 nslcmop_id=nslcmop_id,
2051 db_vnfrs=db_vnfrs,
2052 db_vnfds=db_vnfds,
2053 task_instantiation_info=tasks_dict_info,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002054 )
tiernoe876f672020-02-13 14:34:48 +00002055
2056 stage[1] = "Getting VCA public key."
tiernod8323042019-08-09 11:32:23 +00002057 # n2vc_redesign STEP 1 Get VCA public ssh-key
2058 # feature 1429. Add n2vc public key to needed VMs
tierno3bedc9b2019-11-27 15:46:57 +00002059 n2vc_key = self.n2vc.get_public_key()
tiernoa5088192019-11-26 16:12:53 +00002060 n2vc_key_list = [n2vc_key]
2061 if self.vca_config.get("public_key"):
2062 n2vc_key_list.append(self.vca_config["public_key"])
tierno98ad6ea2019-05-30 17:16:28 +00002063
tiernoe876f672020-02-13 14:34:48 +00002064 stage[1] = "Deploying NS at VIM."
tiernod8323042019-08-09 11:32:23 +00002065 task_ro = asyncio.ensure_future(
quilesj7e13aeb2019-10-08 13:34:55 +02002066 self.instantiate_RO(
2067 logging_text=logging_text,
2068 nsr_id=nsr_id,
2069 nsd=nsd,
2070 db_nsr=db_nsr,
2071 db_nslcmop=db_nslcmop,
2072 db_vnfrs=db_vnfrs,
2073 db_vnfds_ref=db_vnfds_ref,
tiernoe876f672020-02-13 14:34:48 +00002074 n2vc_key_list=n2vc_key_list,
2075 stage=stage
tierno98ad6ea2019-05-30 17:16:28 +00002076 )
tiernod8323042019-08-09 11:32:23 +00002077 )
2078 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
tiernoa2143262020-03-27 16:20:40 +00002079 tasks_dict_info[task_ro] = "Deploying at VIM"
tierno98ad6ea2019-05-30 17:16:28 +00002080
tiernod8323042019-08-09 11:32:23 +00002081 # n2vc_redesign STEP 3 to 6 Deploy N2VC
tiernoe876f672020-02-13 14:34:48 +00002082 stage[1] = "Deploying Execution Environments."
2083 self.logger.debug(logging_text + stage[1])
tierno98ad6ea2019-05-30 17:16:28 +00002084
tiernod8323042019-08-09 11:32:23 +00002085 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
quilesj7e13aeb2019-10-08 13:34:55 +02002086 # get_iterable() returns a value from a dict or empty tuple if key does not exist
tierno98ad6ea2019-05-30 17:16:28 +00002087 for c_vnf in get_iterable(nsd, "constituent-vnfd"):
2088 vnfd_id = c_vnf["vnfd-id-ref"]
tierno98ad6ea2019-05-30 17:16:28 +00002089 vnfd = db_vnfds_ref[vnfd_id]
tiernod8323042019-08-09 11:32:23 +00002090 member_vnf_index = str(c_vnf["member-vnf-index"])
2091 db_vnfr = db_vnfrs[member_vnf_index]
2092 base_folder = vnfd["_admin"]["storage"]
2093 vdu_id = None
2094 vdu_index = 0
tierno98ad6ea2019-05-30 17:16:28 +00002095 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002096 kdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002097
tierno8a518872018-12-21 13:42:14 +00002098 # Get additional parameters
tiernof7b42112020-10-06 08:22:07 +00002099 deploy_params = {"OSM": self._get_osm_params(db_vnfr)}
tiernod8323042019-08-09 11:32:23 +00002100 if db_vnfr.get("additionalParamsForVnf"):
tiernof7b42112020-10-06 08:22:07 +00002101 deploy_params.update(self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy()))
tierno8a518872018-12-21 13:42:14 +00002102
tiernod8323042019-08-09 11:32:23 +00002103 descriptor_config = vnfd.get("vnf-configuration")
tierno588547c2020-07-01 15:30:20 +00002104 if descriptor_config:
quilesj7e13aeb2019-10-08 13:34:55 +02002105 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00002106 logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002107 db_nsr=db_nsr,
2108 db_vnfr=db_vnfr,
2109 nslcmop_id=nslcmop_id,
2110 nsr_id=nsr_id,
2111 nsi_id=nsi_id,
2112 vnfd_id=vnfd_id,
2113 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002114 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002115 member_vnf_index=member_vnf_index,
2116 vdu_index=vdu_index,
2117 vdu_name=vdu_name,
2118 deploy_params=deploy_params,
2119 descriptor_config=descriptor_config,
2120 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00002121 task_instantiation_info=tasks_dict_info,
2122 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002123 )
tierno59d22d22018-09-25 18:10:19 +02002124
2125 # Deploy charms for each VDU that supports one.
tiernod8323042019-08-09 11:32:23 +00002126 for vdud in get_iterable(vnfd, 'vdu'):
2127 vdu_id = vdud["id"]
2128 descriptor_config = vdud.get('vdu-configuration')
tierno626e0152019-11-29 14:16:16 +00002129 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
2130 if vdur.get("additionalParams"):
2131 deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
2132 else:
2133 deploy_params_vdu = deploy_params
tiernof7b42112020-10-06 08:22:07 +00002134 deploy_params_vdu["OSM"] = self._get_osm_params(db_vnfr, vdu_id, vdu_count_index=0)
tierno588547c2020-07-01 15:30:20 +00002135 if descriptor_config:
tiernod8323042019-08-09 11:32:23 +00002136 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002137 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002138 for vdu_index in range(int(vdud.get("count", 1))):
2139 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
quilesj7e13aeb2019-10-08 13:34:55 +02002140 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00002141 logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2142 member_vnf_index, vdu_id, vdu_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002143 db_nsr=db_nsr,
2144 db_vnfr=db_vnfr,
2145 nslcmop_id=nslcmop_id,
2146 nsr_id=nsr_id,
2147 nsi_id=nsi_id,
2148 vnfd_id=vnfd_id,
2149 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002150 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002151 member_vnf_index=member_vnf_index,
2152 vdu_index=vdu_index,
2153 vdu_name=vdu_name,
tierno626e0152019-11-29 14:16:16 +00002154 deploy_params=deploy_params_vdu,
quilesj7e13aeb2019-10-08 13:34:55 +02002155 descriptor_config=descriptor_config,
2156 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002157 task_instantiation_info=tasks_dict_info,
2158 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002159 )
calvinosanch9f9c6f22019-11-04 13:37:39 +01002160 for kdud in get_iterable(vnfd, 'kdu'):
2161 kdu_name = kdud["name"]
2162 descriptor_config = kdud.get('kdu-configuration')
tierno588547c2020-07-01 15:30:20 +00002163 if descriptor_config:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002164 vdu_id = None
2165 vdu_index = 0
2166 vdu_name = None
tiernof7b42112020-10-06 08:22:07 +00002167 kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name)
2168 deploy_params_kdu = {"OSM": self._get_osm_params(db_vnfr)}
2169 if kdur.get("additionalParams"):
2170 deploy_params_kdu = self._format_additional_params(kdur["additionalParams"])
tierno59d22d22018-09-25 18:10:19 +02002171
calvinosanch9f9c6f22019-11-04 13:37:39 +01002172 self._deploy_n2vc(
2173 logging_text=logging_text,
2174 db_nsr=db_nsr,
2175 db_vnfr=db_vnfr,
2176 nslcmop_id=nslcmop_id,
2177 nsr_id=nsr_id,
2178 nsi_id=nsi_id,
2179 vnfd_id=vnfd_id,
2180 vdu_id=vdu_id,
2181 kdu_name=kdu_name,
2182 member_vnf_index=member_vnf_index,
2183 vdu_index=vdu_index,
2184 vdu_name=vdu_name,
tiernof7b42112020-10-06 08:22:07 +00002185 deploy_params=deploy_params_kdu,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002186 descriptor_config=descriptor_config,
2187 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002188 task_instantiation_info=tasks_dict_info,
2189 stage=stage
calvinosanch9f9c6f22019-11-04 13:37:39 +01002190 )
tierno59d22d22018-09-25 18:10:19 +02002191
tierno1b633412019-02-25 16:48:23 +00002192 # Check if this NS has a charm configuration
tiernod8323042019-08-09 11:32:23 +00002193 descriptor_config = nsd.get("ns-configuration")
2194 if descriptor_config and descriptor_config.get("juju"):
2195 vnfd_id = None
2196 db_vnfr = None
2197 member_vnf_index = None
2198 vdu_id = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002199 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002200 vdu_index = 0
2201 vdu_name = None
tierno1b633412019-02-25 16:48:23 +00002202
tiernod8323042019-08-09 11:32:23 +00002203 # Get additional parameters
tiernof7b42112020-10-06 08:22:07 +00002204 deploy_params = {"OSM": self._get_osm_params(db_vnfr)}
tiernod8323042019-08-09 11:32:23 +00002205 if db_nsr.get("additionalParamsForNs"):
tiernof7b42112020-10-06 08:22:07 +00002206 deploy_params.update(self._format_additional_params(db_nsr["additionalParamsForNs"].copy()))
tiernod8323042019-08-09 11:32:23 +00002207 base_folder = nsd["_admin"]["storage"]
quilesj7e13aeb2019-10-08 13:34:55 +02002208 self._deploy_n2vc(
2209 logging_text=logging_text,
2210 db_nsr=db_nsr,
2211 db_vnfr=db_vnfr,
2212 nslcmop_id=nslcmop_id,
2213 nsr_id=nsr_id,
2214 nsi_id=nsi_id,
2215 vnfd_id=vnfd_id,
2216 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002217 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002218 member_vnf_index=member_vnf_index,
2219 vdu_index=vdu_index,
2220 vdu_name=vdu_name,
2221 deploy_params=deploy_params,
2222 descriptor_config=descriptor_config,
2223 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002224 task_instantiation_info=tasks_dict_info,
2225 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002226 )
tierno1b633412019-02-25 16:48:23 +00002227
tiernoe876f672020-02-13 14:34:48 +00002228 # rest of staff will be done at finally
tierno1b633412019-02-25 16:48:23 +00002229
tiernoe876f672020-02-13 14:34:48 +00002230 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
2231 self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
tierno59d22d22018-09-25 18:10:19 +02002232 exc = e
2233 except asyncio.CancelledError:
tiernoe876f672020-02-13 14:34:48 +00002234 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
tierno59d22d22018-09-25 18:10:19 +02002235 exc = "Operation was cancelled"
2236 except Exception as e:
2237 exc = traceback.format_exc()
tiernoe876f672020-02-13 14:34:48 +00002238 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
tierno59d22d22018-09-25 18:10:19 +02002239 finally:
2240 if exc:
tiernoe876f672020-02-13 14:34:48 +00002241 error_list.append(str(exc))
tiernobaa51102018-12-14 13:16:18 +00002242 try:
tiernoe876f672020-02-13 14:34:48 +00002243 # wait for pending tasks
2244 if tasks_dict_info:
2245 stage[1] = "Waiting for instantiate pending tasks."
2246 self.logger.debug(logging_text + stage[1])
2247 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
2248 stage, nslcmop_id, nsr_id=nsr_id)
2249 stage[1] = stage[2] = ""
2250 except asyncio.CancelledError:
2251 error_list.append("Cancelled")
2252 # TODO cancel all tasks
2253 except Exception as exc:
2254 error_list.append(str(exc))
quilesj4cda56b2019-12-05 10:02:20 +00002255
tiernoe876f672020-02-13 14:34:48 +00002256 # update operation-status
2257 db_nsr_update["operational-status"] = "running"
2258 # let's begin with VCA 'configured' status (later we can change it)
2259 db_nsr_update["config-status"] = "configured"
2260 for task, task_name in tasks_dict_info.items():
2261 if not task.done() or task.cancelled() or task.exception():
2262 if task_name.startswith(self.task_name_deploy_vca):
2263 # A N2VC task is pending
2264 db_nsr_update["config-status"] = "failed"
quilesj4cda56b2019-12-05 10:02:20 +00002265 else:
tiernoe876f672020-02-13 14:34:48 +00002266 # RO or KDU task is pending
2267 db_nsr_update["operational-status"] = "failed"
quilesj3655ae02019-12-12 16:08:35 +00002268
tiernoe876f672020-02-13 14:34:48 +00002269 # update status at database
2270 if error_list:
tiernoa2143262020-03-27 16:20:40 +00002271 error_detail = ". ".join(error_list)
tiernoe876f672020-02-13 14:34:48 +00002272 self.logger.error(logging_text + error_detail)
tierno50b41432020-08-11 11:20:13 +00002273 error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
2274 error_description_nsr = 'Operation: INSTANTIATING.{}, {}'.format(nslcmop_id, stage[0])
quilesj3655ae02019-12-12 16:08:35 +00002275
tiernoa2143262020-03-27 16:20:40 +00002276 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00002277 db_nslcmop_update["detailed-status"] = error_detail
2278 nslcmop_operation_state = "FAILED"
2279 ns_state = "BROKEN"
2280 else:
tiernoa2143262020-03-27 16:20:40 +00002281 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00002282 error_description_nsr = error_description_nslcmop = None
2283 ns_state = "READY"
2284 db_nsr_update["detailed-status"] = "Done"
2285 db_nslcmop_update["detailed-status"] = "Done"
2286 nslcmop_operation_state = "COMPLETED"
quilesj4cda56b2019-12-05 10:02:20 +00002287
tiernoe876f672020-02-13 14:34:48 +00002288 if db_nsr:
2289 self._write_ns_status(
2290 nsr_id=nsr_id,
2291 ns_state=ns_state,
2292 current_operation="IDLE",
2293 current_operation_id=None,
2294 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00002295 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00002296 other_update=db_nsr_update
2297 )
tiernoa17d4f42020-04-28 09:59:23 +00002298 self._write_op_status(
2299 op_id=nslcmop_id,
2300 stage="",
2301 error_message=error_description_nslcmop,
2302 operation_state=nslcmop_operation_state,
2303 other_update=db_nslcmop_update,
2304 )
quilesj3655ae02019-12-12 16:08:35 +00002305
tierno59d22d22018-09-25 18:10:19 +02002306 if nslcmop_operation_state:
2307 try:
2308 await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00002309 "operationState": nslcmop_operation_state},
2310 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02002311 except Exception as e:
2312 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
2313
2314 self.logger.debug(logging_text + "Exit")
2315 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2316
tierno588547c2020-07-01 15:30:20 +00002317 async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
2318 timeout: int = 3600, vca_type: str = None) -> bool:
quilesj63f90042020-01-17 09:53:55 +00002319
2320 # steps:
2321 # 1. find all relations for this VCA
2322 # 2. wait for other peers related
2323 # 3. add relations
2324
2325 try:
tierno588547c2020-07-01 15:30:20 +00002326 vca_type = vca_type or "lxc_proxy_charm"
quilesj63f90042020-01-17 09:53:55 +00002327
2328 # STEP 1: find all relations for this VCA
2329
2330 # read nsr record
2331 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garcia171f3542020-05-21 16:41:07 +02002332 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
quilesj63f90042020-01-17 09:53:55 +00002333
2334 # this VCA data
2335 my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]
2336
2337 # read all ns-configuration relations
2338 ns_relations = list()
David Garcia171f3542020-05-21 16:41:07 +02002339 db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
quilesj63f90042020-01-17 09:53:55 +00002340 if db_ns_relations:
2341 for r in db_ns_relations:
2342 # check if this VCA is in the relation
2343 if my_vca.get('member-vnf-index') in\
2344 (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2345 ns_relations.append(r)
2346
2347 # read all vnf-configuration relations
2348 vnf_relations = list()
2349 db_vnfd_list = db_nsr.get('vnfd-id')
2350 if db_vnfd_list:
2351 for vnfd in db_vnfd_list:
2352 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
2353 db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
2354 if db_vnf_relations:
2355 for r in db_vnf_relations:
2356 # check if this VCA is in the relation
2357 if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2358 vnf_relations.append(r)
2359
2360 # if no relations, terminate
2361 if not ns_relations and not vnf_relations:
2362 self.logger.debug(logging_text + ' No relations')
2363 return True
2364
2365 self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))
2366
2367 # add all relations
2368 start = time()
2369 while True:
2370 # check timeout
2371 now = time()
2372 if now - start >= timeout:
2373 self.logger.error(logging_text + ' : timeout adding relations')
2374 return False
2375
2376 # reload nsr from database (we need to update record: _admin.deloyed.VCA)
2377 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2378
2379 # for each defined NS relation, find the VCA's related
tierno9984d0e2020-09-14 12:11:32 +00002380 for r in ns_relations.copy():
quilesj63f90042020-01-17 09:53:55 +00002381 from_vca_ee_id = None
2382 to_vca_ee_id = None
2383 from_vca_endpoint = None
2384 to_vca_endpoint = None
2385 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2386 for vca in vca_list:
2387 if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
2388 and vca.get('config_sw_installed'):
2389 from_vca_ee_id = vca.get('ee_id')
2390 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2391 if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
2392 and vca.get('config_sw_installed'):
2393 to_vca_ee_id = vca.get('ee_id')
2394 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2395 if from_vca_ee_id and to_vca_ee_id:
2396 # add relation
tierno588547c2020-07-01 15:30:20 +00002397 await self.vca_map[vca_type].add_relation(
quilesj63f90042020-01-17 09:53:55 +00002398 ee_id_1=from_vca_ee_id,
2399 ee_id_2=to_vca_ee_id,
2400 endpoint_1=from_vca_endpoint,
2401 endpoint_2=to_vca_endpoint)
2402 # remove entry from relations list
2403 ns_relations.remove(r)
2404 else:
2405 # check failed peers
2406 try:
2407 vca_status_list = db_nsr.get('configurationStatus')
2408 if vca_status_list:
2409 for i in range(len(vca_list)):
2410 vca = vca_list[i]
2411 vca_status = vca_status_list[i]
2412 if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
2413 if vca_status.get('status') == 'BROKEN':
2414 # peer broken: remove relation from list
2415 ns_relations.remove(r)
2416 if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
2417 if vca_status.get('status') == 'BROKEN':
2418 # peer broken: remove relation from list
2419 ns_relations.remove(r)
2420 except Exception:
2421 # ignore
2422 pass
2423
2424 # for each defined VNF relation, find the VCA's related
tierno9984d0e2020-09-14 12:11:32 +00002425 for r in vnf_relations.copy():
quilesj63f90042020-01-17 09:53:55 +00002426 from_vca_ee_id = None
2427 to_vca_ee_id = None
2428 from_vca_endpoint = None
2429 to_vca_endpoint = None
2430 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2431 for vca in vca_list:
David Garciab3972b92020-09-09 15:40:44 +02002432 key_to_check = "vdu_id"
2433 if vca.get("vdu_id") is None:
2434 key_to_check = "vnfd_id"
2435 if vca.get(key_to_check) == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
quilesj63f90042020-01-17 09:53:55 +00002436 from_vca_ee_id = vca.get('ee_id')
2437 from_vca_endpoint = r.get('entities')[0].get('endpoint')
David Garciab3972b92020-09-09 15:40:44 +02002438 if vca.get(key_to_check) == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
quilesj63f90042020-01-17 09:53:55 +00002439 to_vca_ee_id = vca.get('ee_id')
2440 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2441 if from_vca_ee_id and to_vca_ee_id:
2442 # add relation
tierno588547c2020-07-01 15:30:20 +00002443 await self.vca_map[vca_type].add_relation(
quilesj63f90042020-01-17 09:53:55 +00002444 ee_id_1=from_vca_ee_id,
2445 ee_id_2=to_vca_ee_id,
2446 endpoint_1=from_vca_endpoint,
2447 endpoint_2=to_vca_endpoint)
2448 # remove entry from relations list
2449 vnf_relations.remove(r)
2450 else:
2451 # check failed peers
2452 try:
2453 vca_status_list = db_nsr.get('configurationStatus')
2454 if vca_status_list:
2455 for i in range(len(vca_list)):
2456 vca = vca_list[i]
2457 vca_status = vca_status_list[i]
2458 if vca.get('vdu_id') == r.get('entities')[0].get('id'):
2459 if vca_status.get('status') == 'BROKEN':
2460 # peer broken: remove relation from list
David Garciad85490b2020-08-25 13:17:25 +02002461 vnf_relations.remove(r)
quilesj63f90042020-01-17 09:53:55 +00002462 if vca.get('vdu_id') == r.get('entities')[1].get('id'):
2463 if vca_status.get('status') == 'BROKEN':
2464 # peer broken: remove relation from list
David Garciad85490b2020-08-25 13:17:25 +02002465 vnf_relations.remove(r)
quilesj63f90042020-01-17 09:53:55 +00002466 except Exception:
2467 # ignore
2468 pass
2469
2470 # wait for next try
2471 await asyncio.sleep(5.0)
2472
2473 if not ns_relations and not vnf_relations:
2474 self.logger.debug('Relations added')
2475 break
2476
2477 return True
2478
2479 except Exception as e:
2480 self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
2481 return False
2482
tiernof24bcdd2020-09-21 14:05:39 +00002483 async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdud: dict,
lloretgalleg80ad9212020-07-08 07:53:22 +00002484 vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600):
2485
tiernob9018152020-04-16 14:18:24 +00002486 try:
lloretgalleg80ad9212020-07-08 07:53:22 +00002487 k8sclustertype = k8s_instance_info["k8scluster-type"]
2488 # Instantiate kdu
2489 db_dict_install = {"collection": "nsrs",
2490 "filter": {"_id": nsr_id},
2491 "path": nsr_db_path}
2492
2493 kdu_instance = await self.k8scluster_map[k8sclustertype].install(
2494 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2495 kdu_model=k8s_instance_info["kdu-model"],
2496 atomic=True,
2497 params=k8params,
2498 db_dict=db_dict_install,
2499 timeout=timeout,
2500 kdu_name=k8s_instance_info["kdu-name"],
2501 namespace=k8s_instance_info["namespace"])
2502 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})
2503
2504 # Obtain services to obtain management service ip
2505 services = await self.k8scluster_map[k8sclustertype].get_services(
2506 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2507 kdu_instance=kdu_instance,
2508 namespace=k8s_instance_info["namespace"])
2509
2510 # Obtain management service info (if exists)
tiernof24bcdd2020-09-21 14:05:39 +00002511 vnfr_update_dict = {}
lloretgalleg80ad9212020-07-08 07:53:22 +00002512 if services:
tiernof24bcdd2020-09-21 14:05:39 +00002513 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
lloretgalleg80ad9212020-07-08 07:53:22 +00002514 mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")]
2515 for mgmt_service in mgmt_services:
2516 for service in services:
2517 if service["name"].startswith(mgmt_service["name"]):
2518 # Mgmt service found, Obtain service ip
2519 ip = service.get("external_ip", service.get("cluster_ip"))
2520 if isinstance(ip, list) and len(ip) == 1:
2521 ip = ip[0]
2522
2523 vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip
2524
2525 # Check if must update also mgmt ip at the vnf
2526 service_external_cp = mgmt_service.get("external-connection-point-ref")
2527 if service_external_cp:
2528 if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp:
2529 vnfr_update_dict["ip-address"] = ip
2530
2531 break
2532 else:
2533 self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"]))
2534
tiernof24bcdd2020-09-21 14:05:39 +00002535 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
2536 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
lloretgalleg80ad9212020-07-08 07:53:22 +00002537
Dominik Fleischmanndd27fd22020-08-19 12:17:51 +02002538 kdu_config = kdud.get("kdu-configuration")
2539 if kdu_config and kdu_config.get("initial-config-primitive") and kdu_config.get("juju") is None:
2540 initial_config_primitive_list = kdu_config.get("initial-config-primitive")
2541 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
2542
2543 for initial_config_primitive in initial_config_primitive_list:
2544 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, {})
2545
2546 await asyncio.wait_for(
2547 self.k8scluster_map[k8sclustertype].exec_primitive(
2548 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2549 kdu_instance=kdu_instance,
2550 primitive_name=initial_config_primitive["name"],
2551 params=primitive_params_, db_dict={}),
2552 timeout=timeout)
2553
tiernob9018152020-04-16 14:18:24 +00002554 except Exception as e:
lloretgalleg80ad9212020-07-08 07:53:22 +00002555 # Prepare update db with error and raise exception
tiernob9018152020-04-16 14:18:24 +00002556 try:
lloretgalleg80ad9212020-07-08 07:53:22 +00002557 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)})
tiernof24bcdd2020-09-21 14:05:39 +00002558 self.update_db_2("vnfrs", vnfr_data.get("_id"), {"kdur.{}.status".format(kdu_index): "ERROR"})
tiernob9018152020-04-16 14:18:24 +00002559 except Exception:
lloretgalleg80ad9212020-07-08 07:53:22 +00002560 # ignore to keep original exception
tiernob9018152020-04-16 14:18:24 +00002561 pass
lloretgalleg80ad9212020-07-08 07:53:22 +00002562 # reraise original error
2563 raise
2564
2565 return kdu_instance
tiernob9018152020-04-16 14:18:24 +00002566
tiernoe876f672020-02-13 14:34:48 +00002567 async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
calvinosanch9f9c6f22019-11-04 13:37:39 +01002568 # Launch kdus if present in the descriptor
tierno626e0152019-11-29 14:16:16 +00002569
2570 k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}}
2571
tierno430008e2020-07-20 09:05:51 +00002572 async def _get_cluster_id(cluster_id, cluster_type):
tierno626e0152019-11-29 14:16:16 +00002573 nonlocal k8scluster_id_2_uuic
2574 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
2575 return k8scluster_id_2_uuic[cluster_type][cluster_id]
2576
tierno430008e2020-07-20 09:05:51 +00002577 # check if K8scluster is creating and wait look if previous tasks in process
2578 task_name, task_dependency = self.lcm_tasks.lookfor_related("k8scluster", cluster_id)
2579 if task_dependency:
2580 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(task_name, cluster_id)
2581 self.logger.debug(logging_text + text)
2582 await asyncio.wait(task_dependency, timeout=3600)
2583
tierno626e0152019-11-29 14:16:16 +00002584 db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
2585 if not db_k8scluster:
2586 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
tierno430008e2020-07-20 09:05:51 +00002587
tierno626e0152019-11-29 14:16:16 +00002588 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
2589 if not k8s_id:
tierno923e16c2020-07-14 10:46:57 +00002590 raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id,
2591 cluster_type))
tierno626e0152019-11-29 14:16:16 +00002592 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
2593 return k8s_id
2594
2595 logging_text += "Deploy kdus: "
tiernoe876f672020-02-13 14:34:48 +00002596 step = ""
calvinosanch9f9c6f22019-11-04 13:37:39 +01002597 try:
tierno626e0152019-11-29 14:16:16 +00002598 db_nsr_update = {"_admin.deployed.K8s": []}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002599 self.update_db_2("nsrs", nsr_id, db_nsr_update)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002600
tierno626e0152019-11-29 14:16:16 +00002601 index = 0
tiernoe876f672020-02-13 14:34:48 +00002602 updated_cluster_list = []
2603
tierno626e0152019-11-29 14:16:16 +00002604 for vnfr_data in db_vnfrs.values():
lloretgalleg80ad9212020-07-08 07:53:22 +00002605 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
2606 # Step 0: Prepare and set parameters
tierno626e0152019-11-29 14:16:16 +00002607 desc_params = self._format_additional_params(kdur.get("additionalParams"))
quilesjacde94f2020-01-23 10:07:08 +00002608 vnfd_id = vnfr_data.get('vnfd-id')
lloretgalleg80ad9212020-07-08 07:53:22 +00002609 kdud = next(kdud for kdud in db_vnfds[vnfd_id]["kdu"] if kdud["name"] == kdur["kdu-name"])
tiernode1584f2020-04-07 09:07:33 +00002610 namespace = kdur.get("k8s-namespace")
tierno626e0152019-11-29 14:16:16 +00002611 if kdur.get("helm-chart"):
2612 kdumodel = kdur["helm-chart"]
tiernoe876f672020-02-13 14:34:48 +00002613 k8sclustertype = "helm-chart"
tierno626e0152019-11-29 14:16:16 +00002614 elif kdur.get("juju-bundle"):
2615 kdumodel = kdur["juju-bundle"]
tiernoe876f672020-02-13 14:34:48 +00002616 k8sclustertype = "juju-bundle"
tierno626e0152019-11-29 14:16:16 +00002617 else:
tiernoe876f672020-02-13 14:34:48 +00002618 raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
2619 "juju-bundle. Maybe an old NBI version is running".
2620 format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
quilesjacde94f2020-01-23 10:07:08 +00002621 # check if kdumodel is a file and exists
2622 try:
tierno51183952020-04-03 15:48:18 +00002623 storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
2624 if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts
2625 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
Dominik Fleischmann010c0e72020-05-18 15:19:11 +02002626 filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
tierno51183952020-04-03 15:48:18 +00002627 kdumodel)
2628 if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
2629 kdumodel = self.fs.path + filename
2630 except (asyncio.TimeoutError, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00002631 raise
2632 except Exception: # it is not a file
quilesjacde94f2020-01-23 10:07:08 +00002633 pass
lloretgallegedc5f332020-02-20 11:50:50 +01002634
tiernoe876f672020-02-13 14:34:48 +00002635 k8s_cluster_id = kdur["k8s-cluster"]["id"]
2636 step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
tierno430008e2020-07-20 09:05:51 +00002637 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
lloretgallegedc5f332020-02-20 11:50:50 +01002638
lloretgalleg80ad9212020-07-08 07:53:22 +00002639 # Synchronize repos
tiernoe876f672020-02-13 14:34:48 +00002640 if k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list:
2641 del_repo_list, added_repo_dict = await asyncio.ensure_future(
2642 self.k8sclusterhelm.synchronize_repos(cluster_uuid=cluster_uuid))
2643 if del_repo_list or added_repo_dict:
2644 unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
2645 updated = {'_admin.helm_charts_added.' +
2646 item: name for item, name in added_repo_dict.items()}
2647 self.logger.debug(logging_text + "repos synchronized on k8s cluster '{}' to_delete: {}, "
2648 "to_add: {}".format(k8s_cluster_id, del_repo_list,
2649 added_repo_dict))
2650 self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
2651 updated_cluster_list.append(cluster_uuid)
lloretgallegedc5f332020-02-20 11:50:50 +01002652
lloretgalleg80ad9212020-07-08 07:53:22 +00002653 # Instantiate kdu
tiernoe876f672020-02-13 14:34:48 +00002654 step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
2655 kdur["kdu-name"], k8s_cluster_id)
lloretgalleg80ad9212020-07-08 07:53:22 +00002656 k8s_instance_info = {"kdu-instance": None,
2657 "k8scluster-uuid": cluster_uuid,
2658 "k8scluster-type": k8sclustertype,
2659 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
2660 "kdu-name": kdur["kdu-name"],
2661 "kdu-model": kdumodel,
2662 "namespace": namespace}
tiernob9018152020-04-16 14:18:24 +00002663 db_path = "_admin.deployed.K8s.{}".format(index)
lloretgalleg80ad9212020-07-08 07:53:22 +00002664 db_nsr_update[db_path] = k8s_instance_info
tierno626e0152019-11-29 14:16:16 +00002665 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tierno626e0152019-11-29 14:16:16 +00002666
tiernoa2143262020-03-27 16:20:40 +00002667 task = asyncio.ensure_future(
tiernof24bcdd2020-09-21 14:05:39 +00002668 self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, db_vnfds[vnfd_id],
lloretgalleg80ad9212020-07-08 07:53:22 +00002669 k8s_instance_info, k8params=desc_params, timeout=600))
tiernoe876f672020-02-13 14:34:48 +00002670 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
tiernoa2143262020-03-27 16:20:40 +00002671 task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
tiernoe876f672020-02-13 14:34:48 +00002672
tierno626e0152019-11-29 14:16:16 +00002673 index += 1
quilesjdd799ac2020-01-23 16:31:11 +00002674
tiernoe876f672020-02-13 14:34:48 +00002675 except (LcmException, asyncio.CancelledError):
2676 raise
calvinosanch9f9c6f22019-11-04 13:37:39 +01002677 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00002678 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
2679 if isinstance(e, (N2VCException, DbException)):
2680 self.logger.error(logging_text + msg)
2681 else:
2682 self.logger.critical(logging_text + msg, exc_info=True)
quilesjdd799ac2020-01-23 16:31:11 +00002683 raise LcmException(msg)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002684 finally:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002685 if db_nsr_update:
2686 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00002687
quilesj7e13aeb2019-10-08 13:34:55 +02002688 def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002689 kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
tiernoe876f672020-02-13 14:34:48 +00002690 base_folder, task_instantiation_info, stage):
quilesj7e13aeb2019-10-08 13:34:55 +02002691 # launch instantiate_N2VC in a asyncio task and register task object
2692 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
2693 # if not found, create one entry and update database
quilesj7e13aeb2019-10-08 13:34:55 +02002694 # fill db_nsr._admin.deployed.VCA.<index>
tierno588547c2020-07-01 15:30:20 +00002695
2696 self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
2697 if descriptor_config.get("juju"): # There is one execution envioronment of type juju
2698 ee_list = [descriptor_config]
2699 elif descriptor_config.get("execution-environment-list"):
2700 ee_list = descriptor_config.get("execution-environment-list")
2701 else: # other types as script are not supported
2702 ee_list = []
2703
2704 for ee_item in ee_list:
2705 self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'),
2706 ee_item.get("helm-chart")))
tierno4fa7f8e2020-07-08 15:33:55 +00002707 ee_descriptor_id = ee_item.get("id")
tierno588547c2020-07-01 15:30:20 +00002708 if ee_item.get("juju"):
2709 vca_name = ee_item['juju'].get('charm')
2710 vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm"
2711 if ee_item['juju'].get('cloud') == "k8s":
2712 vca_type = "k8s_proxy_charm"
2713 elif ee_item['juju'].get('proxy') is False:
2714 vca_type = "native_charm"
2715 elif ee_item.get("helm-chart"):
2716 vca_name = ee_item['helm-chart']
2717 vca_type = "helm"
2718 else:
2719 self.logger.debug(logging_text + "skipping non juju neither charm configuration")
quilesj7e13aeb2019-10-08 13:34:55 +02002720 continue
quilesj3655ae02019-12-12 16:08:35 +00002721
tierno588547c2020-07-01 15:30:20 +00002722 vca_index = -1
2723 for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
2724 if not vca_deployed:
2725 continue
2726 if vca_deployed.get("member-vnf-index") == member_vnf_index and \
2727 vca_deployed.get("vdu_id") == vdu_id and \
2728 vca_deployed.get("kdu_name") == kdu_name and \
tierno4fa7f8e2020-07-08 15:33:55 +00002729 vca_deployed.get("vdu_count_index", 0) == vdu_index and \
2730 vca_deployed.get("ee_descriptor_id") == ee_descriptor_id:
tierno588547c2020-07-01 15:30:20 +00002731 break
2732 else:
2733 # not found, create one.
tierno4fa7f8e2020-07-08 15:33:55 +00002734 target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
2735 if vdu_id:
2736 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
2737 elif kdu_name:
2738 target += "/kdu/{}".format(kdu_name)
tierno588547c2020-07-01 15:30:20 +00002739 vca_deployed = {
tierno4fa7f8e2020-07-08 15:33:55 +00002740 "target_element": target,
2741 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
tierno588547c2020-07-01 15:30:20 +00002742 "member-vnf-index": member_vnf_index,
2743 "vdu_id": vdu_id,
2744 "kdu_name": kdu_name,
2745 "vdu_count_index": vdu_index,
2746 "operational-status": "init", # TODO revise
2747 "detailed-status": "", # TODO revise
2748 "step": "initial-deploy", # TODO revise
2749 "vnfd_id": vnfd_id,
2750 "vdu_name": vdu_name,
tierno4fa7f8e2020-07-08 15:33:55 +00002751 "type": vca_type,
2752 "ee_descriptor_id": ee_descriptor_id
tierno588547c2020-07-01 15:30:20 +00002753 }
2754 vca_index += 1
quilesj3655ae02019-12-12 16:08:35 +00002755
tierno588547c2020-07-01 15:30:20 +00002756 # create VCA and configurationStatus in db
2757 db_dict = {
2758 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
2759 "configurationStatus.{}".format(vca_index): dict()
2760 }
2761 self.update_db_2("nsrs", nsr_id, db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02002762
tierno588547c2020-07-01 15:30:20 +00002763 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
2764
2765 # Launch task
2766 task_n2vc = asyncio.ensure_future(
2767 self.instantiate_N2VC(
2768 logging_text=logging_text,
2769 vca_index=vca_index,
2770 nsi_id=nsi_id,
2771 db_nsr=db_nsr,
2772 db_vnfr=db_vnfr,
2773 vdu_id=vdu_id,
2774 kdu_name=kdu_name,
2775 vdu_index=vdu_index,
2776 deploy_params=deploy_params,
2777 config_descriptor=descriptor_config,
2778 base_folder=base_folder,
2779 nslcmop_id=nslcmop_id,
2780 stage=stage,
2781 vca_type=vca_type,
tierno89f82902020-07-03 14:52:28 +00002782 vca_name=vca_name,
2783 ee_config_descriptor=ee_item
tierno588547c2020-07-01 15:30:20 +00002784 )
quilesj7e13aeb2019-10-08 13:34:55 +02002785 )
tierno588547c2020-07-01 15:30:20 +00002786 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
2787 task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
2788 member_vnf_index or "", vdu_id or "")
tiernobaa51102018-12-14 13:16:18 +00002789
tiernoc9556972019-07-05 15:25:25 +00002790 @staticmethod
tierno4fa7f8e2020-07-08 15:33:55 +00002791 def _get_terminate_config_primitive(primitive_list, vca_deployed):
2792 """ Get a sorted terminate config primitive list. In case ee_descriptor_id is present at vca_deployed,
2793 it get only those primitives for this execution envirom"""
2794
2795 primitive_list = primitive_list or []
2796 # filter primitives by ee_descriptor_id
2797 ee_descriptor_id = vca_deployed.get("ee_descriptor_id")
2798 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
2799
2800 if primitive_list:
2801 primitive_list.sort(key=lambda val: int(val['seq']))
2802
2803 return primitive_list
kuuse0ca67472019-05-13 15:59:27 +02002804
2805 @staticmethod
2806 def _create_nslcmop(nsr_id, operation, params):
2807 """
2808 Creates a ns-lcm-opp content to be stored at database.
2809 :param nsr_id: internal id of the instance
2810 :param operation: instantiate, terminate, scale, action, ...
2811 :param params: user parameters for the operation
2812 :return: dictionary following SOL005 format
2813 """
2814 # Raise exception if invalid arguments
2815 if not (nsr_id and operation and params):
2816 raise LcmException(
2817 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
2818 now = time()
2819 _id = str(uuid4())
2820 nslcmop = {
2821 "id": _id,
2822 "_id": _id,
2823 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
2824 "operationState": "PROCESSING",
2825 "statusEnteredTime": now,
2826 "nsInstanceId": nsr_id,
2827 "lcmOperationType": operation,
2828 "startTime": now,
2829 "isAutomaticInvocation": False,
2830 "operationParams": params,
2831 "isCancelPending": False,
2832 "links": {
2833 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
2834 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
2835 }
2836 }
2837 return nslcmop
2838
calvinosanch9f9c6f22019-11-04 13:37:39 +01002839 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00002840 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002841 for key, value in params.items():
2842 if str(value).startswith("!!yaml "):
2843 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01002844 return params
2845
kuuse8b998e42019-07-30 15:22:16 +02002846 def _get_terminate_primitive_params(self, seq, vnf_index):
2847 primitive = seq.get('name')
2848 primitive_params = {}
2849 params = {
2850 "member_vnf_index": vnf_index,
2851 "primitive": primitive,
2852 "primitive_params": primitive_params,
2853 }
2854 desc_params = {}
2855 return self._map_primitive_params(seq, params, desc_params)
2856
kuuseac3a8882019-10-03 10:48:06 +02002857 # sub-operations
2858
tierno51183952020-04-03 15:48:18 +00002859 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
2860 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
2861 if op.get('operationState') == 'COMPLETED':
kuuseac3a8882019-10-03 10:48:06 +02002862 # b. Skip sub-operation
2863 # _ns_execute_primitive() or RO.create_action() will NOT be executed
2864 return self.SUBOPERATION_STATUS_SKIP
2865 else:
tierno7c4e24c2020-05-13 08:41:35 +00002866 # c. retry executing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02002867 # The sub-operation exists, and operationState != 'COMPLETED'
tierno7c4e24c2020-05-13 08:41:35 +00002868 # Update operationState = 'PROCESSING' to indicate a retry.
kuuseac3a8882019-10-03 10:48:06 +02002869 operationState = 'PROCESSING'
2870 detailed_status = 'In progress'
2871 self._update_suboperation_status(
2872 db_nslcmop, op_index, operationState, detailed_status)
2873 # Return the sub-operation index
2874 # _ns_execute_primitive() or RO.create_action() will be called from scale()
2875 # with arguments extracted from the sub-operation
2876 return op_index
2877
2878 # Find a sub-operation where all keys in a matching dictionary must match
2879 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
2880 def _find_suboperation(self, db_nslcmop, match):
tierno7c4e24c2020-05-13 08:41:35 +00002881 if db_nslcmop and match:
kuuseac3a8882019-10-03 10:48:06 +02002882 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
2883 for i, op in enumerate(op_list):
2884 if all(op.get(k) == match[k] for k in match):
2885 return i
2886 return self.SUBOPERATION_STATUS_NOT_FOUND
2887
2888 # Update status for a sub-operation given its index
2889 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
2890 # Update DB for HA tasks
2891 q_filter = {'_id': db_nslcmop['_id']}
2892 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
2893 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
2894 self.db.set_one("nslcmops",
2895 q_filter=q_filter,
2896 update_dict=update_dict,
2897 fail_on_empty=False)
2898
2899 # Add sub-operation, return the index of the added sub-operation
2900 # Optionally, set operationState, detailed-status, and operationType
2901 # Status and type are currently set for 'scale' sub-operations:
2902 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
2903 # 'detailed-status' : status message
2904 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
2905 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
quilesj7e13aeb2019-10-08 13:34:55 +02002906 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
2907 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
kuuseac3a8882019-10-03 10:48:06 +02002908 RO_nsr_id=None, RO_scaling_info=None):
tiernoe876f672020-02-13 14:34:48 +00002909 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02002910 return self.SUBOPERATION_STATUS_NOT_FOUND
2911 # Get the "_admin.operations" list, if it exists
2912 db_nslcmop_admin = db_nslcmop.get('_admin', {})
2913 op_list = db_nslcmop_admin.get('operations')
2914 # Create or append to the "_admin.operations" list
kuuse8b998e42019-07-30 15:22:16 +02002915 new_op = {'member_vnf_index': vnf_index,
2916 'vdu_id': vdu_id,
2917 'vdu_count_index': vdu_count_index,
2918 'primitive': primitive,
2919 'primitive_params': mapped_primitive_params}
kuuseac3a8882019-10-03 10:48:06 +02002920 if operationState:
2921 new_op['operationState'] = operationState
2922 if detailed_status:
2923 new_op['detailed-status'] = detailed_status
2924 if operationType:
2925 new_op['lcmOperationType'] = operationType
2926 if RO_nsr_id:
2927 new_op['RO_nsr_id'] = RO_nsr_id
2928 if RO_scaling_info:
2929 new_op['RO_scaling_info'] = RO_scaling_info
2930 if not op_list:
2931 # No existing operations, create key 'operations' with current operation as first list element
2932 db_nslcmop_admin.update({'operations': [new_op]})
2933 op_list = db_nslcmop_admin.get('operations')
2934 else:
2935 # Existing operations, append operation to list
2936 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02002937
kuuseac3a8882019-10-03 10:48:06 +02002938 db_nslcmop_update = {'_admin.operations': op_list}
2939 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
2940 op_index = len(op_list) - 1
2941 return op_index
2942
2943 # Helper methods for scale() sub-operations
2944
2945 # pre-scale/post-scale:
2946 # Check for 3 different cases:
2947 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
2948 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
tierno7c4e24c2020-05-13 08:41:35 +00002949 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
quilesj7e13aeb2019-10-08 13:34:55 +02002950 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
2951 operationType, RO_nsr_id=None, RO_scaling_info=None):
kuuseac3a8882019-10-03 10:48:06 +02002952 # Find this sub-operation
tierno7c4e24c2020-05-13 08:41:35 +00002953 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002954 operationType = 'SCALE-RO'
2955 match = {
2956 'member_vnf_index': vnf_index,
2957 'RO_nsr_id': RO_nsr_id,
2958 'RO_scaling_info': RO_scaling_info,
2959 }
2960 else:
2961 match = {
2962 'member_vnf_index': vnf_index,
2963 'primitive': vnf_config_primitive,
2964 'primitive_params': primitive_params,
2965 'lcmOperationType': operationType
2966 }
2967 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00002968 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02002969 # a. New sub-operation
2970 # The sub-operation does not exist, add it.
2971 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
2972 # The following parameters are set to None for all kind of scaling:
2973 vdu_id = None
2974 vdu_count_index = None
2975 vdu_name = None
tierno51183952020-04-03 15:48:18 +00002976 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002977 vnf_config_primitive = None
2978 primitive_params = None
2979 else:
2980 RO_nsr_id = None
2981 RO_scaling_info = None
2982 # Initial status for sub-operation
2983 operationState = 'PROCESSING'
2984 detailed_status = 'In progress'
2985 # Add sub-operation for pre/post-scaling (zero or more operations)
2986 self._add_suboperation(db_nslcmop,
2987 vnf_index,
2988 vdu_id,
2989 vdu_count_index,
2990 vdu_name,
2991 vnf_config_primitive,
2992 primitive_params,
2993 operationState,
2994 detailed_status,
2995 operationType,
2996 RO_nsr_id,
2997 RO_scaling_info)
2998 return self.SUBOPERATION_STATUS_NEW
2999 else:
3000 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3001 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00003002 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02003003
preethika.pdf7d8e02019-12-10 13:10:48 +00003004 # Function to return execution_environment id
3005
3006 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00003007 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00003008 for vca in vca_deployed_list:
3009 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3010 return vca["ee_id"]
3011
tierno588547c2020-07-01 15:30:20 +00003012 async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
3013 vca_index, destroy_ee=True, exec_primitives=True):
tiernoe876f672020-02-13 14:34:48 +00003014 """
3015 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
3016 :param logging_text:
3017 :param db_nslcmop:
3018 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
3019 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
3020 :param vca_index: index in the database _admin.deployed.VCA
3021 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
tierno588547c2020-07-01 15:30:20 +00003022 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
3023 not executed properly
tiernoe876f672020-02-13 14:34:48 +00003024 :return: None or exception
3025 """
tiernoe876f672020-02-13 14:34:48 +00003026
tierno588547c2020-07-01 15:30:20 +00003027 self.logger.debug(
3028 logging_text + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
3029 vca_index, vca_deployed, config_descriptor, destroy_ee
3030 )
3031 )
3032
3033 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
3034
3035 # execute terminate_primitives
3036 if exec_primitives:
tierno4fa7f8e2020-07-08 15:33:55 +00003037 terminate_primitives = self._get_terminate_config_primitive(
3038 config_descriptor.get("terminate-config-primitive"), vca_deployed)
tierno588547c2020-07-01 15:30:20 +00003039 vdu_id = vca_deployed.get("vdu_id")
3040 vdu_count_index = vca_deployed.get("vdu_count_index")
3041 vdu_name = vca_deployed.get("vdu_name")
3042 vnf_index = vca_deployed.get("member-vnf-index")
3043 if terminate_primitives and vca_deployed.get("needed_terminate"):
tierno588547c2020-07-01 15:30:20 +00003044 for seq in terminate_primitives:
3045 # For each sequence in list, get primitive and call _ns_execute_primitive()
3046 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
3047 vnf_index, seq.get("name"))
3048 self.logger.debug(logging_text + step)
3049 # Create the primitive for each sequence, i.e. "primitive": "touch"
3050 primitive = seq.get('name')
3051 mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)
tierno588547c2020-07-01 15:30:20 +00003052
3053 # Add sub-operation
3054 self._add_suboperation(db_nslcmop,
3055 vnf_index,
3056 vdu_id,
3057 vdu_count_index,
3058 vdu_name,
3059 primitive,
3060 mapped_primitive_params)
3061 # Sub-operations: Call _ns_execute_primitive() instead of action()
3062 try:
3063 result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
3064 mapped_primitive_params,
3065 vca_type=vca_type)
3066 except LcmException:
3067 # this happens when VCA is not deployed. In this case it is not needed to terminate
3068 continue
3069 result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
3070 if result not in result_ok:
3071 raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
3072 "error {}".format(seq.get("name"), vnf_index, result_detail))
3073 # set that this VCA do not need terminated
3074 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
3075 self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})
tiernoe876f672020-02-13 14:34:48 +00003076
tierno89f82902020-07-03 14:52:28 +00003077 if vca_deployed.get("prometheus_jobs") and self.prometheus:
3078 await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])
3079
tiernoe876f672020-02-13 14:34:48 +00003080 if destroy_ee:
tierno588547c2020-07-01 15:30:20 +00003081 await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"])
kuuse0ca67472019-05-13 15:59:27 +02003082
tierno51183952020-04-03 15:48:18 +00003083 async def _delete_all_N2VC(self, db_nsr: dict):
3084 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
3085 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00003086 try:
3087 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
3088 except N2VCNotFound: # already deleted. Skip
3089 pass
tierno51183952020-04-03 15:48:18 +00003090 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
quilesj3655ae02019-12-12 16:08:35 +00003091
    async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
        """
        Terminates a deployment from RO: deletes the ns from the VIM (polling RO until done), then the nsd and
        every vnfd that this deployment registered at RO. Progress is written to database as it advances.
        :param logging_text: prefix for log entries
        :param nsr_deployed: db_nsr._admin.deployed (contains the RO ids under key "RO")
        :param nsr_id: nsr database _id
        :param nslcmop_id: operation database _id, used to write detailed-status
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None. Raises LcmException if any deletion failed (other than not-found)
        """
        db_nsr_update = {}
        failed_detail = []  # accumulates error texts; raised together at the end
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # a pending delete action id may exist from a previous interrupted terminate
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
                                                                                                   ro_delete_action))
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action)

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        # deletion still in progress at RO
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # RO reports the delete action has completed
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
                    # write progress to database only when it changed, to avoid redundant updates
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if isinstance(e, ROclient.ROClientException) and e.http_code == 404:  # not found
                # already gone from RO: treat as success and clear the stored ids
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
            elif isinstance(e, ROclient.ROClientException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))

        # Delete nsd (only if ns deletion succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if isinstance(e, ROclient.ROClientException) and e.http_code == 404:  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
                elif isinstance(e, ROclient.ROClientException) and e.http_code == 409:  # conflict
                    failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete every vnfd registered at RO for this deployment
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id)
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if isinstance(e, ROclient.ROClientException) and e.http_code == 404:  # not found
                        db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                        self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
                    elif isinstance(e, ROclient.ROClientException) and e.http_code == 409:  # conflict
                        failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00003234
3235 async def terminate(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003236 # Try to lock HA task here
3237 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3238 if not task_is_locked_by_me:
3239 return
3240
tierno59d22d22018-09-25 18:10:19 +02003241 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
3242 self.logger.debug(logging_text + "Enter")
tiernoe876f672020-02-13 14:34:48 +00003243 timeout_ns_terminate = self.timeout_ns_terminate
tierno59d22d22018-09-25 18:10:19 +02003244 db_nsr = None
3245 db_nslcmop = None
tiernoa17d4f42020-04-28 09:59:23 +00003246 operation_params = None
tierno59d22d22018-09-25 18:10:19 +02003247 exc = None
tiernoe876f672020-02-13 14:34:48 +00003248 error_list = [] # annotates all failed error messages
tierno59d22d22018-09-25 18:10:19 +02003249 db_nslcmop_update = {}
tiernoc2564fe2019-01-28 16:18:56 +00003250 autoremove = False # autoremove after terminated
tiernoe876f672020-02-13 14:34:48 +00003251 tasks_dict_info = {}
3252 db_nsr_update = {}
3253 stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
3254 # ^ contains [stage, step, VIM-status]
tierno59d22d22018-09-25 18:10:19 +02003255 try:
kuused124bfe2019-06-18 12:09:24 +02003256 # wait for any previous tasks in process
3257 await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)
3258
tiernoe876f672020-02-13 14:34:48 +00003259 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
3260 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3261 operation_params = db_nslcmop.get("operationParams") or {}
3262 if operation_params.get("timeout_ns_terminate"):
3263 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
3264 stage[1] = "Getting nsr={} from db.".format(nsr_id)
3265 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3266
3267 db_nsr_update["operational-status"] = "terminating"
3268 db_nsr_update["config-status"] = "terminating"
quilesj4cda56b2019-12-05 10:02:20 +00003269 self._write_ns_status(
3270 nsr_id=nsr_id,
3271 ns_state="TERMINATING",
3272 current_operation="TERMINATING",
tiernoe876f672020-02-13 14:34:48 +00003273 current_operation_id=nslcmop_id,
3274 other_update=db_nsr_update
quilesj4cda56b2019-12-05 10:02:20 +00003275 )
quilesj3655ae02019-12-12 16:08:35 +00003276 self._write_op_status(
3277 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00003278 queuePosition=0,
3279 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00003280 )
tiernoe876f672020-02-13 14:34:48 +00003281 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
tierno59d22d22018-09-25 18:10:19 +02003282 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
3283 return
tierno59d22d22018-09-25 18:10:19 +02003284
tiernoe876f672020-02-13 14:34:48 +00003285 stage[1] = "Getting vnf descriptors from db."
3286 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
3287 db_vnfds_from_id = {}
3288 db_vnfds_from_member_index = {}
3289 # Loop over VNFRs
3290 for vnfr in db_vnfrs_list:
3291 vnfd_id = vnfr["vnfd-id"]
3292 if vnfd_id not in db_vnfds_from_id:
3293 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
3294 db_vnfds_from_id[vnfd_id] = vnfd
3295 db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]
calvinosanch9f9c6f22019-11-04 13:37:39 +01003296
tiernoe876f672020-02-13 14:34:48 +00003297 # Destroy individual execution environments when there are terminating primitives.
3298 # Rest of EE will be deleted at once
tierno588547c2020-07-01 15:30:20 +00003299 # TODO - check before calling _destroy_N2VC
3300 # if not operation_params.get("skip_terminate_primitives"):#
3301 # or not vca.get("needed_terminate"):
3302 stage[0] = "Stage 2/3 execute terminating primitives."
3303 self.logger.debug(logging_text + stage[0])
3304 stage[1] = "Looking execution environment that needs terminate."
3305 self.logger.debug(logging_text + stage[1])
tierno89f82902020-07-03 14:52:28 +00003306 # self.logger.debug("nsr_deployed: {}".format(nsr_deployed))
tierno588547c2020-07-01 15:30:20 +00003307 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
tierno588547c2020-07-01 15:30:20 +00003308 config_descriptor = None
3309 if not vca or not vca.get("ee_id"):
3310 continue
3311 if not vca.get("member-vnf-index"):
3312 # ns
3313 config_descriptor = db_nsr.get("ns-configuration")
3314 elif vca.get("vdu_id"):
3315 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3316 vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
3317 if vdud:
3318 config_descriptor = vdud.get("vdu-configuration")
3319 elif vca.get("kdu_name"):
3320 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3321 kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
3322 if kdud:
3323 config_descriptor = kdud.get("kdu-configuration")
3324 else:
3325 config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
tierno588547c2020-07-01 15:30:20 +00003326 vca_type = vca.get("type")
3327 exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
3328 vca.get("needed_terminate"))
tiernob010eb02020-08-07 06:36:38 +00003329 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
3330 # pending native charms
tiernoc6600ff2020-09-16 14:13:06 +00003331 destroy_ee = True if vca_type in ("helm", "native_charm") else False
3332 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
3333 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
tierno89f82902020-07-03 14:52:28 +00003334 task = asyncio.ensure_future(
3335 self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index,
3336 destroy_ee, exec_terminate_primitives))
tierno588547c2020-07-01 15:30:20 +00003337 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
tierno59d22d22018-09-25 18:10:19 +02003338
tierno588547c2020-07-01 15:30:20 +00003339 # wait for pending tasks of terminate primitives
3340 if tasks_dict_info:
tiernoc6600ff2020-09-16 14:13:06 +00003341 self.logger.debug(logging_text + 'Waiting for tasks {}'.format(list(tasks_dict_info.keys())))
tierno588547c2020-07-01 15:30:20 +00003342 error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
3343 min(self.timeout_charm_delete, timeout_ns_terminate),
3344 stage, nslcmop_id)
tiernoc6600ff2020-09-16 14:13:06 +00003345 tasks_dict_info.clear()
tierno588547c2020-07-01 15:30:20 +00003346 if error_list:
3347 return # raise LcmException("; ".join(error_list))
tierno82974b22018-11-27 21:55:36 +00003348
tiernoe876f672020-02-13 14:34:48 +00003349 # remove All execution environments at once
3350 stage[0] = "Stage 3/3 delete all."
quilesj3655ae02019-12-12 16:08:35 +00003351
tierno49676be2020-04-07 16:34:35 +00003352 if nsr_deployed.get("VCA"):
3353 stage[1] = "Deleting all execution environments."
3354 self.logger.debug(logging_text + stage[1])
3355 task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
3356 timeout=self.timeout_charm_delete))
3357 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
3358 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
tierno59d22d22018-09-25 18:10:19 +02003359
tiernoe876f672020-02-13 14:34:48 +00003360 # Delete from k8scluster
3361 stage[1] = "Deleting KDUs."
3362 self.logger.debug(logging_text + stage[1])
3363 # print(nsr_deployed)
3364 for kdu in get_iterable(nsr_deployed, "K8s"):
3365 if not kdu or not kdu.get("kdu-instance"):
3366 continue
3367 kdu_instance = kdu.get("kdu-instance")
tiernoa2143262020-03-27 16:20:40 +00003368 if kdu.get("k8scluster-type") in self.k8scluster_map:
tiernoe876f672020-02-13 14:34:48 +00003369 task_delete_kdu_instance = asyncio.ensure_future(
tiernoa2143262020-03-27 16:20:40 +00003370 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
3371 cluster_uuid=kdu.get("k8scluster-uuid"),
3372 kdu_instance=kdu_instance))
tiernoe876f672020-02-13 14:34:48 +00003373 else:
3374 self.logger.error(logging_text + "Unknown k8s deployment type {}".
3375 format(kdu.get("k8scluster-type")))
3376 continue
3377 tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
tierno59d22d22018-09-25 18:10:19 +02003378
3379 # remove from RO
tiernoe876f672020-02-13 14:34:48 +00003380 stage[1] = "Deleting ns from VIM."
tierno69f0d382020-05-07 13:08:09 +00003381 if self.ng_ro:
3382 task_delete_ro = asyncio.ensure_future(
3383 self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
3384 else:
3385 task_delete_ro = asyncio.ensure_future(
3386 self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
tiernoe876f672020-02-13 14:34:48 +00003387 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
tierno59d22d22018-09-25 18:10:19 +02003388
tiernoe876f672020-02-13 14:34:48 +00003389 # rest of staff will be done at finally
3390
3391 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
3392 self.logger.error(logging_text + "Exit Exception {}".format(e))
3393 exc = e
3394 except asyncio.CancelledError:
3395 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
3396 exc = "Operation was cancelled"
3397 except Exception as e:
3398 exc = traceback.format_exc()
3399 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
3400 finally:
3401 if exc:
3402 error_list.append(str(exc))
tierno59d22d22018-09-25 18:10:19 +02003403 try:
tiernoe876f672020-02-13 14:34:48 +00003404 # wait for pending tasks
3405 if tasks_dict_info:
3406 stage[1] = "Waiting for terminate pending tasks."
3407 self.logger.debug(logging_text + stage[1])
3408 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
3409 stage, nslcmop_id)
3410 stage[1] = stage[2] = ""
3411 except asyncio.CancelledError:
3412 error_list.append("Cancelled")
3413 # TODO cancell all tasks
3414 except Exception as exc:
3415 error_list.append(str(exc))
3416 # update status at database
3417 if error_list:
3418 error_detail = "; ".join(error_list)
3419 # self.logger.error(logging_text + error_detail)
tierno50b41432020-08-11 11:20:13 +00003420 error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
3421 error_description_nsr = 'Operation: TERMINATING.{}, {}.'.format(nslcmop_id, stage[0])
tierno59d22d22018-09-25 18:10:19 +02003422
tierno59d22d22018-09-25 18:10:19 +02003423 db_nsr_update["operational-status"] = "failed"
tiernoa2143262020-03-27 16:20:40 +00003424 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00003425 db_nslcmop_update["detailed-status"] = error_detail
3426 nslcmop_operation_state = "FAILED"
3427 ns_state = "BROKEN"
tierno59d22d22018-09-25 18:10:19 +02003428 else:
tiernoa2143262020-03-27 16:20:40 +00003429 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00003430 error_description_nsr = error_description_nslcmop = None
3431 ns_state = "NOT_INSTANTIATED"
tierno59d22d22018-09-25 18:10:19 +02003432 db_nsr_update["operational-status"] = "terminated"
3433 db_nsr_update["detailed-status"] = "Done"
3434 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
3435 db_nslcmop_update["detailed-status"] = "Done"
tiernoe876f672020-02-13 14:34:48 +00003436 nslcmop_operation_state = "COMPLETED"
tierno59d22d22018-09-25 18:10:19 +02003437
tiernoe876f672020-02-13 14:34:48 +00003438 if db_nsr:
3439 self._write_ns_status(
3440 nsr_id=nsr_id,
3441 ns_state=ns_state,
3442 current_operation="IDLE",
3443 current_operation_id=None,
3444 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00003445 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00003446 other_update=db_nsr_update
3447 )
tiernoa17d4f42020-04-28 09:59:23 +00003448 self._write_op_status(
3449 op_id=nslcmop_id,
3450 stage="",
3451 error_message=error_description_nslcmop,
3452 operation_state=nslcmop_operation_state,
3453 other_update=db_nslcmop_update,
3454 )
lloretgalleg32ead8c2020-07-22 10:13:46 +00003455 if ns_state == "NOT_INSTANTIATED":
3456 try:
3457 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "NOT_INSTANTIATED"})
3458 except DbException as e:
3459 self.logger.warn(logging_text + 'Error writing VNFR status for nsr-id-ref: {} -> {}'.
3460 format(nsr_id, e))
tiernoa17d4f42020-04-28 09:59:23 +00003461 if operation_params:
tiernoe876f672020-02-13 14:34:48 +00003462 autoremove = operation_params.get("autoremove", False)
tierno59d22d22018-09-25 18:10:19 +02003463 if nslcmop_operation_state:
3464 try:
3465 await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tiernoc2564fe2019-01-28 16:18:56 +00003466 "operationState": nslcmop_operation_state,
3467 "autoremove": autoremove},
tierno8a518872018-12-21 13:42:14 +00003468 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003469 except Exception as e:
3470 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02003471
tierno59d22d22018-09-25 18:10:19 +02003472 self.logger.debug(logging_text + "Exit")
3473 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
3474
    async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
        """
        Waits for a set of asyncio tasks to finish, updating the operation progress on database as they complete
        and collecting the errors of the failed ones.
        :param logging_text: prefix for log entries
        :param created_tasks_info: dict with each task as key and a descriptive text as value
        :param timeout: maximum seconds to wait for all the tasks (global, not per task)
        :param stage: list of str for detailed-status; index 1 is updated with "<done>/<total>" progress
        :param nslcmop_id: operation _id used to write status on database
        :param nsr_id: if provided, errorDescription/errorDetail are also written on this nsr record
        :return: list of error detail strings; empty if every task succeeded
        """
        time_start = time()
        error_detail_list = []  # full error texts (task description + exception)
        error_list = []  # shorter texts (task description only, except on timeout)
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
                                                     return_when=asyncio.FIRST_COMPLETED)
            num_done += len(done)
            if not done:   # Timeout
                # mark every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known exception types get a one-line log; unexpected ones include the traceback
                    if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException,
                                        K8sException)):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
                        self.logger.error(logging_text + created_tasks_info[task] + exc_traceback)
                else:
                    self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
                                                      "errorDetail": ". ".join(error_detail_list)})
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00003523
tiernoa0917882020-10-22 14:15:02 +00003524 @staticmethod
3525 def _map_primitive_params(primitive_desc, params, instantiation_params):
tiernoda964822019-01-14 15:53:47 +00003526 """
3527 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
3528 The default-value is used. If it is between < > it look for a value at instantiation_params
3529 :param primitive_desc: portion of VNFD/NSD that describes primitive
3530 :param params: Params provided by user
3531 :param instantiation_params: Instantiation params provided by user
3532 :return: a dictionary with the calculated params
3533 """
3534 calculated_params = {}
3535 for parameter in primitive_desc.get("parameter", ()):
3536 param_name = parameter["name"]
3537 if param_name in params:
3538 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00003539 elif "default-value" in parameter or "value" in parameter:
3540 if "value" in parameter:
3541 calculated_params[param_name] = parameter["value"]
3542 else:
3543 calculated_params[param_name] = parameter["default-value"]
3544 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
3545 and calculated_params[param_name].endswith(">"):
3546 if calculated_params[param_name][1:-1] in instantiation_params:
3547 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
tiernoda964822019-01-14 15:53:47 +00003548 else:
3549 raise LcmException("Parameter {} needed to execute primitive {} not provided".
tiernod8323042019-08-09 11:32:23 +00003550 format(calculated_params[param_name], primitive_desc["name"]))
tiernoda964822019-01-14 15:53:47 +00003551 else:
3552 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3553 format(param_name, primitive_desc["name"]))
tierno59d22d22018-09-25 18:10:19 +02003554
tiernoda964822019-01-14 15:53:47 +00003555 if isinstance(calculated_params[param_name], (dict, list, tuple)):
3556 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], default_flow_style=True,
3557 width=256)
3558 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
3559 calculated_params[param_name] = calculated_params[param_name][7:]
tierno97eb2362020-10-14 14:59:36 +00003560 if parameter.get("data-type") == "INTEGER":
3561 try:
3562 calculated_params[param_name] = int(calculated_params[param_name])
3563 except ValueError: # error converting string to int
3564 raise LcmException(
3565 "Parameter {} of primitive {} must be integer".format(param_name, primitive_desc["name"]))
3566 elif parameter.get("data-type") == "BOOLEAN":
3567 calculated_params[param_name] = not ((str(calculated_params[param_name])).lower() == 'false')
tiernoc3f2a822019-11-05 13:45:04 +00003568
3569 # add always ns_config_info if primitive name is config
3570 if primitive_desc["name"] == "config":
3571 if "ns_config_info" in instantiation_params:
3572 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
tiernoda964822019-01-14 15:53:47 +00003573 return calculated_params
3574
tierno4fa7f8e2020-07-08 15:33:55 +00003575 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None,
3576 ee_descriptor_id=None):
tiernoe876f672020-02-13 14:34:48 +00003577 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
3578 for vca in deployed_vca:
3579 if not vca:
3580 continue
3581 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3582 continue
tiernoe876f672020-02-13 14:34:48 +00003583 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3584 continue
3585 if kdu_name and kdu_name != vca["kdu_name"]:
3586 continue
tierno4fa7f8e2020-07-08 15:33:55 +00003587 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
3588 continue
tiernoe876f672020-02-13 14:34:48 +00003589 break
3590 else:
3591 # vca_deployed not found
tierno4fa7f8e2020-07-08 15:33:55 +00003592 raise LcmException("charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
3593 " is not deployed".format(member_vnf_index, vdu_id, vdu_count_index, kdu_name,
3594 ee_descriptor_id))
quilesj7e13aeb2019-10-08 13:34:55 +02003595
tiernoe876f672020-02-13 14:34:48 +00003596 # get ee_id
3597 ee_id = vca.get("ee_id")
tierno588547c2020-07-01 15:30:20 +00003598 vca_type = vca.get("type", "lxc_proxy_charm") # default value for backward compatibility - proxy charm
tiernoe876f672020-02-13 14:34:48 +00003599 if not ee_id:
tierno067e04a2020-03-31 12:53:13 +00003600 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
tiernoe876f672020-02-13 14:34:48 +00003601 "execution environment"
tierno067e04a2020-03-31 12:53:13 +00003602 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
tierno588547c2020-07-01 15:30:20 +00003603 return ee_id, vca_type
tiernoe876f672020-02-13 14:34:48 +00003604
    async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0,
                                    retries_interval=30, timeout=None,
                                    vca_type=None, db_dict=None) -> (str, str):
        """
        Executes a primitive over a deployed execution environment, retrying on errors when requested.
        :param ee_id: id of the execution environment where the primitive runs
        :param primitive: primitive name; "config" has its params wrapped as {"params": ...}
        :param primitive_params: dict with the primitive parameters
        :param retries: number of extra attempts after a failed execution
        :param retries_interval: seconds to sleep between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout_primitive
        :param vca_type: key of self.vca_map selecting the connector; defaults to "lxc_proxy_charm"
        :param db_dict: database info forwarded to the connector (used by it for status updates)
        :return: tuple (operation-state, detail): ('COMPLETED', output) on success; ('FAILED', error) when retries
            are exhausted; ('FAIL', error) on unexpected exceptions
        :raises: LcmException and asyncio.CancelledError are re-raised to the caller
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict),
                        timeout=timeout or self.timeout_primitive)
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        # retries exhausted
                        return 'FAILED', str(e)

            return 'COMPLETED', output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02003646
3647 async def action(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003648
3649 # Try to lock HA task here
3650 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3651 if not task_is_locked_by_me:
3652 return
3653
tierno59d22d22018-09-25 18:10:19 +02003654 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
3655 self.logger.debug(logging_text + "Enter")
3656 # get all needed from database
3657 db_nsr = None
3658 db_nslcmop = None
tiernoe876f672020-02-13 14:34:48 +00003659 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003660 db_nslcmop_update = {}
3661 nslcmop_operation_state = None
tierno067e04a2020-03-31 12:53:13 +00003662 error_description_nslcmop = None
tierno59d22d22018-09-25 18:10:19 +02003663 exc = None
3664 try:
kuused124bfe2019-06-18 12:09:24 +02003665 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003666 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003667 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
3668
quilesj4cda56b2019-12-05 10:02:20 +00003669 self._write_ns_status(
3670 nsr_id=nsr_id,
3671 ns_state=None,
3672 current_operation="RUNNING ACTION",
3673 current_operation_id=nslcmop_id
3674 )
3675
tierno59d22d22018-09-25 18:10:19 +02003676 step = "Getting information from database"
3677 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3678 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernoda964822019-01-14 15:53:47 +00003679
tiernoe4f7e6c2018-11-27 14:55:30 +00003680 nsr_deployed = db_nsr["_admin"].get("deployed")
tierno1b633412019-02-25 16:48:23 +00003681 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tierno59d22d22018-09-25 18:10:19 +02003682 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003683 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
tiernoe4f7e6c2018-11-27 14:55:30 +00003684 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
tierno067e04a2020-03-31 12:53:13 +00003685 primitive = db_nslcmop["operationParams"]["primitive"]
3686 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
3687 timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)
tierno59d22d22018-09-25 18:10:19 +02003688
tierno1b633412019-02-25 16:48:23 +00003689 if vnf_index:
3690 step = "Getting vnfr from database"
3691 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3692 step = "Getting vnfd from database"
3693 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
3694 else:
tierno067e04a2020-03-31 12:53:13 +00003695 step = "Getting nsd from database"
3696 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
tiernoda964822019-01-14 15:53:47 +00003697
tierno82974b22018-11-27 21:55:36 +00003698 # for backward compatibility
3699 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3700 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3701 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3702 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3703
tiernoda964822019-01-14 15:53:47 +00003704 # look for primitive
tierno4fa7f8e2020-07-08 15:33:55 +00003705 config_primitive_desc = descriptor_configuration = None
tiernoda964822019-01-14 15:53:47 +00003706 if vdu_id:
3707 for vdu in get_iterable(db_vnfd, "vdu"):
3708 if vdu_id == vdu["id"]:
tierno4fa7f8e2020-07-08 15:33:55 +00003709 descriptor_configuration = vdu.get("vdu-configuration")
tierno067e04a2020-03-31 12:53:13 +00003710 break
calvinosanch9f9c6f22019-11-04 13:37:39 +01003711 elif kdu_name:
tierno067e04a2020-03-31 12:53:13 +00003712 for kdu in get_iterable(db_vnfd, "kdu"):
3713 if kdu_name == kdu["name"]:
tierno4fa7f8e2020-07-08 15:33:55 +00003714 descriptor_configuration = kdu.get("kdu-configuration")
tierno067e04a2020-03-31 12:53:13 +00003715 break
tierno1b633412019-02-25 16:48:23 +00003716 elif vnf_index:
tierno4fa7f8e2020-07-08 15:33:55 +00003717 descriptor_configuration = db_vnfd.get("vnf-configuration")
tierno1b633412019-02-25 16:48:23 +00003718 else:
tierno4fa7f8e2020-07-08 15:33:55 +00003719 descriptor_configuration = db_nsd.get("ns-configuration")
3720
3721 if descriptor_configuration and descriptor_configuration.get("config-primitive"):
3722 for config_primitive in descriptor_configuration["config-primitive"]:
tierno1b633412019-02-25 16:48:23 +00003723 if config_primitive["name"] == primitive:
3724 config_primitive_desc = config_primitive
3725 break
tiernoda964822019-01-14 15:53:47 +00003726
garciadeblas003ac802020-07-20 11:05:42 +00003727 if not config_primitive_desc:
3728 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
3729 raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
3730 format(primitive))
3731 primitive_name = primitive
3732 ee_descriptor_id = None
3733 else:
3734 primitive_name = config_primitive_desc.get("execution-environment-primitive", primitive)
3735 ee_descriptor_id = config_primitive_desc.get("execution-environment-ref")
tierno1b633412019-02-25 16:48:23 +00003736
tierno1b633412019-02-25 16:48:23 +00003737 if vnf_index:
tierno626e0152019-11-29 14:16:16 +00003738 if vdu_id:
3739 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
tierno067e04a2020-03-31 12:53:13 +00003740 desc_params = self._format_additional_params(vdur.get("additionalParams"))
3741 elif kdu_name:
3742 kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
3743 desc_params = self._format_additional_params(kdur.get("additionalParams"))
3744 else:
3745 desc_params = self._format_additional_params(db_vnfr.get("additionalParamsForVnf"))
tierno1b633412019-02-25 16:48:23 +00003746 else:
tierno067e04a2020-03-31 12:53:13 +00003747 desc_params = self._format_additional_params(db_nsr.get("additionalParamsForNs"))
tiernoda964822019-01-14 15:53:47 +00003748
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003749 if kdu_name:
3750 kdu_action = True if not deep_get(kdu, ("kdu-configuration", "juju")) else False
3751
tiernoda964822019-01-14 15:53:47 +00003752 # TODO check if ns is in a proper status
tierno4fa7f8e2020-07-08 15:33:55 +00003753 if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action):
tierno067e04a2020-03-31 12:53:13 +00003754 # kdur and desc_params already set from before
3755 if primitive_params:
3756 desc_params.update(primitive_params)
3757 # TODO Check if we will need something at vnf level
3758 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
3759 if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
3760 break
3761 else:
3762 raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))
quilesj7e13aeb2019-10-08 13:34:55 +02003763
tierno067e04a2020-03-31 12:53:13 +00003764 if kdu.get("k8scluster-type") not in self.k8scluster_map:
3765 msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
3766 raise LcmException(msg)
3767
3768 db_dict = {"collection": "nsrs",
3769 "filter": {"_id": nsr_id},
3770 "path": "_admin.deployed.K8s.{}".format(index)}
tierno4fa7f8e2020-07-08 15:33:55 +00003771 self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name))
3772 step = "Executing kdu {}".format(primitive_name)
3773 if primitive_name == "upgrade":
tierno067e04a2020-03-31 12:53:13 +00003774 if desc_params.get("kdu_model"):
3775 kdu_model = desc_params.get("kdu_model")
3776 del desc_params["kdu_model"]
3777 else:
3778 kdu_model = kdu.get("kdu-model")
3779 parts = kdu_model.split(sep=":")
3780 if len(parts) == 2:
3781 kdu_model = parts[0]
3782
3783 detailed_status = await asyncio.wait_for(
3784 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
3785 cluster_uuid=kdu.get("k8scluster-uuid"),
3786 kdu_instance=kdu.get("kdu-instance"),
3787 atomic=True, kdu_model=kdu_model,
3788 params=desc_params, db_dict=db_dict,
3789 timeout=timeout_ns_action),
3790 timeout=timeout_ns_action + 10)
3791 self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
tierno4fa7f8e2020-07-08 15:33:55 +00003792 elif primitive_name == "rollback":
tierno067e04a2020-03-31 12:53:13 +00003793 detailed_status = await asyncio.wait_for(
3794 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
3795 cluster_uuid=kdu.get("k8scluster-uuid"),
3796 kdu_instance=kdu.get("kdu-instance"),
3797 db_dict=db_dict),
3798 timeout=timeout_ns_action)
tierno4fa7f8e2020-07-08 15:33:55 +00003799 elif primitive_name == "status":
tierno067e04a2020-03-31 12:53:13 +00003800 detailed_status = await asyncio.wait_for(
3801 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
3802 cluster_uuid=kdu.get("k8scluster-uuid"),
3803 kdu_instance=kdu.get("kdu-instance")),
3804 timeout=timeout_ns_action)
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003805 else:
3806 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
3807 params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)
3808
3809 detailed_status = await asyncio.wait_for(
3810 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
3811 cluster_uuid=kdu.get("k8scluster-uuid"),
3812 kdu_instance=kdu_instance,
tierno4fa7f8e2020-07-08 15:33:55 +00003813 primitive_name=primitive_name,
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003814 params=params, db_dict=db_dict,
3815 timeout=timeout_ns_action),
3816 timeout=timeout_ns_action)
tierno067e04a2020-03-31 12:53:13 +00003817
3818 if detailed_status:
3819 nslcmop_operation_state = 'COMPLETED'
3820 else:
3821 detailed_status = ''
3822 nslcmop_operation_state = 'FAILED'
tierno067e04a2020-03-31 12:53:13 +00003823 else:
tierno588547c2020-07-01 15:30:20 +00003824 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
3825 member_vnf_index=vnf_index,
3826 vdu_id=vdu_id,
tierno4fa7f8e2020-07-08 15:33:55 +00003827 vdu_count_index=vdu_count_index,
3828 ee_descriptor_id=ee_descriptor_id)
tierno588547c2020-07-01 15:30:20 +00003829 db_nslcmop_notif = {"collection": "nslcmops",
3830 "filter": {"_id": nslcmop_id},
3831 "path": "admin.VCA"}
tierno067e04a2020-03-31 12:53:13 +00003832 nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
tierno588547c2020-07-01 15:30:20 +00003833 ee_id,
tierno4fa7f8e2020-07-08 15:33:55 +00003834 primitive=primitive_name,
tierno067e04a2020-03-31 12:53:13 +00003835 primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
tierno588547c2020-07-01 15:30:20 +00003836 timeout=timeout_ns_action,
3837 vca_type=vca_type,
3838 db_dict=db_nslcmop_notif)
tierno067e04a2020-03-31 12:53:13 +00003839
3840 db_nslcmop_update["detailed-status"] = detailed_status
3841 error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
3842 self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
3843 detailed_status))
tierno59d22d22018-09-25 18:10:19 +02003844 return # database update is called inside finally
3845
tiernof59ad6c2020-04-08 12:50:52 +00003846 except (DbException, LcmException, N2VCException, K8sException) as e:
tierno59d22d22018-09-25 18:10:19 +02003847 self.logger.error(logging_text + "Exit Exception {}".format(e))
3848 exc = e
3849 except asyncio.CancelledError:
3850 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
3851 exc = "Operation was cancelled"
tierno067e04a2020-03-31 12:53:13 +00003852 except asyncio.TimeoutError:
3853 self.logger.error(logging_text + "Timeout while '{}'".format(step))
3854 exc = "Timeout"
tierno59d22d22018-09-25 18:10:19 +02003855 except Exception as e:
3856 exc = traceback.format_exc()
3857 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
3858 finally:
tierno067e04a2020-03-31 12:53:13 +00003859 if exc:
3860 db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
kuuse0ca67472019-05-13 15:59:27 +02003861 "FAILED {}: {}".format(step, exc)
tierno067e04a2020-03-31 12:53:13 +00003862 nslcmop_operation_state = "FAILED"
3863 if db_nsr:
3864 self._write_ns_status(
3865 nsr_id=nsr_id,
3866 ns_state=db_nsr["nsState"], # TODO check if degraded. For the moment use previous status
3867 current_operation="IDLE",
3868 current_operation_id=None,
3869 # error_description=error_description_nsr,
3870 # error_detail=error_detail,
3871 other_update=db_nsr_update
3872 )
3873
tiernoa17d4f42020-04-28 09:59:23 +00003874 self._write_op_status(
3875 op_id=nslcmop_id,
3876 stage="",
3877 error_message=error_description_nslcmop,
3878 operation_state=nslcmop_operation_state,
3879 other_update=db_nslcmop_update,
3880 )
tierno067e04a2020-03-31 12:53:13 +00003881
tierno59d22d22018-09-25 18:10:19 +02003882 if nslcmop_operation_state:
3883 try:
3884 await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00003885 "operationState": nslcmop_operation_state},
3886 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003887 except Exception as e:
3888 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
3889 self.logger.debug(logging_text + "Exit")
3890 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
tierno067e04a2020-03-31 12:53:13 +00003891 return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02003892
3893 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003894
3895 # Try to lock HA task here
3896 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3897 if not task_is_locked_by_me:
3898 return
3899
tierno59d22d22018-09-25 18:10:19 +02003900 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
3901 self.logger.debug(logging_text + "Enter")
3902 # get all needed from database
3903 db_nsr = None
3904 db_nslcmop = None
3905 db_nslcmop_update = {}
3906 nslcmop_operation_state = None
tiernoe876f672020-02-13 14:34:48 +00003907 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003908 exc = None
tierno9ab95942018-10-10 16:44:22 +02003909 # in case of error, indicates what part of scale was failed to put nsr at error status
3910 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02003911 old_operational_status = ""
3912 old_config_status = ""
tierno59d22d22018-09-25 18:10:19 +02003913 try:
kuused124bfe2019-06-18 12:09:24 +02003914 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003915 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003916 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
tierno47e86b52018-10-10 14:05:55 +02003917
quilesj4cda56b2019-12-05 10:02:20 +00003918 self._write_ns_status(
3919 nsr_id=nsr_id,
3920 ns_state=None,
3921 current_operation="SCALING",
3922 current_operation_id=nslcmop_id
3923 )
3924
ikalyvas02d9e7b2019-05-27 18:16:01 +03003925 step = "Getting nslcmop from database"
ikalyvas02d9e7b2019-05-27 18:16:01 +03003926 self.logger.debug(step + " after having waited for previous tasks to be completed")
3927 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3928 step = "Getting nsr from database"
3929 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3930
3931 old_operational_status = db_nsr["operational-status"]
3932 old_config_status = db_nsr["config-status"]
tierno59d22d22018-09-25 18:10:19 +02003933 step = "Parsing scaling parameters"
tierno9babfda2019-06-07 12:36:50 +00003934 # self.logger.debug(step)
tierno59d22d22018-09-25 18:10:19 +02003935 db_nsr_update["operational-status"] = "scaling"
3936 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00003937 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003938
3939 #######
3940 nsr_deployed = db_nsr["_admin"].get("deployed")
3941 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tiernoda6fb102019-11-23 00:36:52 +00003942 # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3943 # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3944 # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003945 #######
3946
tiernoe4f7e6c2018-11-27 14:55:30 +00003947 RO_nsr_id = nsr_deployed["RO"]["nsr_id"]
tierno59d22d22018-09-25 18:10:19 +02003948 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
3949 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
3950 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
3951 # scaling_policy = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"].get("scaling-policy")
3952
tierno82974b22018-11-27 21:55:36 +00003953 # for backward compatibility
3954 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3955 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3956 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3957 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3958
tierno59d22d22018-09-25 18:10:19 +02003959 step = "Getting vnfr from database"
3960 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3961 step = "Getting vnfd from database"
3962 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03003963
tierno59d22d22018-09-25 18:10:19 +02003964 step = "Getting scaling-group-descriptor"
3965 for scaling_descriptor in db_vnfd["scaling-group-descriptor"]:
3966 if scaling_descriptor["name"] == scaling_group:
3967 break
3968 else:
3969 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
3970 "at vnfd:scaling-group-descriptor".format(scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003971
tierno59d22d22018-09-25 18:10:19 +02003972 # cooldown_time = 0
3973 # for scaling_policy_descriptor in scaling_descriptor.get("scaling-policy", ()):
3974 # cooldown_time = scaling_policy_descriptor.get("cooldown-time", 0)
3975 # if scaling_policy and scaling_policy == scaling_policy_descriptor.get("name"):
3976 # break
3977
3978 # TODO check if ns is in a proper status
tierno15b1cf12019-08-29 13:21:40 +00003979 step = "Sending scale order to VIM"
tierno59d22d22018-09-25 18:10:19 +02003980 nb_scale_op = 0
3981 if not db_nsr["_admin"].get("scaling-group"):
3982 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
3983 admin_scale_index = 0
3984 else:
3985 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
3986 if admin_scale_info["name"] == scaling_group:
3987 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
3988 break
tierno9ab95942018-10-10 16:44:22 +02003989 else: # not found, set index one plus last element and add new entry with the name
3990 admin_scale_index += 1
3991 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
tierno59d22d22018-09-25 18:10:19 +02003992 RO_scaling_info = []
3993 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
3994 if scaling_type == "SCALE_OUT":
3995 # count if max-instance-count is reached
kuuse818d70c2019-08-07 14:43:44 +02003996 max_instance_count = scaling_descriptor.get("max-instance-count", 10)
3997 # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count))
3998 if nb_scale_op >= max_instance_count:
3999 raise LcmException("reached the limit of {} (max-instance-count) "
4000 "scaling-out operations for the "
4001 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
kuuse8b998e42019-07-30 15:22:16 +02004002
ikalyvas02d9e7b2019-05-27 18:16:01 +03004003 nb_scale_op += 1
tierno59d22d22018-09-25 18:10:19 +02004004 vdu_scaling_info["scaling_direction"] = "OUT"
4005 vdu_scaling_info["vdu-create"] = {}
4006 for vdu_scale_info in scaling_descriptor["vdu"]:
tiernof7b42112020-10-06 08:22:07 +00004007 vdud = next(vdu for vdu in db_vnfd.get("vdu") if vdu["id"] == vdu_scale_info["vdu-id-ref"])
4008 vdu_index = len([x for x in db_vnfr.get("vdur", ())
4009 if x.get("vdu-id-ref") == vdu_scale_info["vdu-id-ref"] and
4010 x.get("member-vnf-index-ref") == vnf_index])
4011 cloud_init_text = self._get_cloud_init(vdud, db_vnfd)
4012 if cloud_init_text:
4013 additional_params = self._get_vdu_additional_params(db_vnfr, vdud["id"]) or {}
4014 cloud_init_list = []
4015 for x in range(vdu_scale_info.get("count", 1)):
4016 if cloud_init_text:
4017 # TODO Information of its own ip is not available because db_vnfr is not updated.
4018 additional_params["OSM"] = self._get_osm_params(db_vnfr, vdu_scale_info["vdu-id-ref"],
4019 vdu_index + x)
4020 cloud_init_list.append(self._parse_cloud_init(cloud_init_text, additional_params,
4021 db_vnfd["id"], vdud["id"]))
tierno59d22d22018-09-25 18:10:19 +02004022 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
4023 "type": "create", "count": vdu_scale_info.get("count", 1)})
tiernof7b42112020-10-06 08:22:07 +00004024 if cloud_init_list:
4025 RO_scaling_info[-1]["cloud_init"] = cloud_init_list
tierno59d22d22018-09-25 18:10:19 +02004026 vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
ikalyvas02d9e7b2019-05-27 18:16:01 +03004027
tierno59d22d22018-09-25 18:10:19 +02004028 elif scaling_type == "SCALE_IN":
4029 # count if min-instance-count is reached
tierno27246d82018-09-27 15:59:09 +02004030 min_instance_count = 0
tierno59d22d22018-09-25 18:10:19 +02004031 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
4032 min_instance_count = int(scaling_descriptor["min-instance-count"])
tierno9babfda2019-06-07 12:36:50 +00004033 if nb_scale_op <= min_instance_count:
4034 raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the "
4035 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03004036 nb_scale_op -= 1
tierno59d22d22018-09-25 18:10:19 +02004037 vdu_scaling_info["scaling_direction"] = "IN"
4038 vdu_scaling_info["vdu-delete"] = {}
4039 for vdu_scale_info in scaling_descriptor["vdu"]:
4040 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
4041 "type": "delete", "count": vdu_scale_info.get("count", 1)})
4042 vdu_scaling_info["vdu-delete"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
4043
4044 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
tierno27246d82018-09-27 15:59:09 +02004045 vdu_create = vdu_scaling_info.get("vdu-create")
4046 vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
tierno59d22d22018-09-25 18:10:19 +02004047 if vdu_scaling_info["scaling_direction"] == "IN":
4048 for vdur in reversed(db_vnfr["vdur"]):
tierno27246d82018-09-27 15:59:09 +02004049 if vdu_delete.get(vdur["vdu-id-ref"]):
4050 vdu_delete[vdur["vdu-id-ref"]] -= 1
tierno59d22d22018-09-25 18:10:19 +02004051 vdu_scaling_info["vdu"].append({
4052 "name": vdur["name"],
4053 "vdu_id": vdur["vdu-id-ref"],
4054 "interface": []
4055 })
4056 for interface in vdur["interfaces"]:
4057 vdu_scaling_info["vdu"][-1]["interface"].append({
4058 "name": interface["name"],
4059 "ip_address": interface["ip-address"],
4060 "mac_address": interface.get("mac-address"),
4061 })
tierno27246d82018-09-27 15:59:09 +02004062 vdu_delete = vdu_scaling_info.pop("vdu-delete")
tierno59d22d22018-09-25 18:10:19 +02004063
kuuseac3a8882019-10-03 10:48:06 +02004064 # PRE-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02004065 step = "Executing pre-scale vnf-config-primitive"
4066 if scaling_descriptor.get("scaling-config-action"):
4067 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02004068 if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
4069 or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02004070 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4071 step = db_nslcmop_update["detailed-status"] = \
4072 "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00004073
tierno59d22d22018-09-25 18:10:19 +02004074 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02004075 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
4076 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02004077 break
4078 else:
4079 raise LcmException(
4080 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
tiernoda964822019-01-14 15:53:47 +00004081 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
tierno4fa7f8e2020-07-08 15:33:55 +00004082 "primitive".format(scaling_group, vnf_config_primitive))
tiernoda964822019-01-14 15:53:47 +00004083
tierno16fedf52019-05-24 08:38:26 +00004084 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00004085 if db_vnfr.get("additionalParamsForVnf"):
4086 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
quilesj7e13aeb2019-10-08 13:34:55 +02004087
tierno9ab95942018-10-10 16:44:22 +02004088 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02004089 db_nsr_update["config-status"] = "configuring pre-scaling"
kuuseac3a8882019-10-03 10:48:06 +02004090 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
4091
tierno7c4e24c2020-05-13 08:41:35 +00004092 # Pre-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004093 op_index = self._check_or_add_scale_suboperation(
4094 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
tierno7c4e24c2020-05-13 08:41:35 +00004095 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004096 # Skip sub-operation
4097 result = 'COMPLETED'
4098 result_detail = 'Done'
4099 self.logger.debug(logging_text +
4100 "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
4101 vnf_config_primitive, result, result_detail))
4102 else:
tierno7c4e24c2020-05-13 08:41:35 +00004103 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004104 # New sub-operation: Get index of this sub-operation
4105 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4106 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4107 format(vnf_config_primitive))
4108 else:
tierno7c4e24c2020-05-13 08:41:35 +00004109 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004110 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4111 vnf_index = op.get('member_vnf_index')
4112 vnf_config_primitive = op.get('primitive')
4113 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00004114 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02004115 format(vnf_config_primitive))
tierno588547c2020-07-01 15:30:20 +00004116 # Execute the primitive, either with new (first-time) or registered (reintent) args
tierno4fa7f8e2020-07-08 15:33:55 +00004117 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4118 primitive_name = config_primitive.get("execution-environment-primitive",
4119 vnf_config_primitive)
tierno588547c2020-07-01 15:30:20 +00004120 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4121 member_vnf_index=vnf_index,
4122 vdu_id=None,
tierno4fa7f8e2020-07-08 15:33:55 +00004123 vdu_count_index=None,
4124 ee_descriptor_id=ee_descriptor_id)
kuuseac3a8882019-10-03 10:48:06 +02004125 result, result_detail = await self._ns_execute_primitive(
tierno4fa7f8e2020-07-08 15:33:55 +00004126 ee_id, primitive_name, primitive_params, vca_type)
kuuseac3a8882019-10-03 10:48:06 +02004127 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4128 vnf_config_primitive, result, result_detail))
4129 # Update operationState = COMPLETED | FAILED
4130 self._update_suboperation_status(
4131 db_nslcmop, op_index, result, result_detail)
4132
tierno59d22d22018-09-25 18:10:19 +02004133 if result == "FAILED":
4134 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02004135 db_nsr_update["config-status"] = old_config_status
4136 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02004137 # PRE-SCALE END
tierno59d22d22018-09-25 18:10:19 +02004138
kuuseac3a8882019-10-03 10:48:06 +02004139 # SCALE RO - BEGIN
4140 # Should this block be skipped if 'RO_nsr_id' == None ?
4141 # if (RO_nsr_id and RO_scaling_info):
tierno59d22d22018-09-25 18:10:19 +02004142 if RO_scaling_info:
tierno9ab95942018-10-10 16:44:22 +02004143 scale_process = "RO"
tierno7c4e24c2020-05-13 08:41:35 +00004144 # Scale RO retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004145 op_index = self._check_or_add_scale_suboperation(
4146 db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
tierno7c4e24c2020-05-13 08:41:35 +00004147 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004148 # Skip sub-operation
4149 result = 'COMPLETED'
4150 result_detail = 'Done'
4151 self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(
4152 result, result_detail))
4153 else:
tierno7c4e24c2020-05-13 08:41:35 +00004154 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004155 # New sub-operation: Get index of this sub-operation
4156 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4157 self.logger.debug(logging_text + "New sub-operation RO")
tierno59d22d22018-09-25 18:10:19 +02004158 else:
tierno7c4e24c2020-05-13 08:41:35 +00004159 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004160 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4161 RO_nsr_id = op.get('RO_nsr_id')
4162 RO_scaling_info = op.get('RO_scaling_info')
tierno7c4e24c2020-05-13 08:41:35 +00004163 self.logger.debug(logging_text + "Sub-operation RO retry for primitive {}".format(
kuuseac3a8882019-10-03 10:48:06 +02004164 vnf_config_primitive))
4165
4166 RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
4167 db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
4168 db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
4169 # wait until ready
4170 RO_nslcmop_id = RO_desc["instance_action_id"]
4171 db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id
4172
4173 RO_task_done = False
tiernodf24ef82020-09-25 12:33:15 +00004174 step = detailed_status = "Waiting for VIM to scale. RO_task_id={}.".format(RO_nslcmop_id)
kuuseac3a8882019-10-03 10:48:06 +02004175 detailed_status_old = None
4176 self.logger.debug(logging_text + step)
4177
4178 deployment_timeout = 1 * 3600 # One hour
4179 while deployment_timeout > 0:
4180 if not RO_task_done:
4181 desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
4182 extra_item_id=RO_nslcmop_id)
quilesj3655ae02019-12-12 16:08:35 +00004183
4184 # deploymentStatus
4185 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4186
kuuseac3a8882019-10-03 10:48:06 +02004187 ns_status, ns_status_info = self.RO.check_action_status(desc)
4188 if ns_status == "ERROR":
4189 raise ROclient.ROClientException(ns_status_info)
4190 elif ns_status == "BUILD":
4191 detailed_status = step + "; {}".format(ns_status_info)
4192 elif ns_status == "ACTIVE":
4193 RO_task_done = True
tiernodf24ef82020-09-25 12:33:15 +00004194 self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
kuuseac3a8882019-10-03 10:48:06 +02004195 step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
4196 self.logger.debug(logging_text + step)
4197 else:
4198 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
tierno59d22d22018-09-25 18:10:19 +02004199 else:
tiernodf24ef82020-09-25 12:33:15 +00004200 desc = await self.RO.show("ns", RO_nsr_id)
4201 ns_status, ns_status_info = self.RO.check_ns_status(desc)
4202 # deploymentStatus
4203 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
quilesj7e13aeb2019-10-08 13:34:55 +02004204
kuuseac3a8882019-10-03 10:48:06 +02004205 if ns_status == "ERROR":
4206 raise ROclient.ROClientException(ns_status_info)
4207 elif ns_status == "BUILD":
4208 detailed_status = step + "; {}".format(ns_status_info)
4209 elif ns_status == "ACTIVE":
4210 step = detailed_status = \
4211 "Waiting for management IP address reported by the VIM. Updating VNFRs"
kuuseac3a8882019-10-03 10:48:06 +02004212 try:
kuuseac3a8882019-10-03 10:48:06 +02004213 # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
4214 self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
4215 break
4216 except LcmExceptionNoMgmtIP:
4217 pass
4218 else:
4219 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
4220 if detailed_status != detailed_status_old:
4221 self._update_suboperation_status(
4222 db_nslcmop, op_index, 'COMPLETED', detailed_status)
4223 detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
4224 self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
tierno59d22d22018-09-25 18:10:19 +02004225
kuuseac3a8882019-10-03 10:48:06 +02004226 await asyncio.sleep(5, loop=self.loop)
4227 deployment_timeout -= 5
4228 if deployment_timeout <= 0:
4229 self._update_suboperation_status(
4230 db_nslcmop, nslcmop_id, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
4231 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tierno59d22d22018-09-25 18:10:19 +02004232
kuuseac3a8882019-10-03 10:48:06 +02004233 # update VDU_SCALING_INFO with the obtained ip_addresses
4234 if vdu_scaling_info["scaling_direction"] == "OUT":
4235 for vdur in reversed(db_vnfr["vdur"]):
4236 if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
4237 vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
4238 vdu_scaling_info["vdu"].append({
4239 "name": vdur["name"],
4240 "vdu_id": vdur["vdu-id-ref"],
4241 "interface": []
tierno59d22d22018-09-25 18:10:19 +02004242 })
kuuseac3a8882019-10-03 10:48:06 +02004243 for interface in vdur["interfaces"]:
4244 vdu_scaling_info["vdu"][-1]["interface"].append({
4245 "name": interface["name"],
4246 "ip_address": interface["ip-address"],
4247 "mac_address": interface.get("mac-address"),
4248 })
4249 del vdu_scaling_info["vdu-create"]
4250
4251 self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
4252 # SCALE RO - END
tierno59d22d22018-09-25 18:10:19 +02004253
tierno9ab95942018-10-10 16:44:22 +02004254 scale_process = None
tierno59d22d22018-09-25 18:10:19 +02004255 if db_nsr_update:
4256 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4257
kuuseac3a8882019-10-03 10:48:06 +02004258 # POST-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02004259 # execute primitive service POST-SCALING
4260 step = "Executing post-scale vnf-config-primitive"
4261 if scaling_descriptor.get("scaling-config-action"):
4262 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02004263 if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
4264 or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02004265 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4266 step = db_nslcmop_update["detailed-status"] = \
4267 "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00004268
tierno589befb2019-05-29 07:06:23 +00004269 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00004270 if db_vnfr.get("additionalParamsForVnf"):
4271 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
4272
tierno59d22d22018-09-25 18:10:19 +02004273 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02004274 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
4275 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02004276 break
4277 else:
tierno4fa7f8e2020-07-08 15:33:55 +00004278 raise LcmException(
4279 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
4280 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
4281 "config-primitive".format(scaling_group, vnf_config_primitive))
tierno9ab95942018-10-10 16:44:22 +02004282 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02004283 db_nsr_update["config-status"] = "configuring post-scaling"
kuuseac3a8882019-10-03 10:48:06 +02004284 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
tiernod6de1992018-10-11 13:05:52 +02004285
tierno7c4e24c2020-05-13 08:41:35 +00004286 # Post-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004287 op_index = self._check_or_add_scale_suboperation(
4288 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
quilesj4cda56b2019-12-05 10:02:20 +00004289 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004290 # Skip sub-operation
4291 result = 'COMPLETED'
4292 result_detail = 'Done'
4293 self.logger.debug(logging_text +
4294 "vnf_config_primitive={} Skipped sub-operation, result {} {}".
4295 format(vnf_config_primitive, result, result_detail))
4296 else:
quilesj4cda56b2019-12-05 10:02:20 +00004297 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004298 # New sub-operation: Get index of this sub-operation
4299 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4300 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4301 format(vnf_config_primitive))
4302 else:
tierno7c4e24c2020-05-13 08:41:35 +00004303 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004304 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4305 vnf_index = op.get('member_vnf_index')
4306 vnf_config_primitive = op.get('primitive')
4307 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00004308 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02004309 format(vnf_config_primitive))
tierno588547c2020-07-01 15:30:20 +00004310 # Execute the primitive, either with new (first-time) or registered (reintent) args
tierno4fa7f8e2020-07-08 15:33:55 +00004311 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4312 primitive_name = config_primitive.get("execution-environment-primitive",
4313 vnf_config_primitive)
tierno588547c2020-07-01 15:30:20 +00004314 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4315 member_vnf_index=vnf_index,
4316 vdu_id=None,
tierno4fa7f8e2020-07-08 15:33:55 +00004317 vdu_count_index=None,
4318 ee_descriptor_id=ee_descriptor_id)
kuuseac3a8882019-10-03 10:48:06 +02004319 result, result_detail = await self._ns_execute_primitive(
tierno4fa7f8e2020-07-08 15:33:55 +00004320 ee_id, primitive_name, primitive_params, vca_type)
kuuseac3a8882019-10-03 10:48:06 +02004321 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4322 vnf_config_primitive, result, result_detail))
4323 # Update operationState = COMPLETED | FAILED
4324 self._update_suboperation_status(
4325 db_nslcmop, op_index, result, result_detail)
4326
tierno59d22d22018-09-25 18:10:19 +02004327 if result == "FAILED":
4328 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02004329 db_nsr_update["config-status"] = old_config_status
4330 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02004331 # POST-SCALE END
tierno59d22d22018-09-25 18:10:19 +02004332
tiernod6de1992018-10-11 13:05:52 +02004333 db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type)
ikalyvas02d9e7b2019-05-27 18:16:01 +03004334 db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
4335 else old_operational_status
tiernod6de1992018-10-11 13:05:52 +02004336 db_nsr_update["config-status"] = old_config_status
tierno59d22d22018-09-25 18:10:19 +02004337 return
4338 except (ROclient.ROClientException, DbException, LcmException) as e:
4339 self.logger.error(logging_text + "Exit Exception {}".format(e))
4340 exc = e
4341 except asyncio.CancelledError:
4342 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
4343 exc = "Operation was cancelled"
4344 except Exception as e:
4345 exc = traceback.format_exc()
4346 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
4347 finally:
quilesj3655ae02019-12-12 16:08:35 +00004348 self._write_ns_status(
4349 nsr_id=nsr_id,
4350 ns_state=None,
4351 current_operation="IDLE",
4352 current_operation_id=None
4353 )
tierno59d22d22018-09-25 18:10:19 +02004354 if exc:
tiernoa17d4f42020-04-28 09:59:23 +00004355 db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
4356 nslcmop_operation_state = "FAILED"
tierno59d22d22018-09-25 18:10:19 +02004357 if db_nsr:
tiernod6de1992018-10-11 13:05:52 +02004358 db_nsr_update["operational-status"] = old_operational_status
4359 db_nsr_update["config-status"] = old_config_status
4360 db_nsr_update["detailed-status"] = ""
4361 if scale_process:
4362 if "VCA" in scale_process:
4363 db_nsr_update["config-status"] = "failed"
4364 if "RO" in scale_process:
4365 db_nsr_update["operational-status"] = "failed"
4366 db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
4367 exc)
tiernoa17d4f42020-04-28 09:59:23 +00004368 else:
4369 error_description_nslcmop = None
4370 nslcmop_operation_state = "COMPLETED"
4371 db_nslcmop_update["detailed-status"] = "Done"
quilesj4cda56b2019-12-05 10:02:20 +00004372
tiernoa17d4f42020-04-28 09:59:23 +00004373 self._write_op_status(
4374 op_id=nslcmop_id,
4375 stage="",
4376 error_message=error_description_nslcmop,
4377 operation_state=nslcmop_operation_state,
4378 other_update=db_nslcmop_update,
4379 )
4380 if db_nsr:
4381 self._write_ns_status(
4382 nsr_id=nsr_id,
4383 ns_state=None,
4384 current_operation="IDLE",
4385 current_operation_id=None,
4386 other_update=db_nsr_update
4387 )
4388
tierno59d22d22018-09-25 18:10:19 +02004389 if nslcmop_operation_state:
4390 try:
4391 await self.msg.aiowrite("ns", "scaled", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00004392 "operationState": nslcmop_operation_state},
4393 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02004394 # if cooldown_time:
tiernod8323042019-08-09 11:32:23 +00004395 # await asyncio.sleep(cooldown_time, loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02004396 # await self.msg.aiowrite("ns","scaled-cooldown-time", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id})
4397 except Exception as e:
4398 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
4399 self.logger.debug(logging_text + "Exit")
4400 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
tierno89f82902020-07-03 14:52:28 +00004401
4402 async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip):
4403 if not self.prometheus:
4404 return
4405 # look if exist a file called 'prometheus*.j2' and
4406 artifact_content = self.fs.dir_ls(artifact_path)
4407 job_file = next((f for f in artifact_content if f.startswith("prometheus") and f.endswith(".j2")), None)
4408 if not job_file:
4409 return
4410 with self.fs.file_open((artifact_path, job_file), "r") as f:
4411 job_data = f.read()
4412
4413 # TODO get_service
4414 _, _, service = ee_id.partition(".") # remove prefix "namespace."
4415 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
4416 host_port = "80"
4417 vnfr_id = vnfr_id.replace("-", "")
4418 variables = {
4419 "JOB_NAME": vnfr_id,
4420 "TARGET_IP": target_ip,
4421 "EXPORTER_POD_IP": host_name,
4422 "EXPORTER_POD_PORT": host_port,
4423 }
4424 job_list = self.prometheus.parse_job(job_data, variables)
4425 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
4426 for job in job_list:
4427 if not isinstance(job.get("job_name"), str) or vnfr_id not in job["job_name"]:
4428 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
4429 job["nsr_id"] = nsr_id
4430 job_dict = {jl["job_name"]: jl for jl in job_list}
4431 if await self.prometheus.update(job_dict):
4432 return list(job_dict.keys())