blob: 9267b45e0e521aa870754e9e2c76775ba49d264a [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
20import yaml
21import logging
22import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020023import traceback
David Garciad4816682019-12-09 14:57:43 +010024import json
gcalvino35be9152018-12-20 09:33:12 +010025from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError
tierno59d22d22018-09-25 18:10:19 +020026
tierno77677d92019-08-22 13:46:35 +000027from osm_lcm import ROclient
tierno69f0d382020-05-07 13:08:09 +000028from osm_lcm.ng_ro import NgRoClient, NgRoException
tierno744303e2020-01-13 16:46:31 +000029from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
calvinosanch9f9c6f22019-11-04 13:37:39 +010030from n2vc.k8s_helm_conn import K8sHelmConnector
Adam Israelbaacc302019-12-01 12:41:39 -050031from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020032
tierno27246d82018-09-27 15:59:09 +020033from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +020034from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +020035
36from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +000037from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +020038
tierno588547c2020-07-01 15:30:20 +000039from osm_lcm.lcm_helm_conn import LCMHelmConn
40
tierno27246d82018-09-27 15:59:09 +020041from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +020042from http import HTTPStatus
43from time import time
tierno27246d82018-09-27 15:59:09 +020044from uuid import uuid4
lloretgalleg7c121132020-07-08 07:53:22 +000045
tiernob996d942020-07-03 14:52:28 +000046from random import randint
tierno59d22d22018-09-25 18:10:19 +020047
tierno69f0d382020-05-07 13:08:09 +000048__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
tierno59d22d22018-09-25 18:10:19 +020049
50
class N2VCJujuConnectorLCM(N2VCJujuConnector):
    """N2VC Juju connector specialized for the LCM.

    Extends the base connector so that callers can pass two extra parameters
    (artifact_path and vca_type): "k8s_proxy_charm" deployments are routed to
    the k8s proxy-charm installer, anything else is delegated to the parent.
    """

    async def create_execution_environment(self, namespace: str, db_dict: dict, reuse_ee_id: str = None,
                                           progress_timeout: float = None, total_timeout: float = None,
                                           config: dict = None, artifact_path: str = None,
                                           vca_type: str = None) -> (str, dict):
        """Create an execution environment, honoring the extra artifact_path/vca_type parameters."""
        if vca_type != "k8s_proxy_charm":
            # plain delegation to the parent connector (config/artifact_path/vca_type are not forwarded)
            return await super().create_execution_environment(
                namespace=namespace, db_dict=db_dict, reuse_ee_id=reuse_ee_id,
                progress_timeout=progress_timeout, total_timeout=total_timeout)

        # k8s proxy charm: charm name is the last path component of the artifact
        charm_name = artifact_path[artifact_path.rfind("/") + 1:]
        ee_id = await self.install_k8s_proxy_charm(
            charm_name=charm_name,
            namespace=namespace,
            artifact_path=artifact_path,
            db_dict=db_dict)
        return ee_id, None

    async def install_configuration_sw(self, ee_id: str, artifact_path: str, db_dict: dict,
                                       progress_timeout: float = None, total_timeout: float = None,
                                       config: dict = None, num_units: int = 1, vca_type: str = "lxc_proxy_charm"):
        """Install the configuration software; a no-op for k8s proxy charms (already installed at creation)."""
        if vca_type == "k8s_proxy_charm":
            return
        return await super().install_configuration_sw(
            ee_id=ee_id, artifact_path=artifact_path, db_dict=db_dict, progress_timeout=progress_timeout,
            total_timeout=total_timeout, config=config, num_units=num_units)
79
class NsLcm(LcmBase):
    # Seconds a charm may stay at blocked/error status before it is marked as failed
    timeout_vca_on_error = 5 * 60
    timeout_ns_deploy = 2 * 3600  # default global timeout (seconds) for deploying a ns
    timeout_ns_terminate = 1800  # default global timeout (seconds) for un-deploying a ns
    timeout_charm_delete = 10 * 60  # timeout (seconds) for deleting a charm
    timeout_primitive = 30 * 60  # timeout (seconds) for a primitive execution
    timeout_progress_primitive = 10 * 60  # timeout (seconds) for some progress in a primitive execution

    # Sentinel values returned when looking up sub-operations of an LCM operation
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Task name used to register the VCA deployment in the task list
    task_name_deploy_vca = "Deploying VCA"
kuuseac3a8882019-10-03 10:48:06 +020092
    def __init__(self, db, msg, fs, lcm_tasks, config, loop, prometheus=None):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param db: database connection (osm_common db object)
        :param msg: messaging (kafka) connection
        :param fs: filesystem storage connection
        :param lcm_tasks: shared registry of running LCM tasks
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop
        :param prometheus: optional prometheus connector used for metric exporting
        :return: None
        """
        super().__init__(
            db=db,
            msg=msg,
            fs=fs,
            logger=logging.getLogger('lcm.ns')
        )

        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        # "ng" flag selects the new-generation RO client below
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local modifications do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (juju-based execution environments)
        self.n2vc = N2VCJujuConnectorLCM(
            db=self.db,
            fs=self.fs,
            log=self.logger,
            loop=self.loop,
            url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
            username=self.vca_config.get('user', None),
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db
        )

        # helm-based execution environment connector
        self.conn_helm_ee = LCMHelmConn(
            db=self.db,
            fs=self.fs,
            log=self.logger,
            loop=self.loop,
            url=None,
            username=None,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db
        )

        # connector for KDUs deployed as helm charts
        self.k8sclusterhelm = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # connector for KDUs deployed as juju bundles
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # map kdu-model type -> k8s connector (short and long spellings accepted)
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm,
            "chart": self.k8sclusterhelm,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # map vca type -> execution environment connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee
        }

        self.prometheus = prometheus

        # create RO client (new-generation or classic depending on configuration)
        if self.ng_ro:
            self.RO = NgRoClient(self.loop, **self.ro_config)
        else:
            self.RO = ROclient.ROClient(self.loop, **self.ro_config)
tierno59d22d22018-09-25 18:10:19 +0200175
quilesj3655ae02019-12-12 16:08:35 +0000176 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200177
quilesj3655ae02019-12-12 16:08:35 +0000178 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
179
180 try:
181 # TODO filter RO descriptor fields...
182
183 # write to database
184 db_dict = dict()
185 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
186 db_dict['deploymentStatus'] = ro_descriptor
187 self.update_db_2("nsrs", nsrs_id, db_dict)
188
189 except Exception as e:
190 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
191
192 async def _on_update_n2vc_db(self, table, filter, path, updated_data):
193
quilesj69a722c2020-01-09 08:30:17 +0000194 # remove last dot from path (if exists)
195 if path.endswith('.'):
196 path = path[:-1]
197
quilesj3655ae02019-12-12 16:08:35 +0000198 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
199 # .format(table, filter, path, updated_data))
200
201 try:
202
203 nsr_id = filter.get('_id')
204
205 # read ns record from database
206 nsr = self.db.get_one(table='nsrs', q_filter=filter)
207 current_ns_status = nsr.get('nsState')
208
209 # get vca status for NS
quilesj69a722c2020-01-09 08:30:17 +0000210 status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
quilesj3655ae02019-12-12 16:08:35 +0000211
212 # vcaStatus
213 db_dict = dict()
214 db_dict['vcaStatus'] = status_dict
215
216 # update configurationStatus for this VCA
217 try:
218 vca_index = int(path[path.rfind(".")+1:])
219
220 vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
221 vca_status = vca_list[vca_index].get('status')
222
223 configuration_status_list = nsr.get('configurationStatus')
224 config_status = configuration_status_list[vca_index].get('status')
225
226 if config_status == 'BROKEN' and vca_status != 'failed':
227 db_dict['configurationStatus'][vca_index] = 'READY'
228 elif config_status != 'BROKEN' and vca_status == 'failed':
229 db_dict['configurationStatus'][vca_index] = 'BROKEN'
230 except Exception as e:
231 # not update configurationStatus
232 self.logger.debug('Error updating vca_index (ignore): {}'.format(e))
233
234 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
235 # if nsState = 'DEGRADED' check if all is OK
236 is_degraded = False
237 if current_ns_status in ('READY', 'DEGRADED'):
238 error_description = ''
239 # check machines
240 if status_dict.get('machines'):
241 for machine_id in status_dict.get('machines'):
242 machine = status_dict.get('machines').get(machine_id)
243 # check machine agent-status
244 if machine.get('agent-status'):
245 s = machine.get('agent-status').get('status')
246 if s != 'started':
247 is_degraded = True
248 error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
249 # check machine instance status
250 if machine.get('instance-status'):
251 s = machine.get('instance-status').get('status')
252 if s != 'running':
253 is_degraded = True
254 error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
255 # check applications
256 if status_dict.get('applications'):
257 for app_id in status_dict.get('applications'):
258 app = status_dict.get('applications').get(app_id)
259 # check application status
260 if app.get('status'):
261 s = app.get('status').get('status')
262 if s != 'active':
263 is_degraded = True
264 error_description += 'application {} status={} ; '.format(app_id, s)
265
266 if error_description:
267 db_dict['errorDescription'] = error_description
268 if current_ns_status == 'READY' and is_degraded:
269 db_dict['nsState'] = 'DEGRADED'
270 if current_ns_status == 'DEGRADED' and not is_degraded:
271 db_dict['nsState'] = 'READY'
272
273 # write to database
274 self.update_db_2("nsrs", nsr_id, db_dict)
275
tierno51183952020-04-03 15:48:18 +0000276 except (asyncio.CancelledError, asyncio.TimeoutError):
277 raise
quilesj3655ae02019-12-12 16:08:35 +0000278 except Exception as e:
279 self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200280
    def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
        """
        Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
        :param vnfd: input vnfd
        :param new_id: overrides vnf id if provided
        :param additionalParams: Instantiation params for VNFs provided
        :param nsrId: Id of the NSR
        :return: copy of vnfd
        :raises LcmException: on cloud-init file read errors, missing Jinja2 variables or Jinja2 parse errors
        """
        try:
            # deep copy so the caller's descriptor is never mutated
            vnfd_RO = deepcopy(vnfd)
            # remove unused by RO configuration, monitoring, scaling and internal keys
            vnfd_RO.pop("_id", None)
            vnfd_RO.pop("_admin", None)
            vnfd_RO.pop("vnf-configuration", None)
            vnfd_RO.pop("monitoring-param", None)
            vnfd_RO.pop("scaling-group-descriptor", None)
            vnfd_RO.pop("kdu", None)
            vnfd_RO.pop("k8s-cluster", None)
            if new_id:
                vnfd_RO["id"] = new_id

            # parse cloud-init or cloud-init-file with the provided variables using Jinja2
            for vdu in get_iterable(vnfd_RO, "vdu"):
                # cloud_init_file stays None for inline cloud-init; used by the FsException handler below
                cloud_init_file = None
                if vdu.get("cloud-init-file"):
                    base_folder = vnfd["_admin"]["storage"]
                    cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
                                                                   vdu["cloud-init-file"])
                    with self.fs.file_open(cloud_init_file, "r") as ci_file:
                        cloud_init_content = ci_file.read()
                    # content is inlined below; the file reference is no longer needed in the RO descriptor
                    vdu.pop("cloud-init-file", None)
                elif vdu.get("cloud-init"):
                    cloud_init_content = vdu["cloud-init"]
                else:
                    # vdu without cloud-init: nothing to render
                    continue

                # every undeclared Jinja2 variable must be supplied via additionalParamsForVnf
                env = Environment()
                ast = env.parse(cloud_init_content)
                mandatory_vars = meta.find_undeclared_variables(ast)
                if mandatory_vars:
                    for var in mandatory_vars:
                        if not additionalParams or var not in additionalParams.keys():
                            raise LcmException("Variable '{}' defined at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
                                               "file, must be provided in the instantiation parameters inside the "
                                               "'additionalParamsForVnf' block".format(var, vnfd["id"], vdu["id"]))
                # render the template and store the final cloud-init text in the RO descriptor
                template = Template(cloud_init_content)
                cloud_init_content = template.render(additionalParams or {})
                vdu["cloud-init"] = cloud_init_content

            return vnfd_RO
        except FsException as e:
            # uses the loop variables vdu/cloud_init_file from the iteration that failed
            raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
                               format(vnfd["id"], vdu["id"], cloud_init_file, e))
        except (TemplateError, TemplateNotFound, TemplateSyntaxError) as e:
            raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
                               format(vnfd["id"], vdu["id"], e))
tierno59d22d22018-09-25 18:10:19 +0200338
    def _ns_params_2_RO(self, ns_params, nsd, vnfd_dict, db_vnfrs, n2vc_key_list):
        """
        Creates a RO ns descriptor from OSM ns_instantiate params
        :param ns_params: OSM instantiate params
        :param nsd: nsd database content
        :param vnfd_dict: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index. {member-vnf-index: {vnfr_object}, ...}
        :param n2vc_key_list: ssh public keys to inject at vdus that need management access
        :return: The RO ns descriptor
        :raises LcmException: on references to non-existing members/connection-points or disabled VIM/WIM accounts
        """
        # per-call caches so each VIM/WIM account is resolved against the database only once
        vim_2_RO = {}
        wim_2_RO = {}
        # TODO feature 1417: Check that no instantiation is set over PDU
        # check if PDU forces a concrete vim-network-id and add it
        # check if PDU contains a SDN-assist info (dpid, switch, port) and pass it to RO

        def vim_account_2_RO(vim_account):
            # resolve an OSM vim_account id to the RO datacenter id (cached)
            if vim_account in vim_2_RO:
                return vim_2_RO[vim_account]

            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
            if db_vim["_admin"]["operationalState"] != "ENABLED":
                raise LcmException("VIM={} is not available. operationalState={}".format(
                    vim_account, db_vim["_admin"]["operationalState"]))
            RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
            vim_2_RO[vim_account] = RO_vim_id
            return RO_vim_id

        def wim_account_2_RO(wim_account):
            # resolve an OSM wim_account id to the RO account id (cached); non-str values pass through
            if isinstance(wim_account, str):
                if wim_account in wim_2_RO:
                    return wim_2_RO[wim_account]

                db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
                if db_wim["_admin"]["operationalState"] != "ENABLED":
                    raise LcmException("WIM={} is not available. operationalState={}".format(
                        wim_account, db_wim["_admin"]["operationalState"]))
                RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
                wim_2_RO[wim_account] = RO_wim_id
                return RO_wim_id
            else:
                return wim_account

        def ip_profile_2_RO(ip_profile):
            # translate an OSM ip-profile block to the RO naming (dns-server->dns-address, etc.)
            RO_ip_profile = deepcopy((ip_profile))
            if "dns-server" in RO_ip_profile:
                if isinstance(RO_ip_profile["dns-server"], list):
                    RO_ip_profile["dns-address"] = []
                    for ds in RO_ip_profile.pop("dns-server"):
                        RO_ip_profile["dns-address"].append(ds['address'])
                else:
                    RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
            if RO_ip_profile.get("ip-version") == "ipv4":
                RO_ip_profile["ip-version"] = "IPv4"
            if RO_ip_profile.get("ip-version") == "ipv6":
                RO_ip_profile["ip-version"] = "IPv6"
            if "dhcp-params" in RO_ip_profile:
                RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
            return RO_ip_profile

        if not ns_params:
            return None
        RO_ns_params = {
            # "name": ns_params["nsName"],
            # "description": ns_params.get("nsDescription"),
            "datacenter": vim_account_2_RO(ns_params["vimAccountId"]),
            "wim_account": wim_account_2_RO(ns_params.get("wimAccountId")),
            # "scenario": ns_params["nsdId"],
        }
        # set vim_account of each vnf if different from general vim_account.
        # Get this information from <vnfr> database content, key vim-account-id
        # Vim account can be set by placement_engine and it may be different from
        # the instantiate parameters (vnfs.member-vnf-index.datacenter).
        for vnf_index, vnfr in db_vnfrs.items():
            if vnfr.get("vim-account-id") and vnfr["vim-account-id"] != ns_params["vimAccountId"]:
                populate_dict(RO_ns_params, ("vnfs", vnf_index, "datacenter"), vim_account_2_RO(vnfr["vim-account-id"]))

        # inject the n2vc ssh keys at every vdu that requires management (ssh) access
        n2vc_key_list = n2vc_key_list or []
        for vnfd_ref, vnfd in vnfd_dict.items():
            vdu_needed_access = []
            mgmt_cp = None
            if vnfd.get("vnf-configuration"):
                ssh_required = deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required"))
                if ssh_required and vnfd.get("mgmt-interface"):
                    if vnfd["mgmt-interface"].get("vdu-id"):
                        vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
                    elif vnfd["mgmt-interface"].get("cp"):
                        # management interface given as a connection point: resolve to a vdu below
                        mgmt_cp = vnfd["mgmt-interface"]["cp"]

            for vdu in vnfd.get("vdu", ()):
                if vdu.get("vdu-configuration"):
                    ssh_required = deep_get(vdu, ("vdu-configuration", "config-access", "ssh-access", "required"))
                    if ssh_required:
                        vdu_needed_access.append(vdu["id"])
                elif mgmt_cp:
                    # find the vdu whose external connection point is the management cp
                    for vdu_interface in vdu.get("interface"):
                        if vdu_interface.get("external-connection-point-ref") and \
                                vdu_interface["external-connection-point-ref"] == mgmt_cp:
                            vdu_needed_access.append(vdu["id"])
                            mgmt_cp = None
                            break

            if vdu_needed_access:
                for vnf_member in nsd.get("constituent-vnfd"):
                    if vnf_member["vnfd-id-ref"] != vnfd_ref:
                        continue
                    for vdu in vdu_needed_access:
                        populate_dict(RO_ns_params,
                                      ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
                                      n2vc_key_list)

        if ns_params.get("vduImage"):
            RO_ns_params["vduImage"] = ns_params["vduImage"]

        if ns_params.get("ssh_keys"):
            RO_ns_params["cloud-config"] = {"key-pairs": ns_params["ssh_keys"]}
        # per-vnf instantiation parameters (volumes, interface addresses, internal vlds)
        for vnf_params in get_iterable(ns_params, "vnf"):
            for constituent_vnfd in nsd["constituent-vnfd"]:
                if constituent_vnfd["member-vnf-index"] == vnf_params["member-vnf-index"]:
                    vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                    break
            else:
                raise LcmException("Invalid instantiate parameter vnf:member-vnf-index={} is not present at nsd:"
                                   "constituent-vnfd".format(vnf_params["member-vnf-index"]))

            for vdu_params in get_iterable(vnf_params, "vdu"):
                # TODO feature 1417: check that this VDU exist and it is not a PDU
                if vdu_params.get("volume"):
                    for volume_params in vdu_params["volume"]:
                        if volume_params.get("vim-volume-id"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "devices", volume_params["name"], "vim_id"),
                                          volume_params["vim-volume-id"])
                if vdu_params.get("interface"):
                    for interface_params in vdu_params["interface"]:
                        if interface_params.get("ip-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "ip_address"),
                                          interface_params["ip-address"])
                        if interface_params.get("mac-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "mac_address"),
                                          interface_params["mac-address"])
                        if interface_params.get("floating-ip-required"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "floating-ip"),
                                          interface_params["floating-ip-required"])

            for internal_vld_params in get_iterable(vnf_params, "internal-vld"):
                if internal_vld_params.get("vim-network-name"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-name"),
                                  internal_vld_params["vim-network-name"])
                if internal_vld_params.get("vim-network-id"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-id"),
                                  internal_vld_params["vim-network-id"])
                if internal_vld_params.get("ip-profile"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "ip-profile"),
                                  ip_profile_2_RO(internal_vld_params["ip-profile"]))
                if internal_vld_params.get("provider-network"):

                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "provider-network"),
                                  internal_vld_params["provider-network"].copy())

                for icp_params in get_iterable(internal_vld_params, "internal-connection-point"):
                    # look for interface
                    iface_found = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for vdu_interface in vdu_descriptor["interface"]:
                            if vdu_interface.get("internal-connection-point-ref") == icp_params["id-ref"]:
                                if icp_params.get("ip-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "ip_address"),
                                                  icp_params["ip-address"])

                                if icp_params.get("mac-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "mac_address"),
                                                  icp_params["mac-address"])
                                iface_found = True
                                break
                        if iface_found:
                            break
                    else:
                        # for-else: no vdu interface referenced this internal connection point
                        raise LcmException("Invalid instantiate parameter vnf:member-vnf-index[{}]:"
                                           "internal-vld:id-ref={} is not present at vnfd:internal-"
                                           "connection-point".format(vnf_params["member-vnf-index"],
                                                                     icp_params["id-ref"]))

        # ns-level vld instantiation parameters
        for vld_params in get_iterable(ns_params, "vld"):
            if "ip-profile" in vld_params:
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"),
                              ip_profile_2_RO(vld_params["ip-profile"]))

            if vld_params.get("provider-network"):

                populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"),
                              vld_params["provider-network"].copy())

            if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None:
                # NOTE(review): trailing comma makes this statement a 1-tuple; harmless but likely accidental
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"),
                              wim_account_2_RO(vld_params["wimAccountId"])),
            if vld_params.get("vim-network-name"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-name"], dict):
                    # one entry per vim account when a mapping is provided
                    for vim_account, vim_net in vld_params["vim-network-name"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)

            if vld_params.get("vim-network-id"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-id"], dict):
                    for vim_account, vim_net in vld_params["vim-network-id"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-id"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
            if vld_params.get("ns-net"):
                # reuse a network of another ns instance (instance_scenario_id)
                if isinstance(vld_params["ns-net"], dict):
                    for vld_id, instance_scenario_id in vld_params["ns-net"].items():
                        RO_vld_ns_net = {"instance_scenario_id": instance_scenario_id, "osm_id": vld_id}
                        populate_dict(RO_ns_params, ("networks", vld_params["name"], "use-network"), RO_vld_ns_net)
            if "vnfd-connection-point-ref" in vld_params:
                for cp_params in vld_params["vnfd-connection-point-ref"]:
                    # look for interface
                    for constituent_vnfd in nsd["constituent-vnfd"]:
                        if constituent_vnfd["member-vnf-index"] == cp_params["member-vnf-index-ref"]:
                            vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={} "
                            "is not present at nsd:constituent-vnfd".format(cp_params["member-vnf-index-ref"]))
                    match_cp = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for interface_descriptor in vdu_descriptor["interface"]:
                            if interface_descriptor.get("external-connection-point-ref") == \
                                    cp_params["vnfd-connection-point-ref"]:
                                match_cp = True
                                break
                        if match_cp:
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={}:"
                            "vnfd-connection-point-ref={} is not present at vnfd={}".format(
                                cp_params["member-vnf-index-ref"],
                                cp_params["vnfd-connection-point-ref"],
                                vnf_descriptor["id"]))
                    if cp_params.get("ip-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "ip_address"),
                                      cp_params["ip-address"])
                    if cp_params.get("mac-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "mac_address"),
                                      cp_params["mac-address"])
        return RO_ns_params
615
tierno27246d82018-09-27 15:59:09 +0200616 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None):
617 # make a copy to do not change
618 vdu_create = copy(vdu_create)
619 vdu_delete = copy(vdu_delete)
620
621 vdurs = db_vnfr.get("vdur")
622 if vdurs is None:
623 vdurs = []
624 vdu_index = len(vdurs)
625 while vdu_index:
626 vdu_index -= 1
627 vdur = vdurs[vdu_index]
628 if vdur.get("pdu-type"):
629 continue
630 vdu_id_ref = vdur["vdu-id-ref"]
631 if vdu_create and vdu_create.get(vdu_id_ref):
632 for index in range(0, vdu_create[vdu_id_ref]):
633 vdur = deepcopy(vdur)
634 vdur["_id"] = str(uuid4())
635 vdur["count-index"] += 1
636 vdurs.insert(vdu_index+1+index, vdur)
637 del vdu_create[vdu_id_ref]
638 if vdu_delete and vdu_delete.get(vdu_id_ref):
639 del vdurs[vdu_index]
640 vdu_delete[vdu_id_ref] -= 1
641 if not vdu_delete[vdu_id_ref]:
642 del vdu_delete[vdu_id_ref]
643 # check all operations are done
644 if vdu_create or vdu_delete:
645 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
646 vdu_create))
647 if vdu_delete:
648 raise LcmException("Error scaling IN VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
649 vdu_delete))
650
651 vnfr_update = {"vdur": vdurs}
652 db_vnfr["vdur"] = vdurs
653 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
654
tiernof578e552018-11-08 19:07:20 +0100655 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
656 """
657 Updates database nsr with the RO info for the created vld
658 :param ns_update_nsr: dictionary to be filled with the updated info
659 :param db_nsr: content of db_nsr. This is also modified
660 :param nsr_desc_RO: nsr descriptor from RO
661 :return: Nothing, LcmException is raised on errors
662 """
663
664 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
665 for net_RO in get_iterable(nsr_desc_RO, "nets"):
666 if vld["id"] != net_RO.get("ns_net_osm_id"):
667 continue
668 vld["vim-id"] = net_RO.get("vim_net_id")
669 vld["name"] = net_RO.get("vim_name")
670 vld["status"] = net_RO.get("status")
671 vld["status-detailed"] = net_RO.get("error_msg")
672 ns_update_nsr["vld.{}".format(vld_index)] = vld
673 break
674 else:
675 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
676
tiernoe876f672020-02-13 14:34:48 +0000677 def set_vnfr_at_error(self, db_vnfrs, error_text):
678 try:
679 for db_vnfr in db_vnfrs.values():
680 vnfr_update = {"status": "ERROR"}
681 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
682 if "status" not in vdur:
683 vdur["status"] = "ERROR"
684 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
685 if error_text:
686 vdur["status-detailed"] = str(error_text)
687 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
688 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
689 except DbException as e:
690 self.logger.error("Cannot update vnf. {}".format(e))
691
tierno59d22d22018-09-25 18:10:19 +0200692 def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
693 """
694 Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
tierno27246d82018-09-27 15:59:09 +0200695 :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
696 :param nsr_desc_RO: nsr descriptor from RO
697 :return: Nothing, LcmException is raised on errors
tierno59d22d22018-09-25 18:10:19 +0200698 """
699 for vnf_index, db_vnfr in db_vnfrs.items():
700 for vnf_RO in nsr_desc_RO["vnfs"]:
tierno27246d82018-09-27 15:59:09 +0200701 if vnf_RO["member_vnf_index"] != vnf_index:
702 continue
703 vnfr_update = {}
tiernof578e552018-11-08 19:07:20 +0100704 if vnf_RO.get("ip_address"):
tierno1674de82019-04-09 13:03:14 +0000705 db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
tiernof578e552018-11-08 19:07:20 +0100706 elif not db_vnfr.get("ip-address"):
tierno0ec0c272020-02-19 17:43:01 +0000707 if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address
708 raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))
tierno59d22d22018-09-25 18:10:19 +0200709
tierno27246d82018-09-27 15:59:09 +0200710 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
711 vdur_RO_count_index = 0
712 if vdur.get("pdu-type"):
713 continue
714 for vdur_RO in get_iterable(vnf_RO, "vms"):
715 if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
716 continue
717 if vdur["count-index"] != vdur_RO_count_index:
718 vdur_RO_count_index += 1
719 continue
720 vdur["vim-id"] = vdur_RO.get("vim_vm_id")
tierno1674de82019-04-09 13:03:14 +0000721 if vdur_RO.get("ip_address"):
722 vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
tierno274ed572019-04-04 13:33:27 +0000723 else:
724 vdur["ip-address"] = None
tierno27246d82018-09-27 15:59:09 +0200725 vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
726 vdur["name"] = vdur_RO.get("vim_name")
727 vdur["status"] = vdur_RO.get("status")
728 vdur["status-detailed"] = vdur_RO.get("error_msg")
729 for ifacer in get_iterable(vdur, "interfaces"):
730 for interface_RO in get_iterable(vdur_RO, "interfaces"):
731 if ifacer["name"] == interface_RO.get("internal_name"):
732 ifacer["ip-address"] = interface_RO.get("ip_address")
733 ifacer["mac-address"] = interface_RO.get("mac_address")
734 break
735 else:
736 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
quilesj7e13aeb2019-10-08 13:34:55 +0200737 "from VIM info"
738 .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
tierno27246d82018-09-27 15:59:09 +0200739 vnfr_update["vdur.{}".format(vdu_index)] = vdur
740 break
741 else:
tierno15b1cf12019-08-29 13:21:40 +0000742 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
743 "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))
tiernof578e552018-11-08 19:07:20 +0100744
745 for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
746 for net_RO in get_iterable(nsr_desc_RO, "nets"):
747 if vld["id"] != net_RO.get("vnf_net_osm_id"):
748 continue
749 vld["vim-id"] = net_RO.get("vim_net_id")
750 vld["name"] = net_RO.get("vim_name")
751 vld["status"] = net_RO.get("status")
752 vld["status-detailed"] = net_RO.get("error_msg")
753 vnfr_update["vld.{}".format(vld_index)] = vld
754 break
755 else:
tierno15b1cf12019-08-29 13:21:40 +0000756 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
tiernof578e552018-11-08 19:07:20 +0100757 vnf_index, vld["id"]))
758
tierno27246d82018-09-27 15:59:09 +0200759 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
760 break
tierno59d22d22018-09-25 18:10:19 +0200761
762 else:
tierno15b1cf12019-08-29 13:21:40 +0000763 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
tierno59d22d22018-09-25 18:10:19 +0200764
tierno5ee02052019-12-05 19:55:02 +0000765 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000766 """
767 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000768 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000769 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
770 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
771 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
772 """
tierno5ee02052019-12-05 19:55:02 +0000773 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
774 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000775 mapping = {}
776 ns_config_info = {"osm-config-mapping": mapping}
777 for vca in vca_deployed_list:
778 if not vca["member-vnf-index"]:
779 continue
780 if not vca["vdu_id"]:
781 mapping[vca["member-vnf-index"]] = vca["application"]
782 else:
783 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
784 vca["application"]
785 return ns_config_info
786
787 @staticmethod
tiernoa278b842020-07-08 15:33:55 +0000788 def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed, ee_descriptor_id):
tiernoc3f2a822019-11-05 13:45:04 +0000789 """
790 Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal
791 primitives as verify-ssh-credentials, or config when needed
792 :param desc_primitive_list: information of the descriptor
793 :param vca_deployed: information of the deployed, needed for known if it is related to an NS, VNF, VDU and if
794 this element contains a ssh public key
tiernoa278b842020-07-08 15:33:55 +0000795 :param ee_descriptor_id: execution environment descriptor id. It is the value of
796 XXX_configuration.execution-environment-list.INDEX.id; it can be None
tiernoc3f2a822019-11-05 13:45:04 +0000797 :return: The modified list. Can ba an empty list, but always a list
798 """
tiernoa278b842020-07-08 15:33:55 +0000799
800 primitive_list = desc_primitive_list or []
801
802 # filter primitives by ee_id
803 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
804
805 # sort by 'seq'
806 if primitive_list:
807 primitive_list.sort(key=lambda val: int(val['seq']))
808
tiernoc3f2a822019-11-05 13:45:04 +0000809 # look for primitive config, and get the position. None if not present
810 config_position = None
811 for index, primitive in enumerate(primitive_list):
812 if primitive["name"] == "config":
813 config_position = index
814 break
815
816 # for NS, add always a config primitive if not present (bug 874)
817 if not vca_deployed["member-vnf-index"] and config_position is None:
818 primitive_list.insert(0, {"name": "config", "parameter": []})
819 config_position = 0
tiernoa278b842020-07-08 15:33:55 +0000820 # TODO revise if needed: for VNF/VDU add verify-ssh-credentials after config
tiernoc3f2a822019-11-05 13:45:04 +0000821 if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"):
822 primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []})
823 return primitive_list
824
tierno69f0d382020-05-07 13:08:09 +0000825 async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
826 n2vc_key_list, stage, start_deploy, timeout_ns_deploy):
827 nslcmop_id = db_nslcmop["_id"]
828 target = {
829 "name": db_nsr["name"],
830 "ns": {"vld": []},
831 "vnf": [],
832 "image": deepcopy(db_nsr["image"]),
833 "flavor": deepcopy(db_nsr["flavor"]),
834 "action_id": nslcmop_id,
835 }
836 for image in target["image"]:
837 image["vim_info"] = []
838 for flavor in target["flavor"]:
839 flavor["vim_info"] = []
840
841 ns_params = db_nslcmop.get("operationParams")
842 ssh_keys = []
843 if ns_params.get("ssh_keys"):
844 ssh_keys += ns_params.get("ssh_keys")
845 if n2vc_key_list:
846 ssh_keys += n2vc_key_list
847
848 cp2target = {}
849 for vld_index, vld in enumerate(nsd.get("vld")):
850 target_vld = {"id": vld["id"],
851 "name": vld["name"],
852 "mgmt-network": vld.get("mgmt-network", False),
853 "type": vld.get("type"),
854 "vim_info": [{"vim-network-name": vld.get("vim-network-name"),
855 "vim_account_id": ns_params["vimAccountId"]}],
856 }
857 for cp in vld["vnfd-connection-point-ref"]:
858 cp2target["member_vnf:{}.{}".format(cp["member-vnf-index-ref"], cp["vnfd-connection-point-ref"])] = \
859 "nsrs:{}:vld.{}".format(nsr_id, vld_index)
860 target["ns"]["vld"].append(target_vld)
861 for vnfr in db_vnfrs.values():
862 vnfd = db_vnfds_ref[vnfr["vnfd-ref"]]
863 target_vnf = deepcopy(vnfr)
864 for vld in target_vnf.get("vld", ()):
865 # check if connected to a ns.vld
866 vnf_cp = next((cp for cp in vnfd.get("connection-point", ()) if
867 cp.get("internal-vld-ref") == vld["id"]), None)
868 if vnf_cp:
869 ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"])
870 if cp2target.get(ns_cp):
871 vld["target"] = cp2target[ns_cp]
872 vld["vim_info"] = [{"vim-network-name": vld.get("vim-network-name"),
873 "vim_account_id": vnfr["vim-account-id"]}]
874
875 for vdur in target_vnf.get("vdur", ()):
876 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
877 vdud_index, vdud = next(k for k in enumerate(vnfd["vdu"]) if k[1]["id"] == vdur["vdu-id-ref"])
878 # vdur["additionalParams"] = vnfr.get("additionalParamsForVnf") # TODO additional params for VDU
879
880 if ssh_keys:
881 if deep_get(vdud, ("vdu-configuration", "config-access", "ssh-access", "required")):
882 vdur["ssh-keys"] = ssh_keys
883 vdur["ssh-access-required"] = True
884 elif deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required")) and \
885 any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]):
886 vdur["ssh-keys"] = ssh_keys
887 vdur["ssh-access-required"] = True
888
889 # cloud-init
890 if vdud.get("cloud-init-file"):
891 vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file"))
892 elif vdud.get("cloud-init"):
893 vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], vdud_index)
894
895 # flavor
896 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
897 if not next((vi for vi in ns_flavor["vim_info"] if
898 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
899 ns_flavor["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
900 # image
901 ns_image = target["image"][int(vdur["ns-image-id"])]
902 if not next((vi for vi in ns_image["vim_info"] if
903 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
904 ns_image["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
905
906 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
907 target["vnf"].append(target_vnf)
908
909 desc = await self.RO.deploy(nsr_id, target)
910 action_id = desc["action_id"]
911 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage)
912
913 # Updating NSR
914 db_nsr_update = {
915 "_admin.deployed.RO.operational-status": "running",
916 "detailed-status": " ".join(stage)
917 }
918 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
919 self.update_db_2("nsrs", nsr_id, db_nsr_update)
920 self._write_op_status(nslcmop_id, stage)
921 self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id))
922 return
923
924 async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_time, timeout, stage):
925 detailed_status_old = None
926 db_nsr_update = {}
927 while time() <= start_time + timeout:
928 desc_status = await self.RO.status(nsr_id, action_id)
929 if desc_status["status"] == "FAILED":
930 raise NgRoException(desc_status["details"])
931 elif desc_status["status"] == "BUILD":
932 stage[2] = "VIM: ({})".format(desc_status["details"])
933 elif desc_status["status"] == "DONE":
934 stage[2] = "Deployed at VIM"
935 break
936 else:
937 assert False, "ROclient.check_ns_status returns unknown {}".format(desc_status["status"])
938 if stage[2] != detailed_status_old:
939 detailed_status_old = stage[2]
940 db_nsr_update["detailed-status"] = " ".join(stage)
941 self.update_db_2("nsrs", nsr_id, db_nsr_update)
942 self._write_op_status(nslcmop_id, stage)
943 await asyncio.sleep(5, loop=self.loop)
944 else: # timeout_ns_deploy
945 raise NgRoException("Timeout waiting ns to deploy")
946
947 async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
948 db_nsr_update = {}
949 failed_detail = []
950 action_id = None
951 start_deploy = time()
952 try:
953 target = {
954 "ns": {"vld": []},
955 "vnf": [],
956 "image": [],
957 "flavor": [],
958 }
959 desc = await self.RO.deploy(nsr_id, target)
960 action_id = desc["action_id"]
961 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
962 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
963 self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id))
964
965 # wait until done
966 delete_timeout = 20 * 60 # 20 minutes
967 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage)
968
969 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
970 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
971 # delete all nsr
972 await self.RO.delete(nsr_id)
973 except Exception as e:
974 if isinstance(e, NgRoException) and e.http_code == 404: # not found
975 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
976 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
977 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
978 self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id))
979 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
980 failed_detail.append("delete conflict: {}".format(e))
981 self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e))
982 else:
983 failed_detail.append("delete error: {}".format(e))
984 self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e))
985
986 if failed_detail:
987 stage[2] = "Error deleting from VIM"
988 else:
989 stage[2] = "Deleted from VIM"
990 db_nsr_update["detailed-status"] = " ".join(stage)
991 self.update_db_2("nsrs", nsr_id, db_nsr_update)
992 self._write_op_status(nslcmop_id, stage)
993
994 if failed_detail:
995 raise LcmException("; ".join(failed_detail))
996 return
997
tiernoe876f672020-02-13 14:34:48 +0000998 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
999 n2vc_key_list, stage):
tiernoe95ed362020-04-23 08:24:57 +00001000 """
1001 Instantiate at RO
1002 :param logging_text: preffix text to use at logging
1003 :param nsr_id: nsr identity
1004 :param nsd: database content of ns descriptor
1005 :param db_nsr: database content of ns record
1006 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1007 :param db_vnfrs:
1008 :param db_vnfds_ref: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1009 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1010 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1011 :return: None or exception
1012 """
tiernoe876f672020-02-13 14:34:48 +00001013 try:
1014 db_nsr_update = {}
1015 RO_descriptor_number = 0 # number of descriptors created at RO
1016 vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
1017 nslcmop_id = db_nslcmop["_id"]
1018 start_deploy = time()
1019 ns_params = db_nslcmop.get("operationParams")
1020 if ns_params and ns_params.get("timeout_ns_deploy"):
1021 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1022 else:
1023 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001024
tiernoe876f672020-02-13 14:34:48 +00001025 # Check for and optionally request placement optimization. Database will be updated if placement activated
1026 stage[2] = "Waiting for Placement."
tierno8790a3d2020-04-23 22:49:52 +00001027 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1028 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1029 for vnfr in db_vnfrs.values():
1030 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1031 break
1032 else:
1033 ns_params["vimAccountId"] == vnfr["vim-account-id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001034
tierno69f0d382020-05-07 13:08:09 +00001035 if self.ng_ro:
1036 return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs,
1037 db_vnfds_ref, n2vc_key_list, stage, start_deploy,
1038 timeout_ns_deploy)
tiernoe876f672020-02-13 14:34:48 +00001039 # deploy RO
tiernoe876f672020-02-13 14:34:48 +00001040 # get vnfds, instantiate at RO
1041 for c_vnf in nsd.get("constituent-vnfd", ()):
1042 member_vnf_index = c_vnf["member-vnf-index"]
1043 vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
1044 vnfd_ref = vnfd["id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001045
tiernoe876f672020-02-13 14:34:48 +00001046 stage[2] = "Creating vnfd='{}' member_vnf_index='{}' at RO".format(vnfd_ref, member_vnf_index)
1047 db_nsr_update["detailed-status"] = " ".join(stage)
1048 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1049 self._write_op_status(nslcmop_id, stage)
calvinosanch9f9c6f22019-11-04 13:37:39 +01001050
tiernoe876f672020-02-13 14:34:48 +00001051 # self.logger.debug(logging_text + stage[2])
1052 vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, member_vnf_index[:23])
1053 vnf_index_2_RO_id[member_vnf_index] = vnfd_id_RO
1054 RO_descriptor_number += 1
1055
1056 # look position at deployed.RO.vnfd if not present it will be appended at the end
1057 for index, vnf_deployed in enumerate(db_nsr["_admin"]["deployed"]["RO"]["vnfd"]):
1058 if vnf_deployed["member-vnf-index"] == member_vnf_index:
1059 break
1060 else:
1061 index = len(db_nsr["_admin"]["deployed"]["RO"]["vnfd"])
1062 db_nsr["_admin"]["deployed"]["RO"]["vnfd"].append(None)
1063
1064 # look if present
1065 RO_update = {"member-vnf-index": member_vnf_index}
1066 vnfd_list = await self.RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
1067 if vnfd_list:
1068 RO_update["id"] = vnfd_list[0]["uuid"]
1069 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' exists at RO. Using RO_id={}".
1070 format(vnfd_ref, member_vnf_index, vnfd_list[0]["uuid"]))
1071 else:
1072 vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO, db_vnfrs[c_vnf["member-vnf-index"]].
1073 get("additionalParamsForVnf"), nsr_id)
1074 desc = await self.RO.create("vnfd", descriptor=vnfd_RO)
1075 RO_update["id"] = desc["uuid"]
1076 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' created at RO. RO_id={}".format(
1077 vnfd_ref, member_vnf_index, desc["uuid"]))
1078 db_nsr_update["_admin.deployed.RO.vnfd.{}".format(index)] = RO_update
1079 db_nsr["_admin"]["deployed"]["RO"]["vnfd"][index] = RO_update
1080
1081 # create nsd at RO
1082 nsd_ref = nsd["id"]
1083
1084 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1085 db_nsr_update["detailed-status"] = " ".join(stage)
1086 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1087 self._write_op_status(nslcmop_id, stage)
1088
1089 # self.logger.debug(logging_text + stage[2])
1090 RO_osm_nsd_id = "{}.{}.{}".format(nsr_id, RO_descriptor_number, nsd_ref[:23])
tiernod8323042019-08-09 11:32:23 +00001091 RO_descriptor_number += 1
tiernoe876f672020-02-13 14:34:48 +00001092 nsd_list = await self.RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
1093 if nsd_list:
1094 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = nsd_list[0]["uuid"]
1095 self.logger.debug(logging_text + "nsd={} exists at RO. Using RO_id={}".format(
1096 nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001097 else:
tiernoe876f672020-02-13 14:34:48 +00001098 nsd_RO = deepcopy(nsd)
1099 nsd_RO["id"] = RO_osm_nsd_id
1100 nsd_RO.pop("_id", None)
1101 nsd_RO.pop("_admin", None)
1102 for c_vnf in nsd_RO.get("constituent-vnfd", ()):
1103 member_vnf_index = c_vnf["member-vnf-index"]
1104 c_vnf["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
1105 for c_vld in nsd_RO.get("vld", ()):
1106 for cp in c_vld.get("vnfd-connection-point-ref", ()):
1107 member_vnf_index = cp["member-vnf-index-ref"]
1108 cp["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
tiernod8323042019-08-09 11:32:23 +00001109
tiernoe876f672020-02-13 14:34:48 +00001110 desc = await self.RO.create("nsd", descriptor=nsd_RO)
1111 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1112 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = desc["uuid"]
1113 self.logger.debug(logging_text + "nsd={} created at RO. RO_id={}".format(nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001114 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1115
tiernoe876f672020-02-13 14:34:48 +00001116 # Crate ns at RO
1117 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1118 db_nsr_update["detailed-status"] = " ".join(stage)
1119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1120 self._write_op_status(nslcmop_id, stage)
tiernod8323042019-08-09 11:32:23 +00001121
tiernoe876f672020-02-13 14:34:48 +00001122 # if present use it unless in error status
1123 RO_nsr_id = deep_get(db_nsr, ("_admin", "deployed", "RO", "nsr_id"))
1124 if RO_nsr_id:
1125 try:
1126 stage[2] = "Looking for existing ns at RO"
1127 db_nsr_update["detailed-status"] = " ".join(stage)
1128 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1129 self._write_op_status(nslcmop_id, stage)
1130 # self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1131 desc = await self.RO.show("ns", RO_nsr_id)
tiernod8323042019-08-09 11:32:23 +00001132
tiernoe876f672020-02-13 14:34:48 +00001133 except ROclient.ROClientException as e:
1134 if e.http_code != HTTPStatus.NOT_FOUND:
1135 raise
1136 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1137 if RO_nsr_id:
1138 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1139 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1140 if ns_status == "ERROR":
1141 stage[2] = "Deleting ns at RO. RO_ns_id={}".format(RO_nsr_id)
1142 self.logger.debug(logging_text + stage[2])
1143 await self.RO.delete("ns", RO_nsr_id)
1144 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1145 if not RO_nsr_id:
1146 stage[2] = "Checking dependencies"
1147 db_nsr_update["detailed-status"] = " ".join(stage)
1148 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1149 self._write_op_status(nslcmop_id, stage)
1150 # self.logger.debug(logging_text + stage[2])
tiernod8323042019-08-09 11:32:23 +00001151
tiernoe876f672020-02-13 14:34:48 +00001152 # check if VIM is creating and wait look if previous tasks in process
1153 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account", ns_params["vimAccountId"])
1154 if task_dependency:
1155 stage[2] = "Waiting for related tasks '{}' to be completed".format(task_name)
1156 self.logger.debug(logging_text + stage[2])
1157 await asyncio.wait(task_dependency, timeout=3600)
1158 if ns_params.get("vnf"):
1159 for vnf in ns_params["vnf"]:
1160 if "vimAccountId" in vnf:
1161 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account",
1162 vnf["vimAccountId"])
1163 if task_dependency:
1164 stage[2] = "Waiting for related tasks '{}' to be completed.".format(task_name)
1165 self.logger.debug(logging_text + stage[2])
1166 await asyncio.wait(task_dependency, timeout=3600)
1167
1168 stage[2] = "Checking instantiation parameters."
tiernoe95ed362020-04-23 08:24:57 +00001169 RO_ns_params = self._ns_params_2_RO(ns_params, nsd, db_vnfds_ref, db_vnfrs, n2vc_key_list)
tiernoe876f672020-02-13 14:34:48 +00001170 stage[2] = "Deploying ns at VIM."
1171 db_nsr_update["detailed-status"] = " ".join(stage)
1172 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1173 self._write_op_status(nslcmop_id, stage)
1174
1175 desc = await self.RO.create("ns", descriptor=RO_ns_params, name=db_nsr["name"], scenario=RO_nsd_uuid)
1176 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = desc["uuid"]
1177 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1178 db_nsr_update["_admin.deployed.RO.nsr_status"] = "BUILD"
1179 self.logger.debug(logging_text + "ns created at RO. RO_id={}".format(desc["uuid"]))
1180
1181 # wait until NS is ready
1182 stage[2] = "Waiting VIM to deploy ns."
1183 db_nsr_update["detailed-status"] = " ".join(stage)
1184 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1185 self._write_op_status(nslcmop_id, stage)
1186 detailed_status_old = None
1187 self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1188
1189 old_desc = None
1190 while time() <= start_deploy + timeout_ns_deploy:
tiernod8323042019-08-09 11:32:23 +00001191 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001192
tiernoe876f672020-02-13 14:34:48 +00001193 # deploymentStatus
1194 if desc != old_desc:
1195 # desc has changed => update db
1196 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
1197 old_desc = desc
tiernod8323042019-08-09 11:32:23 +00001198
tiernoe876f672020-02-13 14:34:48 +00001199 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1200 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1201 if ns_status == "ERROR":
1202 raise ROclient.ROClientException(ns_status_info)
1203 elif ns_status == "BUILD":
1204 stage[2] = "VIM: ({})".format(ns_status_info)
1205 elif ns_status == "ACTIVE":
1206 stage[2] = "Waiting for management IP address reported by the VIM. Updating VNFRs."
1207 try:
1208 self.ns_update_vnfr(db_vnfrs, desc)
1209 break
1210 except LcmExceptionNoMgmtIP:
1211 pass
1212 else:
1213 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
1214 if stage[2] != detailed_status_old:
1215 detailed_status_old = stage[2]
1216 db_nsr_update["detailed-status"] = " ".join(stage)
1217 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1218 self._write_op_status(nslcmop_id, stage)
1219 await asyncio.sleep(5, loop=self.loop)
1220 else: # timeout_ns_deploy
1221 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tiernod8323042019-08-09 11:32:23 +00001222
tiernoe876f672020-02-13 14:34:48 +00001223 # Updating NSR
1224 self.ns_update_nsr(db_nsr_update, db_nsr, desc)
tiernod8323042019-08-09 11:32:23 +00001225
tiernoe876f672020-02-13 14:34:48 +00001226 db_nsr_update["_admin.deployed.RO.operational-status"] = "running"
1227 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1228 stage[2] = "Deployed at VIM"
1229 db_nsr_update["detailed-status"] = " ".join(stage)
1230 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1231 self._write_op_status(nslcmop_id, stage)
1232 # await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, "_admin.deployed", db_nsr_update)
1233 # self.logger.debug(logging_text + "Deployed at VIM")
tierno69f0d382020-05-07 13:08:09 +00001234 except (ROclient.ROClientException, LcmException, DbException, NgRoException) as e:
tierno067e04a2020-03-31 12:53:13 +00001235 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +00001236 self.set_vnfr_at_error(db_vnfrs, str(e))
1237 raise
quilesj7e13aeb2019-10-08 13:34:55 +02001238
tiernoa5088192019-11-26 16:12:53 +00001239 async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
1240 """
1241 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1242 :param logging_text: prefix use for logging
1243 :param nsr_id:
1244 :param vnfr_id:
1245 :param vdu_id:
1246 :param vdu_index:
1247 :param pub_key: public ssh key to inject, None to skip
1248 :param user: user to apply the public ssh key
1249 :return: IP address
1250 """
quilesj7e13aeb2019-10-08 13:34:55 +02001251
tiernoa5088192019-11-26 16:12:53 +00001252 # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
tiernod8323042019-08-09 11:32:23 +00001253 ro_nsr_id = None
1254 ip_address = None
1255 nb_tries = 0
1256 target_vdu_id = None
quilesj3149f262019-12-03 10:58:10 +00001257 ro_retries = 0
quilesj7e13aeb2019-10-08 13:34:55 +02001258
tiernod8323042019-08-09 11:32:23 +00001259 while True:
quilesj7e13aeb2019-10-08 13:34:55 +02001260
quilesj3149f262019-12-03 10:58:10 +00001261 ro_retries += 1
1262 if ro_retries >= 360: # 1 hour
1263 raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))
1264
tiernod8323042019-08-09 11:32:23 +00001265 await asyncio.sleep(10, loop=self.loop)
quilesj7e13aeb2019-10-08 13:34:55 +02001266
1267 # get ip address
tiernod8323042019-08-09 11:32:23 +00001268 if not target_vdu_id:
1269 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
quilesj3149f262019-12-03 10:58:10 +00001270
1271 if not vdu_id: # for the VNF case
tiernoe876f672020-02-13 14:34:48 +00001272 if db_vnfr.get("status") == "ERROR":
1273 raise LcmException("Cannot inject ssh-key because target VNF is in error state")
tiernod8323042019-08-09 11:32:23 +00001274 ip_address = db_vnfr.get("ip-address")
1275 if not ip_address:
1276 continue
quilesj3149f262019-12-03 10:58:10 +00001277 vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
1278 else: # VDU case
1279 vdur = next((x for x in get_iterable(db_vnfr, "vdur")
1280 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
1281
tierno0e8c3f02020-03-12 17:18:21 +00001282 if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
1283 vdur = db_vnfr["vdur"][0]
quilesj3149f262019-12-03 10:58:10 +00001284 if not vdur:
tierno0e8c3f02020-03-12 17:18:21 +00001285 raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
1286 vdu_index))
quilesj7e13aeb2019-10-08 13:34:55 +02001287
tierno0e8c3f02020-03-12 17:18:21 +00001288 if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE":
quilesj3149f262019-12-03 10:58:10 +00001289 ip_address = vdur.get("ip-address")
1290 if not ip_address:
1291 continue
1292 target_vdu_id = vdur["vdu-id-ref"]
1293 elif vdur.get("status") == "ERROR":
1294 raise LcmException("Cannot inject ssh-key because target VM is in error state")
1295
tiernod8323042019-08-09 11:32:23 +00001296 if not target_vdu_id:
1297 continue
tiernod8323042019-08-09 11:32:23 +00001298
quilesj7e13aeb2019-10-08 13:34:55 +02001299 # inject public key into machine
1300 if pub_key and user:
tiernoe876f672020-02-13 14:34:48 +00001301 # wait until NS is deployed at RO
1302 if not ro_nsr_id:
1303 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1304 ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
1305 if not ro_nsr_id:
1306 continue
1307
tiernoa5088192019-11-26 16:12:53 +00001308 # self.logger.debug(logging_text + "Inserting RO key")
tierno0e8c3f02020-03-12 17:18:21 +00001309 if vdur.get("pdu-type"):
1310 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1311 return ip_address
quilesj7e13aeb2019-10-08 13:34:55 +02001312 try:
1313 ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
tierno69f0d382020-05-07 13:08:09 +00001314 if self.ng_ro:
1315 target = {"action": "inject_ssh_key", "key": pub_key, "user": user,
1316 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdu_id}]}],
1317 }
1318 await self.RO.deploy(nsr_id, target)
1319 else:
1320 result_dict = await self.RO.create_action(
1321 item="ns",
1322 item_id_name=ro_nsr_id,
1323 descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
1324 )
1325 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1326 if not result_dict or not isinstance(result_dict, dict):
1327 raise LcmException("Unknown response from RO when injecting key")
1328 for result in result_dict.values():
1329 if result.get("vim_result") == 200:
1330 break
1331 else:
1332 raise ROclient.ROClientException("error injecting key: {}".format(
1333 result.get("description")))
1334 break
1335 except NgRoException as e:
1336 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001337 except ROclient.ROClientException as e:
tiernoa5088192019-11-26 16:12:53 +00001338 if not nb_tries:
1339 self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
1340 format(e, 20*10))
quilesj7e13aeb2019-10-08 13:34:55 +02001341 nb_tries += 1
tiernoa5088192019-11-26 16:12:53 +00001342 if nb_tries >= 20:
quilesj7e13aeb2019-10-08 13:34:55 +02001343 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001344 else:
quilesj7e13aeb2019-10-08 13:34:55 +02001345 break
1346
1347 return ip_address
1348
tierno5ee02052019-12-05 19:55:02 +00001349 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1350 """
1351 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1352 """
1353 my_vca = vca_deployed_list[vca_index]
1354 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
quilesj3655ae02019-12-12 16:08:35 +00001355 # vdu or kdu: no dependencies
tierno5ee02052019-12-05 19:55:02 +00001356 return
1357 timeout = 300
1358 while timeout >= 0:
quilesj3655ae02019-12-12 16:08:35 +00001359 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1360 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1361 configuration_status_list = db_nsr["configurationStatus"]
1362 for index, vca_deployed in enumerate(configuration_status_list):
tierno5ee02052019-12-05 19:55:02 +00001363 if index == vca_index:
quilesj3655ae02019-12-12 16:08:35 +00001364 # myself
tierno5ee02052019-12-05 19:55:02 +00001365 continue
1366 if not my_vca.get("member-vnf-index") or \
1367 (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
quilesj3655ae02019-12-12 16:08:35 +00001368 internal_status = configuration_status_list[index].get("status")
1369 if internal_status == 'READY':
1370 continue
1371 elif internal_status == 'BROKEN':
tierno5ee02052019-12-05 19:55:02 +00001372 raise LcmException("Configuration aborted because dependent charm/s has failed")
quilesj3655ae02019-12-12 16:08:35 +00001373 else:
1374 break
tierno5ee02052019-12-05 19:55:02 +00001375 else:
quilesj3655ae02019-12-12 16:08:35 +00001376 # no dependencies, return
tierno5ee02052019-12-05 19:55:02 +00001377 return
1378 await asyncio.sleep(10)
1379 timeout -= 1
tierno5ee02052019-12-05 19:55:02 +00001380
1381 raise LcmException("Configuration aborted because dependent charm/s timeout")
1382
tiernoe876f672020-02-13 14:34:48 +00001383 async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
tiernob996d942020-07-03 14:52:28 +00001384 config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name,
1385 ee_config_descriptor):
tiernod8323042019-08-09 11:32:23 +00001386 nsr_id = db_nsr["_id"]
1387 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
tiernoda6fb102019-11-23 00:36:52 +00001388 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernod8323042019-08-09 11:32:23 +00001389 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
tiernob996d942020-07-03 14:52:28 +00001390 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
quilesj7e13aeb2019-10-08 13:34:55 +02001391 db_dict = {
1392 'collection': 'nsrs',
1393 'filter': {'_id': nsr_id},
1394 'path': db_update_entry
1395 }
tiernod8323042019-08-09 11:32:23 +00001396 step = ""
1397 try:
quilesj3655ae02019-12-12 16:08:35 +00001398
1399 element_type = 'NS'
1400 element_under_configuration = nsr_id
1401
tiernod8323042019-08-09 11:32:23 +00001402 vnfr_id = None
1403 if db_vnfr:
1404 vnfr_id = db_vnfr["_id"]
tiernob996d942020-07-03 14:52:28 +00001405 osm_config["osm"]["vnf_id"] = vnfr_id
tiernod8323042019-08-09 11:32:23 +00001406
1407 namespace = "{nsi}.{ns}".format(
1408 nsi=nsi_id if nsi_id else "",
1409 ns=nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001410
tiernod8323042019-08-09 11:32:23 +00001411 if vnfr_id:
quilesj3655ae02019-12-12 16:08:35 +00001412 element_type = 'VNF'
1413 element_under_configuration = vnfr_id
quilesjb8a35dd2020-01-09 15:10:14 +00001414 namespace += ".{}".format(vnfr_id)
tiernod8323042019-08-09 11:32:23 +00001415 if vdu_id:
1416 namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
quilesj3655ae02019-12-12 16:08:35 +00001417 element_type = 'VDU'
quilesjb8a35dd2020-01-09 15:10:14 +00001418 element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
tiernob996d942020-07-03 14:52:28 +00001419 osm_config["osm"]["vdu_id"] = vdu_id
tierno51183952020-04-03 15:48:18 +00001420 elif kdu_name:
1421 namespace += ".{}".format(kdu_name)
1422 element_type = 'KDU'
1423 element_under_configuration = kdu_name
tiernob996d942020-07-03 14:52:28 +00001424 osm_config["osm"]["kdu_name"] = kdu_name
tiernod8323042019-08-09 11:32:23 +00001425
1426 # Get artifact path
tierno588547c2020-07-01 15:30:20 +00001427 artifact_path = "{}/{}/{}/{}".format(
tiernod8323042019-08-09 11:32:23 +00001428 base_folder["folder"],
1429 base_folder["pkg-dir"],
tierno588547c2020-07-01 15:30:20 +00001430 "charms" if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts",
1431 vca_name
tiernod8323042019-08-09 11:32:23 +00001432 )
tiernoa278b842020-07-08 15:33:55 +00001433 # get initial_config_primitive_list that applies to this element
1434 initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
1435
1436 # add config if not present for NS charm
1437 ee_descriptor_id = ee_config_descriptor.get("id")
1438 initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
1439 vca_deployed, ee_descriptor_id)
tiernod8323042019-08-09 11:32:23 +00001440
tierno588547c2020-07-01 15:30:20 +00001441 # n2vc_redesign STEP 3.1
tierno588547c2020-07-01 15:30:20 +00001442 # find old ee_id if exists
1443 ee_id = vca_deployed.get("ee_id")
tiernod8323042019-08-09 11:32:23 +00001444
tierno588547c2020-07-01 15:30:20 +00001445 # create or register execution environment in VCA
1446 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm"):
quilesj7e13aeb2019-10-08 13:34:55 +02001447
tierno588547c2020-07-01 15:30:20 +00001448 self._write_configuration_status(
1449 nsr_id=nsr_id,
1450 vca_index=vca_index,
1451 status='CREATING',
1452 element_under_configuration=element_under_configuration,
1453 element_type=element_type
1454 )
tiernod8323042019-08-09 11:32:23 +00001455
tierno588547c2020-07-01 15:30:20 +00001456 step = "create execution environment"
1457 self.logger.debug(logging_text + step)
1458 ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
1459 namespace=namespace,
1460 reuse_ee_id=ee_id,
1461 db_dict=db_dict,
tiernob996d942020-07-03 14:52:28 +00001462 config=osm_config,
tierno588547c2020-07-01 15:30:20 +00001463 artifact_path=artifact_path,
1464 vca_type=vca_type)
quilesj3655ae02019-12-12 16:08:35 +00001465
tierno588547c2020-07-01 15:30:20 +00001466 elif vca_type == "native_charm":
1467 step = "Waiting to VM being up and getting IP address"
1468 self.logger.debug(logging_text + step)
1469 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1470 user=None, pub_key=None)
1471 credentials = {"hostname": rw_mgmt_ip}
1472 # get username
1473 username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1474 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1475 # merged. Meanwhile let's get username from initial-config-primitive
tiernoa278b842020-07-08 15:33:55 +00001476 if not username and initial_config_primitive_list:
1477 for config_primitive in initial_config_primitive_list:
tierno588547c2020-07-01 15:30:20 +00001478 for param in config_primitive.get("parameter", ()):
1479 if param["name"] == "ssh-username":
1480 username = param["value"]
1481 break
1482 if not username:
tiernoa278b842020-07-08 15:33:55 +00001483 raise LcmException("Cannot determine the username neither with 'initial-config-primitive' nor with "
tierno588547c2020-07-01 15:30:20 +00001484 "'config-access.ssh-access.default-user'")
1485 credentials["username"] = username
1486 # n2vc_redesign STEP 3.2
quilesj3655ae02019-12-12 16:08:35 +00001487
tierno588547c2020-07-01 15:30:20 +00001488 self._write_configuration_status(
1489 nsr_id=nsr_id,
1490 vca_index=vca_index,
1491 status='REGISTERING',
1492 element_under_configuration=element_under_configuration,
1493 element_type=element_type
1494 )
quilesj3655ae02019-12-12 16:08:35 +00001495
tierno588547c2020-07-01 15:30:20 +00001496 step = "register execution environment {}".format(credentials)
1497 self.logger.debug(logging_text + step)
1498 ee_id = await self.vca_map[vca_type].register_execution_environment(
1499 credentials=credentials, namespace=namespace, db_dict=db_dict)
tierno3bedc9b2019-11-27 15:46:57 +00001500
tierno588547c2020-07-01 15:30:20 +00001501 # for compatibility with MON/POL modules, the need model and application name at database
1502 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1503 ee_id_parts = ee_id.split('.')
1504 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1505 if len(ee_id_parts) >= 2:
1506 model_name = ee_id_parts[0]
1507 application_name = ee_id_parts[1]
1508 db_nsr_update[db_update_entry + "model"] = model_name
1509 db_nsr_update[db_update_entry + "application"] = application_name
tiernod8323042019-08-09 11:32:23 +00001510
1511 # n2vc_redesign STEP 3.3
tiernod8323042019-08-09 11:32:23 +00001512 step = "Install configuration Software"
quilesj3655ae02019-12-12 16:08:35 +00001513
tiernoc231a872020-01-21 08:49:05 +00001514 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001515 nsr_id=nsr_id,
1516 vca_index=vca_index,
1517 status='INSTALLING SW',
1518 element_under_configuration=element_under_configuration,
tierno51183952020-04-03 15:48:18 +00001519 element_type=element_type,
tierno588547c2020-07-01 15:30:20 +00001520 other_update=db_nsr_update
quilesj3655ae02019-12-12 16:08:35 +00001521 )
1522
tierno3bedc9b2019-11-27 15:46:57 +00001523 # TODO check if already done
quilesj7e13aeb2019-10-08 13:34:55 +02001524 self.logger.debug(logging_text + step)
David Garcia18a63322020-04-01 16:14:59 +02001525 config = None
tierno588547c2020-07-01 15:30:20 +00001526 if vca_type == "native_charm":
tiernoa278b842020-07-08 15:33:55 +00001527 config_primitive = next((p for p in initial_config_primitive_list if p["name"] == "config"), None)
1528 if config_primitive:
1529 config = self._map_primitive_params(
1530 config_primitive,
1531 {},
1532 deploy_params
1533 )
tierno588547c2020-07-01 15:30:20 +00001534 num_units = 1
1535 if vca_type == "lxc_proxy_charm":
1536 if element_type == "NS":
1537 num_units = db_nsr.get("config-units") or 1
1538 elif element_type == "VNF":
1539 num_units = db_vnfr.get("config-units") or 1
1540 elif element_type == "VDU":
1541 for v in db_vnfr["vdur"]:
1542 if vdu_id == v["vdu-id-ref"]:
1543 num_units = v.get("config-units") or 1
1544 break
David Garcia06a11f22020-03-25 18:21:37 +01001545
tierno588547c2020-07-01 15:30:20 +00001546 await self.vca_map[vca_type].install_configuration_sw(
1547 ee_id=ee_id,
1548 artifact_path=artifact_path,
1549 db_dict=db_dict,
1550 config=config,
1551 num_units=num_units,
1552 vca_type=vca_type
1553 )
quilesj7e13aeb2019-10-08 13:34:55 +02001554
quilesj63f90042020-01-17 09:53:55 +00001555 # write in db flag of configuration_sw already installed
1556 self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})
1557
1558 # add relations for this VCA (wait for other peers related with this VCA)
tierno588547c2020-07-01 15:30:20 +00001559 await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
1560 vca_index=vca_index, vca_type=vca_type)
quilesj63f90042020-01-17 09:53:55 +00001561
quilesj7e13aeb2019-10-08 13:34:55 +02001562 # if SSH access is required, then get execution environment SSH public
David Garciaa27e20a2020-07-10 13:12:44 +02001563 # if native charm we have waited already to VM be UP
1564 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm"):
tierno3bedc9b2019-11-27 15:46:57 +00001565 pub_key = None
1566 user = None
tierno588547c2020-07-01 15:30:20 +00001567 # self.logger.debug("get ssh key block")
tierno3bedc9b2019-11-27 15:46:57 +00001568 if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
tierno588547c2020-07-01 15:30:20 +00001569 # self.logger.debug("ssh key needed")
tierno3bedc9b2019-11-27 15:46:57 +00001570 # Needed to inject a ssh key
1571 user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1572 step = "Install configuration Software, getting public ssh key"
tierno588547c2020-07-01 15:30:20 +00001573 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02001574
tiernoacc90452019-12-10 11:06:54 +00001575 step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
tierno3bedc9b2019-11-27 15:46:57 +00001576 else:
tierno588547c2020-07-01 15:30:20 +00001577 # self.logger.debug("no need to get ssh key")
tierno3bedc9b2019-11-27 15:46:57 +00001578 step = "Waiting to VM being up and getting IP address"
1579 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02001580
tierno3bedc9b2019-11-27 15:46:57 +00001581 # n2vc_redesign STEP 5.1
1582 # wait for RO (ip-address) Insert pub_key into VM
tierno5ee02052019-12-05 19:55:02 +00001583 if vnfr_id:
1584 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1585 user=user, pub_key=pub_key)
1586 else:
1587 rw_mgmt_ip = None # This is for a NS configuration
tierno3bedc9b2019-11-27 15:46:57 +00001588
1589 self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
quilesj7e13aeb2019-10-08 13:34:55 +02001590
tiernoa5088192019-11-26 16:12:53 +00001591 # store rw_mgmt_ip in deploy params for later replacement
quilesj7e13aeb2019-10-08 13:34:55 +02001592 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
tiernod8323042019-08-09 11:32:23 +00001593
1594 # n2vc_redesign STEP 6 Execute initial config primitive
quilesj7e13aeb2019-10-08 13:34:55 +02001595 step = 'execute initial config primitive'
quilesj3655ae02019-12-12 16:08:35 +00001596
1597 # wait for dependent primitives execution (NS -> VNF -> VDU)
tierno5ee02052019-12-05 19:55:02 +00001598 if initial_config_primitive_list:
1599 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
quilesj3655ae02019-12-12 16:08:35 +00001600
1601 # stage, in function of element type: vdu, kdu, vnf or ns
1602 my_vca = vca_deployed_list[vca_index]
1603 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1604 # VDU or KDU
tiernoe876f672020-02-13 14:34:48 +00001605 stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
quilesj3655ae02019-12-12 16:08:35 +00001606 elif my_vca.get("member-vnf-index"):
1607 # VNF
tiernoe876f672020-02-13 14:34:48 +00001608 stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
quilesj3655ae02019-12-12 16:08:35 +00001609 else:
1610 # NS
tiernoe876f672020-02-13 14:34:48 +00001611 stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'
quilesj3655ae02019-12-12 16:08:35 +00001612
tiernoc231a872020-01-21 08:49:05 +00001613 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001614 nsr_id=nsr_id,
1615 vca_index=vca_index,
1616 status='EXECUTING PRIMITIVE'
1617 )
1618
1619 self._write_op_status(
1620 op_id=nslcmop_id,
1621 stage=stage
1622 )
1623
tiernoe876f672020-02-13 14:34:48 +00001624 check_if_terminated_needed = True
tiernod8323042019-08-09 11:32:23 +00001625 for initial_config_primitive in initial_config_primitive_list:
tiernoda6fb102019-11-23 00:36:52 +00001626 # adding information on the vca_deployed if it is a NS execution environment
1627 if not vca_deployed["member-vnf-index"]:
David Garciad4816682019-12-09 14:57:43 +01001628 deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
tiernod8323042019-08-09 11:32:23 +00001629 # TODO check if already done
1630 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
tierno3bedc9b2019-11-27 15:46:57 +00001631
tiernod8323042019-08-09 11:32:23 +00001632 step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
1633 self.logger.debug(logging_text + step)
tierno588547c2020-07-01 15:30:20 +00001634 await self.vca_map[vca_type].exec_primitive(
quilesj7e13aeb2019-10-08 13:34:55 +02001635 ee_id=ee_id,
1636 primitive_name=initial_config_primitive["name"],
1637 params_dict=primitive_params_,
1638 db_dict=db_dict
1639 )
tiernoe876f672020-02-13 14:34:48 +00001640 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
1641 if check_if_terminated_needed:
1642 if config_descriptor.get('terminate-config-primitive'):
1643 self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
1644 check_if_terminated_needed = False
quilesj3655ae02019-12-12 16:08:35 +00001645
tiernod8323042019-08-09 11:32:23 +00001646 # TODO register in database that primitive is done
quilesj7e13aeb2019-10-08 13:34:55 +02001647
tiernob996d942020-07-03 14:52:28 +00001648 # STEP 7 Configure metrics
1649 if vca_type == "helm":
1650 prometheus_jobs = await self.add_prometheus_metrics(
1651 ee_id=ee_id,
1652 artifact_path=artifact_path,
1653 ee_config_descriptor=ee_config_descriptor,
1654 vnfr_id=vnfr_id,
1655 nsr_id=nsr_id,
1656 target_ip=rw_mgmt_ip,
1657 )
1658 if prometheus_jobs:
1659 self.update_db_2("nsrs", nsr_id, {db_update_entry + "prometheus_jobs": prometheus_jobs})
1660
quilesj7e13aeb2019-10-08 13:34:55 +02001661 step = "instantiated at VCA"
1662 self.logger.debug(logging_text + step)
1663
tiernoc231a872020-01-21 08:49:05 +00001664 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001665 nsr_id=nsr_id,
1666 vca_index=vca_index,
1667 status='READY'
1668 )
1669
tiernod8323042019-08-09 11:32:23 +00001670 except Exception as e: # TODO not use Exception but N2VC exception
quilesj3655ae02019-12-12 16:08:35 +00001671 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
tiernoe876f672020-02-13 14:34:48 +00001672 if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
1673 self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
tiernoc231a872020-01-21 08:49:05 +00001674 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001675 nsr_id=nsr_id,
1676 vca_index=vca_index,
1677 status='BROKEN'
1678 )
tiernoe876f672020-02-13 14:34:48 +00001679 raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00001680
quilesj4cda56b2019-12-05 10:02:20 +00001681 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
tiernoa2143262020-03-27 16:20:40 +00001682 error_description: str = None, error_detail: str = None, other_update: dict = None):
tiernoe876f672020-02-13 14:34:48 +00001683 """
1684 Update db_nsr fields.
1685 :param nsr_id:
1686 :param ns_state:
1687 :param current_operation:
1688 :param current_operation_id:
1689 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00001690 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00001691 :param other_update: Other required changes at database if provided, will be cleared
1692 :return:
1693 """
quilesj4cda56b2019-12-05 10:02:20 +00001694 try:
tiernoe876f672020-02-13 14:34:48 +00001695 db_dict = other_update or {}
1696 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1697 db_dict["_admin.current-operation"] = current_operation_id
1698 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
quilesj4cda56b2019-12-05 10:02:20 +00001699 db_dict["currentOperation"] = current_operation
1700 db_dict["currentOperationID"] = current_operation_id
1701 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00001702 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00001703
1704 if ns_state:
1705 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00001706 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001707 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001708 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1709
tiernoe876f672020-02-13 14:34:48 +00001710 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1711 operation_state: str = None, other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001712 try:
tiernoe876f672020-02-13 14:34:48 +00001713 db_dict = other_update or {}
quilesj3655ae02019-12-12 16:08:35 +00001714 db_dict['queuePosition'] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00001715 if isinstance(stage, list):
1716 db_dict['stage'] = stage[0]
1717 db_dict['detailed-status'] = " ".join(stage)
1718 elif stage is not None:
1719 db_dict['stage'] = str(stage)
1720
1721 if error_message is not None:
quilesj3655ae02019-12-12 16:08:35 +00001722 db_dict['errorMessage'] = error_message
tiernoe876f672020-02-13 14:34:48 +00001723 if operation_state is not None:
1724 db_dict['operationState'] = operation_state
1725 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00001726 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001727 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001728 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1729
tierno51183952020-04-03 15:48:18 +00001730 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00001731 try:
tierno51183952020-04-03 15:48:18 +00001732 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00001733 # configurationStatus
1734 config_status = db_nsr.get('configurationStatus')
1735 if config_status:
tierno51183952020-04-03 15:48:18 +00001736 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1737 enumerate(config_status) if v}
quilesj3655ae02019-12-12 16:08:35 +00001738 # update status
tierno51183952020-04-03 15:48:18 +00001739 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001740
tiernoe876f672020-02-13 14:34:48 +00001741 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001742 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1743
quilesj63f90042020-01-17 09:53:55 +00001744 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
tierno51183952020-04-03 15:48:18 +00001745 element_under_configuration: str = None, element_type: str = None,
1746 other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001747
1748 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1749 # .format(vca_index, status))
1750
1751 try:
1752 db_path = 'configurationStatus.{}.'.format(vca_index)
tierno51183952020-04-03 15:48:18 +00001753 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00001754 if status:
1755 db_dict[db_path + 'status'] = status
quilesj3655ae02019-12-12 16:08:35 +00001756 if element_under_configuration:
1757 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
1758 if element_type:
1759 db_dict[db_path + 'elementType'] = element_type
1760 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001761 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001762 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
1763 .format(status, nsr_id, vca_index, e))
quilesj4cda56b2019-12-05 10:02:20 +00001764
tierno38089af2020-04-16 07:56:58 +00001765 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
1766 """
1767 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
1768 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
1769 Database is used because the result can be obtained from a different LCM worker in case of HA.
1770 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
1771 :param db_nslcmop: database content of nslcmop
1772 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
tierno8790a3d2020-04-23 22:49:52 +00001773 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
1774 computed 'vim-account-id'
tierno38089af2020-04-16 07:56:58 +00001775 """
tierno8790a3d2020-04-23 22:49:52 +00001776 modified = False
tierno38089af2020-04-16 07:56:58 +00001777 nslcmop_id = db_nslcmop['_id']
magnussonle9198bb2020-01-21 13:00:51 +01001778 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
1779 if placement_engine == "PLA":
tierno38089af2020-04-16 07:56:58 +00001780 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
1781 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
magnussonle9198bb2020-01-21 13:00:51 +01001782 db_poll_interval = 5
tierno38089af2020-04-16 07:56:58 +00001783 wait = db_poll_interval * 10
magnussonle9198bb2020-01-21 13:00:51 +01001784 pla_result = None
1785 while not pla_result and wait >= 0:
1786 await asyncio.sleep(db_poll_interval)
1787 wait -= db_poll_interval
tierno38089af2020-04-16 07:56:58 +00001788 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
magnussonle9198bb2020-01-21 13:00:51 +01001789 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
1790
1791 if not pla_result:
tierno38089af2020-04-16 07:56:58 +00001792 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
magnussonle9198bb2020-01-21 13:00:51 +01001793
1794 for pla_vnf in pla_result['vnf']:
1795 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
1796 if not pla_vnf.get('vimAccountId') or not vnfr:
1797 continue
tierno8790a3d2020-04-23 22:49:52 +00001798 modified = True
magnussonle9198bb2020-01-21 13:00:51 +01001799 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
tierno38089af2020-04-16 07:56:58 +00001800 # Modifies db_vnfrs
1801 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
tierno8790a3d2020-04-23 22:49:52 +00001802 return modified
magnussonle9198bb2020-01-21 13:00:51 +01001803
1804 def update_nsrs_with_pla_result(self, params):
1805 try:
1806 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
1807 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
1808 except Exception as e:
1809 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
1810
tierno59d22d22018-09-25 18:10:19 +02001811 async def instantiate(self, nsr_id, nslcmop_id):
quilesj7e13aeb2019-10-08 13:34:55 +02001812 """
1813
1814 :param nsr_id: ns instance to deploy
1815 :param nslcmop_id: operation to run
1816 :return:
1817 """
kuused124bfe2019-06-18 12:09:24 +02001818
1819 # Try to lock HA task here
1820 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
1821 if not task_is_locked_by_me:
quilesj3655ae02019-12-12 16:08:35 +00001822 self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
kuused124bfe2019-06-18 12:09:24 +02001823 return
1824
tierno59d22d22018-09-25 18:10:19 +02001825 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
1826 self.logger.debug(logging_text + "Enter")
quilesj7e13aeb2019-10-08 13:34:55 +02001827
tierno59d22d22018-09-25 18:10:19 +02001828 # get all needed from database
quilesj7e13aeb2019-10-08 13:34:55 +02001829
1830 # database nsrs record
tierno59d22d22018-09-25 18:10:19 +02001831 db_nsr = None
quilesj7e13aeb2019-10-08 13:34:55 +02001832
1833 # database nslcmops record
tierno59d22d22018-09-25 18:10:19 +02001834 db_nslcmop = None
quilesj7e13aeb2019-10-08 13:34:55 +02001835
1836 # update operation on nsrs
tiernoe876f672020-02-13 14:34:48 +00001837 db_nsr_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001838 # update operation on nslcmops
tierno59d22d22018-09-25 18:10:19 +02001839 db_nslcmop_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001840
tierno59d22d22018-09-25 18:10:19 +02001841 nslcmop_operation_state = None
quilesj7e13aeb2019-10-08 13:34:55 +02001842 db_vnfrs = {} # vnf's info indexed by member-index
1843 # n2vc_info = {}
tiernoe876f672020-02-13 14:34:48 +00001844 tasks_dict_info = {} # from task to info text
tierno59d22d22018-09-25 18:10:19 +02001845 exc = None
tiernoe876f672020-02-13 14:34:48 +00001846 error_list = []
1847 stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
1848 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02001849 try:
kuused124bfe2019-06-18 12:09:24 +02001850 # wait for any previous tasks in process
1851 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
1852
tierno21e42212020-07-09 13:51:20 +00001853 stage[1] = "Sync filesystem from database"
1854 self.fs.sync() # TODO, make use of partial sync, only for the needed packages
1855
quilesj7e13aeb2019-10-08 13:34:55 +02001856 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
tierno21e42212020-07-09 13:51:20 +00001857 stage[1] = "Reading from database"
quilesj4cda56b2019-12-05 10:02:20 +00001858 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
tiernoe876f672020-02-13 14:34:48 +00001859 db_nsr_update["detailed-status"] = "creating"
1860 db_nsr_update["operational-status"] = "init"
quilesj4cda56b2019-12-05 10:02:20 +00001861 self._write_ns_status(
1862 nsr_id=nsr_id,
1863 ns_state="BUILDING",
1864 current_operation="INSTANTIATING",
tiernoe876f672020-02-13 14:34:48 +00001865 current_operation_id=nslcmop_id,
1866 other_update=db_nsr_update
1867 )
1868 self._write_op_status(
1869 op_id=nslcmop_id,
1870 stage=stage,
1871 queuePosition=0
quilesj4cda56b2019-12-05 10:02:20 +00001872 )
1873
quilesj7e13aeb2019-10-08 13:34:55 +02001874 # read from db: operation
tiernoe876f672020-02-13 14:34:48 +00001875 stage[1] = "Getting nslcmop={} from db".format(nslcmop_id)
tierno59d22d22018-09-25 18:10:19 +02001876 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
tierno744303e2020-01-13 16:46:31 +00001877 ns_params = db_nslcmop.get("operationParams")
1878 if ns_params and ns_params.get("timeout_ns_deploy"):
1879 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1880 else:
1881 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001882
1883 # read from db: ns
tiernoe876f672020-02-13 14:34:48 +00001884 stage[1] = "Getting nsr={} from db".format(nsr_id)
tierno59d22d22018-09-25 18:10:19 +02001885 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernod732fb82020-05-21 13:18:23 +00001886 stage[1] = "Getting nsd={} from db".format(db_nsr["nsd-id"])
1887 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
1888 db_nsr["nsd"] = nsd
tiernod8323042019-08-09 11:32:23 +00001889 # nsr_name = db_nsr["name"] # TODO short-name??
tierno47e86b52018-10-10 14:05:55 +02001890
quilesj7e13aeb2019-10-08 13:34:55 +02001891 # read from db: vnf's of this ns
tiernoe876f672020-02-13 14:34:48 +00001892 stage[1] = "Getting vnfrs from db"
1893 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001894 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
tierno27246d82018-09-27 15:59:09 +02001895
quilesj7e13aeb2019-10-08 13:34:55 +02001896 # read from db: vnfd's for every vnf
1897 db_vnfds_ref = {} # every vnfd data indexed by vnf name
1898 db_vnfds = {} # every vnfd data indexed by vnf id
1899 db_vnfds_index = {} # every vnfd data indexed by vnf member-index
1900
1901 # for each vnf in ns, read vnfd
tierno27246d82018-09-27 15:59:09 +02001902 for vnfr in db_vnfrs_list:
quilesj7e13aeb2019-10-08 13:34:55 +02001903 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr # vnf's dict indexed by member-index: '1', '2', etc
1904 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
1905 vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
lloretgalleg6d488782020-07-22 10:13:46 +00001906
quilesj7e13aeb2019-10-08 13:34:55 +02001907 # if we haven't this vnfd, read it from db
tierno27246d82018-09-27 15:59:09 +02001908 if vnfd_id not in db_vnfds:
quilesj63f90042020-01-17 09:53:55 +00001909 # read from db
tiernoe876f672020-02-13 14:34:48 +00001910 stage[1] = "Getting vnfd={} id='{}' from db".format(vnfd_id, vnfd_ref)
1911 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001912 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
tierno27246d82018-09-27 15:59:09 +02001913
quilesj7e13aeb2019-10-08 13:34:55 +02001914 # store vnfd
1915 db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
1916 db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
1917 db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id] # vnfd's indexed by member-index
1918
1919 # Get or generates the _admin.deployed.VCA list
tiernoe4f7e6c2018-11-27 14:55:30 +00001920 vca_deployed_list = None
1921 if db_nsr["_admin"].get("deployed"):
1922 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
1923 if vca_deployed_list is None:
1924 vca_deployed_list = []
quilesj3655ae02019-12-12 16:08:35 +00001925 configuration_status_list = []
tiernoe4f7e6c2018-11-27 14:55:30 +00001926 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
quilesj3655ae02019-12-12 16:08:35 +00001927 db_nsr_update["configurationStatus"] = configuration_status_list
quilesj7e13aeb2019-10-08 13:34:55 +02001928 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00001929 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00001930 elif isinstance(vca_deployed_list, dict):
1931 # maintain backward compatibility. Change a dict to list at database
1932 vca_deployed_list = list(vca_deployed_list.values())
1933 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00001934 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00001935
tierno6cf25f52019-09-12 09:33:40 +00001936 if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
tiernoa009e552019-01-30 16:45:44 +00001937 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
1938 db_nsr_update["_admin.deployed.RO.vnfd"] = []
tierno59d22d22018-09-25 18:10:19 +02001939
tiernobaa51102018-12-14 13:16:18 +00001940 # set state to INSTANTIATED. When instantiated NBI will not delete directly
1941 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1942 self.update_db_2("nsrs", nsr_id, db_nsr_update)
lloretgalleg6d488782020-07-22 10:13:46 +00001943 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"})
quilesj3655ae02019-12-12 16:08:35 +00001944
1945 # n2vc_redesign STEP 2 Deploy Network Scenario
tiernoe876f672020-02-13 14:34:48 +00001946 stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
quilesj3655ae02019-12-12 16:08:35 +00001947 self._write_op_status(
1948 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00001949 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00001950 )
1951
tiernoe876f672020-02-13 14:34:48 +00001952 stage[1] = "Deploying KDUs,"
1953 # self.logger.debug(logging_text + "Before deploy_kdus")
calvinosanch9f9c6f22019-11-04 13:37:39 +01001954 # Call to deploy_kdus in case exists the "vdu:kdu" param
tiernoe876f672020-02-13 14:34:48 +00001955 await self.deploy_kdus(
1956 logging_text=logging_text,
1957 nsr_id=nsr_id,
1958 nslcmop_id=nslcmop_id,
1959 db_vnfrs=db_vnfrs,
1960 db_vnfds=db_vnfds,
1961 task_instantiation_info=tasks_dict_info,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001962 )
tiernoe876f672020-02-13 14:34:48 +00001963
1964 stage[1] = "Getting VCA public key."
tiernod8323042019-08-09 11:32:23 +00001965 # n2vc_redesign STEP 1 Get VCA public ssh-key
1966 # feature 1429. Add n2vc public key to needed VMs
tierno3bedc9b2019-11-27 15:46:57 +00001967 n2vc_key = self.n2vc.get_public_key()
tiernoa5088192019-11-26 16:12:53 +00001968 n2vc_key_list = [n2vc_key]
1969 if self.vca_config.get("public_key"):
1970 n2vc_key_list.append(self.vca_config["public_key"])
tierno98ad6ea2019-05-30 17:16:28 +00001971
tiernoe876f672020-02-13 14:34:48 +00001972 stage[1] = "Deploying NS at VIM."
tiernod8323042019-08-09 11:32:23 +00001973 task_ro = asyncio.ensure_future(
quilesj7e13aeb2019-10-08 13:34:55 +02001974 self.instantiate_RO(
1975 logging_text=logging_text,
1976 nsr_id=nsr_id,
1977 nsd=nsd,
1978 db_nsr=db_nsr,
1979 db_nslcmop=db_nslcmop,
1980 db_vnfrs=db_vnfrs,
1981 db_vnfds_ref=db_vnfds_ref,
tiernoe876f672020-02-13 14:34:48 +00001982 n2vc_key_list=n2vc_key_list,
1983 stage=stage
tierno98ad6ea2019-05-30 17:16:28 +00001984 )
tiernod8323042019-08-09 11:32:23 +00001985 )
1986 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
tiernoa2143262020-03-27 16:20:40 +00001987 tasks_dict_info[task_ro] = "Deploying at VIM"
tierno98ad6ea2019-05-30 17:16:28 +00001988
tiernod8323042019-08-09 11:32:23 +00001989 # n2vc_redesign STEP 3 to 6 Deploy N2VC
tiernoe876f672020-02-13 14:34:48 +00001990 stage[1] = "Deploying Execution Environments."
1991 self.logger.debug(logging_text + stage[1])
tierno98ad6ea2019-05-30 17:16:28 +00001992
tiernod8323042019-08-09 11:32:23 +00001993 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
quilesj7e13aeb2019-10-08 13:34:55 +02001994 # get_iterable() returns a value from a dict or empty tuple if key does not exist
tierno98ad6ea2019-05-30 17:16:28 +00001995 for c_vnf in get_iterable(nsd, "constituent-vnfd"):
1996 vnfd_id = c_vnf["vnfd-id-ref"]
tierno98ad6ea2019-05-30 17:16:28 +00001997 vnfd = db_vnfds_ref[vnfd_id]
tiernod8323042019-08-09 11:32:23 +00001998 member_vnf_index = str(c_vnf["member-vnf-index"])
1999 db_vnfr = db_vnfrs[member_vnf_index]
2000 base_folder = vnfd["_admin"]["storage"]
2001 vdu_id = None
2002 vdu_index = 0
tierno98ad6ea2019-05-30 17:16:28 +00002003 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002004 kdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002005
tierno8a518872018-12-21 13:42:14 +00002006 # Get additional parameters
tiernod8323042019-08-09 11:32:23 +00002007 deploy_params = {}
2008 if db_vnfr.get("additionalParamsForVnf"):
tierno626e0152019-11-29 14:16:16 +00002009 deploy_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy())
tierno8a518872018-12-21 13:42:14 +00002010
tiernod8323042019-08-09 11:32:23 +00002011 descriptor_config = vnfd.get("vnf-configuration")
tierno588547c2020-07-01 15:30:20 +00002012 if descriptor_config:
quilesj7e13aeb2019-10-08 13:34:55 +02002013 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00002014 logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002015 db_nsr=db_nsr,
2016 db_vnfr=db_vnfr,
2017 nslcmop_id=nslcmop_id,
2018 nsr_id=nsr_id,
2019 nsi_id=nsi_id,
2020 vnfd_id=vnfd_id,
2021 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002022 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002023 member_vnf_index=member_vnf_index,
2024 vdu_index=vdu_index,
2025 vdu_name=vdu_name,
2026 deploy_params=deploy_params,
2027 descriptor_config=descriptor_config,
2028 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00002029 task_instantiation_info=tasks_dict_info,
2030 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002031 )
tierno59d22d22018-09-25 18:10:19 +02002032
2033 # Deploy charms for each VDU that supports one.
tiernod8323042019-08-09 11:32:23 +00002034 for vdud in get_iterable(vnfd, 'vdu'):
2035 vdu_id = vdud["id"]
2036 descriptor_config = vdud.get('vdu-configuration')
tierno626e0152019-11-29 14:16:16 +00002037 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
2038 if vdur.get("additionalParams"):
2039 deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
2040 else:
2041 deploy_params_vdu = deploy_params
tierno588547c2020-07-01 15:30:20 +00002042 if descriptor_config:
tiernod8323042019-08-09 11:32:23 +00002043 # look for vdu index in the db_vnfr["vdu"] section
2044 # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
2045 # if vdur["vdu-id-ref"] == vdu_id:
2046 # break
2047 # else:
2048 # raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
2049 # "member_vnf_index={}".format(vdu_id, member_vnf_index))
2050 # vdu_name = vdur.get("name")
2051 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002052 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002053 for vdu_index in range(int(vdud.get("count", 1))):
2054 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
quilesj7e13aeb2019-10-08 13:34:55 +02002055 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00002056 logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2057 member_vnf_index, vdu_id, vdu_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002058 db_nsr=db_nsr,
2059 db_vnfr=db_vnfr,
2060 nslcmop_id=nslcmop_id,
2061 nsr_id=nsr_id,
2062 nsi_id=nsi_id,
2063 vnfd_id=vnfd_id,
2064 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002065 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002066 member_vnf_index=member_vnf_index,
2067 vdu_index=vdu_index,
2068 vdu_name=vdu_name,
tierno626e0152019-11-29 14:16:16 +00002069 deploy_params=deploy_params_vdu,
quilesj7e13aeb2019-10-08 13:34:55 +02002070 descriptor_config=descriptor_config,
2071 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002072 task_instantiation_info=tasks_dict_info,
2073 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002074 )
calvinosanch9f9c6f22019-11-04 13:37:39 +01002075 for kdud in get_iterable(vnfd, 'kdu'):
2076 kdu_name = kdud["name"]
2077 descriptor_config = kdud.get('kdu-configuration')
tierno588547c2020-07-01 15:30:20 +00002078 if descriptor_config:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002079 vdu_id = None
2080 vdu_index = 0
2081 vdu_name = None
2082 # look for vdu index in the db_vnfr["vdu"] section
2083 # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
2084 # if vdur["vdu-id-ref"] == vdu_id:
2085 # break
2086 # else:
2087 # raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
2088 # "member_vnf_index={}".format(vdu_id, member_vnf_index))
2089 # vdu_name = vdur.get("name")
2090 # vdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002091
calvinosanch9f9c6f22019-11-04 13:37:39 +01002092 self._deploy_n2vc(
2093 logging_text=logging_text,
2094 db_nsr=db_nsr,
2095 db_vnfr=db_vnfr,
2096 nslcmop_id=nslcmop_id,
2097 nsr_id=nsr_id,
2098 nsi_id=nsi_id,
2099 vnfd_id=vnfd_id,
2100 vdu_id=vdu_id,
2101 kdu_name=kdu_name,
2102 member_vnf_index=member_vnf_index,
2103 vdu_index=vdu_index,
2104 vdu_name=vdu_name,
2105 deploy_params=deploy_params,
2106 descriptor_config=descriptor_config,
2107 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002108 task_instantiation_info=tasks_dict_info,
2109 stage=stage
calvinosanch9f9c6f22019-11-04 13:37:39 +01002110 )
tierno59d22d22018-09-25 18:10:19 +02002111
tierno1b633412019-02-25 16:48:23 +00002112 # Check if this NS has a charm configuration
tiernod8323042019-08-09 11:32:23 +00002113 descriptor_config = nsd.get("ns-configuration")
2114 if descriptor_config and descriptor_config.get("juju"):
2115 vnfd_id = None
2116 db_vnfr = None
2117 member_vnf_index = None
2118 vdu_id = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002119 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002120 vdu_index = 0
2121 vdu_name = None
tierno1b633412019-02-25 16:48:23 +00002122
tiernod8323042019-08-09 11:32:23 +00002123 # Get additional parameters
2124 deploy_params = {}
2125 if db_nsr.get("additionalParamsForNs"):
tierno626e0152019-11-29 14:16:16 +00002126 deploy_params = self._format_additional_params(db_nsr["additionalParamsForNs"].copy())
tiernod8323042019-08-09 11:32:23 +00002127 base_folder = nsd["_admin"]["storage"]
quilesj7e13aeb2019-10-08 13:34:55 +02002128 self._deploy_n2vc(
2129 logging_text=logging_text,
2130 db_nsr=db_nsr,
2131 db_vnfr=db_vnfr,
2132 nslcmop_id=nslcmop_id,
2133 nsr_id=nsr_id,
2134 nsi_id=nsi_id,
2135 vnfd_id=vnfd_id,
2136 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002137 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002138 member_vnf_index=member_vnf_index,
2139 vdu_index=vdu_index,
2140 vdu_name=vdu_name,
2141 deploy_params=deploy_params,
2142 descriptor_config=descriptor_config,
2143 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002144 task_instantiation_info=tasks_dict_info,
2145 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002146 )
tierno1b633412019-02-25 16:48:23 +00002147
tiernoe876f672020-02-13 14:34:48 +00002148 # rest of staff will be done at finally
tierno1b633412019-02-25 16:48:23 +00002149
tiernoe876f672020-02-13 14:34:48 +00002150 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
2151 self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
tierno59d22d22018-09-25 18:10:19 +02002152 exc = e
2153 except asyncio.CancelledError:
tiernoe876f672020-02-13 14:34:48 +00002154 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
tierno59d22d22018-09-25 18:10:19 +02002155 exc = "Operation was cancelled"
2156 except Exception as e:
2157 exc = traceback.format_exc()
tiernoe876f672020-02-13 14:34:48 +00002158 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
tierno59d22d22018-09-25 18:10:19 +02002159 finally:
2160 if exc:
tiernoe876f672020-02-13 14:34:48 +00002161 error_list.append(str(exc))
tiernobaa51102018-12-14 13:16:18 +00002162 try:
tiernoe876f672020-02-13 14:34:48 +00002163 # wait for pending tasks
2164 if tasks_dict_info:
2165 stage[1] = "Waiting for instantiate pending tasks."
2166 self.logger.debug(logging_text + stage[1])
2167 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
2168 stage, nslcmop_id, nsr_id=nsr_id)
2169 stage[1] = stage[2] = ""
2170 except asyncio.CancelledError:
2171 error_list.append("Cancelled")
2172 # TODO cancel all tasks
2173 except Exception as exc:
2174 error_list.append(str(exc))
quilesj4cda56b2019-12-05 10:02:20 +00002175
tiernoe876f672020-02-13 14:34:48 +00002176 # update operation-status
2177 db_nsr_update["operational-status"] = "running"
2178 # let's begin with VCA 'configured' status (later we can change it)
2179 db_nsr_update["config-status"] = "configured"
2180 for task, task_name in tasks_dict_info.items():
2181 if not task.done() or task.cancelled() or task.exception():
2182 if task_name.startswith(self.task_name_deploy_vca):
2183 # A N2VC task is pending
2184 db_nsr_update["config-status"] = "failed"
quilesj4cda56b2019-12-05 10:02:20 +00002185 else:
tiernoe876f672020-02-13 14:34:48 +00002186 # RO or KDU task is pending
2187 db_nsr_update["operational-status"] = "failed"
quilesj3655ae02019-12-12 16:08:35 +00002188
tiernoe876f672020-02-13 14:34:48 +00002189 # update status at database
2190 if error_list:
tiernoa2143262020-03-27 16:20:40 +00002191 error_detail = ". ".join(error_list)
tiernoe876f672020-02-13 14:34:48 +00002192 self.logger.error(logging_text + error_detail)
tiernoa2143262020-03-27 16:20:40 +00002193 error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
2194 error_description_nsr = 'Operation: INSTANTIATING.{}, Stage {}'.format(nslcmop_id, stage[0])
quilesj3655ae02019-12-12 16:08:35 +00002195
tiernoa2143262020-03-27 16:20:40 +00002196 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00002197 db_nslcmop_update["detailed-status"] = error_detail
2198 nslcmop_operation_state = "FAILED"
2199 ns_state = "BROKEN"
2200 else:
tiernoa2143262020-03-27 16:20:40 +00002201 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00002202 error_description_nsr = error_description_nslcmop = None
2203 ns_state = "READY"
2204 db_nsr_update["detailed-status"] = "Done"
2205 db_nslcmop_update["detailed-status"] = "Done"
2206 nslcmop_operation_state = "COMPLETED"
quilesj4cda56b2019-12-05 10:02:20 +00002207
tiernoe876f672020-02-13 14:34:48 +00002208 if db_nsr:
2209 self._write_ns_status(
2210 nsr_id=nsr_id,
2211 ns_state=ns_state,
2212 current_operation="IDLE",
2213 current_operation_id=None,
2214 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00002215 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00002216 other_update=db_nsr_update
2217 )
tiernoa17d4f42020-04-28 09:59:23 +00002218 self._write_op_status(
2219 op_id=nslcmop_id,
2220 stage="",
2221 error_message=error_description_nslcmop,
2222 operation_state=nslcmop_operation_state,
2223 other_update=db_nslcmop_update,
2224 )
quilesj3655ae02019-12-12 16:08:35 +00002225
tierno59d22d22018-09-25 18:10:19 +02002226 if nslcmop_operation_state:
2227 try:
2228 await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00002229 "operationState": nslcmop_operation_state},
2230 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02002231 except Exception as e:
2232 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
2233
2234 self.logger.debug(logging_text + "Exit")
2235 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2236
tierno588547c2020-07-01 15:30:20 +00002237 async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
2238 timeout: int = 3600, vca_type: str = None) -> bool:
quilesj63f90042020-01-17 09:53:55 +00002239
2240 # steps:
2241 # 1. find all relations for this VCA
2242 # 2. wait for other peers related
2243 # 3. add relations
2244
2245 try:
tierno588547c2020-07-01 15:30:20 +00002246 vca_type = vca_type or "lxc_proxy_charm"
quilesj63f90042020-01-17 09:53:55 +00002247
2248 # STEP 1: find all relations for this VCA
2249
2250 # read nsr record
2251 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garcia171f3542020-05-21 16:41:07 +02002252 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
quilesj63f90042020-01-17 09:53:55 +00002253
2254 # this VCA data
2255 my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]
2256
2257 # read all ns-configuration relations
2258 ns_relations = list()
David Garcia171f3542020-05-21 16:41:07 +02002259 db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
quilesj63f90042020-01-17 09:53:55 +00002260 if db_ns_relations:
2261 for r in db_ns_relations:
2262 # check if this VCA is in the relation
2263 if my_vca.get('member-vnf-index') in\
2264 (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2265 ns_relations.append(r)
2266
2267 # read all vnf-configuration relations
2268 vnf_relations = list()
2269 db_vnfd_list = db_nsr.get('vnfd-id')
2270 if db_vnfd_list:
2271 for vnfd in db_vnfd_list:
2272 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
2273 db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
2274 if db_vnf_relations:
2275 for r in db_vnf_relations:
2276 # check if this VCA is in the relation
2277 if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2278 vnf_relations.append(r)
2279
2280 # if no relations, terminate
2281 if not ns_relations and not vnf_relations:
2282 self.logger.debug(logging_text + ' No relations')
2283 return True
2284
2285 self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))
2286
2287 # add all relations
2288 start = time()
2289 while True:
2290 # check timeout
2291 now = time()
2292 if now - start >= timeout:
2293 self.logger.error(logging_text + ' : timeout adding relations')
2294 return False
2295
2296 # reload nsr from database (we need to update record: _admin.deloyed.VCA)
2297 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2298
2299 # for each defined NS relation, find the VCA's related
2300 for r in ns_relations:
2301 from_vca_ee_id = None
2302 to_vca_ee_id = None
2303 from_vca_endpoint = None
2304 to_vca_endpoint = None
2305 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2306 for vca in vca_list:
2307 if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
2308 and vca.get('config_sw_installed'):
2309 from_vca_ee_id = vca.get('ee_id')
2310 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2311 if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
2312 and vca.get('config_sw_installed'):
2313 to_vca_ee_id = vca.get('ee_id')
2314 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2315 if from_vca_ee_id and to_vca_ee_id:
2316 # add relation
tierno588547c2020-07-01 15:30:20 +00002317 await self.vca_map[vca_type].add_relation(
quilesj63f90042020-01-17 09:53:55 +00002318 ee_id_1=from_vca_ee_id,
2319 ee_id_2=to_vca_ee_id,
2320 endpoint_1=from_vca_endpoint,
2321 endpoint_2=to_vca_endpoint)
2322 # remove entry from relations list
2323 ns_relations.remove(r)
2324 else:
2325 # check failed peers
2326 try:
2327 vca_status_list = db_nsr.get('configurationStatus')
2328 if vca_status_list:
2329 for i in range(len(vca_list)):
2330 vca = vca_list[i]
2331 vca_status = vca_status_list[i]
2332 if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
2333 if vca_status.get('status') == 'BROKEN':
2334 # peer broken: remove relation from list
2335 ns_relations.remove(r)
2336 if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
2337 if vca_status.get('status') == 'BROKEN':
2338 # peer broken: remove relation from list
2339 ns_relations.remove(r)
2340 except Exception:
2341 # ignore
2342 pass
2343
2344 # for each defined VNF relation, find the VCA's related
2345 for r in vnf_relations:
2346 from_vca_ee_id = None
2347 to_vca_ee_id = None
2348 from_vca_endpoint = None
2349 to_vca_endpoint = None
2350 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2351 for vca in vca_list:
2352 if vca.get('vdu_id') == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
2353 from_vca_ee_id = vca.get('ee_id')
2354 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2355 if vca.get('vdu_id') == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
2356 to_vca_ee_id = vca.get('ee_id')
2357 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2358 if from_vca_ee_id and to_vca_ee_id:
2359 # add relation
tierno588547c2020-07-01 15:30:20 +00002360 await self.vca_map[vca_type].add_relation(
quilesj63f90042020-01-17 09:53:55 +00002361 ee_id_1=from_vca_ee_id,
2362 ee_id_2=to_vca_ee_id,
2363 endpoint_1=from_vca_endpoint,
2364 endpoint_2=to_vca_endpoint)
2365 # remove entry from relations list
2366 vnf_relations.remove(r)
2367 else:
2368 # check failed peers
2369 try:
2370 vca_status_list = db_nsr.get('configurationStatus')
2371 if vca_status_list:
2372 for i in range(len(vca_list)):
2373 vca = vca_list[i]
2374 vca_status = vca_status_list[i]
2375 if vca.get('vdu_id') == r.get('entities')[0].get('id'):
2376 if vca_status.get('status') == 'BROKEN':
2377 # peer broken: remove relation from list
2378 ns_relations.remove(r)
2379 if vca.get('vdu_id') == r.get('entities')[1].get('id'):
2380 if vca_status.get('status') == 'BROKEN':
2381 # peer broken: remove relation from list
2382 ns_relations.remove(r)
2383 except Exception:
2384 # ignore
2385 pass
2386
2387 # wait for next try
2388 await asyncio.sleep(5.0)
2389
2390 if not ns_relations and not vnf_relations:
2391 self.logger.debug('Relations added')
2392 break
2393
2394 return True
2395
2396 except Exception as e:
2397 self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
2398 return False
2399
lloretgalleg7c121132020-07-08 07:53:22 +00002400 async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdur: dict, kdud: dict,
2401 vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600):
2402
tiernob9018152020-04-16 14:18:24 +00002403 try:
lloretgalleg7c121132020-07-08 07:53:22 +00002404 k8sclustertype = k8s_instance_info["k8scluster-type"]
2405 # Instantiate kdu
2406 db_dict_install = {"collection": "nsrs",
2407 "filter": {"_id": nsr_id},
2408 "path": nsr_db_path}
2409
2410 kdu_instance = await self.k8scluster_map[k8sclustertype].install(
2411 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2412 kdu_model=k8s_instance_info["kdu-model"],
2413 atomic=True,
2414 params=k8params,
2415 db_dict=db_dict_install,
2416 timeout=timeout,
2417 kdu_name=k8s_instance_info["kdu-name"],
2418 namespace=k8s_instance_info["namespace"])
2419 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})
2420
2421 # Obtain services to obtain management service ip
2422 services = await self.k8scluster_map[k8sclustertype].get_services(
2423 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2424 kdu_instance=kdu_instance,
2425 namespace=k8s_instance_info["namespace"])
2426
2427 # Obtain management service info (if exists)
2428 if services:
2429 vnfr_update_dict = {"kdur.{}.services".format(kdu_index): services}
2430 mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")]
2431 for mgmt_service in mgmt_services:
2432 for service in services:
2433 if service["name"].startswith(mgmt_service["name"]):
2434 # Mgmt service found, Obtain service ip
2435 ip = service.get("external_ip", service.get("cluster_ip"))
2436 if isinstance(ip, list) and len(ip) == 1:
2437 ip = ip[0]
2438
2439 vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip
2440
2441 # Check if must update also mgmt ip at the vnf
2442 service_external_cp = mgmt_service.get("external-connection-point-ref")
2443 if service_external_cp:
2444 if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp:
2445 vnfr_update_dict["ip-address"] = ip
2446
2447 break
2448 else:
2449 self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"]))
2450
2451 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
2452
tiernob9018152020-04-16 14:18:24 +00002453 except Exception as e:
lloretgalleg7c121132020-07-08 07:53:22 +00002454 # Prepare update db with error and raise exception
tiernob9018152020-04-16 14:18:24 +00002455 try:
lloretgalleg7c121132020-07-08 07:53:22 +00002456 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)})
tiernob9018152020-04-16 14:18:24 +00002457 except Exception:
lloretgalleg7c121132020-07-08 07:53:22 +00002458 # ignore to keep original exception
tiernob9018152020-04-16 14:18:24 +00002459 pass
lloretgalleg7c121132020-07-08 07:53:22 +00002460 # reraise original error
2461 raise
2462
2463 return kdu_instance
tiernob9018152020-04-16 14:18:24 +00002464
tiernoe876f672020-02-13 14:34:48 +00002465 async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
calvinosanch9f9c6f22019-11-04 13:37:39 +01002466 # Launch kdus if present in the descriptor
tierno626e0152019-11-29 14:16:16 +00002467
2468 k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}}
2469
tierno16f4a4e2020-07-20 09:05:51 +00002470 async def _get_cluster_id(cluster_id, cluster_type):
tierno626e0152019-11-29 14:16:16 +00002471 nonlocal k8scluster_id_2_uuic
2472 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
2473 return k8scluster_id_2_uuic[cluster_type][cluster_id]
2474
tierno16f4a4e2020-07-20 09:05:51 +00002475 # check if K8scluster is creating and wait look if previous tasks in process
2476 task_name, task_dependency = self.lcm_tasks.lookfor_related("k8scluster", cluster_id)
2477 if task_dependency:
2478 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(task_name, cluster_id)
2479 self.logger.debug(logging_text + text)
2480 await asyncio.wait(task_dependency, timeout=3600)
2481
tierno626e0152019-11-29 14:16:16 +00002482 db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
2483 if not db_k8scluster:
2484 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
tierno16f4a4e2020-07-20 09:05:51 +00002485
tierno626e0152019-11-29 14:16:16 +00002486 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
2487 if not k8s_id:
tierno78e3ec62020-07-14 10:46:57 +00002488 raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id,
2489 cluster_type))
tierno626e0152019-11-29 14:16:16 +00002490 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
2491 return k8s_id
2492
2493 logging_text += "Deploy kdus: "
tiernoe876f672020-02-13 14:34:48 +00002494 step = ""
calvinosanch9f9c6f22019-11-04 13:37:39 +01002495 try:
tierno626e0152019-11-29 14:16:16 +00002496 db_nsr_update = {"_admin.deployed.K8s": []}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002497 self.update_db_2("nsrs", nsr_id, db_nsr_update)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002498
tierno626e0152019-11-29 14:16:16 +00002499 index = 0
tiernoe876f672020-02-13 14:34:48 +00002500 updated_cluster_list = []
2501
tierno626e0152019-11-29 14:16:16 +00002502 for vnfr_data in db_vnfrs.values():
lloretgalleg7c121132020-07-08 07:53:22 +00002503 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
2504 # Step 0: Prepare and set parameters
tierno626e0152019-11-29 14:16:16 +00002505 desc_params = self._format_additional_params(kdur.get("additionalParams"))
quilesjacde94f2020-01-23 10:07:08 +00002506 vnfd_id = vnfr_data.get('vnfd-id')
lloretgalleg7c121132020-07-08 07:53:22 +00002507 kdud = next(kdud for kdud in db_vnfds[vnfd_id]["kdu"] if kdud["name"] == kdur["kdu-name"])
tiernode1584f2020-04-07 09:07:33 +00002508 namespace = kdur.get("k8s-namespace")
tierno626e0152019-11-29 14:16:16 +00002509 if kdur.get("helm-chart"):
2510 kdumodel = kdur["helm-chart"]
tiernoe876f672020-02-13 14:34:48 +00002511 k8sclustertype = "helm-chart"
tierno626e0152019-11-29 14:16:16 +00002512 elif kdur.get("juju-bundle"):
2513 kdumodel = kdur["juju-bundle"]
tiernoe876f672020-02-13 14:34:48 +00002514 k8sclustertype = "juju-bundle"
tierno626e0152019-11-29 14:16:16 +00002515 else:
tiernoe876f672020-02-13 14:34:48 +00002516 raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
2517 "juju-bundle. Maybe an old NBI version is running".
2518 format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
quilesjacde94f2020-01-23 10:07:08 +00002519 # check if kdumodel is a file and exists
2520 try:
tierno51183952020-04-03 15:48:18 +00002521 storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
2522 if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts
2523 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
Dominik Fleischmann010c0e72020-05-18 15:19:11 +02002524 filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
tierno51183952020-04-03 15:48:18 +00002525 kdumodel)
2526 if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
2527 kdumodel = self.fs.path + filename
2528 except (asyncio.TimeoutError, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00002529 raise
2530 except Exception: # it is not a file
quilesjacde94f2020-01-23 10:07:08 +00002531 pass
lloretgallegedc5f332020-02-20 11:50:50 +01002532
tiernoe876f672020-02-13 14:34:48 +00002533 k8s_cluster_id = kdur["k8s-cluster"]["id"]
2534 step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
tierno16f4a4e2020-07-20 09:05:51 +00002535 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
lloretgallegedc5f332020-02-20 11:50:50 +01002536
lloretgalleg7c121132020-07-08 07:53:22 +00002537 # Synchronize repos
tiernoe876f672020-02-13 14:34:48 +00002538 if k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list:
2539 del_repo_list, added_repo_dict = await asyncio.ensure_future(
2540 self.k8sclusterhelm.synchronize_repos(cluster_uuid=cluster_uuid))
2541 if del_repo_list or added_repo_dict:
2542 unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
2543 updated = {'_admin.helm_charts_added.' +
2544 item: name for item, name in added_repo_dict.items()}
2545 self.logger.debug(logging_text + "repos synchronized on k8s cluster '{}' to_delete: {}, "
2546 "to_add: {}".format(k8s_cluster_id, del_repo_list,
2547 added_repo_dict))
2548 self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
2549 updated_cluster_list.append(cluster_uuid)
lloretgallegedc5f332020-02-20 11:50:50 +01002550
lloretgalleg7c121132020-07-08 07:53:22 +00002551 # Instantiate kdu
tiernoe876f672020-02-13 14:34:48 +00002552 step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
2553 kdur["kdu-name"], k8s_cluster_id)
lloretgalleg7c121132020-07-08 07:53:22 +00002554 k8s_instance_info = {"kdu-instance": None,
2555 "k8scluster-uuid": cluster_uuid,
2556 "k8scluster-type": k8sclustertype,
2557 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
2558 "kdu-name": kdur["kdu-name"],
2559 "kdu-model": kdumodel,
2560 "namespace": namespace}
tiernob9018152020-04-16 14:18:24 +00002561 db_path = "_admin.deployed.K8s.{}".format(index)
lloretgalleg7c121132020-07-08 07:53:22 +00002562 db_nsr_update[db_path] = k8s_instance_info
tierno626e0152019-11-29 14:16:16 +00002563 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tierno626e0152019-11-29 14:16:16 +00002564
tiernoa2143262020-03-27 16:20:40 +00002565 task = asyncio.ensure_future(
lloretgalleg7c121132020-07-08 07:53:22 +00002566 self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdur, kdud, db_vnfds[vnfd_id],
2567 k8s_instance_info, k8params=desc_params, timeout=600))
tiernoe876f672020-02-13 14:34:48 +00002568 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
tiernoa2143262020-03-27 16:20:40 +00002569 task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
tiernoe876f672020-02-13 14:34:48 +00002570
tierno626e0152019-11-29 14:16:16 +00002571 index += 1
quilesjdd799ac2020-01-23 16:31:11 +00002572
tiernoe876f672020-02-13 14:34:48 +00002573 except (LcmException, asyncio.CancelledError):
2574 raise
calvinosanch9f9c6f22019-11-04 13:37:39 +01002575 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00002576 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
2577 if isinstance(e, (N2VCException, DbException)):
2578 self.logger.error(logging_text + msg)
2579 else:
2580 self.logger.critical(logging_text + msg, exc_info=True)
quilesjdd799ac2020-01-23 16:31:11 +00002581 raise LcmException(msg)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002582 finally:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002583 if db_nsr_update:
2584 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00002585
quilesj7e13aeb2019-10-08 13:34:55 +02002586 def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002587 kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
tiernoe876f672020-02-13 14:34:48 +00002588 base_folder, task_instantiation_info, stage):
quilesj7e13aeb2019-10-08 13:34:55 +02002589 # launch instantiate_N2VC in a asyncio task and register task object
2590 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
2591 # if not found, create one entry and update database
quilesj7e13aeb2019-10-08 13:34:55 +02002592 # fill db_nsr._admin.deployed.VCA.<index>
tierno588547c2020-07-01 15:30:20 +00002593
2594 self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
2595 if descriptor_config.get("juju"): # There is one execution envioronment of type juju
2596 ee_list = [descriptor_config]
2597 elif descriptor_config.get("execution-environment-list"):
2598 ee_list = descriptor_config.get("execution-environment-list")
2599 else: # other types as script are not supported
2600 ee_list = []
2601
2602 for ee_item in ee_list:
2603 self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'),
2604 ee_item.get("helm-chart")))
tiernoa278b842020-07-08 15:33:55 +00002605 ee_descriptor_id = ee_item.get("id")
tierno588547c2020-07-01 15:30:20 +00002606 if ee_item.get("juju"):
2607 vca_name = ee_item['juju'].get('charm')
2608 vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm"
2609 if ee_item['juju'].get('cloud') == "k8s":
2610 vca_type = "k8s_proxy_charm"
2611 elif ee_item['juju'].get('proxy') is False:
2612 vca_type = "native_charm"
2613 elif ee_item.get("helm-chart"):
2614 vca_name = ee_item['helm-chart']
2615 vca_type = "helm"
2616 else:
2617 self.logger.debug(logging_text + "skipping non juju neither charm configuration")
quilesj7e13aeb2019-10-08 13:34:55 +02002618 continue
quilesj3655ae02019-12-12 16:08:35 +00002619
tierno588547c2020-07-01 15:30:20 +00002620 vca_index = -1
2621 for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
2622 if not vca_deployed:
2623 continue
2624 if vca_deployed.get("member-vnf-index") == member_vnf_index and \
2625 vca_deployed.get("vdu_id") == vdu_id and \
2626 vca_deployed.get("kdu_name") == kdu_name and \
tiernoa278b842020-07-08 15:33:55 +00002627 vca_deployed.get("vdu_count_index", 0) == vdu_index and \
2628 vca_deployed.get("ee_descriptor_id") == ee_descriptor_id:
tierno588547c2020-07-01 15:30:20 +00002629 break
2630 else:
2631 # not found, create one.
tiernoa278b842020-07-08 15:33:55 +00002632 target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
2633 if vdu_id:
2634 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
2635 elif kdu_name:
2636 target += "/kdu/{}".format(kdu_name)
tierno588547c2020-07-01 15:30:20 +00002637 vca_deployed = {
tiernoa278b842020-07-08 15:33:55 +00002638 "target_element": target,
2639 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
tierno588547c2020-07-01 15:30:20 +00002640 "member-vnf-index": member_vnf_index,
2641 "vdu_id": vdu_id,
2642 "kdu_name": kdu_name,
2643 "vdu_count_index": vdu_index,
2644 "operational-status": "init", # TODO revise
2645 "detailed-status": "", # TODO revise
2646 "step": "initial-deploy", # TODO revise
2647 "vnfd_id": vnfd_id,
2648 "vdu_name": vdu_name,
tiernoa278b842020-07-08 15:33:55 +00002649 "type": vca_type,
2650 "ee_descriptor_id": ee_descriptor_id
tierno588547c2020-07-01 15:30:20 +00002651 }
2652 vca_index += 1
quilesj3655ae02019-12-12 16:08:35 +00002653
tierno588547c2020-07-01 15:30:20 +00002654 # create VCA and configurationStatus in db
2655 db_dict = {
2656 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
2657 "configurationStatus.{}".format(vca_index): dict()
2658 }
2659 self.update_db_2("nsrs", nsr_id, db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02002660
tierno588547c2020-07-01 15:30:20 +00002661 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
2662
2663 # Launch task
2664 task_n2vc = asyncio.ensure_future(
2665 self.instantiate_N2VC(
2666 logging_text=logging_text,
2667 vca_index=vca_index,
2668 nsi_id=nsi_id,
2669 db_nsr=db_nsr,
2670 db_vnfr=db_vnfr,
2671 vdu_id=vdu_id,
2672 kdu_name=kdu_name,
2673 vdu_index=vdu_index,
2674 deploy_params=deploy_params,
2675 config_descriptor=descriptor_config,
2676 base_folder=base_folder,
2677 nslcmop_id=nslcmop_id,
2678 stage=stage,
2679 vca_type=vca_type,
tiernob996d942020-07-03 14:52:28 +00002680 vca_name=vca_name,
2681 ee_config_descriptor=ee_item
tierno588547c2020-07-01 15:30:20 +00002682 )
quilesj7e13aeb2019-10-08 13:34:55 +02002683 )
tierno588547c2020-07-01 15:30:20 +00002684 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
2685 task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
2686 member_vnf_index or "", vdu_id or "")
tiernobaa51102018-12-14 13:16:18 +00002687
tiernoc9556972019-07-05 15:25:25 +00002688 @staticmethod
tiernoa278b842020-07-08 15:33:55 +00002689 def _get_terminate_config_primitive(primitive_list, vca_deployed):
2690 """ Get a sorted terminate config primitive list. In case ee_descriptor_id is present at vca_deployed,
2691 it get only those primitives for this execution envirom"""
2692
2693 primitive_list = primitive_list or []
2694 # filter primitives by ee_descriptor_id
2695 ee_descriptor_id = vca_deployed.get("ee_descriptor_id")
2696 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
2697
2698 if primitive_list:
2699 primitive_list.sort(key=lambda val: int(val['seq']))
2700
2701 return primitive_list
kuuse0ca67472019-05-13 15:59:27 +02002702
2703 @staticmethod
2704 def _create_nslcmop(nsr_id, operation, params):
2705 """
2706 Creates a ns-lcm-opp content to be stored at database.
2707 :param nsr_id: internal id of the instance
2708 :param operation: instantiate, terminate, scale, action, ...
2709 :param params: user parameters for the operation
2710 :return: dictionary following SOL005 format
2711 """
2712 # Raise exception if invalid arguments
2713 if not (nsr_id and operation and params):
2714 raise LcmException(
2715 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
2716 now = time()
2717 _id = str(uuid4())
2718 nslcmop = {
2719 "id": _id,
2720 "_id": _id,
2721 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
2722 "operationState": "PROCESSING",
2723 "statusEnteredTime": now,
2724 "nsInstanceId": nsr_id,
2725 "lcmOperationType": operation,
2726 "startTime": now,
2727 "isAutomaticInvocation": False,
2728 "operationParams": params,
2729 "isCancelPending": False,
2730 "links": {
2731 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
2732 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
2733 }
2734 }
2735 return nslcmop
2736
calvinosanch9f9c6f22019-11-04 13:37:39 +01002737 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00002738 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002739 for key, value in params.items():
2740 if str(value).startswith("!!yaml "):
2741 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01002742 return params
2743
kuuse8b998e42019-07-30 15:22:16 +02002744 def _get_terminate_primitive_params(self, seq, vnf_index):
2745 primitive = seq.get('name')
2746 primitive_params = {}
2747 params = {
2748 "member_vnf_index": vnf_index,
2749 "primitive": primitive,
2750 "primitive_params": primitive_params,
2751 }
2752 desc_params = {}
2753 return self._map_primitive_params(seq, params, desc_params)
2754
kuuseac3a8882019-10-03 10:48:06 +02002755 # sub-operations
2756
tierno51183952020-04-03 15:48:18 +00002757 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
2758 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
2759 if op.get('operationState') == 'COMPLETED':
kuuseac3a8882019-10-03 10:48:06 +02002760 # b. Skip sub-operation
2761 # _ns_execute_primitive() or RO.create_action() will NOT be executed
2762 return self.SUBOPERATION_STATUS_SKIP
2763 else:
tierno7c4e24c2020-05-13 08:41:35 +00002764 # c. retry executing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02002765 # The sub-operation exists, and operationState != 'COMPLETED'
tierno7c4e24c2020-05-13 08:41:35 +00002766 # Update operationState = 'PROCESSING' to indicate a retry.
kuuseac3a8882019-10-03 10:48:06 +02002767 operationState = 'PROCESSING'
2768 detailed_status = 'In progress'
2769 self._update_suboperation_status(
2770 db_nslcmop, op_index, operationState, detailed_status)
2771 # Return the sub-operation index
2772 # _ns_execute_primitive() or RO.create_action() will be called from scale()
2773 # with arguments extracted from the sub-operation
2774 return op_index
2775
2776 # Find a sub-operation where all keys in a matching dictionary must match
2777 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
2778 def _find_suboperation(self, db_nslcmop, match):
tierno7c4e24c2020-05-13 08:41:35 +00002779 if db_nslcmop and match:
kuuseac3a8882019-10-03 10:48:06 +02002780 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
2781 for i, op in enumerate(op_list):
2782 if all(op.get(k) == match[k] for k in match):
2783 return i
2784 return self.SUBOPERATION_STATUS_NOT_FOUND
2785
2786 # Update status for a sub-operation given its index
2787 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
2788 # Update DB for HA tasks
2789 q_filter = {'_id': db_nslcmop['_id']}
2790 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
2791 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
2792 self.db.set_one("nslcmops",
2793 q_filter=q_filter,
2794 update_dict=update_dict,
2795 fail_on_empty=False)
2796
2797 # Add sub-operation, return the index of the added sub-operation
2798 # Optionally, set operationState, detailed-status, and operationType
2799 # Status and type are currently set for 'scale' sub-operations:
2800 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
2801 # 'detailed-status' : status message
2802 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
2803 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
quilesj7e13aeb2019-10-08 13:34:55 +02002804 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
2805 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
kuuseac3a8882019-10-03 10:48:06 +02002806 RO_nsr_id=None, RO_scaling_info=None):
tiernoe876f672020-02-13 14:34:48 +00002807 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02002808 return self.SUBOPERATION_STATUS_NOT_FOUND
2809 # Get the "_admin.operations" list, if it exists
2810 db_nslcmop_admin = db_nslcmop.get('_admin', {})
2811 op_list = db_nslcmop_admin.get('operations')
2812 # Create or append to the "_admin.operations" list
kuuse8b998e42019-07-30 15:22:16 +02002813 new_op = {'member_vnf_index': vnf_index,
2814 'vdu_id': vdu_id,
2815 'vdu_count_index': vdu_count_index,
2816 'primitive': primitive,
2817 'primitive_params': mapped_primitive_params}
kuuseac3a8882019-10-03 10:48:06 +02002818 if operationState:
2819 new_op['operationState'] = operationState
2820 if detailed_status:
2821 new_op['detailed-status'] = detailed_status
2822 if operationType:
2823 new_op['lcmOperationType'] = operationType
2824 if RO_nsr_id:
2825 new_op['RO_nsr_id'] = RO_nsr_id
2826 if RO_scaling_info:
2827 new_op['RO_scaling_info'] = RO_scaling_info
2828 if not op_list:
2829 # No existing operations, create key 'operations' with current operation as first list element
2830 db_nslcmop_admin.update({'operations': [new_op]})
2831 op_list = db_nslcmop_admin.get('operations')
2832 else:
2833 # Existing operations, append operation to list
2834 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02002835
kuuseac3a8882019-10-03 10:48:06 +02002836 db_nslcmop_update = {'_admin.operations': op_list}
2837 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
2838 op_index = len(op_list) - 1
2839 return op_index
2840
2841 # Helper methods for scale() sub-operations
2842
2843 # pre-scale/post-scale:
2844 # Check for 3 different cases:
2845 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
2846 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
tierno7c4e24c2020-05-13 08:41:35 +00002847 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
quilesj7e13aeb2019-10-08 13:34:55 +02002848 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
2849 operationType, RO_nsr_id=None, RO_scaling_info=None):
kuuseac3a8882019-10-03 10:48:06 +02002850 # Find this sub-operation
tierno7c4e24c2020-05-13 08:41:35 +00002851 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002852 operationType = 'SCALE-RO'
2853 match = {
2854 'member_vnf_index': vnf_index,
2855 'RO_nsr_id': RO_nsr_id,
2856 'RO_scaling_info': RO_scaling_info,
2857 }
2858 else:
2859 match = {
2860 'member_vnf_index': vnf_index,
2861 'primitive': vnf_config_primitive,
2862 'primitive_params': primitive_params,
2863 'lcmOperationType': operationType
2864 }
2865 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00002866 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02002867 # a. New sub-operation
2868 # The sub-operation does not exist, add it.
2869 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
2870 # The following parameters are set to None for all kind of scaling:
2871 vdu_id = None
2872 vdu_count_index = None
2873 vdu_name = None
tierno51183952020-04-03 15:48:18 +00002874 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002875 vnf_config_primitive = None
2876 primitive_params = None
2877 else:
2878 RO_nsr_id = None
2879 RO_scaling_info = None
2880 # Initial status for sub-operation
2881 operationState = 'PROCESSING'
2882 detailed_status = 'In progress'
2883 # Add sub-operation for pre/post-scaling (zero or more operations)
2884 self._add_suboperation(db_nslcmop,
2885 vnf_index,
2886 vdu_id,
2887 vdu_count_index,
2888 vdu_name,
2889 vnf_config_primitive,
2890 primitive_params,
2891 operationState,
2892 detailed_status,
2893 operationType,
2894 RO_nsr_id,
2895 RO_scaling_info)
2896 return self.SUBOPERATION_STATUS_NEW
2897 else:
2898 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
2899 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00002900 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02002901
preethika.pdf7d8e02019-12-10 13:10:48 +00002902 # Function to return execution_environment id
2903
2904 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00002905 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00002906 for vca in vca_deployed_list:
2907 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
2908 return vca["ee_id"]
2909
tierno588547c2020-07-01 15:30:20 +00002910 async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
2911 vca_index, destroy_ee=True, exec_primitives=True):
tiernoe876f672020-02-13 14:34:48 +00002912 """
2913 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
2914 :param logging_text:
2915 :param db_nslcmop:
2916 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
2917 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
2918 :param vca_index: index in the database _admin.deployed.VCA
2919 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
tierno588547c2020-07-01 15:30:20 +00002920 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
2921 not executed properly
tiernoe876f672020-02-13 14:34:48 +00002922 :return: None or exception
2923 """
tiernoe876f672020-02-13 14:34:48 +00002924
tierno588547c2020-07-01 15:30:20 +00002925 self.logger.debug(
2926 logging_text + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
2927 vca_index, vca_deployed, config_descriptor, destroy_ee
2928 )
2929 )
2930
2931 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
2932
2933 # execute terminate_primitives
2934 if exec_primitives:
tiernoa278b842020-07-08 15:33:55 +00002935 terminate_primitives = self._get_terminate_config_primitive(
2936 config_descriptor.get("terminate-config-primitive"), vca_deployed)
tierno588547c2020-07-01 15:30:20 +00002937 vdu_id = vca_deployed.get("vdu_id")
2938 vdu_count_index = vca_deployed.get("vdu_count_index")
2939 vdu_name = vca_deployed.get("vdu_name")
2940 vnf_index = vca_deployed.get("member-vnf-index")
2941 if terminate_primitives and vca_deployed.get("needed_terminate"):
tierno588547c2020-07-01 15:30:20 +00002942 for seq in terminate_primitives:
2943 # For each sequence in list, get primitive and call _ns_execute_primitive()
2944 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
2945 vnf_index, seq.get("name"))
2946 self.logger.debug(logging_text + step)
2947 # Create the primitive for each sequence, i.e. "primitive": "touch"
2948 primitive = seq.get('name')
2949 mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)
tierno588547c2020-07-01 15:30:20 +00002950
2951 # Add sub-operation
2952 self._add_suboperation(db_nslcmop,
2953 vnf_index,
2954 vdu_id,
2955 vdu_count_index,
2956 vdu_name,
2957 primitive,
2958 mapped_primitive_params)
2959 # Sub-operations: Call _ns_execute_primitive() instead of action()
2960 try:
2961 result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
2962 mapped_primitive_params,
2963 vca_type=vca_type)
2964 except LcmException:
2965 # this happens when VCA is not deployed. In this case it is not needed to terminate
2966 continue
2967 result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
2968 if result not in result_ok:
2969 raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
2970 "error {}".format(seq.get("name"), vnf_index, result_detail))
2971 # set that this VCA do not need terminated
2972 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
2973 self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})
tiernoe876f672020-02-13 14:34:48 +00002974
tiernob996d942020-07-03 14:52:28 +00002975 if vca_deployed.get("prometheus_jobs") and self.prometheus:
2976 await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])
2977
tiernoe876f672020-02-13 14:34:48 +00002978 if destroy_ee:
tierno588547c2020-07-01 15:30:20 +00002979 await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"])
kuuse0ca67472019-05-13 15:59:27 +02002980
tierno51183952020-04-03 15:48:18 +00002981 async def _delete_all_N2VC(self, db_nsr: dict):
2982 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
2983 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00002984 try:
2985 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
2986 except N2VCNotFound: # already deleted. Skip
2987 pass
tierno51183952020-04-03 15:48:18 +00002988 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
quilesj3655ae02019-12-12 16:08:35 +00002989
tiernoe876f672020-02-13 14:34:48 +00002990 async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
2991 """
2992 Terminates a deployment from RO
2993 :param logging_text:
2994 :param nsr_deployed: db_nsr._admin.deployed
2995 :param nsr_id:
2996 :param nslcmop_id:
2997 :param stage: list of string with the content to write on db_nslcmop.detailed-status.
2998 this method will update only the index 2, but it will write on database the concatenated content of the list
2999 :return:
3000 """
3001 db_nsr_update = {}
3002 failed_detail = []
3003 ro_nsr_id = ro_delete_action = None
3004 if nsr_deployed and nsr_deployed.get("RO"):
3005 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
3006 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
3007 try:
3008 if ro_nsr_id:
3009 stage[2] = "Deleting ns from VIM."
3010 db_nsr_update["detailed-status"] = " ".join(stage)
3011 self._write_op_status(nslcmop_id, stage)
3012 self.logger.debug(logging_text + stage[2])
3013 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3014 self._write_op_status(nslcmop_id, stage)
3015 desc = await self.RO.delete("ns", ro_nsr_id)
3016 ro_delete_action = desc["action_id"]
3017 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
3018 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
3019 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3020 if ro_delete_action:
3021 # wait until NS is deleted from VIM
3022 stage[2] = "Waiting ns deleted from VIM."
3023 detailed_status_old = None
3024 self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
3025 ro_delete_action))
3026 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3027 self._write_op_status(nslcmop_id, stage)
kuused124bfe2019-06-18 12:09:24 +02003028
tiernoe876f672020-02-13 14:34:48 +00003029 delete_timeout = 20 * 60 # 20 minutes
3030 while delete_timeout > 0:
3031 desc = await self.RO.show(
3032 "ns",
3033 item_id_name=ro_nsr_id,
3034 extra_item="action",
3035 extra_item_id=ro_delete_action)
3036
3037 # deploymentStatus
3038 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
3039
3040 ns_status, ns_status_info = self.RO.check_action_status(desc)
3041 if ns_status == "ERROR":
3042 raise ROclient.ROClientException(ns_status_info)
3043 elif ns_status == "BUILD":
3044 stage[2] = "Deleting from VIM {}".format(ns_status_info)
3045 elif ns_status == "ACTIVE":
3046 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
3047 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3048 break
3049 else:
3050 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
3051 if stage[2] != detailed_status_old:
3052 detailed_status_old = stage[2]
3053 db_nsr_update["detailed-status"] = " ".join(stage)
3054 self._write_op_status(nslcmop_id, stage)
3055 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3056 await asyncio.sleep(5, loop=self.loop)
3057 delete_timeout -= 5
3058 else: # delete_timeout <= 0:
3059 raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")
3060
3061 except Exception as e:
3062 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3063 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3064 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
3065 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3066 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
3067 self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
3068 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
tiernoa2143262020-03-27 16:20:40 +00003069 failed_detail.append("delete conflict: {}".format(e))
3070 self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00003071 else:
tiernoa2143262020-03-27 16:20:40 +00003072 failed_detail.append("delete error: {}".format(e))
3073 self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00003074
3075 # Delete nsd
3076 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
3077 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
3078 try:
3079 stage[2] = "Deleting nsd from RO."
3080 db_nsr_update["detailed-status"] = " ".join(stage)
3081 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3082 self._write_op_status(nslcmop_id, stage)
3083 await self.RO.delete("nsd", ro_nsd_id)
3084 self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
3085 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
3086 except Exception as e:
3087 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3088 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
3089 self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
3090 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
3091 failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
3092 self.logger.debug(logging_text + failed_detail[-1])
3093 else:
3094 failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
3095 self.logger.error(logging_text + failed_detail[-1])
3096
3097 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
3098 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
3099 if not vnf_deployed or not vnf_deployed["id"]:
3100 continue
3101 try:
3102 ro_vnfd_id = vnf_deployed["id"]
3103 stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
3104 vnf_deployed["member-vnf-index"], ro_vnfd_id)
3105 db_nsr_update["detailed-status"] = " ".join(stage)
3106 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3107 self._write_op_status(nslcmop_id, stage)
3108 await self.RO.delete("vnfd", ro_vnfd_id)
3109 self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
3110 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
3111 except Exception as e:
3112 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3113 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
3114 self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
3115 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
3116 failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
3117 self.logger.debug(logging_text + failed_detail[-1])
3118 else:
3119 failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
3120 self.logger.error(logging_text + failed_detail[-1])
3121
tiernoa2143262020-03-27 16:20:40 +00003122 if failed_detail:
3123 stage[2] = "Error deleting from VIM"
3124 else:
3125 stage[2] = "Deleted from VIM"
tiernoe876f672020-02-13 14:34:48 +00003126 db_nsr_update["detailed-status"] = " ".join(stage)
3127 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3128 self._write_op_status(nslcmop_id, stage)
3129
3130 if failed_detail:
tiernoa2143262020-03-27 16:20:40 +00003131 raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00003132
3133 async def terminate(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003134 # Try to lock HA task here
3135 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3136 if not task_is_locked_by_me:
3137 return
3138
tierno59d22d22018-09-25 18:10:19 +02003139 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
3140 self.logger.debug(logging_text + "Enter")
tiernoe876f672020-02-13 14:34:48 +00003141 timeout_ns_terminate = self.timeout_ns_terminate
tierno59d22d22018-09-25 18:10:19 +02003142 db_nsr = None
3143 db_nslcmop = None
tiernoa17d4f42020-04-28 09:59:23 +00003144 operation_params = None
tierno59d22d22018-09-25 18:10:19 +02003145 exc = None
tiernoe876f672020-02-13 14:34:48 +00003146 error_list = [] # annotates all failed error messages
tierno59d22d22018-09-25 18:10:19 +02003147 db_nslcmop_update = {}
tiernoc2564fe2019-01-28 16:18:56 +00003148 autoremove = False # autoremove after terminated
tiernoe876f672020-02-13 14:34:48 +00003149 tasks_dict_info = {}
3150 db_nsr_update = {}
3151 stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
3152 # ^ contains [stage, step, VIM-status]
tierno59d22d22018-09-25 18:10:19 +02003153 try:
kuused124bfe2019-06-18 12:09:24 +02003154 # wait for any previous tasks in process
3155 await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)
3156
tiernoe876f672020-02-13 14:34:48 +00003157 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
3158 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3159 operation_params = db_nslcmop.get("operationParams") or {}
3160 if operation_params.get("timeout_ns_terminate"):
3161 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
3162 stage[1] = "Getting nsr={} from db.".format(nsr_id)
3163 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3164
3165 db_nsr_update["operational-status"] = "terminating"
3166 db_nsr_update["config-status"] = "terminating"
quilesj4cda56b2019-12-05 10:02:20 +00003167 self._write_ns_status(
3168 nsr_id=nsr_id,
3169 ns_state="TERMINATING",
3170 current_operation="TERMINATING",
tiernoe876f672020-02-13 14:34:48 +00003171 current_operation_id=nslcmop_id,
3172 other_update=db_nsr_update
quilesj4cda56b2019-12-05 10:02:20 +00003173 )
quilesj3655ae02019-12-12 16:08:35 +00003174 self._write_op_status(
3175 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00003176 queuePosition=0,
3177 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00003178 )
tiernoe876f672020-02-13 14:34:48 +00003179 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
tierno59d22d22018-09-25 18:10:19 +02003180 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
3181 return
tierno59d22d22018-09-25 18:10:19 +02003182
tiernoe876f672020-02-13 14:34:48 +00003183 stage[1] = "Getting vnf descriptors from db."
3184 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
3185 db_vnfds_from_id = {}
3186 db_vnfds_from_member_index = {}
3187 # Loop over VNFRs
3188 for vnfr in db_vnfrs_list:
3189 vnfd_id = vnfr["vnfd-id"]
3190 if vnfd_id not in db_vnfds_from_id:
3191 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
3192 db_vnfds_from_id[vnfd_id] = vnfd
3193 db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]
calvinosanch9f9c6f22019-11-04 13:37:39 +01003194
tiernoe876f672020-02-13 14:34:48 +00003195 # Destroy individual execution environments when there are terminating primitives.
3196 # Rest of EE will be deleted at once
tierno588547c2020-07-01 15:30:20 +00003197 # TODO - check before calling _destroy_N2VC
3198 # if not operation_params.get("skip_terminate_primitives"):#
3199 # or not vca.get("needed_terminate"):
3200 stage[0] = "Stage 2/3 execute terminating primitives."
3201 self.logger.debug(logging_text + stage[0])
3202 stage[1] = "Looking execution environment that needs terminate."
3203 self.logger.debug(logging_text + stage[1])
tiernob996d942020-07-03 14:52:28 +00003204 # self.logger.debug("nsr_deployed: {}".format(nsr_deployed))
tierno588547c2020-07-01 15:30:20 +00003205 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
3206 self.logger.debug("vca_index: {}, vca: {}".format(vca_index, vca))
3207 config_descriptor = None
3208 if not vca or not vca.get("ee_id"):
3209 continue
3210 if not vca.get("member-vnf-index"):
3211 # ns
3212 config_descriptor = db_nsr.get("ns-configuration")
3213 elif vca.get("vdu_id"):
3214 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3215 vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
3216 if vdud:
3217 config_descriptor = vdud.get("vdu-configuration")
3218 elif vca.get("kdu_name"):
3219 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3220 kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
3221 if kdud:
3222 config_descriptor = kdud.get("kdu-configuration")
3223 else:
3224 config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
tierno588547c2020-07-01 15:30:20 +00003225 vca_type = vca.get("type")
3226 exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
3227 vca.get("needed_terminate"))
tiernob996d942020-07-03 14:52:28 +00003228 # For helm we must destroy_ee
3229 destroy_ee = "True" if vca_type == "helm" else "False"
3230 task = asyncio.ensure_future(
3231 self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index,
3232 destroy_ee, exec_terminate_primitives))
tierno588547c2020-07-01 15:30:20 +00003233 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
tierno59d22d22018-09-25 18:10:19 +02003234
tierno588547c2020-07-01 15:30:20 +00003235 # wait for pending tasks of terminate primitives
3236 if tasks_dict_info:
3237 self.logger.debug(logging_text + 'Waiting for terminate primitive pending tasks...')
3238 error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
3239 min(self.timeout_charm_delete, timeout_ns_terminate),
3240 stage, nslcmop_id)
3241 if error_list:
3242 return # raise LcmException("; ".join(error_list))
3243 tasks_dict_info.clear()
tierno82974b22018-11-27 21:55:36 +00003244
tiernoe876f672020-02-13 14:34:48 +00003245 # remove All execution environments at once
3246 stage[0] = "Stage 3/3 delete all."
quilesj3655ae02019-12-12 16:08:35 +00003247
tierno49676be2020-04-07 16:34:35 +00003248 if nsr_deployed.get("VCA"):
3249 stage[1] = "Deleting all execution environments."
3250 self.logger.debug(logging_text + stage[1])
3251 task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
3252 timeout=self.timeout_charm_delete))
3253 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
3254 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
tierno59d22d22018-09-25 18:10:19 +02003255
tiernoe876f672020-02-13 14:34:48 +00003256 # Delete from k8scluster
3257 stage[1] = "Deleting KDUs."
3258 self.logger.debug(logging_text + stage[1])
3259 # print(nsr_deployed)
3260 for kdu in get_iterable(nsr_deployed, "K8s"):
3261 if not kdu or not kdu.get("kdu-instance"):
3262 continue
3263 kdu_instance = kdu.get("kdu-instance")
tiernoa2143262020-03-27 16:20:40 +00003264 if kdu.get("k8scluster-type") in self.k8scluster_map:
tiernoe876f672020-02-13 14:34:48 +00003265 task_delete_kdu_instance = asyncio.ensure_future(
tiernoa2143262020-03-27 16:20:40 +00003266 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
3267 cluster_uuid=kdu.get("k8scluster-uuid"),
3268 kdu_instance=kdu_instance))
tiernoe876f672020-02-13 14:34:48 +00003269 else:
3270 self.logger.error(logging_text + "Unknown k8s deployment type {}".
3271 format(kdu.get("k8scluster-type")))
3272 continue
3273 tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
tierno59d22d22018-09-25 18:10:19 +02003274
3275 # remove from RO
tiernoe876f672020-02-13 14:34:48 +00003276 stage[1] = "Deleting ns from VIM."
tierno69f0d382020-05-07 13:08:09 +00003277 if self.ng_ro:
3278 task_delete_ro = asyncio.ensure_future(
3279 self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
3280 else:
3281 task_delete_ro = asyncio.ensure_future(
3282 self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
tiernoe876f672020-02-13 14:34:48 +00003283 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
tierno59d22d22018-09-25 18:10:19 +02003284
tiernoe876f672020-02-13 14:34:48 +00003285 # rest of staff will be done at finally
3286
3287 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
3288 self.logger.error(logging_text + "Exit Exception {}".format(e))
3289 exc = e
3290 except asyncio.CancelledError:
3291 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
3292 exc = "Operation was cancelled"
3293 except Exception as e:
3294 exc = traceback.format_exc()
3295 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
3296 finally:
3297 if exc:
3298 error_list.append(str(exc))
tierno59d22d22018-09-25 18:10:19 +02003299 try:
tiernoe876f672020-02-13 14:34:48 +00003300 # wait for pending tasks
3301 if tasks_dict_info:
3302 stage[1] = "Waiting for terminate pending tasks."
3303 self.logger.debug(logging_text + stage[1])
3304 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
3305 stage, nslcmop_id)
3306 stage[1] = stage[2] = ""
3307 except asyncio.CancelledError:
3308 error_list.append("Cancelled")
3309 # TODO cancell all tasks
3310 except Exception as exc:
3311 error_list.append(str(exc))
3312 # update status at database
3313 if error_list:
3314 error_detail = "; ".join(error_list)
3315 # self.logger.error(logging_text + error_detail)
tiernoa2143262020-03-27 16:20:40 +00003316 error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
3317 error_description_nsr = 'Operation: TERMINATING.{}, Stage {}.'.format(nslcmop_id, stage[0])
tierno59d22d22018-09-25 18:10:19 +02003318
tierno59d22d22018-09-25 18:10:19 +02003319 db_nsr_update["operational-status"] = "failed"
tiernoa2143262020-03-27 16:20:40 +00003320 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00003321 db_nslcmop_update["detailed-status"] = error_detail
3322 nslcmop_operation_state = "FAILED"
3323 ns_state = "BROKEN"
tierno59d22d22018-09-25 18:10:19 +02003324 else:
tiernoa2143262020-03-27 16:20:40 +00003325 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00003326 error_description_nsr = error_description_nslcmop = None
3327 ns_state = "NOT_INSTANTIATED"
tierno59d22d22018-09-25 18:10:19 +02003328 db_nsr_update["operational-status"] = "terminated"
3329 db_nsr_update["detailed-status"] = "Done"
3330 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
3331 db_nslcmop_update["detailed-status"] = "Done"
tiernoe876f672020-02-13 14:34:48 +00003332 nslcmop_operation_state = "COMPLETED"
tierno59d22d22018-09-25 18:10:19 +02003333
tiernoe876f672020-02-13 14:34:48 +00003334 if db_nsr:
3335 self._write_ns_status(
3336 nsr_id=nsr_id,
3337 ns_state=ns_state,
3338 current_operation="IDLE",
3339 current_operation_id=None,
3340 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00003341 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00003342 other_update=db_nsr_update
3343 )
tiernoa17d4f42020-04-28 09:59:23 +00003344 self._write_op_status(
3345 op_id=nslcmop_id,
3346 stage="",
3347 error_message=error_description_nslcmop,
3348 operation_state=nslcmop_operation_state,
3349 other_update=db_nslcmop_update,
3350 )
lloretgalleg6d488782020-07-22 10:13:46 +00003351 if ns_state == "NOT_INSTANTIATED":
3352 try:
3353 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "NOT_INSTANTIATED"})
3354 except DbException as e:
3355 self.logger.warn(logging_text + 'Error writing VNFR status for nsr-id-ref: {} -> {}'.
3356 format(nsr_id, e))
tiernoa17d4f42020-04-28 09:59:23 +00003357 if operation_params:
tiernoe876f672020-02-13 14:34:48 +00003358 autoremove = operation_params.get("autoremove", False)
tierno59d22d22018-09-25 18:10:19 +02003359 if nslcmop_operation_state:
3360 try:
3361 await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tiernoc2564fe2019-01-28 16:18:56 +00003362 "operationState": nslcmop_operation_state,
3363 "autoremove": autoremove},
tierno8a518872018-12-21 13:42:14 +00003364 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003365 except Exception as e:
3366 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02003367
tierno59d22d22018-09-25 18:10:19 +02003368 self.logger.debug(logging_text + "Exit")
3369 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
3370
tiernoe876f672020-02-13 14:34:48 +00003371 async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
3372 time_start = time()
tiernoa2143262020-03-27 16:20:40 +00003373 error_detail_list = []
tiernoe876f672020-02-13 14:34:48 +00003374 error_list = []
3375 pending_tasks = list(created_tasks_info.keys())
3376 num_tasks = len(pending_tasks)
3377 num_done = 0
3378 stage[1] = "{}/{}.".format(num_done, num_tasks)
3379 self._write_op_status(nslcmop_id, stage)
tiernoe876f672020-02-13 14:34:48 +00003380 while pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00003381 new_error = None
tiernoe876f672020-02-13 14:34:48 +00003382 _timeout = timeout + time_start - time()
3383 done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
3384 return_when=asyncio.FIRST_COMPLETED)
3385 num_done += len(done)
3386 if not done: # Timeout
3387 for task in pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00003388 new_error = created_tasks_info[task] + ": Timeout"
3389 error_detail_list.append(new_error)
3390 error_list.append(new_error)
tiernoe876f672020-02-13 14:34:48 +00003391 break
3392 for task in done:
3393 if task.cancelled():
tierno067e04a2020-03-31 12:53:13 +00003394 exc = "Cancelled"
tiernoe876f672020-02-13 14:34:48 +00003395 else:
3396 exc = task.exception()
tierno067e04a2020-03-31 12:53:13 +00003397 if exc:
3398 if isinstance(exc, asyncio.TimeoutError):
3399 exc = "Timeout"
3400 new_error = created_tasks_info[task] + ": {}".format(exc)
3401 error_list.append(created_tasks_info[task])
3402 error_detail_list.append(new_error)
tierno28c63da2020-04-20 16:28:56 +00003403 if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException,
3404 K8sException)):
tierno067e04a2020-03-31 12:53:13 +00003405 self.logger.error(logging_text + new_error)
tiernoe876f672020-02-13 14:34:48 +00003406 else:
tierno067e04a2020-03-31 12:53:13 +00003407 exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
3408 self.logger.error(logging_text + created_tasks_info[task] + exc_traceback)
3409 else:
3410 self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
tiernoe876f672020-02-13 14:34:48 +00003411 stage[1] = "{}/{}.".format(num_done, num_tasks)
3412 if new_error:
tiernoa2143262020-03-27 16:20:40 +00003413 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
tiernoe876f672020-02-13 14:34:48 +00003414 if nsr_id: # update also nsr
tiernoa2143262020-03-27 16:20:40 +00003415 self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
3416 "errorDetail": ". ".join(error_detail_list)})
tiernoe876f672020-02-13 14:34:48 +00003417 self._write_op_status(nslcmop_id, stage)
tiernoa2143262020-03-27 16:20:40 +00003418 return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00003419
tiernoda964822019-01-14 15:53:47 +00003420 @staticmethod
3421 def _map_primitive_params(primitive_desc, params, instantiation_params):
3422 """
3423 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
3424 The default-value is used. If it is between < > it look for a value at instantiation_params
3425 :param primitive_desc: portion of VNFD/NSD that describes primitive
3426 :param params: Params provided by user
3427 :param instantiation_params: Instantiation params provided by user
3428 :return: a dictionary with the calculated params
3429 """
3430 calculated_params = {}
3431 for parameter in primitive_desc.get("parameter", ()):
3432 param_name = parameter["name"]
3433 if param_name in params:
3434 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00003435 elif "default-value" in parameter or "value" in parameter:
3436 if "value" in parameter:
3437 calculated_params[param_name] = parameter["value"]
3438 else:
3439 calculated_params[param_name] = parameter["default-value"]
3440 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
3441 and calculated_params[param_name].endswith(">"):
3442 if calculated_params[param_name][1:-1] in instantiation_params:
3443 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
tiernoda964822019-01-14 15:53:47 +00003444 else:
3445 raise LcmException("Parameter {} needed to execute primitive {} not provided".
tiernod8323042019-08-09 11:32:23 +00003446 format(calculated_params[param_name], primitive_desc["name"]))
tiernoda964822019-01-14 15:53:47 +00003447 else:
3448 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3449 format(param_name, primitive_desc["name"]))
tierno59d22d22018-09-25 18:10:19 +02003450
tiernoda964822019-01-14 15:53:47 +00003451 if isinstance(calculated_params[param_name], (dict, list, tuple)):
3452 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], default_flow_style=True,
3453 width=256)
3454 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
3455 calculated_params[param_name] = calculated_params[param_name][7:]
tiernoc3f2a822019-11-05 13:45:04 +00003456
3457 # add always ns_config_info if primitive name is config
3458 if primitive_desc["name"] == "config":
3459 if "ns_config_info" in instantiation_params:
3460 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
tiernoda964822019-01-14 15:53:47 +00003461 return calculated_params
3462
tiernoa278b842020-07-08 15:33:55 +00003463 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None,
3464 ee_descriptor_id=None):
tiernoe876f672020-02-13 14:34:48 +00003465 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
3466 for vca in deployed_vca:
3467 if not vca:
3468 continue
3469 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3470 continue
tiernoe876f672020-02-13 14:34:48 +00003471 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3472 continue
3473 if kdu_name and kdu_name != vca["kdu_name"]:
3474 continue
tiernoa278b842020-07-08 15:33:55 +00003475 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
3476 continue
tiernoe876f672020-02-13 14:34:48 +00003477 break
3478 else:
3479 # vca_deployed not found
tiernoa278b842020-07-08 15:33:55 +00003480 raise LcmException("charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
3481 " is not deployed".format(member_vnf_index, vdu_id, vdu_count_index, kdu_name,
3482 ee_descriptor_id))
quilesj7e13aeb2019-10-08 13:34:55 +02003483
tiernoe876f672020-02-13 14:34:48 +00003484 # get ee_id
3485 ee_id = vca.get("ee_id")
tierno588547c2020-07-01 15:30:20 +00003486 vca_type = vca.get("type", "lxc_proxy_charm") # default value for backward compatibility - proxy charm
tiernoe876f672020-02-13 14:34:48 +00003487 if not ee_id:
tierno067e04a2020-03-31 12:53:13 +00003488 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
tiernoe876f672020-02-13 14:34:48 +00003489 "execution environment"
tierno067e04a2020-03-31 12:53:13 +00003490 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
tierno588547c2020-07-01 15:30:20 +00003491 return ee_id, vca_type
tiernoe876f672020-02-13 14:34:48 +00003492
    async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0,
                                    retries_interval=30, timeout=None,
                                    vca_type=None, db_dict=None) -> (str, str):
        """
        Execute a primitive at an execution environment, retrying on failure.
        :param ee_id: execution environment id
        :param primitive: primitive name; for "config" the params are wrapped as {"params": ...}
        :param primitive_params: dict with the primitive parameters
        :param retries: number of extra attempts after a failed one (0 means a single attempt)
        :param retries_interval: seconds to sleep between attempts
        :param timeout: max seconds per attempt; defaults to self.timeout_primitive
        :param vca_type: key of self.vca_map selecting the connector; defaults to "lxc_proxy_charm"
        :param db_dict: database location where the connector writes status, passed to exec_primitive
        :return: tuple (operation_state, detailed_status): 'COMPLETED' with the primitive output,
                 'FAILED' when all attempts failed, or 'FAIL' on an unexpected outer exception
        :raises LcmException, asyncio.CancelledError: re-raised to the caller
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict),
                        timeout=timeout or self.timeout_primitive)
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
                        # wait and retry
                        # NOTE(review): the 'loop' argument of asyncio.sleep is deprecated in
                        # newer Python versions - confirm target runtime
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        return 'FAILED', str(e)

            return 'COMPLETED', output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02003534
3535 async def action(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003536
3537 # Try to lock HA task here
3538 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3539 if not task_is_locked_by_me:
3540 return
3541
tierno59d22d22018-09-25 18:10:19 +02003542 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
3543 self.logger.debug(logging_text + "Enter")
3544 # get all needed from database
3545 db_nsr = None
3546 db_nslcmop = None
tiernoe876f672020-02-13 14:34:48 +00003547 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003548 db_nslcmop_update = {}
3549 nslcmop_operation_state = None
tierno067e04a2020-03-31 12:53:13 +00003550 error_description_nslcmop = None
tierno59d22d22018-09-25 18:10:19 +02003551 exc = None
3552 try:
kuused124bfe2019-06-18 12:09:24 +02003553 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003554 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003555 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
3556
quilesj4cda56b2019-12-05 10:02:20 +00003557 self._write_ns_status(
3558 nsr_id=nsr_id,
3559 ns_state=None,
3560 current_operation="RUNNING ACTION",
3561 current_operation_id=nslcmop_id
3562 )
3563
tierno59d22d22018-09-25 18:10:19 +02003564 step = "Getting information from database"
3565 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3566 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernoda964822019-01-14 15:53:47 +00003567
tiernoe4f7e6c2018-11-27 14:55:30 +00003568 nsr_deployed = db_nsr["_admin"].get("deployed")
tierno1b633412019-02-25 16:48:23 +00003569 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tierno59d22d22018-09-25 18:10:19 +02003570 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003571 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
tiernoe4f7e6c2018-11-27 14:55:30 +00003572 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
tierno067e04a2020-03-31 12:53:13 +00003573 primitive = db_nslcmop["operationParams"]["primitive"]
3574 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
3575 timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)
tierno59d22d22018-09-25 18:10:19 +02003576
tierno1b633412019-02-25 16:48:23 +00003577 if vnf_index:
3578 step = "Getting vnfr from database"
3579 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3580 step = "Getting vnfd from database"
3581 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
3582 else:
tierno067e04a2020-03-31 12:53:13 +00003583 step = "Getting nsd from database"
3584 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
tiernoda964822019-01-14 15:53:47 +00003585
tierno82974b22018-11-27 21:55:36 +00003586 # for backward compatibility
3587 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3588 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3589 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3590 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3591
tiernoda964822019-01-14 15:53:47 +00003592 # look for primitive
tiernoa278b842020-07-08 15:33:55 +00003593 config_primitive_desc = descriptor_configuration = None
tiernoda964822019-01-14 15:53:47 +00003594 if vdu_id:
3595 for vdu in get_iterable(db_vnfd, "vdu"):
3596 if vdu_id == vdu["id"]:
tiernoa278b842020-07-08 15:33:55 +00003597 descriptor_configuration = vdu.get("vdu-configuration")
tierno067e04a2020-03-31 12:53:13 +00003598 break
calvinosanch9f9c6f22019-11-04 13:37:39 +01003599 elif kdu_name:
tierno067e04a2020-03-31 12:53:13 +00003600 for kdu in get_iterable(db_vnfd, "kdu"):
3601 if kdu_name == kdu["name"]:
tiernoa278b842020-07-08 15:33:55 +00003602 descriptor_configuration = kdu.get("kdu-configuration")
tierno067e04a2020-03-31 12:53:13 +00003603 break
tierno1b633412019-02-25 16:48:23 +00003604 elif vnf_index:
tiernoa278b842020-07-08 15:33:55 +00003605 descriptor_configuration = db_vnfd.get("vnf-configuration")
tierno1b633412019-02-25 16:48:23 +00003606 else:
tiernoa278b842020-07-08 15:33:55 +00003607 descriptor_configuration = db_nsd.get("ns-configuration")
3608
3609 if descriptor_configuration and descriptor_configuration.get("config-primitive"):
3610 for config_primitive in descriptor_configuration["config-primitive"]:
tierno1b633412019-02-25 16:48:23 +00003611 if config_primitive["name"] == primitive:
3612 config_primitive_desc = config_primitive
3613 break
tiernoda964822019-01-14 15:53:47 +00003614
garciadeblas6bed6b32020-07-20 11:05:42 +00003615 if not config_primitive_desc:
3616 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
3617 raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
3618 format(primitive))
3619 primitive_name = primitive
3620 ee_descriptor_id = None
3621 else:
3622 primitive_name = config_primitive_desc.get("execution-environment-primitive", primitive)
3623 ee_descriptor_id = config_primitive_desc.get("execution-environment-ref")
tierno1b633412019-02-25 16:48:23 +00003624
tierno1b633412019-02-25 16:48:23 +00003625 if vnf_index:
tierno626e0152019-11-29 14:16:16 +00003626 if vdu_id:
3627 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
tierno067e04a2020-03-31 12:53:13 +00003628 desc_params = self._format_additional_params(vdur.get("additionalParams"))
3629 elif kdu_name:
3630 kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
3631 desc_params = self._format_additional_params(kdur.get("additionalParams"))
3632 else:
3633 desc_params = self._format_additional_params(db_vnfr.get("additionalParamsForVnf"))
tierno1b633412019-02-25 16:48:23 +00003634 else:
tierno067e04a2020-03-31 12:53:13 +00003635 desc_params = self._format_additional_params(db_nsr.get("additionalParamsForNs"))
tiernoda964822019-01-14 15:53:47 +00003636
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003637 if kdu_name:
3638 kdu_action = True if not deep_get(kdu, ("kdu-configuration", "juju")) else False
3639
tiernoda964822019-01-14 15:53:47 +00003640 # TODO check if ns is in a proper status
tiernoa278b842020-07-08 15:33:55 +00003641 if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action):
tierno067e04a2020-03-31 12:53:13 +00003642 # kdur and desc_params already set from before
3643 if primitive_params:
3644 desc_params.update(primitive_params)
3645 # TODO Check if we will need something at vnf level
3646 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
3647 if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
3648 break
3649 else:
3650 raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))
quilesj7e13aeb2019-10-08 13:34:55 +02003651
tierno067e04a2020-03-31 12:53:13 +00003652 if kdu.get("k8scluster-type") not in self.k8scluster_map:
3653 msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
3654 raise LcmException(msg)
3655
3656 db_dict = {"collection": "nsrs",
3657 "filter": {"_id": nsr_id},
3658 "path": "_admin.deployed.K8s.{}".format(index)}
tiernoa278b842020-07-08 15:33:55 +00003659 self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name))
3660 step = "Executing kdu {}".format(primitive_name)
3661 if primitive_name == "upgrade":
tierno067e04a2020-03-31 12:53:13 +00003662 if desc_params.get("kdu_model"):
3663 kdu_model = desc_params.get("kdu_model")
3664 del desc_params["kdu_model"]
3665 else:
3666 kdu_model = kdu.get("kdu-model")
3667 parts = kdu_model.split(sep=":")
3668 if len(parts) == 2:
3669 kdu_model = parts[0]
3670
3671 detailed_status = await asyncio.wait_for(
3672 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
3673 cluster_uuid=kdu.get("k8scluster-uuid"),
3674 kdu_instance=kdu.get("kdu-instance"),
3675 atomic=True, kdu_model=kdu_model,
3676 params=desc_params, db_dict=db_dict,
3677 timeout=timeout_ns_action),
3678 timeout=timeout_ns_action + 10)
3679 self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
tiernoa278b842020-07-08 15:33:55 +00003680 elif primitive_name == "rollback":
tierno067e04a2020-03-31 12:53:13 +00003681 detailed_status = await asyncio.wait_for(
3682 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
3683 cluster_uuid=kdu.get("k8scluster-uuid"),
3684 kdu_instance=kdu.get("kdu-instance"),
3685 db_dict=db_dict),
3686 timeout=timeout_ns_action)
tiernoa278b842020-07-08 15:33:55 +00003687 elif primitive_name == "status":
tierno067e04a2020-03-31 12:53:13 +00003688 detailed_status = await asyncio.wait_for(
3689 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
3690 cluster_uuid=kdu.get("k8scluster-uuid"),
3691 kdu_instance=kdu.get("kdu-instance")),
3692 timeout=timeout_ns_action)
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003693 else:
3694 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
3695 params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)
3696
3697 detailed_status = await asyncio.wait_for(
3698 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
3699 cluster_uuid=kdu.get("k8scluster-uuid"),
3700 kdu_instance=kdu_instance,
tiernoa278b842020-07-08 15:33:55 +00003701 primitive_name=primitive_name,
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003702 params=params, db_dict=db_dict,
3703 timeout=timeout_ns_action),
3704 timeout=timeout_ns_action)
tierno067e04a2020-03-31 12:53:13 +00003705
3706 if detailed_status:
3707 nslcmop_operation_state = 'COMPLETED'
3708 else:
3709 detailed_status = ''
3710 nslcmop_operation_state = 'FAILED'
tierno067e04a2020-03-31 12:53:13 +00003711 else:
tierno588547c2020-07-01 15:30:20 +00003712 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
3713 member_vnf_index=vnf_index,
3714 vdu_id=vdu_id,
tiernoa278b842020-07-08 15:33:55 +00003715 vdu_count_index=vdu_count_index,
3716 ee_descriptor_id=ee_descriptor_id)
tierno588547c2020-07-01 15:30:20 +00003717 db_nslcmop_notif = {"collection": "nslcmops",
3718 "filter": {"_id": nslcmop_id},
3719 "path": "admin.VCA"}
tierno067e04a2020-03-31 12:53:13 +00003720 nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
tierno588547c2020-07-01 15:30:20 +00003721 ee_id,
tiernoa278b842020-07-08 15:33:55 +00003722 primitive=primitive_name,
tierno067e04a2020-03-31 12:53:13 +00003723 primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
tierno588547c2020-07-01 15:30:20 +00003724 timeout=timeout_ns_action,
3725 vca_type=vca_type,
3726 db_dict=db_nslcmop_notif)
tierno067e04a2020-03-31 12:53:13 +00003727
3728 db_nslcmop_update["detailed-status"] = detailed_status
3729 error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
3730 self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
3731 detailed_status))
tierno59d22d22018-09-25 18:10:19 +02003732 return # database update is called inside finally
3733
tiernof59ad6c2020-04-08 12:50:52 +00003734 except (DbException, LcmException, N2VCException, K8sException) as e:
tierno59d22d22018-09-25 18:10:19 +02003735 self.logger.error(logging_text + "Exit Exception {}".format(e))
3736 exc = e
3737 except asyncio.CancelledError:
3738 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
3739 exc = "Operation was cancelled"
tierno067e04a2020-03-31 12:53:13 +00003740 except asyncio.TimeoutError:
3741 self.logger.error(logging_text + "Timeout while '{}'".format(step))
3742 exc = "Timeout"
tierno59d22d22018-09-25 18:10:19 +02003743 except Exception as e:
3744 exc = traceback.format_exc()
3745 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
3746 finally:
tierno067e04a2020-03-31 12:53:13 +00003747 if exc:
3748 db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
kuuse0ca67472019-05-13 15:59:27 +02003749 "FAILED {}: {}".format(step, exc)
tierno067e04a2020-03-31 12:53:13 +00003750 nslcmop_operation_state = "FAILED"
3751 if db_nsr:
3752 self._write_ns_status(
3753 nsr_id=nsr_id,
3754 ns_state=db_nsr["nsState"], # TODO check if degraded. For the moment use previous status
3755 current_operation="IDLE",
3756 current_operation_id=None,
3757 # error_description=error_description_nsr,
3758 # error_detail=error_detail,
3759 other_update=db_nsr_update
3760 )
3761
tiernoa17d4f42020-04-28 09:59:23 +00003762 self._write_op_status(
3763 op_id=nslcmop_id,
3764 stage="",
3765 error_message=error_description_nslcmop,
3766 operation_state=nslcmop_operation_state,
3767 other_update=db_nslcmop_update,
3768 )
tierno067e04a2020-03-31 12:53:13 +00003769
tierno59d22d22018-09-25 18:10:19 +02003770 if nslcmop_operation_state:
3771 try:
3772 await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00003773 "operationState": nslcmop_operation_state},
3774 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003775 except Exception as e:
3776 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
3777 self.logger.debug(logging_text + "Exit")
3778 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
tierno067e04a2020-03-31 12:53:13 +00003779 return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02003780
3781 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003782
3783 # Try to lock HA task here
3784 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3785 if not task_is_locked_by_me:
3786 return
3787
tierno59d22d22018-09-25 18:10:19 +02003788 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
3789 self.logger.debug(logging_text + "Enter")
3790 # get all needed from database
3791 db_nsr = None
3792 db_nslcmop = None
3793 db_nslcmop_update = {}
3794 nslcmop_operation_state = None
tiernoe876f672020-02-13 14:34:48 +00003795 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003796 exc = None
tierno9ab95942018-10-10 16:44:22 +02003797 # in case of error, indicates what part of scale was failed to put nsr at error status
3798 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02003799 old_operational_status = ""
3800 old_config_status = ""
tiernof578e552018-11-08 19:07:20 +01003801 vnfr_scaled = False
tierno59d22d22018-09-25 18:10:19 +02003802 try:
kuused124bfe2019-06-18 12:09:24 +02003803 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003804 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003805 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
tierno47e86b52018-10-10 14:05:55 +02003806
quilesj4cda56b2019-12-05 10:02:20 +00003807 self._write_ns_status(
3808 nsr_id=nsr_id,
3809 ns_state=None,
3810 current_operation="SCALING",
3811 current_operation_id=nslcmop_id
3812 )
3813
ikalyvas02d9e7b2019-05-27 18:16:01 +03003814 step = "Getting nslcmop from database"
ikalyvas02d9e7b2019-05-27 18:16:01 +03003815 self.logger.debug(step + " after having waited for previous tasks to be completed")
3816 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3817 step = "Getting nsr from database"
3818 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3819
3820 old_operational_status = db_nsr["operational-status"]
3821 old_config_status = db_nsr["config-status"]
tierno59d22d22018-09-25 18:10:19 +02003822 step = "Parsing scaling parameters"
tierno9babfda2019-06-07 12:36:50 +00003823 # self.logger.debug(step)
tierno59d22d22018-09-25 18:10:19 +02003824 db_nsr_update["operational-status"] = "scaling"
3825 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00003826 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003827
3828 #######
3829 nsr_deployed = db_nsr["_admin"].get("deployed")
3830 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tiernoda6fb102019-11-23 00:36:52 +00003831 # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3832 # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3833 # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003834 #######
3835
tiernoe4f7e6c2018-11-27 14:55:30 +00003836 RO_nsr_id = nsr_deployed["RO"]["nsr_id"]
tierno59d22d22018-09-25 18:10:19 +02003837 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
3838 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
3839 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
3840 # scaling_policy = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"].get("scaling-policy")
3841
tierno82974b22018-11-27 21:55:36 +00003842 # for backward compatibility
3843 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3844 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3845 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3846 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3847
tierno59d22d22018-09-25 18:10:19 +02003848 step = "Getting vnfr from database"
3849 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3850 step = "Getting vnfd from database"
3851 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03003852
tierno59d22d22018-09-25 18:10:19 +02003853 step = "Getting scaling-group-descriptor"
3854 for scaling_descriptor in db_vnfd["scaling-group-descriptor"]:
3855 if scaling_descriptor["name"] == scaling_group:
3856 break
3857 else:
3858 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
3859 "at vnfd:scaling-group-descriptor".format(scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003860
tierno59d22d22018-09-25 18:10:19 +02003861 # cooldown_time = 0
3862 # for scaling_policy_descriptor in scaling_descriptor.get("scaling-policy", ()):
3863 # cooldown_time = scaling_policy_descriptor.get("cooldown-time", 0)
3864 # if scaling_policy and scaling_policy == scaling_policy_descriptor.get("name"):
3865 # break
3866
3867 # TODO check if ns is in a proper status
tierno15b1cf12019-08-29 13:21:40 +00003868 step = "Sending scale order to VIM"
tierno59d22d22018-09-25 18:10:19 +02003869 nb_scale_op = 0
3870 if not db_nsr["_admin"].get("scaling-group"):
3871 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
3872 admin_scale_index = 0
3873 else:
3874 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
3875 if admin_scale_info["name"] == scaling_group:
3876 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
3877 break
tierno9ab95942018-10-10 16:44:22 +02003878 else: # not found, set index one plus last element and add new entry with the name
3879 admin_scale_index += 1
3880 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
tierno59d22d22018-09-25 18:10:19 +02003881 RO_scaling_info = []
3882 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
3883 if scaling_type == "SCALE_OUT":
3884 # count if max-instance-count is reached
kuuse818d70c2019-08-07 14:43:44 +02003885 max_instance_count = scaling_descriptor.get("max-instance-count", 10)
3886 # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count))
3887 if nb_scale_op >= max_instance_count:
3888 raise LcmException("reached the limit of {} (max-instance-count) "
3889 "scaling-out operations for the "
3890 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
kuuse8b998e42019-07-30 15:22:16 +02003891
ikalyvas02d9e7b2019-05-27 18:16:01 +03003892 nb_scale_op += 1
tierno59d22d22018-09-25 18:10:19 +02003893 vdu_scaling_info["scaling_direction"] = "OUT"
3894 vdu_scaling_info["vdu-create"] = {}
3895 for vdu_scale_info in scaling_descriptor["vdu"]:
3896 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3897 "type": "create", "count": vdu_scale_info.get("count", 1)})
3898 vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
ikalyvas02d9e7b2019-05-27 18:16:01 +03003899
tierno59d22d22018-09-25 18:10:19 +02003900 elif scaling_type == "SCALE_IN":
3901 # count if min-instance-count is reached
tierno27246d82018-09-27 15:59:09 +02003902 min_instance_count = 0
tierno59d22d22018-09-25 18:10:19 +02003903 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
3904 min_instance_count = int(scaling_descriptor["min-instance-count"])
tierno9babfda2019-06-07 12:36:50 +00003905 if nb_scale_op <= min_instance_count:
3906 raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the "
3907 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003908 nb_scale_op -= 1
tierno59d22d22018-09-25 18:10:19 +02003909 vdu_scaling_info["scaling_direction"] = "IN"
3910 vdu_scaling_info["vdu-delete"] = {}
3911 for vdu_scale_info in scaling_descriptor["vdu"]:
3912 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3913 "type": "delete", "count": vdu_scale_info.get("count", 1)})
3914 vdu_scaling_info["vdu-delete"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
3915
3916 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
tierno27246d82018-09-27 15:59:09 +02003917 vdu_create = vdu_scaling_info.get("vdu-create")
3918 vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
tierno59d22d22018-09-25 18:10:19 +02003919 if vdu_scaling_info["scaling_direction"] == "IN":
3920 for vdur in reversed(db_vnfr["vdur"]):
tierno27246d82018-09-27 15:59:09 +02003921 if vdu_delete.get(vdur["vdu-id-ref"]):
3922 vdu_delete[vdur["vdu-id-ref"]] -= 1
tierno59d22d22018-09-25 18:10:19 +02003923 vdu_scaling_info["vdu"].append({
3924 "name": vdur["name"],
3925 "vdu_id": vdur["vdu-id-ref"],
3926 "interface": []
3927 })
3928 for interface in vdur["interfaces"]:
3929 vdu_scaling_info["vdu"][-1]["interface"].append({
3930 "name": interface["name"],
3931 "ip_address": interface["ip-address"],
3932 "mac_address": interface.get("mac-address"),
3933 })
tierno27246d82018-09-27 15:59:09 +02003934 vdu_delete = vdu_scaling_info.pop("vdu-delete")
tierno59d22d22018-09-25 18:10:19 +02003935
kuuseac3a8882019-10-03 10:48:06 +02003936 # PRE-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02003937 step = "Executing pre-scale vnf-config-primitive"
3938 if scaling_descriptor.get("scaling-config-action"):
3939 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02003940 if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
3941 or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02003942 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
3943 step = db_nslcmop_update["detailed-status"] = \
3944 "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00003945
tierno59d22d22018-09-25 18:10:19 +02003946 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02003947 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
3948 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02003949 break
3950 else:
3951 raise LcmException(
3952 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
tiernoda964822019-01-14 15:53:47 +00003953 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
tiernoa278b842020-07-08 15:33:55 +00003954 "primitive".format(scaling_group, vnf_config_primitive))
tiernoda964822019-01-14 15:53:47 +00003955
tierno16fedf52019-05-24 08:38:26 +00003956 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00003957 if db_vnfr.get("additionalParamsForVnf"):
3958 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
quilesj7e13aeb2019-10-08 13:34:55 +02003959
tierno9ab95942018-10-10 16:44:22 +02003960 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02003961 db_nsr_update["config-status"] = "configuring pre-scaling"
kuuseac3a8882019-10-03 10:48:06 +02003962 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
3963
tierno7c4e24c2020-05-13 08:41:35 +00003964 # Pre-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02003965 op_index = self._check_or_add_scale_suboperation(
3966 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
tierno7c4e24c2020-05-13 08:41:35 +00003967 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02003968 # Skip sub-operation
3969 result = 'COMPLETED'
3970 result_detail = 'Done'
3971 self.logger.debug(logging_text +
3972 "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
3973 vnf_config_primitive, result, result_detail))
3974 else:
tierno7c4e24c2020-05-13 08:41:35 +00003975 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02003976 # New sub-operation: Get index of this sub-operation
3977 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3978 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
3979 format(vnf_config_primitive))
3980 else:
tierno7c4e24c2020-05-13 08:41:35 +00003981 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02003982 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3983 vnf_index = op.get('member_vnf_index')
3984 vnf_config_primitive = op.get('primitive')
3985 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00003986 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02003987 format(vnf_config_primitive))
tierno588547c2020-07-01 15:30:20 +00003988 # Execute the primitive, either with new (first-time) or registered (reintent) args
tiernoa278b842020-07-08 15:33:55 +00003989 ee_descriptor_id = config_primitive.get("execution-environment-ref")
3990 primitive_name = config_primitive.get("execution-environment-primitive",
3991 vnf_config_primitive)
tierno588547c2020-07-01 15:30:20 +00003992 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
3993 member_vnf_index=vnf_index,
3994 vdu_id=None,
tiernoa278b842020-07-08 15:33:55 +00003995 vdu_count_index=None,
3996 ee_descriptor_id=ee_descriptor_id)
kuuseac3a8882019-10-03 10:48:06 +02003997 result, result_detail = await self._ns_execute_primitive(
tiernoa278b842020-07-08 15:33:55 +00003998 ee_id, primitive_name, primitive_params, vca_type)
kuuseac3a8882019-10-03 10:48:06 +02003999 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4000 vnf_config_primitive, result, result_detail))
4001 # Update operationState = COMPLETED | FAILED
4002 self._update_suboperation_status(
4003 db_nslcmop, op_index, result, result_detail)
4004
tierno59d22d22018-09-25 18:10:19 +02004005 if result == "FAILED":
4006 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02004007 db_nsr_update["config-status"] = old_config_status
4008 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02004009 # PRE-SCALE END
tierno59d22d22018-09-25 18:10:19 +02004010
kuuseac3a8882019-10-03 10:48:06 +02004011 # SCALE RO - BEGIN
4012 # Should this block be skipped if 'RO_nsr_id' == None ?
4013 # if (RO_nsr_id and RO_scaling_info):
tierno59d22d22018-09-25 18:10:19 +02004014 if RO_scaling_info:
tierno9ab95942018-10-10 16:44:22 +02004015 scale_process = "RO"
tierno7c4e24c2020-05-13 08:41:35 +00004016 # Scale RO retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004017 op_index = self._check_or_add_scale_suboperation(
4018 db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
tierno7c4e24c2020-05-13 08:41:35 +00004019 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004020 # Skip sub-operation
4021 result = 'COMPLETED'
4022 result_detail = 'Done'
4023 self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(
4024 result, result_detail))
4025 else:
tierno7c4e24c2020-05-13 08:41:35 +00004026 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004027 # New sub-operation: Get index of this sub-operation
4028 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4029 self.logger.debug(logging_text + "New sub-operation RO")
tierno59d22d22018-09-25 18:10:19 +02004030 else:
tierno7c4e24c2020-05-13 08:41:35 +00004031 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004032 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4033 RO_nsr_id = op.get('RO_nsr_id')
4034 RO_scaling_info = op.get('RO_scaling_info')
tierno7c4e24c2020-05-13 08:41:35 +00004035 self.logger.debug(logging_text + "Sub-operation RO retry for primitive {}".format(
kuuseac3a8882019-10-03 10:48:06 +02004036 vnf_config_primitive))
4037
4038 RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
4039 db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
4040 db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
4041 # wait until ready
4042 RO_nslcmop_id = RO_desc["instance_action_id"]
4043 db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id
4044
4045 RO_task_done = False
4046 step = detailed_status = "Waiting RO_task_id={} to complete the scale action.".format(RO_nslcmop_id)
4047 detailed_status_old = None
4048 self.logger.debug(logging_text + step)
4049
4050 deployment_timeout = 1 * 3600 # One hour
4051 while deployment_timeout > 0:
4052 if not RO_task_done:
4053 desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
4054 extra_item_id=RO_nslcmop_id)
quilesj3655ae02019-12-12 16:08:35 +00004055
4056 # deploymentStatus
4057 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4058
kuuseac3a8882019-10-03 10:48:06 +02004059 ns_status, ns_status_info = self.RO.check_action_status(desc)
4060 if ns_status == "ERROR":
4061 raise ROclient.ROClientException(ns_status_info)
4062 elif ns_status == "BUILD":
4063 detailed_status = step + "; {}".format(ns_status_info)
4064 elif ns_status == "ACTIVE":
4065 RO_task_done = True
4066 step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
4067 self.logger.debug(logging_text + step)
4068 else:
4069 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
tierno59d22d22018-09-25 18:10:19 +02004070 else:
quilesj7e13aeb2019-10-08 13:34:55 +02004071
kuuseac3a8882019-10-03 10:48:06 +02004072 if ns_status == "ERROR":
4073 raise ROclient.ROClientException(ns_status_info)
4074 elif ns_status == "BUILD":
4075 detailed_status = step + "; {}".format(ns_status_info)
4076 elif ns_status == "ACTIVE":
4077 step = detailed_status = \
4078 "Waiting for management IP address reported by the VIM. Updating VNFRs"
4079 if not vnfr_scaled:
4080 self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
4081 vnfr_scaled = True
4082 try:
4083 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00004084
4085 # deploymentStatus
4086 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4087
kuuseac3a8882019-10-03 10:48:06 +02004088 # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
4089 self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
4090 break
4091 except LcmExceptionNoMgmtIP:
4092 pass
4093 else:
4094 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
4095 if detailed_status != detailed_status_old:
4096 self._update_suboperation_status(
4097 db_nslcmop, op_index, 'COMPLETED', detailed_status)
4098 detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
4099 self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
tierno59d22d22018-09-25 18:10:19 +02004100
kuuseac3a8882019-10-03 10:48:06 +02004101 await asyncio.sleep(5, loop=self.loop)
4102 deployment_timeout -= 5
4103 if deployment_timeout <= 0:
4104 self._update_suboperation_status(
4105 db_nslcmop, nslcmop_id, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
4106 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tierno59d22d22018-09-25 18:10:19 +02004107
kuuseac3a8882019-10-03 10:48:06 +02004108 # update VDU_SCALING_INFO with the obtained ip_addresses
4109 if vdu_scaling_info["scaling_direction"] == "OUT":
4110 for vdur in reversed(db_vnfr["vdur"]):
4111 if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
4112 vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
4113 vdu_scaling_info["vdu"].append({
4114 "name": vdur["name"],
4115 "vdu_id": vdur["vdu-id-ref"],
4116 "interface": []
tierno59d22d22018-09-25 18:10:19 +02004117 })
kuuseac3a8882019-10-03 10:48:06 +02004118 for interface in vdur["interfaces"]:
4119 vdu_scaling_info["vdu"][-1]["interface"].append({
4120 "name": interface["name"],
4121 "ip_address": interface["ip-address"],
4122 "mac_address": interface.get("mac-address"),
4123 })
4124 del vdu_scaling_info["vdu-create"]
4125
4126 self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
4127 # SCALE RO - END
tierno59d22d22018-09-25 18:10:19 +02004128
tierno9ab95942018-10-10 16:44:22 +02004129 scale_process = None
tierno59d22d22018-09-25 18:10:19 +02004130 if db_nsr_update:
4131 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4132
kuuseac3a8882019-10-03 10:48:06 +02004133 # POST-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02004134 # execute primitive service POST-SCALING
4135 step = "Executing post-scale vnf-config-primitive"
4136 if scaling_descriptor.get("scaling-config-action"):
4137 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02004138 if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
4139 or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02004140 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4141 step = db_nslcmop_update["detailed-status"] = \
4142 "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00004143
tierno589befb2019-05-29 07:06:23 +00004144 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00004145 if db_vnfr.get("additionalParamsForVnf"):
4146 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
4147
tierno59d22d22018-09-25 18:10:19 +02004148 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02004149 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
4150 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02004151 break
4152 else:
tiernoa278b842020-07-08 15:33:55 +00004153 raise LcmException(
4154 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
4155 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
4156 "config-primitive".format(scaling_group, vnf_config_primitive))
tierno9ab95942018-10-10 16:44:22 +02004157 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02004158 db_nsr_update["config-status"] = "configuring post-scaling"
kuuseac3a8882019-10-03 10:48:06 +02004159 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
tiernod6de1992018-10-11 13:05:52 +02004160
tierno7c4e24c2020-05-13 08:41:35 +00004161 # Post-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004162 op_index = self._check_or_add_scale_suboperation(
4163 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
quilesj4cda56b2019-12-05 10:02:20 +00004164 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004165 # Skip sub-operation
4166 result = 'COMPLETED'
4167 result_detail = 'Done'
4168 self.logger.debug(logging_text +
4169 "vnf_config_primitive={} Skipped sub-operation, result {} {}".
4170 format(vnf_config_primitive, result, result_detail))
4171 else:
quilesj4cda56b2019-12-05 10:02:20 +00004172 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004173 # New sub-operation: Get index of this sub-operation
4174 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4175 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4176 format(vnf_config_primitive))
4177 else:
tierno7c4e24c2020-05-13 08:41:35 +00004178 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004179 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4180 vnf_index = op.get('member_vnf_index')
4181 vnf_config_primitive = op.get('primitive')
4182 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00004183 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02004184 format(vnf_config_primitive))
tierno588547c2020-07-01 15:30:20 +00004185 # Execute the primitive, either with new (first-time) or registered (reintent) args
tiernoa278b842020-07-08 15:33:55 +00004186 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4187 primitive_name = config_primitive.get("execution-environment-primitive",
4188 vnf_config_primitive)
tierno588547c2020-07-01 15:30:20 +00004189 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4190 member_vnf_index=vnf_index,
4191 vdu_id=None,
tiernoa278b842020-07-08 15:33:55 +00004192 vdu_count_index=None,
4193 ee_descriptor_id=ee_descriptor_id)
kuuseac3a8882019-10-03 10:48:06 +02004194 result, result_detail = await self._ns_execute_primitive(
tiernoa278b842020-07-08 15:33:55 +00004195 ee_id, primitive_name, primitive_params, vca_type)
kuuseac3a8882019-10-03 10:48:06 +02004196 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4197 vnf_config_primitive, result, result_detail))
4198 # Update operationState = COMPLETED | FAILED
4199 self._update_suboperation_status(
4200 db_nslcmop, op_index, result, result_detail)
4201
tierno59d22d22018-09-25 18:10:19 +02004202 if result == "FAILED":
4203 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02004204 db_nsr_update["config-status"] = old_config_status
4205 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02004206 # POST-SCALE END
tierno59d22d22018-09-25 18:10:19 +02004207
tiernod6de1992018-10-11 13:05:52 +02004208 db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type)
ikalyvas02d9e7b2019-05-27 18:16:01 +03004209 db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
4210 else old_operational_status
tiernod6de1992018-10-11 13:05:52 +02004211 db_nsr_update["config-status"] = old_config_status
tierno59d22d22018-09-25 18:10:19 +02004212 return
4213 except (ROclient.ROClientException, DbException, LcmException) as e:
4214 self.logger.error(logging_text + "Exit Exception {}".format(e))
4215 exc = e
4216 except asyncio.CancelledError:
4217 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
4218 exc = "Operation was cancelled"
4219 except Exception as e:
4220 exc = traceback.format_exc()
4221 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
4222 finally:
quilesj3655ae02019-12-12 16:08:35 +00004223 self._write_ns_status(
4224 nsr_id=nsr_id,
4225 ns_state=None,
4226 current_operation="IDLE",
4227 current_operation_id=None
4228 )
tierno59d22d22018-09-25 18:10:19 +02004229 if exc:
tiernoa17d4f42020-04-28 09:59:23 +00004230 db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
4231 nslcmop_operation_state = "FAILED"
tierno59d22d22018-09-25 18:10:19 +02004232 if db_nsr:
tiernod6de1992018-10-11 13:05:52 +02004233 db_nsr_update["operational-status"] = old_operational_status
4234 db_nsr_update["config-status"] = old_config_status
4235 db_nsr_update["detailed-status"] = ""
4236 if scale_process:
4237 if "VCA" in scale_process:
4238 db_nsr_update["config-status"] = "failed"
4239 if "RO" in scale_process:
4240 db_nsr_update["operational-status"] = "failed"
4241 db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
4242 exc)
tiernoa17d4f42020-04-28 09:59:23 +00004243 else:
4244 error_description_nslcmop = None
4245 nslcmop_operation_state = "COMPLETED"
4246 db_nslcmop_update["detailed-status"] = "Done"
quilesj4cda56b2019-12-05 10:02:20 +00004247
tiernoa17d4f42020-04-28 09:59:23 +00004248 self._write_op_status(
4249 op_id=nslcmop_id,
4250 stage="",
4251 error_message=error_description_nslcmop,
4252 operation_state=nslcmop_operation_state,
4253 other_update=db_nslcmop_update,
4254 )
4255 if db_nsr:
4256 self._write_ns_status(
4257 nsr_id=nsr_id,
4258 ns_state=None,
4259 current_operation="IDLE",
4260 current_operation_id=None,
4261 other_update=db_nsr_update
4262 )
4263
tierno59d22d22018-09-25 18:10:19 +02004264 if nslcmop_operation_state:
4265 try:
4266 await self.msg.aiowrite("ns", "scaled", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00004267 "operationState": nslcmop_operation_state},
4268 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02004269 # if cooldown_time:
tiernod8323042019-08-09 11:32:23 +00004270 # await asyncio.sleep(cooldown_time, loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02004271 # await self.msg.aiowrite("ns","scaled-cooldown-time", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id})
4272 except Exception as e:
4273 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
4274 self.logger.debug(logging_text + "Exit")
4275 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
tiernob996d942020-07-03 14:52:28 +00004276
4277 async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip):
4278 if not self.prometheus:
4279 return
4280 # look if exist a file called 'prometheus*.j2' and
4281 artifact_content = self.fs.dir_ls(artifact_path)
4282 job_file = next((f for f in artifact_content if f.startswith("prometheus") and f.endswith(".j2")), None)
4283 if not job_file:
4284 return
4285 with self.fs.file_open((artifact_path, job_file), "r") as f:
4286 job_data = f.read()
4287
4288 # TODO get_service
4289 _, _, service = ee_id.partition(".") # remove prefix "namespace."
4290 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
4291 host_port = "80"
4292 vnfr_id = vnfr_id.replace("-", "")
4293 variables = {
4294 "JOB_NAME": vnfr_id,
4295 "TARGET_IP": target_ip,
4296 "EXPORTER_POD_IP": host_name,
4297 "EXPORTER_POD_PORT": host_port,
4298 }
4299 job_list = self.prometheus.parse_job(job_data, variables)
4300 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
4301 for job in job_list:
4302 if not isinstance(job.get("job_name"), str) or vnfr_id not in job["job_name"]:
4303 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
4304 job["nsr_id"] = nsr_id
4305 job_dict = {jl["job_name"]: jl for jl in job_list}
4306 if await self.prometheus.update(job_dict):
4307 return list(job_dict.keys())