blob: c327a57aafffe12b42f827d77d3f8dac4fefefa1 [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
20import yaml
21import logging
22import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020023import traceback
David Garciad4816682019-12-09 14:57:43 +010024import json
gcalvino35be9152018-12-20 09:33:12 +010025from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError
tierno59d22d22018-09-25 18:10:19 +020026
tierno77677d92019-08-22 13:46:35 +000027from osm_lcm import ROclient
tierno69f0d382020-05-07 13:08:09 +000028from osm_lcm.ng_ro import NgRoClient, NgRoException
tierno744303e2020-01-13 16:46:31 +000029from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
calvinosanch9f9c6f22019-11-04 13:37:39 +010030from n2vc.k8s_helm_conn import K8sHelmConnector
Adam Israelbaacc302019-12-01 12:41:39 -050031from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020032
tierno27246d82018-09-27 15:59:09 +020033from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +020034from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +020035
36from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +000037from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +020038
tierno588547c2020-07-01 15:30:20 +000039from osm_lcm.lcm_helm_conn import LCMHelmConn
40
tierno27246d82018-09-27 15:59:09 +020041from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +020042from http import HTTPStatus
43from time import time
tierno27246d82018-09-27 15:59:09 +020044from uuid import uuid4
lloretgalleg80ad9212020-07-08 07:53:22 +000045
tierno89f82902020-07-03 14:52:28 +000046from random import randint
tierno59d22d22018-09-25 18:10:19 +020047
tierno69f0d382020-05-07 13:08:09 +000048__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
tierno59d22d22018-09-25 18:10:19 +020049
50
class N2VCJujuConnectorLCM(N2VCJujuConnector):
    """N2VC juju connector extended for LCM: accepts artifact_path/vca_type to handle k8s proxy charms."""

    async def create_execution_environment(self, namespace: str, db_dict: dict, reuse_ee_id: str = None,
                                           progress_timeout: float = None, total_timeout: float = None,
                                           config: dict = None, artifact_path: str = None,
                                           vca_type: str = None) -> (str, dict):
        # two extra parameters with respect to the parent class: artifact_path and vca_type
        if vca_type != "k8s_proxy_charm":
            # default path: delegate to the parent implementation
            return await super().create_execution_environment(
                namespace=namespace, db_dict=db_dict, reuse_ee_id=reuse_ee_id,
                progress_timeout=progress_timeout, total_timeout=total_timeout)
        # k8s proxy charms are installed directly; the charm name is the last path component
        charm_name = artifact_path[artifact_path.rfind("/") + 1:]
        ee_id = await self.install_k8s_proxy_charm(
            charm_name=charm_name,
            namespace=namespace,
            artifact_path=artifact_path,
            db_dict=db_dict)
        return ee_id, None

    async def install_configuration_sw(self, ee_id: str, artifact_path: str, db_dict: dict,
                                       progress_timeout: float = None, total_timeout: float = None,
                                       config: dict = None, num_units: int = 1, vca_type: str = "lxc_proxy_charm"):
        # nothing to install for a k8s proxy charm: everything was done at creation time
        if vca_type == "k8s_proxy_charm":
            return
        return await super().install_configuration_sw(
            ee_id=ee_id, artifact_path=artifact_path, db_dict=db_dict, progress_timeout=progress_timeout,
            total_timeout=total_timeout, config=config, num_units=num_units)
78
79
class NsLcm(LcmBase):
    """
    Network Service lifecycle manager. Translates OSM instantiation parameters into RO
    descriptors and keeps the nsr/vnfr database records updated (see methods below).
    """
    # timeouts used as plain numeric limits — presumably seconds; confirm at callers
    timeout_vca_on_error = 5 * 60   # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600   # default global timeout for deployment a ns
    timeout_ns_terminate = 1800   # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60   # timeout for charm deletion (name-based; confirm usage)
    timeout_primitive = 30 * 60   # timeout for primitive execution
    timeout_progress_primitive = 10 * 60   # timeout for some progress in a primitive execution

    # sentinel results for sub-operation lookups (negative — presumably so they cannot be
    # mistaken for a valid list index; confirm against the sub-operation helpers)
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
kuuseac3a8882019-10-03 10:48:06 +020092
tierno89f82902020-07-03 14:52:28 +000093 def __init__(self, db, msg, fs, lcm_tasks, config, loop, prometheus=None):
tierno59d22d22018-09-25 18:10:19 +020094 """
95 Init, Connect to database, filesystem storage, and messaging
96 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
97 :return: None
98 """
quilesj7e13aeb2019-10-08 13:34:55 +020099 super().__init__(
100 db=db,
101 msg=msg,
102 fs=fs,
103 logger=logging.getLogger('lcm.ns')
104 )
105
tierno59d22d22018-09-25 18:10:19 +0200106 self.loop = loop
107 self.lcm_tasks = lcm_tasks
tierno744303e2020-01-13 16:46:31 +0000108 self.timeout = config["timeout"]
109 self.ro_config = config["ro_config"]
tierno69f0d382020-05-07 13:08:09 +0000110 self.ng_ro = config["ro_config"].get("ng")
tierno744303e2020-01-13 16:46:31 +0000111 self.vca_config = config["VCA"].copy()
tierno59d22d22018-09-25 18:10:19 +0200112
quilesj7e13aeb2019-10-08 13:34:55 +0200113 # create N2VC connector
tierno588547c2020-07-01 15:30:20 +0000114 self.n2vc = N2VCJujuConnectorLCM(
quilesj7e13aeb2019-10-08 13:34:55 +0200115 db=self.db,
116 fs=self.fs,
tierno59d22d22018-09-25 18:10:19 +0200117 log=self.logger,
quilesj7e13aeb2019-10-08 13:34:55 +0200118 loop=self.loop,
119 url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
120 username=self.vca_config.get('user', None),
121 vca_config=self.vca_config,
quilesj3655ae02019-12-12 16:08:35 +0000122 on_update_db=self._on_update_n2vc_db
tierno59d22d22018-09-25 18:10:19 +0200123 )
quilesj7e13aeb2019-10-08 13:34:55 +0200124
tierno588547c2020-07-01 15:30:20 +0000125 self.conn_helm_ee = LCMHelmConn(
126 db=self.db,
127 fs=self.fs,
128 log=self.logger,
129 loop=self.loop,
130 url=None,
131 username=None,
132 vca_config=self.vca_config,
133 on_update_db=self._on_update_n2vc_db
134 )
135
calvinosanch9f9c6f22019-11-04 13:37:39 +0100136 self.k8sclusterhelm = K8sHelmConnector(
137 kubectl_command=self.vca_config.get("kubectlpath"),
138 helm_command=self.vca_config.get("helmpath"),
139 fs=self.fs,
140 log=self.logger,
141 db=self.db,
142 on_update_db=None,
143 )
144
Adam Israelbaacc302019-12-01 12:41:39 -0500145 self.k8sclusterjuju = K8sJujuConnector(
146 kubectl_command=self.vca_config.get("kubectlpath"),
147 juju_command=self.vca_config.get("jujupath"),
148 fs=self.fs,
149 log=self.logger,
150 db=self.db,
151 on_update_db=None,
152 )
153
tiernoa2143262020-03-27 16:20:40 +0000154 self.k8scluster_map = {
155 "helm-chart": self.k8sclusterhelm,
156 "chart": self.k8sclusterhelm,
157 "juju-bundle": self.k8sclusterjuju,
158 "juju": self.k8sclusterjuju,
159 }
tierno588547c2020-07-01 15:30:20 +0000160
161 self.vca_map = {
162 "lxc_proxy_charm": self.n2vc,
163 "native_charm": self.n2vc,
164 "k8s_proxy_charm": self.n2vc,
165 "helm": self.conn_helm_ee
166 }
167
tierno89f82902020-07-03 14:52:28 +0000168 self.prometheus = prometheus
169
quilesj7e13aeb2019-10-08 13:34:55 +0200170 # create RO client
tierno69f0d382020-05-07 13:08:09 +0000171 if self.ng_ro:
172 self.RO = NgRoClient(self.loop, **self.ro_config)
173 else:
174 self.RO = ROclient.ROClient(self.loop, **self.ro_config)
tierno59d22d22018-09-25 18:10:19 +0200175
quilesj3655ae02019-12-12 16:08:35 +0000176 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200177
quilesj3655ae02019-12-12 16:08:35 +0000178 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
179
180 try:
181 # TODO filter RO descriptor fields...
182
183 # write to database
184 db_dict = dict()
185 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
186 db_dict['deploymentStatus'] = ro_descriptor
187 self.update_db_2("nsrs", nsrs_id, db_dict)
188
189 except Exception as e:
190 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
191
192 async def _on_update_n2vc_db(self, table, filter, path, updated_data):
193
quilesj69a722c2020-01-09 08:30:17 +0000194 # remove last dot from path (if exists)
195 if path.endswith('.'):
196 path = path[:-1]
197
quilesj3655ae02019-12-12 16:08:35 +0000198 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
199 # .format(table, filter, path, updated_data))
200
201 try:
202
203 nsr_id = filter.get('_id')
204
205 # read ns record from database
206 nsr = self.db.get_one(table='nsrs', q_filter=filter)
207 current_ns_status = nsr.get('nsState')
208
209 # get vca status for NS
quilesj69a722c2020-01-09 08:30:17 +0000210 status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
quilesj3655ae02019-12-12 16:08:35 +0000211
212 # vcaStatus
213 db_dict = dict()
214 db_dict['vcaStatus'] = status_dict
215
216 # update configurationStatus for this VCA
217 try:
218 vca_index = int(path[path.rfind(".")+1:])
219
220 vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
221 vca_status = vca_list[vca_index].get('status')
222
223 configuration_status_list = nsr.get('configurationStatus')
224 config_status = configuration_status_list[vca_index].get('status')
225
226 if config_status == 'BROKEN' and vca_status != 'failed':
227 db_dict['configurationStatus'][vca_index] = 'READY'
228 elif config_status != 'BROKEN' and vca_status == 'failed':
229 db_dict['configurationStatus'][vca_index] = 'BROKEN'
230 except Exception as e:
231 # not update configurationStatus
232 self.logger.debug('Error updating vca_index (ignore): {}'.format(e))
233
234 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
235 # if nsState = 'DEGRADED' check if all is OK
236 is_degraded = False
237 if current_ns_status in ('READY', 'DEGRADED'):
238 error_description = ''
239 # check machines
240 if status_dict.get('machines'):
241 for machine_id in status_dict.get('machines'):
242 machine = status_dict.get('machines').get(machine_id)
243 # check machine agent-status
244 if machine.get('agent-status'):
245 s = machine.get('agent-status').get('status')
246 if s != 'started':
247 is_degraded = True
248 error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
249 # check machine instance status
250 if machine.get('instance-status'):
251 s = machine.get('instance-status').get('status')
252 if s != 'running':
253 is_degraded = True
254 error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
255 # check applications
256 if status_dict.get('applications'):
257 for app_id in status_dict.get('applications'):
258 app = status_dict.get('applications').get(app_id)
259 # check application status
260 if app.get('status'):
261 s = app.get('status').get('status')
262 if s != 'active':
263 is_degraded = True
264 error_description += 'application {} status={} ; '.format(app_id, s)
265
266 if error_description:
267 db_dict['errorDescription'] = error_description
268 if current_ns_status == 'READY' and is_degraded:
269 db_dict['nsState'] = 'DEGRADED'
270 if current_ns_status == 'DEGRADED' and not is_degraded:
271 db_dict['nsState'] = 'READY'
272
273 # write to database
274 self.update_db_2("nsrs", nsr_id, db_dict)
275
tierno51183952020-04-03 15:48:18 +0000276 except (asyncio.CancelledError, asyncio.TimeoutError):
277 raise
quilesj3655ae02019-12-12 16:08:35 +0000278 except Exception as e:
279 self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200280
gcalvino35be9152018-12-20 09:33:12 +0100281 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
tierno59d22d22018-09-25 18:10:19 +0200282 """
283 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
284 :param vnfd: input vnfd
285 :param new_id: overrides vnf id if provided
tierno8a518872018-12-21 13:42:14 +0000286 :param additionalParams: Instantiation params for VNFs provided
gcalvino35be9152018-12-20 09:33:12 +0100287 :param nsrId: Id of the NSR
tierno59d22d22018-09-25 18:10:19 +0200288 :return: copy of vnfd
289 """
tierno59d22d22018-09-25 18:10:19 +0200290 try:
291 vnfd_RO = deepcopy(vnfd)
tierno8a518872018-12-21 13:42:14 +0000292 # remove unused by RO configuration, monitoring, scaling and internal keys
tierno59d22d22018-09-25 18:10:19 +0200293 vnfd_RO.pop("_id", None)
294 vnfd_RO.pop("_admin", None)
tierno8a518872018-12-21 13:42:14 +0000295 vnfd_RO.pop("vnf-configuration", None)
296 vnfd_RO.pop("monitoring-param", None)
297 vnfd_RO.pop("scaling-group-descriptor", None)
calvinosanch9f9c6f22019-11-04 13:37:39 +0100298 vnfd_RO.pop("kdu", None)
299 vnfd_RO.pop("k8s-cluster", None)
tierno59d22d22018-09-25 18:10:19 +0200300 if new_id:
301 vnfd_RO["id"] = new_id
tierno8a518872018-12-21 13:42:14 +0000302
303 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
304 for vdu in get_iterable(vnfd_RO, "vdu"):
305 cloud_init_file = None
306 if vdu.get("cloud-init-file"):
tierno59d22d22018-09-25 18:10:19 +0200307 base_folder = vnfd["_admin"]["storage"]
gcalvino35be9152018-12-20 09:33:12 +0100308 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
309 vdu["cloud-init-file"])
310 with self.fs.file_open(cloud_init_file, "r") as ci_file:
311 cloud_init_content = ci_file.read()
tierno59d22d22018-09-25 18:10:19 +0200312 vdu.pop("cloud-init-file", None)
tierno8a518872018-12-21 13:42:14 +0000313 elif vdu.get("cloud-init"):
gcalvino35be9152018-12-20 09:33:12 +0100314 cloud_init_content = vdu["cloud-init"]
tierno8a518872018-12-21 13:42:14 +0000315 else:
316 continue
317
318 env = Environment()
319 ast = env.parse(cloud_init_content)
320 mandatory_vars = meta.find_undeclared_variables(ast)
321 if mandatory_vars:
322 for var in mandatory_vars:
323 if not additionalParams or var not in additionalParams.keys():
324 raise LcmException("Variable '{}' defined at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
325 "file, must be provided in the instantiation parameters inside the "
326 "'additionalParamsForVnf' block".format(var, vnfd["id"], vdu["id"]))
327 template = Template(cloud_init_content)
tierno2b611dd2019-01-11 10:30:57 +0000328 cloud_init_content = template.render(additionalParams or {})
gcalvino35be9152018-12-20 09:33:12 +0100329 vdu["cloud-init"] = cloud_init_content
tierno8a518872018-12-21 13:42:14 +0000330
tierno59d22d22018-09-25 18:10:19 +0200331 return vnfd_RO
332 except FsException as e:
tierno8a518872018-12-21 13:42:14 +0000333 raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
tiernoda964822019-01-14 15:53:47 +0000334 format(vnfd["id"], vdu["id"], cloud_init_file, e))
tierno8a518872018-12-21 13:42:14 +0000335 except (TemplateError, TemplateNotFound, TemplateSyntaxError) as e:
336 raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
337 format(vnfd["id"], vdu["id"], e))
tierno59d22d22018-09-25 18:10:19 +0200338
    def _ns_params_2_RO(self, ns_params, nsd, vnfd_dict, db_vnfrs, n2vc_key_list):
        """
        Creates a RO ns descriptor from OSM ns_instantiate params
        :param ns_params: OSM instantiate params
        :param nsd: ns descriptor (used to resolve member-vnf-index references)
        :param vnfd_dict: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index. {member-vnf-index: {vnfr_object}, ...}
        :param n2vc_key_list: ssh public keys to inject into vdus that need management access
        :return: The RO ns descriptor
        """
        # caches for resolved vim/wim accounts, so each account is looked up only once
        vim_2_RO = {}
        wim_2_RO = {}
        # TODO feature 1417: Check that no instantiation is set over PDU
        # check if PDU forces a concrete vim-network-id and add it
        # check if PDU contains a SDN-assist info (dpid, switch, port) and pass it to RO

        def vim_account_2_RO(vim_account):
            """Resolve an OSM vim account id to the RO vim id (cached; raises if not ENABLED)."""
            if vim_account in vim_2_RO:
                return vim_2_RO[vim_account]

            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
            if db_vim["_admin"]["operationalState"] != "ENABLED":
                raise LcmException("VIM={} is not available. operationalState={}".format(
                    vim_account, db_vim["_admin"]["operationalState"]))
            RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
            vim_2_RO[vim_account] = RO_vim_id
            return RO_vim_id

        def wim_account_2_RO(wim_account):
            """Resolve an OSM wim account id to the RO account (cached); non-str values pass through."""
            if isinstance(wim_account, str):
                if wim_account in wim_2_RO:
                    return wim_2_RO[wim_account]

                db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
                if db_wim["_admin"]["operationalState"] != "ENABLED":
                    raise LcmException("WIM={} is not available. operationalState={}".format(
                        wim_account, db_wim["_admin"]["operationalState"]))
                RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
                wim_2_RO[wim_account] = RO_wim_id
                return RO_wim_id
            else:
                return wim_account

        def ip_profile_2_RO(ip_profile):
            """Translate an OSM ip-profile to RO naming (dns-server->dns-address, dhcp-params->dhcp...)."""
            RO_ip_profile = deepcopy((ip_profile))
            if "dns-server" in RO_ip_profile:
                if isinstance(RO_ip_profile["dns-server"], list):
                    RO_ip_profile["dns-address"] = []
                    for ds in RO_ip_profile.pop("dns-server"):
                        RO_ip_profile["dns-address"].append(ds['address'])
                else:
                    RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
            if RO_ip_profile.get("ip-version") == "ipv4":
                RO_ip_profile["ip-version"] = "IPv4"
            if RO_ip_profile.get("ip-version") == "ipv6":
                RO_ip_profile["ip-version"] = "IPv6"
            if "dhcp-params" in RO_ip_profile:
                RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
            return RO_ip_profile

        if not ns_params:
            return None
        RO_ns_params = {
            # "name": ns_params["nsName"],
            # "description": ns_params.get("nsDescription"),
            "datacenter": vim_account_2_RO(ns_params["vimAccountId"]),
            "wim_account": wim_account_2_RO(ns_params.get("wimAccountId")),
            # "scenario": ns_params["nsdId"],
        }
        # set vim_account of each vnf if different from general vim_account.
        # Get this information from <vnfr> database content, key vim-account-id
        # Vim account can be set by placement_engine and it may be different from
        # the instantiate parameters (vnfs.member-vnf-index.datacenter).
        for vnf_index, vnfr in db_vnfrs.items():
            if vnfr.get("vim-account-id") and vnfr["vim-account-id"] != ns_params["vimAccountId"]:
                populate_dict(RO_ns_params, ("vnfs", vnf_index, "datacenter"), vim_account_2_RO(vnfr["vim-account-id"]))

        # determine which vdus need the n2vc ssh keys injected (ssh-access required via
        # vnf-configuration/vdu-configuration, or being the vdu behind the mgmt cp)
        n2vc_key_list = n2vc_key_list or []
        for vnfd_ref, vnfd in vnfd_dict.items():
            vdu_needed_access = []
            mgmt_cp = None
            if vnfd.get("vnf-configuration"):
                ssh_required = deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required"))
                if ssh_required and vnfd.get("mgmt-interface"):
                    if vnfd["mgmt-interface"].get("vdu-id"):
                        vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
                    elif vnfd["mgmt-interface"].get("cp"):
                        mgmt_cp = vnfd["mgmt-interface"]["cp"]

            for vdu in vnfd.get("vdu", ()):
                if vdu.get("vdu-configuration"):
                    ssh_required = deep_get(vdu, ("vdu-configuration", "config-access", "ssh-access", "required"))
                    if ssh_required:
                        vdu_needed_access.append(vdu["id"])
                elif mgmt_cp:
                    # the vdu exposing the management connection point also needs access
                    for vdu_interface in vdu.get("interface"):
                        if vdu_interface.get("external-connection-point-ref") and \
                                vdu_interface["external-connection-point-ref"] == mgmt_cp:
                            vdu_needed_access.append(vdu["id"])
                            mgmt_cp = None
                            break

            if vdu_needed_access:
                for vnf_member in nsd.get("constituent-vnfd"):
                    if vnf_member["vnfd-id-ref"] != vnfd_ref:
                        continue
                    for vdu in vdu_needed_access:
                        populate_dict(RO_ns_params,
                                      ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
                                      n2vc_key_list)

        if ns_params.get("vduImage"):
            RO_ns_params["vduImage"] = ns_params["vduImage"]

        if ns_params.get("ssh_keys"):
            RO_ns_params["cloud-config"] = {"key-pairs": ns_params["ssh_keys"]}
        # per-vnf instantiation parameters: volumes, interface addresses, internal vlds
        for vnf_params in get_iterable(ns_params, "vnf"):
            for constituent_vnfd in nsd["constituent-vnfd"]:
                if constituent_vnfd["member-vnf-index"] == vnf_params["member-vnf-index"]:
                    vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                    break
            else:
                raise LcmException("Invalid instantiate parameter vnf:member-vnf-index={} is not present at nsd:"
                                   "constituent-vnfd".format(vnf_params["member-vnf-index"]))

            for vdu_params in get_iterable(vnf_params, "vdu"):
                # TODO feature 1417: check that this VDU exist and it is not a PDU
                if vdu_params.get("volume"):
                    for volume_params in vdu_params["volume"]:
                        if volume_params.get("vim-volume-id"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "devices", volume_params["name"], "vim_id"),
                                          volume_params["vim-volume-id"])
                if vdu_params.get("interface"):
                    for interface_params in vdu_params["interface"]:
                        if interface_params.get("ip-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "ip_address"),
                                          interface_params["ip-address"])
                        if interface_params.get("mac-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "mac_address"),
                                          interface_params["mac-address"])
                        if interface_params.get("floating-ip-required"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "floating-ip"),
                                          interface_params["floating-ip-required"])

            for internal_vld_params in get_iterable(vnf_params, "internal-vld"):
                if internal_vld_params.get("vim-network-name"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-name"),
                                  internal_vld_params["vim-network-name"])
                if internal_vld_params.get("vim-network-id"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-id"),
                                  internal_vld_params["vim-network-id"])
                if internal_vld_params.get("ip-profile"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "ip-profile"),
                                  ip_profile_2_RO(internal_vld_params["ip-profile"]))
                if internal_vld_params.get("provider-network"):

                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "provider-network"),
                                  internal_vld_params["provider-network"].copy())

                for icp_params in get_iterable(internal_vld_params, "internal-connection-point"):
                    # look for interface
                    iface_found = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for vdu_interface in vdu_descriptor["interface"]:
                            if vdu_interface.get("internal-connection-point-ref") == icp_params["id-ref"]:
                                if icp_params.get("ip-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "ip_address"),
                                                  icp_params["ip-address"])

                                if icp_params.get("mac-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "mac_address"),
                                                  icp_params["mac-address"])
                                iface_found = True
                                break
                        if iface_found:
                            break
                    else:
                        # for-else: no vdu interface matched the internal connection point
                        raise LcmException("Invalid instantiate parameter vnf:member-vnf-index[{}]:"
                                           "internal-vld:id-ref={} is not present at vnfd:internal-"
                                           "connection-point".format(vnf_params["member-vnf-index"],
                                                                     icp_params["id-ref"]))

        # ns-level vld parameters: ip-profiles, provider/wim info, vim network mappings
        for vld_params in get_iterable(ns_params, "vld"):
            if "ip-profile" in vld_params:
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"),
                              ip_profile_2_RO(vld_params["ip-profile"]))

            if vld_params.get("provider-network"):

                populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"),
                              vld_params["provider-network"].copy())

            if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None:
                # NOTE(review): the trailing comma after this call makes the statement a
                # one-element tuple expression; harmless but probably unintended
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"),
                              wim_account_2_RO(vld_params["wimAccountId"])),
            if vld_params.get("vim-network-name"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-name"], dict):
                    for vim_account, vim_net in vld_params["vim-network-name"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)

            if vld_params.get("vim-network-id"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-id"], dict):
                    for vim_account, vim_net in vld_params["vim-network-id"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-id"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
            if vld_params.get("ns-net"):
                if isinstance(vld_params["ns-net"], dict):
                    # NOTE(review): each iteration overwrites the same 'use-network' entry;
                    # only the last (vld_id, instance_scenario_id) pair survives — confirm intent
                    for vld_id, instance_scenario_id in vld_params["ns-net"].items():
                        RO_vld_ns_net = {"instance_scenario_id": instance_scenario_id, "osm_id": vld_id}
                        populate_dict(RO_ns_params, ("networks", vld_params["name"], "use-network"), RO_vld_ns_net)
            if "vnfd-connection-point-ref" in vld_params:
                for cp_params in vld_params["vnfd-connection-point-ref"]:
                    # look for interface
                    for constituent_vnfd in nsd["constituent-vnfd"]:
                        if constituent_vnfd["member-vnf-index"] == cp_params["member-vnf-index-ref"]:
                            vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={} "
                            "is not present at nsd:constituent-vnfd".format(cp_params["member-vnf-index-ref"]))
                    match_cp = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for interface_descriptor in vdu_descriptor["interface"]:
                            if interface_descriptor.get("external-connection-point-ref") == \
                                    cp_params["vnfd-connection-point-ref"]:
                                match_cp = True
                                break
                        if match_cp:
                            break
                    else:
                        # for-else: no vdu interface references the given connection point
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={}:"
                            "vnfd-connection-point-ref={} is not present at vnfd={}".format(
                                cp_params["member-vnf-index-ref"],
                                cp_params["vnfd-connection-point-ref"],
                                vnf_descriptor["id"]))
                    if cp_params.get("ip-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "ip_address"),
                                      cp_params["ip-address"])
                    if cp_params.get("mac-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "mac_address"),
                                      cp_params["mac-address"])
        return RO_ns_params
615
tierno27246d82018-09-27 15:59:09 +0200616 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None):
617 # make a copy to do not change
618 vdu_create = copy(vdu_create)
619 vdu_delete = copy(vdu_delete)
620
621 vdurs = db_vnfr.get("vdur")
622 if vdurs is None:
623 vdurs = []
624 vdu_index = len(vdurs)
625 while vdu_index:
626 vdu_index -= 1
627 vdur = vdurs[vdu_index]
628 if vdur.get("pdu-type"):
629 continue
630 vdu_id_ref = vdur["vdu-id-ref"]
631 if vdu_create and vdu_create.get(vdu_id_ref):
tiernodf24ef82020-09-25 12:33:15 +0000632 vdur_copy = deepcopy(vdur)
633 vdur_copy["status"] = "BUILD"
634 vdur_copy["status-detailed"] = None
635 vdur_copy["ip_address"]: None
636 for iface in vdur_copy["interfaces"]:
637 iface["ip-address"] = None
638 iface["mac-address"] = None
639 iface.pop("mgmt_vnf", None) # only first vdu can be managment of vnf # TODO ALF
tierno27246d82018-09-27 15:59:09 +0200640 for index in range(0, vdu_create[vdu_id_ref]):
tiernodf24ef82020-09-25 12:33:15 +0000641 vdur_copy["_id"] = str(uuid4())
642 vdur_copy["count-index"] += 1
643 vdurs.insert(vdu_index+1+index, vdur_copy)
644 self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
645 vdur_copy = deepcopy(vdur_copy)
646
tierno27246d82018-09-27 15:59:09 +0200647 del vdu_create[vdu_id_ref]
648 if vdu_delete and vdu_delete.get(vdu_id_ref):
649 del vdurs[vdu_index]
650 vdu_delete[vdu_id_ref] -= 1
651 if not vdu_delete[vdu_id_ref]:
652 del vdu_delete[vdu_id_ref]
653 # check all operations are done
654 if vdu_create or vdu_delete:
655 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
656 vdu_create))
657 if vdu_delete:
658 raise LcmException("Error scaling IN VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
659 vdu_delete))
660
661 vnfr_update = {"vdur": vdurs}
662 db_vnfr["vdur"] = vdurs
663 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
664
tiernof578e552018-11-08 19:07:20 +0100665 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
666 """
667 Updates database nsr with the RO info for the created vld
668 :param ns_update_nsr: dictionary to be filled with the updated info
669 :param db_nsr: content of db_nsr. This is also modified
670 :param nsr_desc_RO: nsr descriptor from RO
671 :return: Nothing, LcmException is raised on errors
672 """
673
674 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
675 for net_RO in get_iterable(nsr_desc_RO, "nets"):
676 if vld["id"] != net_RO.get("ns_net_osm_id"):
677 continue
678 vld["vim-id"] = net_RO.get("vim_net_id")
679 vld["name"] = net_RO.get("vim_name")
680 vld["status"] = net_RO.get("status")
681 vld["status-detailed"] = net_RO.get("error_msg")
682 ns_update_nsr["vld.{}".format(vld_index)] = vld
683 break
684 else:
685 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
686
tiernoe876f672020-02-13 14:34:48 +0000687 def set_vnfr_at_error(self, db_vnfrs, error_text):
688 try:
689 for db_vnfr in db_vnfrs.values():
690 vnfr_update = {"status": "ERROR"}
691 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
692 if "status" not in vdur:
693 vdur["status"] = "ERROR"
694 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
695 if error_text:
696 vdur["status-detailed"] = str(error_text)
697 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
698 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
699 except DbException as e:
700 self.logger.error("Cannot update vnf. {}".format(e))
701
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # locate the RO vnf entry matching this member index (for-else raises if missing)
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';'; keep the first one only
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))

                # update each vdur with the VIM data (vim-id, ip/mac, status) of its matching RO vm
                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are physical devices, not deployed by RO; skip them
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        # match the n-th RO vm of this vdu with the vdur having the same count-index
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            # first address only, same ';'-separated convention as above
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy ip/mac of every interface from the RO entry with the same internal name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get("ip_address")
                                    ifacer["mac-address"] = interface_RO.get("mac_address")
                                    break
                            else:
                                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                                   "from VIM info"
                                                   .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                                           "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))

                # update internal vlds with the matching RO net info (matched by vnf_net_osm_id)
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]))

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
tierno59d22d22018-09-25 18:10:19 +0200774
tierno5ee02052019-12-05 19:55:02 +0000775 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000776 """
777 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000778 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000779 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
780 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
781 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
782 """
tierno5ee02052019-12-05 19:55:02 +0000783 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
784 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000785 mapping = {}
786 ns_config_info = {"osm-config-mapping": mapping}
787 for vca in vca_deployed_list:
788 if not vca["member-vnf-index"]:
789 continue
790 if not vca["vdu_id"]:
791 mapping[vca["member-vnf-index"]] = vca["application"]
792 else:
793 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
794 vca["application"]
795 return ns_config_info
796
797 @staticmethod
tierno4fa7f8e2020-07-08 15:33:55 +0000798 def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed, ee_descriptor_id):
tiernoc3f2a822019-11-05 13:45:04 +0000799 """
800 Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal
801 primitives as verify-ssh-credentials, or config when needed
802 :param desc_primitive_list: information of the descriptor
803 :param vca_deployed: information of the deployed, needed for known if it is related to an NS, VNF, VDU and if
804 this element contains a ssh public key
tierno4fa7f8e2020-07-08 15:33:55 +0000805 :param ee_descriptor_id: execution environment descriptor id. It is the value of
806 XXX_configuration.execution-environment-list.INDEX.id; it can be None
tiernoc3f2a822019-11-05 13:45:04 +0000807 :return: The modified list. Can ba an empty list, but always a list
808 """
tierno4fa7f8e2020-07-08 15:33:55 +0000809
810 primitive_list = desc_primitive_list or []
811
812 # filter primitives by ee_id
813 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
814
815 # sort by 'seq'
816 if primitive_list:
817 primitive_list.sort(key=lambda val: int(val['seq']))
818
tiernoc3f2a822019-11-05 13:45:04 +0000819 # look for primitive config, and get the position. None if not present
820 config_position = None
821 for index, primitive in enumerate(primitive_list):
822 if primitive["name"] == "config":
823 config_position = index
824 break
825
826 # for NS, add always a config primitive if not present (bug 874)
827 if not vca_deployed["member-vnf-index"] and config_position is None:
828 primitive_list.insert(0, {"name": "config", "parameter": []})
829 config_position = 0
tierno4fa7f8e2020-07-08 15:33:55 +0000830 # TODO revise if needed: for VNF/VDU add verify-ssh-credentials after config
tiernoc3f2a822019-11-05 13:45:04 +0000831 if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"):
832 primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []})
833 return primitive_list
834
tierno69f0d382020-05-07 13:08:09 +0000835 async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
836 n2vc_key_list, stage, start_deploy, timeout_ns_deploy):
837 nslcmop_id = db_nslcmop["_id"]
838 target = {
839 "name": db_nsr["name"],
840 "ns": {"vld": []},
841 "vnf": [],
842 "image": deepcopy(db_nsr["image"]),
843 "flavor": deepcopy(db_nsr["flavor"]),
844 "action_id": nslcmop_id,
845 }
846 for image in target["image"]:
847 image["vim_info"] = []
848 for flavor in target["flavor"]:
849 flavor["vim_info"] = []
850
851 ns_params = db_nslcmop.get("operationParams")
852 ssh_keys = []
853 if ns_params.get("ssh_keys"):
854 ssh_keys += ns_params.get("ssh_keys")
855 if n2vc_key_list:
856 ssh_keys += n2vc_key_list
857
858 cp2target = {}
859 for vld_index, vld in enumerate(nsd.get("vld")):
860 target_vld = {"id": vld["id"],
861 "name": vld["name"],
862 "mgmt-network": vld.get("mgmt-network", False),
863 "type": vld.get("type"),
864 "vim_info": [{"vim-network-name": vld.get("vim-network-name"),
865 "vim_account_id": ns_params["vimAccountId"]}],
866 }
867 for cp in vld["vnfd-connection-point-ref"]:
868 cp2target["member_vnf:{}.{}".format(cp["member-vnf-index-ref"], cp["vnfd-connection-point-ref"])] = \
869 "nsrs:{}:vld.{}".format(nsr_id, vld_index)
870 target["ns"]["vld"].append(target_vld)
871 for vnfr in db_vnfrs.values():
872 vnfd = db_vnfds_ref[vnfr["vnfd-ref"]]
873 target_vnf = deepcopy(vnfr)
874 for vld in target_vnf.get("vld", ()):
875 # check if connected to a ns.vld
876 vnf_cp = next((cp for cp in vnfd.get("connection-point", ()) if
877 cp.get("internal-vld-ref") == vld["id"]), None)
878 if vnf_cp:
879 ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"])
880 if cp2target.get(ns_cp):
881 vld["target"] = cp2target[ns_cp]
882 vld["vim_info"] = [{"vim-network-name": vld.get("vim-network-name"),
883 "vim_account_id": vnfr["vim-account-id"]}]
884
885 for vdur in target_vnf.get("vdur", ()):
886 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
887 vdud_index, vdud = next(k for k in enumerate(vnfd["vdu"]) if k[1]["id"] == vdur["vdu-id-ref"])
888 # vdur["additionalParams"] = vnfr.get("additionalParamsForVnf") # TODO additional params for VDU
889
890 if ssh_keys:
891 if deep_get(vdud, ("vdu-configuration", "config-access", "ssh-access", "required")):
892 vdur["ssh-keys"] = ssh_keys
893 vdur["ssh-access-required"] = True
894 elif deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required")) and \
895 any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]):
896 vdur["ssh-keys"] = ssh_keys
897 vdur["ssh-access-required"] = True
898
899 # cloud-init
900 if vdud.get("cloud-init-file"):
901 vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file"))
902 elif vdud.get("cloud-init"):
903 vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], vdud_index)
904
905 # flavor
906 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
907 if not next((vi for vi in ns_flavor["vim_info"] if
908 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
909 ns_flavor["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
910 # image
911 ns_image = target["image"][int(vdur["ns-image-id"])]
912 if not next((vi for vi in ns_image["vim_info"] if
913 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
914 ns_image["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
915
916 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
917 target["vnf"].append(target_vnf)
918
919 desc = await self.RO.deploy(nsr_id, target)
920 action_id = desc["action_id"]
921 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage)
922
923 # Updating NSR
924 db_nsr_update = {
925 "_admin.deployed.RO.operational-status": "running",
926 "detailed-status": " ".join(stage)
927 }
928 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
929 self.update_db_2("nsrs", nsr_id, db_nsr_update)
930 self._write_op_status(nslcmop_id, stage)
931 self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id))
932 return
933
934 async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_time, timeout, stage):
935 detailed_status_old = None
936 db_nsr_update = {}
937 while time() <= start_time + timeout:
938 desc_status = await self.RO.status(nsr_id, action_id)
939 if desc_status["status"] == "FAILED":
940 raise NgRoException(desc_status["details"])
941 elif desc_status["status"] == "BUILD":
942 stage[2] = "VIM: ({})".format(desc_status["details"])
943 elif desc_status["status"] == "DONE":
944 stage[2] = "Deployed at VIM"
945 break
946 else:
947 assert False, "ROclient.check_ns_status returns unknown {}".format(desc_status["status"])
948 if stage[2] != detailed_status_old:
949 detailed_status_old = stage[2]
950 db_nsr_update["detailed-status"] = " ".join(stage)
951 self.update_db_2("nsrs", nsr_id, db_nsr_update)
952 self._write_op_status(nslcmop_id, stage)
953 await asyncio.sleep(5, loop=self.loop)
954 else: # timeout_ns_deploy
955 raise NgRoException("Timeout waiting ns to deploy")
956
957 async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
958 db_nsr_update = {}
959 failed_detail = []
960 action_id = None
961 start_deploy = time()
962 try:
963 target = {
964 "ns": {"vld": []},
965 "vnf": [],
966 "image": [],
967 "flavor": [],
968 }
969 desc = await self.RO.deploy(nsr_id, target)
970 action_id = desc["action_id"]
971 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
972 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
973 self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id))
974
975 # wait until done
976 delete_timeout = 20 * 60 # 20 minutes
977 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage)
978
979 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
980 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
981 # delete all nsr
982 await self.RO.delete(nsr_id)
983 except Exception as e:
984 if isinstance(e, NgRoException) and e.http_code == 404: # not found
985 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
986 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
987 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
988 self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id))
989 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
990 failed_detail.append("delete conflict: {}".format(e))
991 self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e))
992 else:
993 failed_detail.append("delete error: {}".format(e))
994 self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e))
995
996 if failed_detail:
997 stage[2] = "Error deleting from VIM"
998 else:
999 stage[2] = "Deleted from VIM"
1000 db_nsr_update["detailed-status"] = " ".join(stage)
1001 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1002 self._write_op_status(nslcmop_id, stage)
1003
1004 if failed_detail:
1005 raise LcmException("; ".join(failed_detail))
1006 return
1007
tiernoe876f672020-02-13 14:34:48 +00001008 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
1009 n2vc_key_list, stage):
tiernoe95ed362020-04-23 08:24:57 +00001010 """
1011 Instantiate at RO
1012 :param logging_text: preffix text to use at logging
1013 :param nsr_id: nsr identity
1014 :param nsd: database content of ns descriptor
1015 :param db_nsr: database content of ns record
1016 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1017 :param db_vnfrs:
1018 :param db_vnfds_ref: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1019 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1020 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1021 :return: None or exception
1022 """
tiernoe876f672020-02-13 14:34:48 +00001023 try:
1024 db_nsr_update = {}
1025 RO_descriptor_number = 0 # number of descriptors created at RO
1026 vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
1027 nslcmop_id = db_nslcmop["_id"]
1028 start_deploy = time()
1029 ns_params = db_nslcmop.get("operationParams")
1030 if ns_params and ns_params.get("timeout_ns_deploy"):
1031 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1032 else:
1033 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001034
tiernoe876f672020-02-13 14:34:48 +00001035 # Check for and optionally request placement optimization. Database will be updated if placement activated
1036 stage[2] = "Waiting for Placement."
tierno8790a3d2020-04-23 22:49:52 +00001037 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1038 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1039 for vnfr in db_vnfrs.values():
1040 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1041 break
1042 else:
1043 ns_params["vimAccountId"] == vnfr["vim-account-id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001044
tierno69f0d382020-05-07 13:08:09 +00001045 if self.ng_ro:
1046 return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs,
1047 db_vnfds_ref, n2vc_key_list, stage, start_deploy,
1048 timeout_ns_deploy)
tiernoe876f672020-02-13 14:34:48 +00001049 # deploy RO
tiernoe876f672020-02-13 14:34:48 +00001050 # get vnfds, instantiate at RO
1051 for c_vnf in nsd.get("constituent-vnfd", ()):
1052 member_vnf_index = c_vnf["member-vnf-index"]
1053 vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
1054 vnfd_ref = vnfd["id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001055
tiernoe876f672020-02-13 14:34:48 +00001056 stage[2] = "Creating vnfd='{}' member_vnf_index='{}' at RO".format(vnfd_ref, member_vnf_index)
1057 db_nsr_update["detailed-status"] = " ".join(stage)
1058 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1059 self._write_op_status(nslcmop_id, stage)
calvinosanch9f9c6f22019-11-04 13:37:39 +01001060
tiernoe876f672020-02-13 14:34:48 +00001061 # self.logger.debug(logging_text + stage[2])
1062 vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, member_vnf_index[:23])
1063 vnf_index_2_RO_id[member_vnf_index] = vnfd_id_RO
1064 RO_descriptor_number += 1
1065
1066 # look position at deployed.RO.vnfd if not present it will be appended at the end
1067 for index, vnf_deployed in enumerate(db_nsr["_admin"]["deployed"]["RO"]["vnfd"]):
1068 if vnf_deployed["member-vnf-index"] == member_vnf_index:
1069 break
1070 else:
1071 index = len(db_nsr["_admin"]["deployed"]["RO"]["vnfd"])
1072 db_nsr["_admin"]["deployed"]["RO"]["vnfd"].append(None)
1073
1074 # look if present
1075 RO_update = {"member-vnf-index": member_vnf_index}
1076 vnfd_list = await self.RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
1077 if vnfd_list:
1078 RO_update["id"] = vnfd_list[0]["uuid"]
1079 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' exists at RO. Using RO_id={}".
1080 format(vnfd_ref, member_vnf_index, vnfd_list[0]["uuid"]))
1081 else:
1082 vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO, db_vnfrs[c_vnf["member-vnf-index"]].
1083 get("additionalParamsForVnf"), nsr_id)
1084 desc = await self.RO.create("vnfd", descriptor=vnfd_RO)
1085 RO_update["id"] = desc["uuid"]
1086 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' created at RO. RO_id={}".format(
1087 vnfd_ref, member_vnf_index, desc["uuid"]))
1088 db_nsr_update["_admin.deployed.RO.vnfd.{}".format(index)] = RO_update
1089 db_nsr["_admin"]["deployed"]["RO"]["vnfd"][index] = RO_update
1090
1091 # create nsd at RO
1092 nsd_ref = nsd["id"]
1093
1094 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1095 db_nsr_update["detailed-status"] = " ".join(stage)
1096 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1097 self._write_op_status(nslcmop_id, stage)
1098
1099 # self.logger.debug(logging_text + stage[2])
1100 RO_osm_nsd_id = "{}.{}.{}".format(nsr_id, RO_descriptor_number, nsd_ref[:23])
tiernod8323042019-08-09 11:32:23 +00001101 RO_descriptor_number += 1
tiernoe876f672020-02-13 14:34:48 +00001102 nsd_list = await self.RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
1103 if nsd_list:
1104 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = nsd_list[0]["uuid"]
1105 self.logger.debug(logging_text + "nsd={} exists at RO. Using RO_id={}".format(
1106 nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001107 else:
tiernoe876f672020-02-13 14:34:48 +00001108 nsd_RO = deepcopy(nsd)
1109 nsd_RO["id"] = RO_osm_nsd_id
1110 nsd_RO.pop("_id", None)
1111 nsd_RO.pop("_admin", None)
1112 for c_vnf in nsd_RO.get("constituent-vnfd", ()):
1113 member_vnf_index = c_vnf["member-vnf-index"]
1114 c_vnf["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
1115 for c_vld in nsd_RO.get("vld", ()):
1116 for cp in c_vld.get("vnfd-connection-point-ref", ()):
1117 member_vnf_index = cp["member-vnf-index-ref"]
1118 cp["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
tiernod8323042019-08-09 11:32:23 +00001119
tiernoe876f672020-02-13 14:34:48 +00001120 desc = await self.RO.create("nsd", descriptor=nsd_RO)
1121 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1122 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = desc["uuid"]
1123 self.logger.debug(logging_text + "nsd={} created at RO. RO_id={}".format(nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001124 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1125
tiernoe876f672020-02-13 14:34:48 +00001126 # Crate ns at RO
1127 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1128 db_nsr_update["detailed-status"] = " ".join(stage)
1129 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1130 self._write_op_status(nslcmop_id, stage)
tiernod8323042019-08-09 11:32:23 +00001131
tiernoe876f672020-02-13 14:34:48 +00001132 # if present use it unless in error status
1133 RO_nsr_id = deep_get(db_nsr, ("_admin", "deployed", "RO", "nsr_id"))
1134 if RO_nsr_id:
1135 try:
1136 stage[2] = "Looking for existing ns at RO"
1137 db_nsr_update["detailed-status"] = " ".join(stage)
1138 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1139 self._write_op_status(nslcmop_id, stage)
1140 # self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1141 desc = await self.RO.show("ns", RO_nsr_id)
tiernod8323042019-08-09 11:32:23 +00001142
tiernoe876f672020-02-13 14:34:48 +00001143 except ROclient.ROClientException as e:
1144 if e.http_code != HTTPStatus.NOT_FOUND:
1145 raise
1146 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1147 if RO_nsr_id:
1148 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1149 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1150 if ns_status == "ERROR":
1151 stage[2] = "Deleting ns at RO. RO_ns_id={}".format(RO_nsr_id)
1152 self.logger.debug(logging_text + stage[2])
1153 await self.RO.delete("ns", RO_nsr_id)
1154 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1155 if not RO_nsr_id:
1156 stage[2] = "Checking dependencies"
1157 db_nsr_update["detailed-status"] = " ".join(stage)
1158 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1159 self._write_op_status(nslcmop_id, stage)
1160 # self.logger.debug(logging_text + stage[2])
tiernod8323042019-08-09 11:32:23 +00001161
tiernoe876f672020-02-13 14:34:48 +00001162 # check if VIM is creating and wait look if previous tasks in process
1163 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account", ns_params["vimAccountId"])
1164 if task_dependency:
1165 stage[2] = "Waiting for related tasks '{}' to be completed".format(task_name)
1166 self.logger.debug(logging_text + stage[2])
1167 await asyncio.wait(task_dependency, timeout=3600)
1168 if ns_params.get("vnf"):
1169 for vnf in ns_params["vnf"]:
1170 if "vimAccountId" in vnf:
1171 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account",
1172 vnf["vimAccountId"])
1173 if task_dependency:
1174 stage[2] = "Waiting for related tasks '{}' to be completed.".format(task_name)
1175 self.logger.debug(logging_text + stage[2])
1176 await asyncio.wait(task_dependency, timeout=3600)
1177
1178 stage[2] = "Checking instantiation parameters."
tiernoe95ed362020-04-23 08:24:57 +00001179 RO_ns_params = self._ns_params_2_RO(ns_params, nsd, db_vnfds_ref, db_vnfrs, n2vc_key_list)
tiernoe876f672020-02-13 14:34:48 +00001180 stage[2] = "Deploying ns at VIM."
1181 db_nsr_update["detailed-status"] = " ".join(stage)
1182 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1183 self._write_op_status(nslcmop_id, stage)
1184
1185 desc = await self.RO.create("ns", descriptor=RO_ns_params, name=db_nsr["name"], scenario=RO_nsd_uuid)
1186 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = desc["uuid"]
1187 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1188 db_nsr_update["_admin.deployed.RO.nsr_status"] = "BUILD"
1189 self.logger.debug(logging_text + "ns created at RO. RO_id={}".format(desc["uuid"]))
1190
1191 # wait until NS is ready
1192 stage[2] = "Waiting VIM to deploy ns."
1193 db_nsr_update["detailed-status"] = " ".join(stage)
1194 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1195 self._write_op_status(nslcmop_id, stage)
1196 detailed_status_old = None
1197 self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1198
1199 old_desc = None
1200 while time() <= start_deploy + timeout_ns_deploy:
tiernod8323042019-08-09 11:32:23 +00001201 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001202
tiernoe876f672020-02-13 14:34:48 +00001203 # deploymentStatus
1204 if desc != old_desc:
1205 # desc has changed => update db
1206 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
1207 old_desc = desc
tiernod8323042019-08-09 11:32:23 +00001208
tiernoe876f672020-02-13 14:34:48 +00001209 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1210 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1211 if ns_status == "ERROR":
1212 raise ROclient.ROClientException(ns_status_info)
1213 elif ns_status == "BUILD":
1214 stage[2] = "VIM: ({})".format(ns_status_info)
1215 elif ns_status == "ACTIVE":
1216 stage[2] = "Waiting for management IP address reported by the VIM. Updating VNFRs."
1217 try:
1218 self.ns_update_vnfr(db_vnfrs, desc)
1219 break
1220 except LcmExceptionNoMgmtIP:
1221 pass
1222 else:
1223 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
1224 if stage[2] != detailed_status_old:
1225 detailed_status_old = stage[2]
1226 db_nsr_update["detailed-status"] = " ".join(stage)
1227 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1228 self._write_op_status(nslcmop_id, stage)
1229 await asyncio.sleep(5, loop=self.loop)
1230 else: # timeout_ns_deploy
1231 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tiernod8323042019-08-09 11:32:23 +00001232
tiernoe876f672020-02-13 14:34:48 +00001233 # Updating NSR
1234 self.ns_update_nsr(db_nsr_update, db_nsr, desc)
tiernod8323042019-08-09 11:32:23 +00001235
tiernoe876f672020-02-13 14:34:48 +00001236 db_nsr_update["_admin.deployed.RO.operational-status"] = "running"
1237 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1238 stage[2] = "Deployed at VIM"
1239 db_nsr_update["detailed-status"] = " ".join(stage)
1240 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1241 self._write_op_status(nslcmop_id, stage)
1242 # await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, "_admin.deployed", db_nsr_update)
1243 # self.logger.debug(logging_text + "Deployed at VIM")
tierno69f0d382020-05-07 13:08:09 +00001244 except (ROclient.ROClientException, LcmException, DbException, NgRoException) as e:
tierno067e04a2020-03-31 12:53:13 +00001245 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +00001246 self.set_vnfr_at_error(db_vnfrs, str(e))
1247 raise
quilesj7e13aeb2019-10-08 13:34:55 +02001248
tiernof24bcdd2020-09-21 14:05:39 +00001249 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1250 """
1251 Wait for kdu to be up, get ip address
1252 :param logging_text: prefix use for logging
1253 :param nsr_id:
1254 :param vnfr_id:
1255 :param kdu_name:
1256 :return: IP address
1257 """
1258
1259 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1260 nb_tries = 0
1261
1262 while nb_tries < 360:
1263 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1264 kdur = next((x for x in get_iterable(db_vnfr, "kdur") if x.get("name") == kdu_name), None)
1265 if not kdur:
1266 raise LcmException("Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name))
1267 if kdur.get("status"):
1268 if kdur["status"] in ("READY", "ENABLED"):
1269 return kdur.get("ip-address")
1270 else:
1271 raise LcmException("target KDU={} is in error state".format(kdu_name))
1272
1273 await asyncio.sleep(10, loop=self.loop)
1274 nb_tries += 1
1275 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1276
tiernoa5088192019-11-26 16:12:53 +00001277 async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
1278 """
1279 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1280 :param logging_text: prefix use for logging
1281 :param nsr_id:
1282 :param vnfr_id:
1283 :param vdu_id:
1284 :param vdu_index:
1285 :param pub_key: public ssh key to inject, None to skip
1286 :param user: user to apply the public ssh key
1287 :return: IP address
1288 """
quilesj7e13aeb2019-10-08 13:34:55 +02001289
tiernoa5088192019-11-26 16:12:53 +00001290 # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
tiernod8323042019-08-09 11:32:23 +00001291 ro_nsr_id = None
1292 ip_address = None
1293 nb_tries = 0
1294 target_vdu_id = None
quilesj3149f262019-12-03 10:58:10 +00001295 ro_retries = 0
quilesj7e13aeb2019-10-08 13:34:55 +02001296
tiernod8323042019-08-09 11:32:23 +00001297 while True:
quilesj7e13aeb2019-10-08 13:34:55 +02001298
quilesj3149f262019-12-03 10:58:10 +00001299 ro_retries += 1
1300 if ro_retries >= 360: # 1 hour
1301 raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))
1302
tiernod8323042019-08-09 11:32:23 +00001303 await asyncio.sleep(10, loop=self.loop)
quilesj7e13aeb2019-10-08 13:34:55 +02001304
1305 # get ip address
tiernod8323042019-08-09 11:32:23 +00001306 if not target_vdu_id:
1307 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
quilesj3149f262019-12-03 10:58:10 +00001308
1309 if not vdu_id: # for the VNF case
tiernoe876f672020-02-13 14:34:48 +00001310 if db_vnfr.get("status") == "ERROR":
1311 raise LcmException("Cannot inject ssh-key because target VNF is in error state")
tiernod8323042019-08-09 11:32:23 +00001312 ip_address = db_vnfr.get("ip-address")
1313 if not ip_address:
1314 continue
quilesj3149f262019-12-03 10:58:10 +00001315 vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
1316 else: # VDU case
1317 vdur = next((x for x in get_iterable(db_vnfr, "vdur")
1318 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
1319
tierno0e8c3f02020-03-12 17:18:21 +00001320 if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
1321 vdur = db_vnfr["vdur"][0]
quilesj3149f262019-12-03 10:58:10 +00001322 if not vdur:
tierno0e8c3f02020-03-12 17:18:21 +00001323 raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
1324 vdu_index))
quilesj7e13aeb2019-10-08 13:34:55 +02001325
tierno0e8c3f02020-03-12 17:18:21 +00001326 if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE":
quilesj3149f262019-12-03 10:58:10 +00001327 ip_address = vdur.get("ip-address")
1328 if not ip_address:
1329 continue
1330 target_vdu_id = vdur["vdu-id-ref"]
1331 elif vdur.get("status") == "ERROR":
1332 raise LcmException("Cannot inject ssh-key because target VM is in error state")
1333
tiernod8323042019-08-09 11:32:23 +00001334 if not target_vdu_id:
1335 continue
tiernod8323042019-08-09 11:32:23 +00001336
quilesj7e13aeb2019-10-08 13:34:55 +02001337 # inject public key into machine
1338 if pub_key and user:
tiernoe876f672020-02-13 14:34:48 +00001339 # wait until NS is deployed at RO
1340 if not ro_nsr_id:
1341 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1342 ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
1343 if not ro_nsr_id:
1344 continue
1345
tiernoa5088192019-11-26 16:12:53 +00001346 # self.logger.debug(logging_text + "Inserting RO key")
tierno0e8c3f02020-03-12 17:18:21 +00001347 if vdur.get("pdu-type"):
1348 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1349 return ip_address
quilesj7e13aeb2019-10-08 13:34:55 +02001350 try:
1351 ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
tierno69f0d382020-05-07 13:08:09 +00001352 if self.ng_ro:
1353 target = {"action": "inject_ssh_key", "key": pub_key, "user": user,
1354 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdu_id}]}],
1355 }
1356 await self.RO.deploy(nsr_id, target)
1357 else:
1358 result_dict = await self.RO.create_action(
1359 item="ns",
1360 item_id_name=ro_nsr_id,
1361 descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
1362 )
1363 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1364 if not result_dict or not isinstance(result_dict, dict):
1365 raise LcmException("Unknown response from RO when injecting key")
1366 for result in result_dict.values():
1367 if result.get("vim_result") == 200:
1368 break
1369 else:
1370 raise ROclient.ROClientException("error injecting key: {}".format(
1371 result.get("description")))
1372 break
1373 except NgRoException as e:
1374 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001375 except ROclient.ROClientException as e:
tiernoa5088192019-11-26 16:12:53 +00001376 if not nb_tries:
1377 self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
1378 format(e, 20*10))
quilesj7e13aeb2019-10-08 13:34:55 +02001379 nb_tries += 1
tiernoa5088192019-11-26 16:12:53 +00001380 if nb_tries >= 20:
quilesj7e13aeb2019-10-08 13:34:55 +02001381 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001382 else:
quilesj7e13aeb2019-10-08 13:34:55 +02001383 break
1384
1385 return ip_address
1386
tierno5ee02052019-12-05 19:55:02 +00001387 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1388 """
1389 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1390 """
1391 my_vca = vca_deployed_list[vca_index]
1392 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
quilesj3655ae02019-12-12 16:08:35 +00001393 # vdu or kdu: no dependencies
tierno5ee02052019-12-05 19:55:02 +00001394 return
1395 timeout = 300
1396 while timeout >= 0:
quilesj3655ae02019-12-12 16:08:35 +00001397 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1398 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1399 configuration_status_list = db_nsr["configurationStatus"]
1400 for index, vca_deployed in enumerate(configuration_status_list):
tierno5ee02052019-12-05 19:55:02 +00001401 if index == vca_index:
quilesj3655ae02019-12-12 16:08:35 +00001402 # myself
tierno5ee02052019-12-05 19:55:02 +00001403 continue
1404 if not my_vca.get("member-vnf-index") or \
1405 (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
quilesj3655ae02019-12-12 16:08:35 +00001406 internal_status = configuration_status_list[index].get("status")
1407 if internal_status == 'READY':
1408 continue
1409 elif internal_status == 'BROKEN':
tierno5ee02052019-12-05 19:55:02 +00001410 raise LcmException("Configuration aborted because dependent charm/s has failed")
quilesj3655ae02019-12-12 16:08:35 +00001411 else:
1412 break
tierno5ee02052019-12-05 19:55:02 +00001413 else:
quilesj3655ae02019-12-12 16:08:35 +00001414 # no dependencies, return
tierno5ee02052019-12-05 19:55:02 +00001415 return
1416 await asyncio.sleep(10)
1417 timeout -= 1
tierno5ee02052019-12-05 19:55:02 +00001418
1419 raise LcmException("Configuration aborted because dependent charm/s timeout")
1420
tiernoe876f672020-02-13 14:34:48 +00001421 async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
tierno89f82902020-07-03 14:52:28 +00001422 config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name,
1423 ee_config_descriptor):
tiernod8323042019-08-09 11:32:23 +00001424 nsr_id = db_nsr["_id"]
1425 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
tiernoda6fb102019-11-23 00:36:52 +00001426 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernod8323042019-08-09 11:32:23 +00001427 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
tierno89f82902020-07-03 14:52:28 +00001428 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
quilesj7e13aeb2019-10-08 13:34:55 +02001429 db_dict = {
1430 'collection': 'nsrs',
1431 'filter': {'_id': nsr_id},
1432 'path': db_update_entry
1433 }
tiernod8323042019-08-09 11:32:23 +00001434 step = ""
1435 try:
quilesj3655ae02019-12-12 16:08:35 +00001436
1437 element_type = 'NS'
1438 element_under_configuration = nsr_id
1439
tiernod8323042019-08-09 11:32:23 +00001440 vnfr_id = None
1441 if db_vnfr:
1442 vnfr_id = db_vnfr["_id"]
tierno89f82902020-07-03 14:52:28 +00001443 osm_config["osm"]["vnf_id"] = vnfr_id
tiernod8323042019-08-09 11:32:23 +00001444
1445 namespace = "{nsi}.{ns}".format(
1446 nsi=nsi_id if nsi_id else "",
1447 ns=nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001448
tiernod8323042019-08-09 11:32:23 +00001449 if vnfr_id:
quilesj3655ae02019-12-12 16:08:35 +00001450 element_type = 'VNF'
1451 element_under_configuration = vnfr_id
quilesjb8a35dd2020-01-09 15:10:14 +00001452 namespace += ".{}".format(vnfr_id)
tiernod8323042019-08-09 11:32:23 +00001453 if vdu_id:
1454 namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
quilesj3655ae02019-12-12 16:08:35 +00001455 element_type = 'VDU'
quilesjb8a35dd2020-01-09 15:10:14 +00001456 element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
tierno89f82902020-07-03 14:52:28 +00001457 osm_config["osm"]["vdu_id"] = vdu_id
tierno51183952020-04-03 15:48:18 +00001458 elif kdu_name:
1459 namespace += ".{}".format(kdu_name)
1460 element_type = 'KDU'
1461 element_under_configuration = kdu_name
tierno89f82902020-07-03 14:52:28 +00001462 osm_config["osm"]["kdu_name"] = kdu_name
tiernod8323042019-08-09 11:32:23 +00001463
1464 # Get artifact path
tierno588547c2020-07-01 15:30:20 +00001465 artifact_path = "{}/{}/{}/{}".format(
tiernod8323042019-08-09 11:32:23 +00001466 base_folder["folder"],
1467 base_folder["pkg-dir"],
tierno588547c2020-07-01 15:30:20 +00001468 "charms" if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts",
1469 vca_name
tiernod8323042019-08-09 11:32:23 +00001470 )
tierno4fa7f8e2020-07-08 15:33:55 +00001471 # get initial_config_primitive_list that applies to this element
1472 initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
1473
1474 # add config if not present for NS charm
1475 ee_descriptor_id = ee_config_descriptor.get("id")
1476 initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
1477 vca_deployed, ee_descriptor_id)
tiernod8323042019-08-09 11:32:23 +00001478
tierno588547c2020-07-01 15:30:20 +00001479 # n2vc_redesign STEP 3.1
tierno588547c2020-07-01 15:30:20 +00001480 # find old ee_id if exists
1481 ee_id = vca_deployed.get("ee_id")
tiernod8323042019-08-09 11:32:23 +00001482
tierno588547c2020-07-01 15:30:20 +00001483 # create or register execution environment in VCA
1484 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm"):
quilesj7e13aeb2019-10-08 13:34:55 +02001485
tierno588547c2020-07-01 15:30:20 +00001486 self._write_configuration_status(
1487 nsr_id=nsr_id,
1488 vca_index=vca_index,
1489 status='CREATING',
1490 element_under_configuration=element_under_configuration,
1491 element_type=element_type
1492 )
tiernod8323042019-08-09 11:32:23 +00001493
tierno588547c2020-07-01 15:30:20 +00001494 step = "create execution environment"
1495 self.logger.debug(logging_text + step)
1496 ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
1497 namespace=namespace,
1498 reuse_ee_id=ee_id,
1499 db_dict=db_dict,
tierno89f82902020-07-03 14:52:28 +00001500 config=osm_config,
tierno588547c2020-07-01 15:30:20 +00001501 artifact_path=artifact_path,
1502 vca_type=vca_type)
quilesj3655ae02019-12-12 16:08:35 +00001503
tierno588547c2020-07-01 15:30:20 +00001504 elif vca_type == "native_charm":
1505 step = "Waiting to VM being up and getting IP address"
1506 self.logger.debug(logging_text + step)
1507 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1508 user=None, pub_key=None)
1509 credentials = {"hostname": rw_mgmt_ip}
1510 # get username
1511 username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1512 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1513 # merged. Meanwhile let's get username from initial-config-primitive
tierno4fa7f8e2020-07-08 15:33:55 +00001514 if not username and initial_config_primitive_list:
1515 for config_primitive in initial_config_primitive_list:
tierno588547c2020-07-01 15:30:20 +00001516 for param in config_primitive.get("parameter", ()):
1517 if param["name"] == "ssh-username":
1518 username = param["value"]
1519 break
1520 if not username:
tierno4fa7f8e2020-07-08 15:33:55 +00001521 raise LcmException("Cannot determine the username neither with 'initial-config-primitive' nor with "
tierno588547c2020-07-01 15:30:20 +00001522 "'config-access.ssh-access.default-user'")
1523 credentials["username"] = username
1524 # n2vc_redesign STEP 3.2
quilesj3655ae02019-12-12 16:08:35 +00001525
tierno588547c2020-07-01 15:30:20 +00001526 self._write_configuration_status(
1527 nsr_id=nsr_id,
1528 vca_index=vca_index,
1529 status='REGISTERING',
1530 element_under_configuration=element_under_configuration,
1531 element_type=element_type
1532 )
quilesj3655ae02019-12-12 16:08:35 +00001533
tierno588547c2020-07-01 15:30:20 +00001534 step = "register execution environment {}".format(credentials)
1535 self.logger.debug(logging_text + step)
1536 ee_id = await self.vca_map[vca_type].register_execution_environment(
1537 credentials=credentials, namespace=namespace, db_dict=db_dict)
tierno3bedc9b2019-11-27 15:46:57 +00001538
tierno588547c2020-07-01 15:30:20 +00001539 # for compatibility with MON/POL modules, the need model and application name at database
1540 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1541 ee_id_parts = ee_id.split('.')
1542 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1543 if len(ee_id_parts) >= 2:
1544 model_name = ee_id_parts[0]
1545 application_name = ee_id_parts[1]
1546 db_nsr_update[db_update_entry + "model"] = model_name
1547 db_nsr_update[db_update_entry + "application"] = application_name
tiernod8323042019-08-09 11:32:23 +00001548
1549 # n2vc_redesign STEP 3.3
tiernod8323042019-08-09 11:32:23 +00001550 step = "Install configuration Software"
quilesj3655ae02019-12-12 16:08:35 +00001551
tiernoc231a872020-01-21 08:49:05 +00001552 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001553 nsr_id=nsr_id,
1554 vca_index=vca_index,
1555 status='INSTALLING SW',
1556 element_under_configuration=element_under_configuration,
tierno51183952020-04-03 15:48:18 +00001557 element_type=element_type,
tierno588547c2020-07-01 15:30:20 +00001558 other_update=db_nsr_update
quilesj3655ae02019-12-12 16:08:35 +00001559 )
1560
tierno3bedc9b2019-11-27 15:46:57 +00001561 # TODO check if already done
quilesj7e13aeb2019-10-08 13:34:55 +02001562 self.logger.debug(logging_text + step)
David Garcia18a63322020-04-01 16:14:59 +02001563 config = None
tierno588547c2020-07-01 15:30:20 +00001564 if vca_type == "native_charm":
tierno4fa7f8e2020-07-08 15:33:55 +00001565 config_primitive = next((p for p in initial_config_primitive_list if p["name"] == "config"), None)
1566 if config_primitive:
1567 config = self._map_primitive_params(
1568 config_primitive,
1569 {},
1570 deploy_params
1571 )
tierno588547c2020-07-01 15:30:20 +00001572 num_units = 1
1573 if vca_type == "lxc_proxy_charm":
1574 if element_type == "NS":
1575 num_units = db_nsr.get("config-units") or 1
1576 elif element_type == "VNF":
1577 num_units = db_vnfr.get("config-units") or 1
1578 elif element_type == "VDU":
1579 for v in db_vnfr["vdur"]:
1580 if vdu_id == v["vdu-id-ref"]:
1581 num_units = v.get("config-units") or 1
1582 break
David Garcia06a11f22020-03-25 18:21:37 +01001583
tierno588547c2020-07-01 15:30:20 +00001584 await self.vca_map[vca_type].install_configuration_sw(
1585 ee_id=ee_id,
1586 artifact_path=artifact_path,
1587 db_dict=db_dict,
1588 config=config,
1589 num_units=num_units,
1590 vca_type=vca_type
1591 )
quilesj7e13aeb2019-10-08 13:34:55 +02001592
quilesj63f90042020-01-17 09:53:55 +00001593 # write in db flag of configuration_sw already installed
1594 self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})
1595
1596 # add relations for this VCA (wait for other peers related with this VCA)
tierno588547c2020-07-01 15:30:20 +00001597 await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
1598 vca_index=vca_index, vca_type=vca_type)
quilesj63f90042020-01-17 09:53:55 +00001599
quilesj7e13aeb2019-10-08 13:34:55 +02001600 # if SSH access is required, then get execution environment SSH public
David Garciaf36326c2020-07-10 13:12:44 +02001601 # if native charm we have waited already to VM be UP
1602 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm"):
tierno3bedc9b2019-11-27 15:46:57 +00001603 pub_key = None
1604 user = None
tierno588547c2020-07-01 15:30:20 +00001605 # self.logger.debug("get ssh key block")
tierno3bedc9b2019-11-27 15:46:57 +00001606 if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
tierno588547c2020-07-01 15:30:20 +00001607 # self.logger.debug("ssh key needed")
tierno3bedc9b2019-11-27 15:46:57 +00001608 # Needed to inject a ssh key
1609 user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1610 step = "Install configuration Software, getting public ssh key"
tierno588547c2020-07-01 15:30:20 +00001611 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02001612
tiernoacc90452019-12-10 11:06:54 +00001613 step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
tierno3bedc9b2019-11-27 15:46:57 +00001614 else:
tierno588547c2020-07-01 15:30:20 +00001615 # self.logger.debug("no need to get ssh key")
tierno3bedc9b2019-11-27 15:46:57 +00001616 step = "Waiting to VM being up and getting IP address"
1617 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02001618
tierno3bedc9b2019-11-27 15:46:57 +00001619 # n2vc_redesign STEP 5.1
1620 # wait for RO (ip-address) Insert pub_key into VM
tierno5ee02052019-12-05 19:55:02 +00001621 if vnfr_id:
tiernof24bcdd2020-09-21 14:05:39 +00001622 if kdu_name:
1623 rw_mgmt_ip = await self.wait_kdu_up(logging_text, nsr_id, vnfr_id, kdu_name)
1624 else:
1625 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id,
1626 vdu_index, user=user, pub_key=pub_key)
tierno5ee02052019-12-05 19:55:02 +00001627 else:
1628 rw_mgmt_ip = None # This is for a NS configuration
tierno3bedc9b2019-11-27 15:46:57 +00001629
1630 self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
quilesj7e13aeb2019-10-08 13:34:55 +02001631
tiernoa5088192019-11-26 16:12:53 +00001632 # store rw_mgmt_ip in deploy params for later replacement
quilesj7e13aeb2019-10-08 13:34:55 +02001633 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
tiernod8323042019-08-09 11:32:23 +00001634
1635 # n2vc_redesign STEP 6 Execute initial config primitive
quilesj7e13aeb2019-10-08 13:34:55 +02001636 step = 'execute initial config primitive'
quilesj3655ae02019-12-12 16:08:35 +00001637
1638 # wait for dependent primitives execution (NS -> VNF -> VDU)
tierno5ee02052019-12-05 19:55:02 +00001639 if initial_config_primitive_list:
1640 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
quilesj3655ae02019-12-12 16:08:35 +00001641
1642 # stage, in function of element type: vdu, kdu, vnf or ns
1643 my_vca = vca_deployed_list[vca_index]
1644 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1645 # VDU or KDU
tiernoe876f672020-02-13 14:34:48 +00001646 stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
quilesj3655ae02019-12-12 16:08:35 +00001647 elif my_vca.get("member-vnf-index"):
1648 # VNF
tiernoe876f672020-02-13 14:34:48 +00001649 stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
quilesj3655ae02019-12-12 16:08:35 +00001650 else:
1651 # NS
tiernoe876f672020-02-13 14:34:48 +00001652 stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'
quilesj3655ae02019-12-12 16:08:35 +00001653
tiernoc231a872020-01-21 08:49:05 +00001654 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001655 nsr_id=nsr_id,
1656 vca_index=vca_index,
1657 status='EXECUTING PRIMITIVE'
1658 )
1659
1660 self._write_op_status(
1661 op_id=nslcmop_id,
1662 stage=stage
1663 )
1664
tiernoe876f672020-02-13 14:34:48 +00001665 check_if_terminated_needed = True
tiernod8323042019-08-09 11:32:23 +00001666 for initial_config_primitive in initial_config_primitive_list:
tiernoda6fb102019-11-23 00:36:52 +00001667 # adding information on the vca_deployed if it is a NS execution environment
1668 if not vca_deployed["member-vnf-index"]:
David Garciad4816682019-12-09 14:57:43 +01001669 deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
tiernod8323042019-08-09 11:32:23 +00001670 # TODO check if already done
1671 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
tierno3bedc9b2019-11-27 15:46:57 +00001672
tiernod8323042019-08-09 11:32:23 +00001673 step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
1674 self.logger.debug(logging_text + step)
tierno588547c2020-07-01 15:30:20 +00001675 await self.vca_map[vca_type].exec_primitive(
quilesj7e13aeb2019-10-08 13:34:55 +02001676 ee_id=ee_id,
1677 primitive_name=initial_config_primitive["name"],
1678 params_dict=primitive_params_,
1679 db_dict=db_dict
1680 )
tiernoe876f672020-02-13 14:34:48 +00001681 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
1682 if check_if_terminated_needed:
1683 if config_descriptor.get('terminate-config-primitive'):
1684 self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
1685 check_if_terminated_needed = False
quilesj3655ae02019-12-12 16:08:35 +00001686
tiernod8323042019-08-09 11:32:23 +00001687 # TODO register in database that primitive is done
quilesj7e13aeb2019-10-08 13:34:55 +02001688
tierno89f82902020-07-03 14:52:28 +00001689 # STEP 7 Configure metrics
1690 if vca_type == "helm":
1691 prometheus_jobs = await self.add_prometheus_metrics(
1692 ee_id=ee_id,
1693 artifact_path=artifact_path,
1694 ee_config_descriptor=ee_config_descriptor,
1695 vnfr_id=vnfr_id,
1696 nsr_id=nsr_id,
1697 target_ip=rw_mgmt_ip,
1698 )
1699 if prometheus_jobs:
1700 self.update_db_2("nsrs", nsr_id, {db_update_entry + "prometheus_jobs": prometheus_jobs})
1701
quilesj7e13aeb2019-10-08 13:34:55 +02001702 step = "instantiated at VCA"
1703 self.logger.debug(logging_text + step)
1704
tiernoc231a872020-01-21 08:49:05 +00001705 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001706 nsr_id=nsr_id,
1707 vca_index=vca_index,
1708 status='READY'
1709 )
1710
tiernod8323042019-08-09 11:32:23 +00001711 except Exception as e: # TODO not use Exception but N2VC exception
quilesj3655ae02019-12-12 16:08:35 +00001712 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
tiernoe876f672020-02-13 14:34:48 +00001713 if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
1714 self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
tiernoc231a872020-01-21 08:49:05 +00001715 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001716 nsr_id=nsr_id,
1717 vca_index=vca_index,
1718 status='BROKEN'
1719 )
tiernoe876f672020-02-13 14:34:48 +00001720 raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00001721
quilesj4cda56b2019-12-05 10:02:20 +00001722 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
tiernoa2143262020-03-27 16:20:40 +00001723 error_description: str = None, error_detail: str = None, other_update: dict = None):
tiernoe876f672020-02-13 14:34:48 +00001724 """
1725 Update db_nsr fields.
1726 :param nsr_id:
1727 :param ns_state:
1728 :param current_operation:
1729 :param current_operation_id:
1730 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00001731 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00001732 :param other_update: Other required changes at database if provided, will be cleared
1733 :return:
1734 """
quilesj4cda56b2019-12-05 10:02:20 +00001735 try:
tiernoe876f672020-02-13 14:34:48 +00001736 db_dict = other_update or {}
1737 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1738 db_dict["_admin.current-operation"] = current_operation_id
1739 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
quilesj4cda56b2019-12-05 10:02:20 +00001740 db_dict["currentOperation"] = current_operation
1741 db_dict["currentOperationID"] = current_operation_id
1742 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00001743 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00001744
1745 if ns_state:
1746 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00001747 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001748 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001749 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1750
tiernoe876f672020-02-13 14:34:48 +00001751 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1752 operation_state: str = None, other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001753 try:
tiernoe876f672020-02-13 14:34:48 +00001754 db_dict = other_update or {}
quilesj3655ae02019-12-12 16:08:35 +00001755 db_dict['queuePosition'] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00001756 if isinstance(stage, list):
1757 db_dict['stage'] = stage[0]
1758 db_dict['detailed-status'] = " ".join(stage)
1759 elif stage is not None:
1760 db_dict['stage'] = str(stage)
1761
1762 if error_message is not None:
quilesj3655ae02019-12-12 16:08:35 +00001763 db_dict['errorMessage'] = error_message
tiernoe876f672020-02-13 14:34:48 +00001764 if operation_state is not None:
1765 db_dict['operationState'] = operation_state
1766 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00001767 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001768 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001769 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1770
tierno51183952020-04-03 15:48:18 +00001771 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00001772 try:
tierno51183952020-04-03 15:48:18 +00001773 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00001774 # configurationStatus
1775 config_status = db_nsr.get('configurationStatus')
1776 if config_status:
tierno51183952020-04-03 15:48:18 +00001777 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1778 enumerate(config_status) if v}
quilesj3655ae02019-12-12 16:08:35 +00001779 # update status
tierno51183952020-04-03 15:48:18 +00001780 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001781
tiernoe876f672020-02-13 14:34:48 +00001782 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001783 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1784
quilesj63f90042020-01-17 09:53:55 +00001785 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
tierno51183952020-04-03 15:48:18 +00001786 element_under_configuration: str = None, element_type: str = None,
1787 other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001788
1789 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1790 # .format(vca_index, status))
1791
1792 try:
1793 db_path = 'configurationStatus.{}.'.format(vca_index)
tierno51183952020-04-03 15:48:18 +00001794 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00001795 if status:
1796 db_dict[db_path + 'status'] = status
quilesj3655ae02019-12-12 16:08:35 +00001797 if element_under_configuration:
1798 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
1799 if element_type:
1800 db_dict[db_path + 'elementType'] = element_type
1801 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001802 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001803 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
1804 .format(status, nsr_id, vca_index, e))
quilesj4cda56b2019-12-05 10:02:20 +00001805
tierno38089af2020-04-16 07:56:58 +00001806 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
1807 """
1808 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
1809 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
1810 Database is used because the result can be obtained from a different LCM worker in case of HA.
1811 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
1812 :param db_nslcmop: database content of nslcmop
1813 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
tierno8790a3d2020-04-23 22:49:52 +00001814 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
1815 computed 'vim-account-id'
tierno38089af2020-04-16 07:56:58 +00001816 """
tierno8790a3d2020-04-23 22:49:52 +00001817 modified = False
tierno38089af2020-04-16 07:56:58 +00001818 nslcmop_id = db_nslcmop['_id']
magnussonle9198bb2020-01-21 13:00:51 +01001819 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
1820 if placement_engine == "PLA":
tierno38089af2020-04-16 07:56:58 +00001821 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
1822 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
magnussonle9198bb2020-01-21 13:00:51 +01001823 db_poll_interval = 5
tierno38089af2020-04-16 07:56:58 +00001824 wait = db_poll_interval * 10
magnussonle9198bb2020-01-21 13:00:51 +01001825 pla_result = None
1826 while not pla_result and wait >= 0:
1827 await asyncio.sleep(db_poll_interval)
1828 wait -= db_poll_interval
tierno38089af2020-04-16 07:56:58 +00001829 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
magnussonle9198bb2020-01-21 13:00:51 +01001830 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
1831
1832 if not pla_result:
tierno38089af2020-04-16 07:56:58 +00001833 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
magnussonle9198bb2020-01-21 13:00:51 +01001834
1835 for pla_vnf in pla_result['vnf']:
1836 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
1837 if not pla_vnf.get('vimAccountId') or not vnfr:
1838 continue
tierno8790a3d2020-04-23 22:49:52 +00001839 modified = True
magnussonle9198bb2020-01-21 13:00:51 +01001840 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
tierno38089af2020-04-16 07:56:58 +00001841 # Modifies db_vnfrs
1842 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
tierno8790a3d2020-04-23 22:49:52 +00001843 return modified
magnussonle9198bb2020-01-21 13:00:51 +01001844
1845 def update_nsrs_with_pla_result(self, params):
1846 try:
1847 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
1848 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
1849 except Exception as e:
1850 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
1851
    async def instantiate(self, nsr_id, nslcmop_id):
        """Deploy a NS instance: KDUs, VMs at VIM (via RO) and execution environments (via N2VC).

        Reads the nslcmop, nsr, nsd, vnfr and vnfd records from the database, deploys the
        KDUs, launches the RO/VIM deployment and the charm/EE deployments as tasks collected
        in ``tasks_dict_info``, waits for them in the ``finally`` section and writes the
        resulting operation / ns status back to the database. On completion (success or
        failure) a kafka message "ns instantiated" is written.

        :param nsr_id: ns instance to deploy (nsrs database _id)
        :param nslcmop_id: operation to run (nslcmops database _id)
        :return: None. Results are stored at database.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance (HA deployment) owns this operation; nothing to do here
            self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}     # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            stage[1] = "Sync filesystem from database."
            self.fs.sync()  # TODO, make use of partial sync, only for the needed packages

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update
            )
            self._write_op_status(
                op_id=nslcmop_id,
                stage=stage,
                queuePosition=0
            )

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # operation parameters may override the default deployment timeout
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds_ref = {}     # every vnfd data indexed by vnf name
            db_vnfds = {}         # every vnfd data indexed by vnf id
            db_vnfds_index = {}   # every vnfd data indexed by vnf member-index

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr   # vnf's dict indexed by member-index: '1', '2', etc
                vnfd_id = vnfr["vnfd-id"]                       # vnfd uuid for this vnf
                vnfd_ref = vnfr["vnfd-ref"]                     # vnfd name for this vnf

                # if we haven't this vnfd, read it from db
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(vnfd_id, vnfd_ref)
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds_ref[vnfd_ref] = vnfd     # vnfd's indexed by name
                    db_vnfds[vnfd_id] = vnfd          # vnfd's indexed by id
                db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id]  # vnfd's indexed by member-index

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            # ensure _admin.deployed.RO.vnfd exists and is a list
            if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"})

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
            self._write_op_status(
                op_id=nslcmop_id,
                stage=stage
            )

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # launch RO deployment as a background task; awaited at the finally section
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds_ref=db_vnfds_ref,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            # get_iterable() returns a value from a dict or empty tuple if key does not exist
            for c_vnf in get_iterable(nsd, "constituent-vnfd"):
                vnfd_id = c_vnf["vnfd-id-ref"]
                vnfd = db_vnfds_ref[vnfd_id]
                member_vnf_index = str(c_vnf["member-vnf-index"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy())

                # deploy the vnf-level charm, if any
                descriptor_config = vnfd.get("vnf-configuration")
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_iterable(vnfd, 'vdu'):
                    vdu_id = vdud["id"]
                    descriptor_config = vdud.get('vdu-configuration')
                    vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
                    # vdu-level additionalParams override the vnf-level ones
                    if vdur.get("additionalParams"):
                        deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    if descriptor_config:
                        # look for vdu index in the db_vnfr["vdu"] section
                        # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
                        #     if vdur["vdu-id-ref"] == vdu_id:
                        #         break
                        # else:
                        #     raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
                        #                        "member_vnf_index={}".format(vdu_id, member_vnf_index))
                        # vdu_name = vdur.get("name")
                        vdu_name = None
                        kdu_name = None
                        # one deployment per vdu replica (descriptor "count", default 1)
                        for vdu_index in range(int(vdud.get("count", 1))):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage
                            )
                # Deploy charms for each KDU that supports one.
                for kdud in get_iterable(vnfd, 'kdu'):
                    kdu_name = kdud["name"]
                    descriptor_config = kdud.get('kdu-configuration')
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        # look for vdu index in the db_vnfr["vdu"] section
                        # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
                        #     if vdur["vdu-id-ref"] == vdu_id:
                        #         break
                        # else:
                        #     raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
                        #                        "member_vnf_index={}".format(vdu_id, member_vnf_index))
                        # vdu_name = vdur.get("name")
                        # vdu_name = None

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params = self._format_additional_params(db_nsr["additionalParamsForNs"].copy())
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage
                )

            # rest of staff will be done at finally

        except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
            self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
                                                             stage, nslcmop_id, nsr_id=nsr_id)
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                # NOTE: reuses the 'exc' name; only its text is appended to error_list
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
                error_description_nsr = 'Operation: INSTANTIATING.{}, {}'.format(nslcmop_id, stage[0])

                db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify instantiation result through kafka
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                                   "operationState": nslcmop_operation_state},
                                            loop=self.loop)
                except Exception as e:
                    self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2277
tierno588547c2020-07-01 15:30:20 +00002278 async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
2279 timeout: int = 3600, vca_type: str = None) -> bool:
quilesj63f90042020-01-17 09:53:55 +00002280
2281 # steps:
2282 # 1. find all relations for this VCA
2283 # 2. wait for other peers related
2284 # 3. add relations
2285
2286 try:
tierno588547c2020-07-01 15:30:20 +00002287 vca_type = vca_type or "lxc_proxy_charm"
quilesj63f90042020-01-17 09:53:55 +00002288
2289 # STEP 1: find all relations for this VCA
2290
2291 # read nsr record
2292 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garcia171f3542020-05-21 16:41:07 +02002293 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
quilesj63f90042020-01-17 09:53:55 +00002294
2295 # this VCA data
2296 my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]
2297
2298 # read all ns-configuration relations
2299 ns_relations = list()
David Garcia171f3542020-05-21 16:41:07 +02002300 db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
quilesj63f90042020-01-17 09:53:55 +00002301 if db_ns_relations:
2302 for r in db_ns_relations:
2303 # check if this VCA is in the relation
2304 if my_vca.get('member-vnf-index') in\
2305 (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2306 ns_relations.append(r)
2307
2308 # read all vnf-configuration relations
2309 vnf_relations = list()
2310 db_vnfd_list = db_nsr.get('vnfd-id')
2311 if db_vnfd_list:
2312 for vnfd in db_vnfd_list:
2313 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
2314 db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
2315 if db_vnf_relations:
2316 for r in db_vnf_relations:
2317 # check if this VCA is in the relation
2318 if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2319 vnf_relations.append(r)
2320
2321 # if no relations, terminate
2322 if not ns_relations and not vnf_relations:
2323 self.logger.debug(logging_text + ' No relations')
2324 return True
2325
2326 self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))
2327
2328 # add all relations
2329 start = time()
2330 while True:
2331 # check timeout
2332 now = time()
2333 if now - start >= timeout:
2334 self.logger.error(logging_text + ' : timeout adding relations')
2335 return False
2336
2337 # reload nsr from database (we need to update record: _admin.deloyed.VCA)
2338 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2339
2340 # for each defined NS relation, find the VCA's related
2341 for r in ns_relations:
2342 from_vca_ee_id = None
2343 to_vca_ee_id = None
2344 from_vca_endpoint = None
2345 to_vca_endpoint = None
2346 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2347 for vca in vca_list:
2348 if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
2349 and vca.get('config_sw_installed'):
2350 from_vca_ee_id = vca.get('ee_id')
2351 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2352 if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
2353 and vca.get('config_sw_installed'):
2354 to_vca_ee_id = vca.get('ee_id')
2355 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2356 if from_vca_ee_id and to_vca_ee_id:
2357 # add relation
tierno588547c2020-07-01 15:30:20 +00002358 await self.vca_map[vca_type].add_relation(
quilesj63f90042020-01-17 09:53:55 +00002359 ee_id_1=from_vca_ee_id,
2360 ee_id_2=to_vca_ee_id,
2361 endpoint_1=from_vca_endpoint,
2362 endpoint_2=to_vca_endpoint)
2363 # remove entry from relations list
2364 ns_relations.remove(r)
2365 else:
2366 # check failed peers
2367 try:
2368 vca_status_list = db_nsr.get('configurationStatus')
2369 if vca_status_list:
2370 for i in range(len(vca_list)):
2371 vca = vca_list[i]
2372 vca_status = vca_status_list[i]
2373 if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
2374 if vca_status.get('status') == 'BROKEN':
2375 # peer broken: remove relation from list
2376 ns_relations.remove(r)
2377 if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
2378 if vca_status.get('status') == 'BROKEN':
2379 # peer broken: remove relation from list
2380 ns_relations.remove(r)
2381 except Exception:
2382 # ignore
2383 pass
2384
2385 # for each defined VNF relation, find the VCA's related
2386 for r in vnf_relations:
2387 from_vca_ee_id = None
2388 to_vca_ee_id = None
2389 from_vca_endpoint = None
2390 to_vca_endpoint = None
2391 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2392 for vca in vca_list:
David Garciab3972b92020-09-09 15:40:44 +02002393 key_to_check = "vdu_id"
2394 if vca.get("vdu_id") is None:
2395 key_to_check = "vnfd_id"
2396 if vca.get(key_to_check) == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
quilesj63f90042020-01-17 09:53:55 +00002397 from_vca_ee_id = vca.get('ee_id')
2398 from_vca_endpoint = r.get('entities')[0].get('endpoint')
David Garciab3972b92020-09-09 15:40:44 +02002399 if vca.get(key_to_check) == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
quilesj63f90042020-01-17 09:53:55 +00002400 to_vca_ee_id = vca.get('ee_id')
2401 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2402 if from_vca_ee_id and to_vca_ee_id:
2403 # add relation
tierno588547c2020-07-01 15:30:20 +00002404 await self.vca_map[vca_type].add_relation(
quilesj63f90042020-01-17 09:53:55 +00002405 ee_id_1=from_vca_ee_id,
2406 ee_id_2=to_vca_ee_id,
2407 endpoint_1=from_vca_endpoint,
2408 endpoint_2=to_vca_endpoint)
2409 # remove entry from relations list
2410 vnf_relations.remove(r)
2411 else:
2412 # check failed peers
2413 try:
2414 vca_status_list = db_nsr.get('configurationStatus')
2415 if vca_status_list:
2416 for i in range(len(vca_list)):
2417 vca = vca_list[i]
2418 vca_status = vca_status_list[i]
2419 if vca.get('vdu_id') == r.get('entities')[0].get('id'):
2420 if vca_status.get('status') == 'BROKEN':
2421 # peer broken: remove relation from list
2422 ns_relations.remove(r)
2423 if vca.get('vdu_id') == r.get('entities')[1].get('id'):
2424 if vca_status.get('status') == 'BROKEN':
2425 # peer broken: remove relation from list
2426 ns_relations.remove(r)
2427 except Exception:
2428 # ignore
2429 pass
2430
2431 # wait for next try
2432 await asyncio.sleep(5.0)
2433
2434 if not ns_relations and not vnf_relations:
2435 self.logger.debug('Relations added')
2436 break
2437
2438 return True
2439
2440 except Exception as e:
2441 self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
2442 return False
2443
    async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdud: dict,
                           vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600):
        """Install one KDU on a K8s cluster and update nsr/vnfr database records with the result.

        Installs the kdu model through the connector selected by k8s_instance_info
        "k8scluster-type", stores the returned kdu-instance at nsr_db_path, obtains the
        deployed services to fill kdur services/ip-address at the vnfr, and finally runs
        the non-juju initial-config-primitives declared at kdu-configuration.

        :param nsr_id: nsrs database _id where progress/status is written
        :param nsr_db_path: dotted path inside the nsr record for this KDU deployment
        :param vnfr_data: vnfr record (only its "_id" is read here) to update kdur info
        :param kdu_index: position of this KDU inside the vnfr "kdur" list
        :param kdud: kdu descriptor (provides "service" list and "kdu-configuration")
        :param vnfd: vnf descriptor, used to check mgmt-interface cp against the service
        :param k8s_instance_info: dict with k8scluster-type/-uuid, kdu-model, kdu-name, namespace
        :param k8params: parameters passed to the connector install
        :param timeout: seconds for install and for each primitive execution
        :return: the kdu_instance identifier returned by the connector install
        :raises: re-raises any exception after marking the kdur status as ERROR
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {"collection": "nsrs",
                               "filter": {"_id": nsr_id},
                               "path": nsr_db_path}

            kdu_instance = await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"])
            self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"])

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                # descriptor services flagged as mgmt-service provide the KDU mgmt ip
                mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            # prefer external_ip over cluster_ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get("external-connection-point-ref")
                            if service_external_cp:
                                if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp:
                                    vnfr_update_dict["ip-address"] = ip

                            break
                    else:
                        # for/else: runs only when no deployed service matched this mgmt_service
                        self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"]))

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial-config-primitives only when not handled by juju
            kdu_config = kdud.get("kdu-configuration")
            if kdu_config and kdu_config.get("initial-config-primitive") and kdu_config.get("juju") is None:
                initial_config_primitive_list = kdu_config.get("initial-config-primitive")
                # execute primitives in ascending "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, {})

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_, db_dict={}),
                        timeout=timeout)

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)})
                self.update_db_2("vnfrs", vnfr_data.get("_id"), {"kdur.{}.status".format(kdu_index): "ERROR"})
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
tiernob9018152020-04-16 14:18:24 +00002527
tiernoe876f672020-02-13 14:34:48 +00002528 async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
calvinosanch9f9c6f22019-11-04 13:37:39 +01002529 # Launch kdus if present in the descriptor
tierno626e0152019-11-29 14:16:16 +00002530
2531 k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}}
2532
tierno430008e2020-07-20 09:05:51 +00002533 async def _get_cluster_id(cluster_id, cluster_type):
tierno626e0152019-11-29 14:16:16 +00002534 nonlocal k8scluster_id_2_uuic
2535 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
2536 return k8scluster_id_2_uuic[cluster_type][cluster_id]
2537
tierno430008e2020-07-20 09:05:51 +00002538 # check if K8scluster is creating and wait look if previous tasks in process
2539 task_name, task_dependency = self.lcm_tasks.lookfor_related("k8scluster", cluster_id)
2540 if task_dependency:
2541 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(task_name, cluster_id)
2542 self.logger.debug(logging_text + text)
2543 await asyncio.wait(task_dependency, timeout=3600)
2544
tierno626e0152019-11-29 14:16:16 +00002545 db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
2546 if not db_k8scluster:
2547 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
tierno430008e2020-07-20 09:05:51 +00002548
tierno626e0152019-11-29 14:16:16 +00002549 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
2550 if not k8s_id:
tierno923e16c2020-07-14 10:46:57 +00002551 raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id,
2552 cluster_type))
tierno626e0152019-11-29 14:16:16 +00002553 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
2554 return k8s_id
2555
2556 logging_text += "Deploy kdus: "
tiernoe876f672020-02-13 14:34:48 +00002557 step = ""
calvinosanch9f9c6f22019-11-04 13:37:39 +01002558 try:
tierno626e0152019-11-29 14:16:16 +00002559 db_nsr_update = {"_admin.deployed.K8s": []}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002560 self.update_db_2("nsrs", nsr_id, db_nsr_update)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002561
tierno626e0152019-11-29 14:16:16 +00002562 index = 0
tiernoe876f672020-02-13 14:34:48 +00002563 updated_cluster_list = []
2564
tierno626e0152019-11-29 14:16:16 +00002565 for vnfr_data in db_vnfrs.values():
lloretgalleg80ad9212020-07-08 07:53:22 +00002566 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
2567 # Step 0: Prepare and set parameters
tierno626e0152019-11-29 14:16:16 +00002568 desc_params = self._format_additional_params(kdur.get("additionalParams"))
quilesjacde94f2020-01-23 10:07:08 +00002569 vnfd_id = vnfr_data.get('vnfd-id')
lloretgalleg80ad9212020-07-08 07:53:22 +00002570 kdud = next(kdud for kdud in db_vnfds[vnfd_id]["kdu"] if kdud["name"] == kdur["kdu-name"])
tiernode1584f2020-04-07 09:07:33 +00002571 namespace = kdur.get("k8s-namespace")
tierno626e0152019-11-29 14:16:16 +00002572 if kdur.get("helm-chart"):
2573 kdumodel = kdur["helm-chart"]
tiernoe876f672020-02-13 14:34:48 +00002574 k8sclustertype = "helm-chart"
tierno626e0152019-11-29 14:16:16 +00002575 elif kdur.get("juju-bundle"):
2576 kdumodel = kdur["juju-bundle"]
tiernoe876f672020-02-13 14:34:48 +00002577 k8sclustertype = "juju-bundle"
tierno626e0152019-11-29 14:16:16 +00002578 else:
tiernoe876f672020-02-13 14:34:48 +00002579 raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
2580 "juju-bundle. Maybe an old NBI version is running".
2581 format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
quilesjacde94f2020-01-23 10:07:08 +00002582 # check if kdumodel is a file and exists
2583 try:
tierno51183952020-04-03 15:48:18 +00002584 storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
2585 if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts
2586 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
Dominik Fleischmann010c0e72020-05-18 15:19:11 +02002587 filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
tierno51183952020-04-03 15:48:18 +00002588 kdumodel)
2589 if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
2590 kdumodel = self.fs.path + filename
2591 except (asyncio.TimeoutError, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00002592 raise
2593 except Exception: # it is not a file
quilesjacde94f2020-01-23 10:07:08 +00002594 pass
lloretgallegedc5f332020-02-20 11:50:50 +01002595
tiernoe876f672020-02-13 14:34:48 +00002596 k8s_cluster_id = kdur["k8s-cluster"]["id"]
2597 step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
tierno430008e2020-07-20 09:05:51 +00002598 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
lloretgallegedc5f332020-02-20 11:50:50 +01002599
lloretgalleg80ad9212020-07-08 07:53:22 +00002600 # Synchronize repos
tiernoe876f672020-02-13 14:34:48 +00002601 if k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list:
2602 del_repo_list, added_repo_dict = await asyncio.ensure_future(
2603 self.k8sclusterhelm.synchronize_repos(cluster_uuid=cluster_uuid))
2604 if del_repo_list or added_repo_dict:
2605 unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
2606 updated = {'_admin.helm_charts_added.' +
2607 item: name for item, name in added_repo_dict.items()}
2608 self.logger.debug(logging_text + "repos synchronized on k8s cluster '{}' to_delete: {}, "
2609 "to_add: {}".format(k8s_cluster_id, del_repo_list,
2610 added_repo_dict))
2611 self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
2612 updated_cluster_list.append(cluster_uuid)
lloretgallegedc5f332020-02-20 11:50:50 +01002613
lloretgalleg80ad9212020-07-08 07:53:22 +00002614 # Instantiate kdu
tiernoe876f672020-02-13 14:34:48 +00002615 step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
2616 kdur["kdu-name"], k8s_cluster_id)
lloretgalleg80ad9212020-07-08 07:53:22 +00002617 k8s_instance_info = {"kdu-instance": None,
2618 "k8scluster-uuid": cluster_uuid,
2619 "k8scluster-type": k8sclustertype,
2620 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
2621 "kdu-name": kdur["kdu-name"],
2622 "kdu-model": kdumodel,
2623 "namespace": namespace}
tiernob9018152020-04-16 14:18:24 +00002624 db_path = "_admin.deployed.K8s.{}".format(index)
lloretgalleg80ad9212020-07-08 07:53:22 +00002625 db_nsr_update[db_path] = k8s_instance_info
tierno626e0152019-11-29 14:16:16 +00002626 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tierno626e0152019-11-29 14:16:16 +00002627
tiernoa2143262020-03-27 16:20:40 +00002628 task = asyncio.ensure_future(
tiernof24bcdd2020-09-21 14:05:39 +00002629 self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, db_vnfds[vnfd_id],
lloretgalleg80ad9212020-07-08 07:53:22 +00002630 k8s_instance_info, k8params=desc_params, timeout=600))
tiernoe876f672020-02-13 14:34:48 +00002631 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
tiernoa2143262020-03-27 16:20:40 +00002632 task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
tiernoe876f672020-02-13 14:34:48 +00002633
tierno626e0152019-11-29 14:16:16 +00002634 index += 1
quilesjdd799ac2020-01-23 16:31:11 +00002635
tiernoe876f672020-02-13 14:34:48 +00002636 except (LcmException, asyncio.CancelledError):
2637 raise
calvinosanch9f9c6f22019-11-04 13:37:39 +01002638 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00002639 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
2640 if isinstance(e, (N2VCException, DbException)):
2641 self.logger.error(logging_text + msg)
2642 else:
2643 self.logger.critical(logging_text + msg, exc_info=True)
quilesjdd799ac2020-01-23 16:31:11 +00002644 raise LcmException(msg)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002645 finally:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002646 if db_nsr_update:
2647 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00002648
quilesj7e13aeb2019-10-08 13:34:55 +02002649 def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002650 kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
tiernoe876f672020-02-13 14:34:48 +00002651 base_folder, task_instantiation_info, stage):
quilesj7e13aeb2019-10-08 13:34:55 +02002652 # launch instantiate_N2VC in a asyncio task and register task object
2653 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
2654 # if not found, create one entry and update database
quilesj7e13aeb2019-10-08 13:34:55 +02002655 # fill db_nsr._admin.deployed.VCA.<index>
tierno588547c2020-07-01 15:30:20 +00002656
2657 self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
2658 if descriptor_config.get("juju"): # There is one execution envioronment of type juju
2659 ee_list = [descriptor_config]
2660 elif descriptor_config.get("execution-environment-list"):
2661 ee_list = descriptor_config.get("execution-environment-list")
2662 else: # other types as script are not supported
2663 ee_list = []
2664
2665 for ee_item in ee_list:
2666 self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'),
2667 ee_item.get("helm-chart")))
tierno4fa7f8e2020-07-08 15:33:55 +00002668 ee_descriptor_id = ee_item.get("id")
tierno588547c2020-07-01 15:30:20 +00002669 if ee_item.get("juju"):
2670 vca_name = ee_item['juju'].get('charm')
2671 vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm"
2672 if ee_item['juju'].get('cloud') == "k8s":
2673 vca_type = "k8s_proxy_charm"
2674 elif ee_item['juju'].get('proxy') is False:
2675 vca_type = "native_charm"
2676 elif ee_item.get("helm-chart"):
2677 vca_name = ee_item['helm-chart']
2678 vca_type = "helm"
2679 else:
2680 self.logger.debug(logging_text + "skipping non juju neither charm configuration")
quilesj7e13aeb2019-10-08 13:34:55 +02002681 continue
quilesj3655ae02019-12-12 16:08:35 +00002682
tierno588547c2020-07-01 15:30:20 +00002683 vca_index = -1
2684 for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
2685 if not vca_deployed:
2686 continue
2687 if vca_deployed.get("member-vnf-index") == member_vnf_index and \
2688 vca_deployed.get("vdu_id") == vdu_id and \
2689 vca_deployed.get("kdu_name") == kdu_name and \
tierno4fa7f8e2020-07-08 15:33:55 +00002690 vca_deployed.get("vdu_count_index", 0) == vdu_index and \
2691 vca_deployed.get("ee_descriptor_id") == ee_descriptor_id:
tierno588547c2020-07-01 15:30:20 +00002692 break
2693 else:
2694 # not found, create one.
tierno4fa7f8e2020-07-08 15:33:55 +00002695 target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
2696 if vdu_id:
2697 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
2698 elif kdu_name:
2699 target += "/kdu/{}".format(kdu_name)
tierno588547c2020-07-01 15:30:20 +00002700 vca_deployed = {
tierno4fa7f8e2020-07-08 15:33:55 +00002701 "target_element": target,
2702 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
tierno588547c2020-07-01 15:30:20 +00002703 "member-vnf-index": member_vnf_index,
2704 "vdu_id": vdu_id,
2705 "kdu_name": kdu_name,
2706 "vdu_count_index": vdu_index,
2707 "operational-status": "init", # TODO revise
2708 "detailed-status": "", # TODO revise
2709 "step": "initial-deploy", # TODO revise
2710 "vnfd_id": vnfd_id,
2711 "vdu_name": vdu_name,
tierno4fa7f8e2020-07-08 15:33:55 +00002712 "type": vca_type,
2713 "ee_descriptor_id": ee_descriptor_id
tierno588547c2020-07-01 15:30:20 +00002714 }
2715 vca_index += 1
quilesj3655ae02019-12-12 16:08:35 +00002716
tierno588547c2020-07-01 15:30:20 +00002717 # create VCA and configurationStatus in db
2718 db_dict = {
2719 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
2720 "configurationStatus.{}".format(vca_index): dict()
2721 }
2722 self.update_db_2("nsrs", nsr_id, db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02002723
tierno588547c2020-07-01 15:30:20 +00002724 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
2725
2726 # Launch task
2727 task_n2vc = asyncio.ensure_future(
2728 self.instantiate_N2VC(
2729 logging_text=logging_text,
2730 vca_index=vca_index,
2731 nsi_id=nsi_id,
2732 db_nsr=db_nsr,
2733 db_vnfr=db_vnfr,
2734 vdu_id=vdu_id,
2735 kdu_name=kdu_name,
2736 vdu_index=vdu_index,
2737 deploy_params=deploy_params,
2738 config_descriptor=descriptor_config,
2739 base_folder=base_folder,
2740 nslcmop_id=nslcmop_id,
2741 stage=stage,
2742 vca_type=vca_type,
tierno89f82902020-07-03 14:52:28 +00002743 vca_name=vca_name,
2744 ee_config_descriptor=ee_item
tierno588547c2020-07-01 15:30:20 +00002745 )
quilesj7e13aeb2019-10-08 13:34:55 +02002746 )
tierno588547c2020-07-01 15:30:20 +00002747 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
2748 task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
2749 member_vnf_index or "", vdu_id or "")
tiernobaa51102018-12-14 13:16:18 +00002750
tiernoc9556972019-07-05 15:25:25 +00002751 @staticmethod
tierno4fa7f8e2020-07-08 15:33:55 +00002752 def _get_terminate_config_primitive(primitive_list, vca_deployed):
2753 """ Get a sorted terminate config primitive list. In case ee_descriptor_id is present at vca_deployed,
2754 it get only those primitives for this execution envirom"""
2755
2756 primitive_list = primitive_list or []
2757 # filter primitives by ee_descriptor_id
2758 ee_descriptor_id = vca_deployed.get("ee_descriptor_id")
2759 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
2760
2761 if primitive_list:
2762 primitive_list.sort(key=lambda val: int(val['seq']))
2763
2764 return primitive_list
kuuse0ca67472019-05-13 15:59:27 +02002765
2766 @staticmethod
2767 def _create_nslcmop(nsr_id, operation, params):
2768 """
2769 Creates a ns-lcm-opp content to be stored at database.
2770 :param nsr_id: internal id of the instance
2771 :param operation: instantiate, terminate, scale, action, ...
2772 :param params: user parameters for the operation
2773 :return: dictionary following SOL005 format
2774 """
2775 # Raise exception if invalid arguments
2776 if not (nsr_id and operation and params):
2777 raise LcmException(
2778 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
2779 now = time()
2780 _id = str(uuid4())
2781 nslcmop = {
2782 "id": _id,
2783 "_id": _id,
2784 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
2785 "operationState": "PROCESSING",
2786 "statusEnteredTime": now,
2787 "nsInstanceId": nsr_id,
2788 "lcmOperationType": operation,
2789 "startTime": now,
2790 "isAutomaticInvocation": False,
2791 "operationParams": params,
2792 "isCancelPending": False,
2793 "links": {
2794 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
2795 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
2796 }
2797 }
2798 return nslcmop
2799
calvinosanch9f9c6f22019-11-04 13:37:39 +01002800 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00002801 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002802 for key, value in params.items():
2803 if str(value).startswith("!!yaml "):
2804 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01002805 return params
2806
kuuse8b998e42019-07-30 15:22:16 +02002807 def _get_terminate_primitive_params(self, seq, vnf_index):
2808 primitive = seq.get('name')
2809 primitive_params = {}
2810 params = {
2811 "member_vnf_index": vnf_index,
2812 "primitive": primitive,
2813 "primitive_params": primitive_params,
2814 }
2815 desc_params = {}
2816 return self._map_primitive_params(seq, params, desc_params)
2817
kuuseac3a8882019-10-03 10:48:06 +02002818 # sub-operations
2819
tierno51183952020-04-03 15:48:18 +00002820 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
2821 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
2822 if op.get('operationState') == 'COMPLETED':
kuuseac3a8882019-10-03 10:48:06 +02002823 # b. Skip sub-operation
2824 # _ns_execute_primitive() or RO.create_action() will NOT be executed
2825 return self.SUBOPERATION_STATUS_SKIP
2826 else:
tierno7c4e24c2020-05-13 08:41:35 +00002827 # c. retry executing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02002828 # The sub-operation exists, and operationState != 'COMPLETED'
tierno7c4e24c2020-05-13 08:41:35 +00002829 # Update operationState = 'PROCESSING' to indicate a retry.
kuuseac3a8882019-10-03 10:48:06 +02002830 operationState = 'PROCESSING'
2831 detailed_status = 'In progress'
2832 self._update_suboperation_status(
2833 db_nslcmop, op_index, operationState, detailed_status)
2834 # Return the sub-operation index
2835 # _ns_execute_primitive() or RO.create_action() will be called from scale()
2836 # with arguments extracted from the sub-operation
2837 return op_index
2838
2839 # Find a sub-operation where all keys in a matching dictionary must match
2840 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
2841 def _find_suboperation(self, db_nslcmop, match):
tierno7c4e24c2020-05-13 08:41:35 +00002842 if db_nslcmop and match:
kuuseac3a8882019-10-03 10:48:06 +02002843 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
2844 for i, op in enumerate(op_list):
2845 if all(op.get(k) == match[k] for k in match):
2846 return i
2847 return self.SUBOPERATION_STATUS_NOT_FOUND
2848
2849 # Update status for a sub-operation given its index
2850 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
2851 # Update DB for HA tasks
2852 q_filter = {'_id': db_nslcmop['_id']}
2853 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
2854 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
2855 self.db.set_one("nslcmops",
2856 q_filter=q_filter,
2857 update_dict=update_dict,
2858 fail_on_empty=False)
2859
2860 # Add sub-operation, return the index of the added sub-operation
2861 # Optionally, set operationState, detailed-status, and operationType
2862 # Status and type are currently set for 'scale' sub-operations:
2863 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
2864 # 'detailed-status' : status message
2865 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
2866 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
quilesj7e13aeb2019-10-08 13:34:55 +02002867 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
2868 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
kuuseac3a8882019-10-03 10:48:06 +02002869 RO_nsr_id=None, RO_scaling_info=None):
tiernoe876f672020-02-13 14:34:48 +00002870 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02002871 return self.SUBOPERATION_STATUS_NOT_FOUND
2872 # Get the "_admin.operations" list, if it exists
2873 db_nslcmop_admin = db_nslcmop.get('_admin', {})
2874 op_list = db_nslcmop_admin.get('operations')
2875 # Create or append to the "_admin.operations" list
kuuse8b998e42019-07-30 15:22:16 +02002876 new_op = {'member_vnf_index': vnf_index,
2877 'vdu_id': vdu_id,
2878 'vdu_count_index': vdu_count_index,
2879 'primitive': primitive,
2880 'primitive_params': mapped_primitive_params}
kuuseac3a8882019-10-03 10:48:06 +02002881 if operationState:
2882 new_op['operationState'] = operationState
2883 if detailed_status:
2884 new_op['detailed-status'] = detailed_status
2885 if operationType:
2886 new_op['lcmOperationType'] = operationType
2887 if RO_nsr_id:
2888 new_op['RO_nsr_id'] = RO_nsr_id
2889 if RO_scaling_info:
2890 new_op['RO_scaling_info'] = RO_scaling_info
2891 if not op_list:
2892 # No existing operations, create key 'operations' with current operation as first list element
2893 db_nslcmop_admin.update({'operations': [new_op]})
2894 op_list = db_nslcmop_admin.get('operations')
2895 else:
2896 # Existing operations, append operation to list
2897 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02002898
kuuseac3a8882019-10-03 10:48:06 +02002899 db_nslcmop_update = {'_admin.operations': op_list}
2900 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
2901 op_index = len(op_list) - 1
2902 return op_index
2903
2904 # Helper methods for scale() sub-operations
2905
2906 # pre-scale/post-scale:
2907 # Check for 3 different cases:
2908 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
2909 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
tierno7c4e24c2020-05-13 08:41:35 +00002910 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
quilesj7e13aeb2019-10-08 13:34:55 +02002911 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
2912 operationType, RO_nsr_id=None, RO_scaling_info=None):
kuuseac3a8882019-10-03 10:48:06 +02002913 # Find this sub-operation
tierno7c4e24c2020-05-13 08:41:35 +00002914 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002915 operationType = 'SCALE-RO'
2916 match = {
2917 'member_vnf_index': vnf_index,
2918 'RO_nsr_id': RO_nsr_id,
2919 'RO_scaling_info': RO_scaling_info,
2920 }
2921 else:
2922 match = {
2923 'member_vnf_index': vnf_index,
2924 'primitive': vnf_config_primitive,
2925 'primitive_params': primitive_params,
2926 'lcmOperationType': operationType
2927 }
2928 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00002929 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02002930 # a. New sub-operation
2931 # The sub-operation does not exist, add it.
2932 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
2933 # The following parameters are set to None for all kind of scaling:
2934 vdu_id = None
2935 vdu_count_index = None
2936 vdu_name = None
tierno51183952020-04-03 15:48:18 +00002937 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002938 vnf_config_primitive = None
2939 primitive_params = None
2940 else:
2941 RO_nsr_id = None
2942 RO_scaling_info = None
2943 # Initial status for sub-operation
2944 operationState = 'PROCESSING'
2945 detailed_status = 'In progress'
2946 # Add sub-operation for pre/post-scaling (zero or more operations)
2947 self._add_suboperation(db_nslcmop,
2948 vnf_index,
2949 vdu_id,
2950 vdu_count_index,
2951 vdu_name,
2952 vnf_config_primitive,
2953 primitive_params,
2954 operationState,
2955 detailed_status,
2956 operationType,
2957 RO_nsr_id,
2958 RO_scaling_info)
2959 return self.SUBOPERATION_STATUS_NEW
2960 else:
2961 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
2962 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00002963 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02002964
preethika.pdf7d8e02019-12-10 13:10:48 +00002965 # Function to return execution_environment id
2966
2967 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00002968 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00002969 for vca in vca_deployed_list:
2970 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
2971 return vca["ee_id"]
2972
tierno588547c2020-07-01 15:30:20 +00002973 async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
2974 vca_index, destroy_ee=True, exec_primitives=True):
tiernoe876f672020-02-13 14:34:48 +00002975 """
2976 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
2977 :param logging_text:
2978 :param db_nslcmop:
2979 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
2980 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
2981 :param vca_index: index in the database _admin.deployed.VCA
2982 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
tierno588547c2020-07-01 15:30:20 +00002983 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
2984 not executed properly
tiernoe876f672020-02-13 14:34:48 +00002985 :return: None or exception
2986 """
tiernoe876f672020-02-13 14:34:48 +00002987
tierno588547c2020-07-01 15:30:20 +00002988 self.logger.debug(
2989 logging_text + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
2990 vca_index, vca_deployed, config_descriptor, destroy_ee
2991 )
2992 )
2993
2994 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
2995
2996 # execute terminate_primitives
2997 if exec_primitives:
tierno4fa7f8e2020-07-08 15:33:55 +00002998 terminate_primitives = self._get_terminate_config_primitive(
2999 config_descriptor.get("terminate-config-primitive"), vca_deployed)
tierno588547c2020-07-01 15:30:20 +00003000 vdu_id = vca_deployed.get("vdu_id")
3001 vdu_count_index = vca_deployed.get("vdu_count_index")
3002 vdu_name = vca_deployed.get("vdu_name")
3003 vnf_index = vca_deployed.get("member-vnf-index")
3004 if terminate_primitives and vca_deployed.get("needed_terminate"):
tierno588547c2020-07-01 15:30:20 +00003005 for seq in terminate_primitives:
3006 # For each sequence in list, get primitive and call _ns_execute_primitive()
3007 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
3008 vnf_index, seq.get("name"))
3009 self.logger.debug(logging_text + step)
3010 # Create the primitive for each sequence, i.e. "primitive": "touch"
3011 primitive = seq.get('name')
3012 mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)
tierno588547c2020-07-01 15:30:20 +00003013
3014 # Add sub-operation
3015 self._add_suboperation(db_nslcmop,
3016 vnf_index,
3017 vdu_id,
3018 vdu_count_index,
3019 vdu_name,
3020 primitive,
3021 mapped_primitive_params)
3022 # Sub-operations: Call _ns_execute_primitive() instead of action()
3023 try:
3024 result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
3025 mapped_primitive_params,
3026 vca_type=vca_type)
3027 except LcmException:
3028 # this happens when VCA is not deployed. In this case it is not needed to terminate
3029 continue
3030 result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
3031 if result not in result_ok:
3032 raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
3033 "error {}".format(seq.get("name"), vnf_index, result_detail))
3034 # set that this VCA do not need terminated
3035 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
3036 self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})
tiernoe876f672020-02-13 14:34:48 +00003037
tierno89f82902020-07-03 14:52:28 +00003038 if vca_deployed.get("prometheus_jobs") and self.prometheus:
3039 await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])
3040
tiernoe876f672020-02-13 14:34:48 +00003041 if destroy_ee:
tierno588547c2020-07-01 15:30:20 +00003042 await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"])
kuuse0ca67472019-05-13 15:59:27 +02003043
tierno51183952020-04-03 15:48:18 +00003044 async def _delete_all_N2VC(self, db_nsr: dict):
3045 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
3046 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00003047 try:
3048 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
3049 except N2VCNotFound: # already deleted. Skip
3050 pass
tierno51183952020-04-03 15:48:18 +00003051 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
quilesj3655ae02019-12-12 16:08:35 +00003052
tiernoe876f672020-02-13 14:34:48 +00003053 async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
3054 """
3055 Terminates a deployment from RO
3056 :param logging_text:
3057 :param nsr_deployed: db_nsr._admin.deployed
3058 :param nsr_id:
3059 :param nslcmop_id:
3060 :param stage: list of string with the content to write on db_nslcmop.detailed-status.
3061 this method will update only the index 2, but it will write on database the concatenated content of the list
3062 :return:
3063 """
3064 db_nsr_update = {}
3065 failed_detail = []
3066 ro_nsr_id = ro_delete_action = None
3067 if nsr_deployed and nsr_deployed.get("RO"):
3068 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
3069 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
3070 try:
3071 if ro_nsr_id:
3072 stage[2] = "Deleting ns from VIM."
3073 db_nsr_update["detailed-status"] = " ".join(stage)
3074 self._write_op_status(nslcmop_id, stage)
3075 self.logger.debug(logging_text + stage[2])
3076 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3077 self._write_op_status(nslcmop_id, stage)
3078 desc = await self.RO.delete("ns", ro_nsr_id)
3079 ro_delete_action = desc["action_id"]
3080 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
3081 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
3082 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3083 if ro_delete_action:
3084 # wait until NS is deleted from VIM
3085 stage[2] = "Waiting ns deleted from VIM."
3086 detailed_status_old = None
3087 self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
3088 ro_delete_action))
3089 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3090 self._write_op_status(nslcmop_id, stage)
kuused124bfe2019-06-18 12:09:24 +02003091
tiernoe876f672020-02-13 14:34:48 +00003092 delete_timeout = 20 * 60 # 20 minutes
3093 while delete_timeout > 0:
3094 desc = await self.RO.show(
3095 "ns",
3096 item_id_name=ro_nsr_id,
3097 extra_item="action",
3098 extra_item_id=ro_delete_action)
3099
3100 # deploymentStatus
3101 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
3102
3103 ns_status, ns_status_info = self.RO.check_action_status(desc)
3104 if ns_status == "ERROR":
3105 raise ROclient.ROClientException(ns_status_info)
3106 elif ns_status == "BUILD":
3107 stage[2] = "Deleting from VIM {}".format(ns_status_info)
3108 elif ns_status == "ACTIVE":
3109 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
3110 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3111 break
3112 else:
3113 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
3114 if stage[2] != detailed_status_old:
3115 detailed_status_old = stage[2]
3116 db_nsr_update["detailed-status"] = " ".join(stage)
3117 self._write_op_status(nslcmop_id, stage)
3118 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3119 await asyncio.sleep(5, loop=self.loop)
3120 delete_timeout -= 5
3121 else: # delete_timeout <= 0:
3122 raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")
3123
3124 except Exception as e:
3125 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3126 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3127 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
3128 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3129 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
3130 self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
3131 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
tiernoa2143262020-03-27 16:20:40 +00003132 failed_detail.append("delete conflict: {}".format(e))
3133 self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00003134 else:
tiernoa2143262020-03-27 16:20:40 +00003135 failed_detail.append("delete error: {}".format(e))
3136 self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00003137
3138 # Delete nsd
3139 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
3140 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
3141 try:
3142 stage[2] = "Deleting nsd from RO."
3143 db_nsr_update["detailed-status"] = " ".join(stage)
3144 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3145 self._write_op_status(nslcmop_id, stage)
3146 await self.RO.delete("nsd", ro_nsd_id)
3147 self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
3148 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
3149 except Exception as e:
3150 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3151 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
3152 self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
3153 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
3154 failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
3155 self.logger.debug(logging_text + failed_detail[-1])
3156 else:
3157 failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
3158 self.logger.error(logging_text + failed_detail[-1])
3159
3160 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
3161 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
3162 if not vnf_deployed or not vnf_deployed["id"]:
3163 continue
3164 try:
3165 ro_vnfd_id = vnf_deployed["id"]
3166 stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
3167 vnf_deployed["member-vnf-index"], ro_vnfd_id)
3168 db_nsr_update["detailed-status"] = " ".join(stage)
3169 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3170 self._write_op_status(nslcmop_id, stage)
3171 await self.RO.delete("vnfd", ro_vnfd_id)
3172 self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
3173 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
3174 except Exception as e:
3175 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3176 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
3177 self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
3178 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
3179 failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
3180 self.logger.debug(logging_text + failed_detail[-1])
3181 else:
3182 failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
3183 self.logger.error(logging_text + failed_detail[-1])
3184
tiernoa2143262020-03-27 16:20:40 +00003185 if failed_detail:
3186 stage[2] = "Error deleting from VIM"
3187 else:
3188 stage[2] = "Deleted from VIM"
tiernoe876f672020-02-13 14:34:48 +00003189 db_nsr_update["detailed-status"] = " ".join(stage)
3190 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3191 self._write_op_status(nslcmop_id, stage)
3192
3193 if failed_detail:
tiernoa2143262020-03-27 16:20:40 +00003194 raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00003195
3196 async def terminate(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003197 # Try to lock HA task here
3198 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3199 if not task_is_locked_by_me:
3200 return
3201
tierno59d22d22018-09-25 18:10:19 +02003202 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
3203 self.logger.debug(logging_text + "Enter")
tiernoe876f672020-02-13 14:34:48 +00003204 timeout_ns_terminate = self.timeout_ns_terminate
tierno59d22d22018-09-25 18:10:19 +02003205 db_nsr = None
3206 db_nslcmop = None
tiernoa17d4f42020-04-28 09:59:23 +00003207 operation_params = None
tierno59d22d22018-09-25 18:10:19 +02003208 exc = None
tiernoe876f672020-02-13 14:34:48 +00003209 error_list = [] # annotates all failed error messages
tierno59d22d22018-09-25 18:10:19 +02003210 db_nslcmop_update = {}
tiernoc2564fe2019-01-28 16:18:56 +00003211 autoremove = False # autoremove after terminated
tiernoe876f672020-02-13 14:34:48 +00003212 tasks_dict_info = {}
3213 db_nsr_update = {}
3214 stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
3215 # ^ contains [stage, step, VIM-status]
tierno59d22d22018-09-25 18:10:19 +02003216 try:
kuused124bfe2019-06-18 12:09:24 +02003217 # wait for any previous tasks in process
3218 await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)
3219
tiernoe876f672020-02-13 14:34:48 +00003220 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
3221 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3222 operation_params = db_nslcmop.get("operationParams") or {}
3223 if operation_params.get("timeout_ns_terminate"):
3224 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
3225 stage[1] = "Getting nsr={} from db.".format(nsr_id)
3226 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3227
3228 db_nsr_update["operational-status"] = "terminating"
3229 db_nsr_update["config-status"] = "terminating"
quilesj4cda56b2019-12-05 10:02:20 +00003230 self._write_ns_status(
3231 nsr_id=nsr_id,
3232 ns_state="TERMINATING",
3233 current_operation="TERMINATING",
tiernoe876f672020-02-13 14:34:48 +00003234 current_operation_id=nslcmop_id,
3235 other_update=db_nsr_update
quilesj4cda56b2019-12-05 10:02:20 +00003236 )
quilesj3655ae02019-12-12 16:08:35 +00003237 self._write_op_status(
3238 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00003239 queuePosition=0,
3240 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00003241 )
tiernoe876f672020-02-13 14:34:48 +00003242 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
tierno59d22d22018-09-25 18:10:19 +02003243 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
3244 return
tierno59d22d22018-09-25 18:10:19 +02003245
tiernoe876f672020-02-13 14:34:48 +00003246 stage[1] = "Getting vnf descriptors from db."
3247 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
3248 db_vnfds_from_id = {}
3249 db_vnfds_from_member_index = {}
3250 # Loop over VNFRs
3251 for vnfr in db_vnfrs_list:
3252 vnfd_id = vnfr["vnfd-id"]
3253 if vnfd_id not in db_vnfds_from_id:
3254 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
3255 db_vnfds_from_id[vnfd_id] = vnfd
3256 db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]
calvinosanch9f9c6f22019-11-04 13:37:39 +01003257
tiernoe876f672020-02-13 14:34:48 +00003258 # Destroy individual execution environments when there are terminating primitives.
3259 # Rest of EE will be deleted at once
tierno588547c2020-07-01 15:30:20 +00003260 # TODO - check before calling _destroy_N2VC
3261 # if not operation_params.get("skip_terminate_primitives"):#
3262 # or not vca.get("needed_terminate"):
3263 stage[0] = "Stage 2/3 execute terminating primitives."
3264 self.logger.debug(logging_text + stage[0])
3265 stage[1] = "Looking execution environment that needs terminate."
3266 self.logger.debug(logging_text + stage[1])
tierno89f82902020-07-03 14:52:28 +00003267 # self.logger.debug("nsr_deployed: {}".format(nsr_deployed))
tierno588547c2020-07-01 15:30:20 +00003268 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
tierno588547c2020-07-01 15:30:20 +00003269 config_descriptor = None
3270 if not vca or not vca.get("ee_id"):
3271 continue
3272 if not vca.get("member-vnf-index"):
3273 # ns
3274 config_descriptor = db_nsr.get("ns-configuration")
3275 elif vca.get("vdu_id"):
3276 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3277 vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
3278 if vdud:
3279 config_descriptor = vdud.get("vdu-configuration")
3280 elif vca.get("kdu_name"):
3281 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3282 kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
3283 if kdud:
3284 config_descriptor = kdud.get("kdu-configuration")
3285 else:
3286 config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
tierno588547c2020-07-01 15:30:20 +00003287 vca_type = vca.get("type")
3288 exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
3289 vca.get("needed_terminate"))
tiernob010eb02020-08-07 06:36:38 +00003290 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
3291 # pending native charms
tiernoc6600ff2020-09-16 14:13:06 +00003292 destroy_ee = True if vca_type in ("helm", "native_charm") else False
3293 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
3294 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
tierno89f82902020-07-03 14:52:28 +00003295 task = asyncio.ensure_future(
3296 self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index,
3297 destroy_ee, exec_terminate_primitives))
tierno588547c2020-07-01 15:30:20 +00003298 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
tierno59d22d22018-09-25 18:10:19 +02003299
tierno588547c2020-07-01 15:30:20 +00003300 # wait for pending tasks of terminate primitives
3301 if tasks_dict_info:
tiernoc6600ff2020-09-16 14:13:06 +00003302 self.logger.debug(logging_text + 'Waiting for tasks {}'.format(list(tasks_dict_info.keys())))
tierno588547c2020-07-01 15:30:20 +00003303 error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
3304 min(self.timeout_charm_delete, timeout_ns_terminate),
3305 stage, nslcmop_id)
tiernoc6600ff2020-09-16 14:13:06 +00003306 tasks_dict_info.clear()
tierno588547c2020-07-01 15:30:20 +00003307 if error_list:
3308 return # raise LcmException("; ".join(error_list))
tierno82974b22018-11-27 21:55:36 +00003309
tiernoe876f672020-02-13 14:34:48 +00003310 # remove All execution environments at once
3311 stage[0] = "Stage 3/3 delete all."
quilesj3655ae02019-12-12 16:08:35 +00003312
tierno49676be2020-04-07 16:34:35 +00003313 if nsr_deployed.get("VCA"):
3314 stage[1] = "Deleting all execution environments."
3315 self.logger.debug(logging_text + stage[1])
3316 task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
3317 timeout=self.timeout_charm_delete))
3318 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
3319 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
tierno59d22d22018-09-25 18:10:19 +02003320
tiernoe876f672020-02-13 14:34:48 +00003321 # Delete from k8scluster
3322 stage[1] = "Deleting KDUs."
3323 self.logger.debug(logging_text + stage[1])
3324 # print(nsr_deployed)
3325 for kdu in get_iterable(nsr_deployed, "K8s"):
3326 if not kdu or not kdu.get("kdu-instance"):
3327 continue
3328 kdu_instance = kdu.get("kdu-instance")
tiernoa2143262020-03-27 16:20:40 +00003329 if kdu.get("k8scluster-type") in self.k8scluster_map:
tiernoe876f672020-02-13 14:34:48 +00003330 task_delete_kdu_instance = asyncio.ensure_future(
tiernoa2143262020-03-27 16:20:40 +00003331 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
3332 cluster_uuid=kdu.get("k8scluster-uuid"),
3333 kdu_instance=kdu_instance))
tiernoe876f672020-02-13 14:34:48 +00003334 else:
3335 self.logger.error(logging_text + "Unknown k8s deployment type {}".
3336 format(kdu.get("k8scluster-type")))
3337 continue
3338 tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
tierno59d22d22018-09-25 18:10:19 +02003339
3340 # remove from RO
tiernoe876f672020-02-13 14:34:48 +00003341 stage[1] = "Deleting ns from VIM."
tierno69f0d382020-05-07 13:08:09 +00003342 if self.ng_ro:
3343 task_delete_ro = asyncio.ensure_future(
3344 self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
3345 else:
3346 task_delete_ro = asyncio.ensure_future(
3347 self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
tiernoe876f672020-02-13 14:34:48 +00003348 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
tierno59d22d22018-09-25 18:10:19 +02003349
tiernoe876f672020-02-13 14:34:48 +00003350 # rest of staff will be done at finally
3351
3352 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
3353 self.logger.error(logging_text + "Exit Exception {}".format(e))
3354 exc = e
3355 except asyncio.CancelledError:
3356 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
3357 exc = "Operation was cancelled"
3358 except Exception as e:
3359 exc = traceback.format_exc()
3360 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
3361 finally:
3362 if exc:
3363 error_list.append(str(exc))
tierno59d22d22018-09-25 18:10:19 +02003364 try:
tiernoe876f672020-02-13 14:34:48 +00003365 # wait for pending tasks
3366 if tasks_dict_info:
3367 stage[1] = "Waiting for terminate pending tasks."
3368 self.logger.debug(logging_text + stage[1])
3369 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
3370 stage, nslcmop_id)
3371 stage[1] = stage[2] = ""
3372 except asyncio.CancelledError:
3373 error_list.append("Cancelled")
3374 # TODO cancell all tasks
3375 except Exception as exc:
3376 error_list.append(str(exc))
3377 # update status at database
3378 if error_list:
3379 error_detail = "; ".join(error_list)
3380 # self.logger.error(logging_text + error_detail)
tierno50b41432020-08-11 11:20:13 +00003381 error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
3382 error_description_nsr = 'Operation: TERMINATING.{}, {}.'.format(nslcmop_id, stage[0])
tierno59d22d22018-09-25 18:10:19 +02003383
tierno59d22d22018-09-25 18:10:19 +02003384 db_nsr_update["operational-status"] = "failed"
tiernoa2143262020-03-27 16:20:40 +00003385 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00003386 db_nslcmop_update["detailed-status"] = error_detail
3387 nslcmop_operation_state = "FAILED"
3388 ns_state = "BROKEN"
tierno59d22d22018-09-25 18:10:19 +02003389 else:
tiernoa2143262020-03-27 16:20:40 +00003390 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00003391 error_description_nsr = error_description_nslcmop = None
3392 ns_state = "NOT_INSTANTIATED"
tierno59d22d22018-09-25 18:10:19 +02003393 db_nsr_update["operational-status"] = "terminated"
3394 db_nsr_update["detailed-status"] = "Done"
3395 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
3396 db_nslcmop_update["detailed-status"] = "Done"
tiernoe876f672020-02-13 14:34:48 +00003397 nslcmop_operation_state = "COMPLETED"
tierno59d22d22018-09-25 18:10:19 +02003398
tiernoe876f672020-02-13 14:34:48 +00003399 if db_nsr:
3400 self._write_ns_status(
3401 nsr_id=nsr_id,
3402 ns_state=ns_state,
3403 current_operation="IDLE",
3404 current_operation_id=None,
3405 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00003406 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00003407 other_update=db_nsr_update
3408 )
tiernoa17d4f42020-04-28 09:59:23 +00003409 self._write_op_status(
3410 op_id=nslcmop_id,
3411 stage="",
3412 error_message=error_description_nslcmop,
3413 operation_state=nslcmop_operation_state,
3414 other_update=db_nslcmop_update,
3415 )
lloretgalleg32ead8c2020-07-22 10:13:46 +00003416 if ns_state == "NOT_INSTANTIATED":
3417 try:
3418 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "NOT_INSTANTIATED"})
3419 except DbException as e:
3420 self.logger.warn(logging_text + 'Error writing VNFR status for nsr-id-ref: {} -> {}'.
3421 format(nsr_id, e))
tiernoa17d4f42020-04-28 09:59:23 +00003422 if operation_params:
tiernoe876f672020-02-13 14:34:48 +00003423 autoremove = operation_params.get("autoremove", False)
tierno59d22d22018-09-25 18:10:19 +02003424 if nslcmop_operation_state:
3425 try:
3426 await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tiernoc2564fe2019-01-28 16:18:56 +00003427 "operationState": nslcmop_operation_state,
3428 "autoremove": autoremove},
tierno8a518872018-12-21 13:42:14 +00003429 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003430 except Exception as e:
3431 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02003432
tierno59d22d22018-09-25 18:10:19 +02003433 self.logger.debug(logging_text + "Exit")
3434 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
3435
    async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
        """
        Wait for a set of asyncio tasks to finish, reporting progress and errors at database.

        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping asyncio task -> human readable description
        :param timeout: overall timeout in seconds, measured from entry for all tasks together
        :param stage: 3-item list [stage, step, VIM-status]; stage[1] is overwritten with "done/total"
        :param nslcmop_id: operation id used to write progress at the nslcmops record
        :param nsr_id: if provided, errorDescription/errorDetail are also written to this nsrs record
        :return: list of error detail strings; empty if every task succeeded
        """
        time_start = time()
        error_detail_list = []
        error_list = []  # shorter error texts (task description only), used for nsr errorDescription
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
                                                     return_when=asyncio.FIRST_COMPLETED)
            num_done += len(done)
            if not done:   # Timeout
                # global timeout exhausted: annotate every still-pending task and give up
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # well-known exception types carry a meaningful message; anything else
                    # is unexpected, so log the full traceback instead
                    if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException,
                                        K8sException)):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
                        self.logger.error(logging_text + created_tasks_info[task] + exc_traceback)
                else:
                    self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:   # update also nsr
                    self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
                                                      "errorDetail": ". ".join(error_detail_list)})
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00003484
tiernoda964822019-01-14 15:53:47 +00003485 @staticmethod
3486 def _map_primitive_params(primitive_desc, params, instantiation_params):
3487 """
3488 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
3489 The default-value is used. If it is between < > it look for a value at instantiation_params
3490 :param primitive_desc: portion of VNFD/NSD that describes primitive
3491 :param params: Params provided by user
3492 :param instantiation_params: Instantiation params provided by user
3493 :return: a dictionary with the calculated params
3494 """
3495 calculated_params = {}
3496 for parameter in primitive_desc.get("parameter", ()):
3497 param_name = parameter["name"]
3498 if param_name in params:
3499 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00003500 elif "default-value" in parameter or "value" in parameter:
3501 if "value" in parameter:
3502 calculated_params[param_name] = parameter["value"]
3503 else:
3504 calculated_params[param_name] = parameter["default-value"]
3505 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
3506 and calculated_params[param_name].endswith(">"):
3507 if calculated_params[param_name][1:-1] in instantiation_params:
3508 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
tiernoda964822019-01-14 15:53:47 +00003509 else:
3510 raise LcmException("Parameter {} needed to execute primitive {} not provided".
tiernod8323042019-08-09 11:32:23 +00003511 format(calculated_params[param_name], primitive_desc["name"]))
tiernoda964822019-01-14 15:53:47 +00003512 else:
3513 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3514 format(param_name, primitive_desc["name"]))
tierno59d22d22018-09-25 18:10:19 +02003515
tiernoda964822019-01-14 15:53:47 +00003516 if isinstance(calculated_params[param_name], (dict, list, tuple)):
3517 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], default_flow_style=True,
3518 width=256)
3519 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
3520 calculated_params[param_name] = calculated_params[param_name][7:]
tiernoc3f2a822019-11-05 13:45:04 +00003521
3522 # add always ns_config_info if primitive name is config
3523 if primitive_desc["name"] == "config":
3524 if "ns_config_info" in instantiation_params:
3525 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
tiernoda964822019-01-14 15:53:47 +00003526 return calculated_params
3527
tierno4fa7f8e2020-07-08 15:33:55 +00003528 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None,
3529 ee_descriptor_id=None):
tiernoe876f672020-02-13 14:34:48 +00003530 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
3531 for vca in deployed_vca:
3532 if not vca:
3533 continue
3534 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3535 continue
tiernoe876f672020-02-13 14:34:48 +00003536 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3537 continue
3538 if kdu_name and kdu_name != vca["kdu_name"]:
3539 continue
tierno4fa7f8e2020-07-08 15:33:55 +00003540 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
3541 continue
tiernoe876f672020-02-13 14:34:48 +00003542 break
3543 else:
3544 # vca_deployed not found
tierno4fa7f8e2020-07-08 15:33:55 +00003545 raise LcmException("charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
3546 " is not deployed".format(member_vnf_index, vdu_id, vdu_count_index, kdu_name,
3547 ee_descriptor_id))
quilesj7e13aeb2019-10-08 13:34:55 +02003548
tiernoe876f672020-02-13 14:34:48 +00003549 # get ee_id
3550 ee_id = vca.get("ee_id")
tierno588547c2020-07-01 15:30:20 +00003551 vca_type = vca.get("type", "lxc_proxy_charm") # default value for backward compatibility - proxy charm
tiernoe876f672020-02-13 14:34:48 +00003552 if not ee_id:
tierno067e04a2020-03-31 12:53:13 +00003553 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
tiernoe876f672020-02-13 14:34:48 +00003554 "execution environment"
tierno067e04a2020-03-31 12:53:13 +00003555 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
tierno588547c2020-07-01 15:30:20 +00003556 return ee_id, vca_type
tiernoe876f672020-02-13 14:34:48 +00003557
    async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0,
                                    retries_interval=30, timeout=None,
                                    vca_type=None, db_dict=None) -> (str, str):
        """
        Execute a primitive at an execution environment through the VCA connector.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive name; for "config" the params are wrapped as {"params": ...}
        :param primitive_params: dict with the primitive parameters
        :param retries: number of extra attempts after a failed execution (0 = single attempt)
        :param retries_interval: seconds to sleep between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout_primitive
        :param vca_type: key of self.vca_map selecting the connector; defaults to "lxc_proxy_charm"
        :param db_dict: database target the connector can use to report progress
        :return: tuple (operation_state, detail): ('COMPLETED', output) on success,
                 ('FAILED', error-text) when all attempts failed, or
                 ('FAIL', error-text) on an unexpected error outside the retry loop.
        """
        try:
            if primitive == "config":
                # "config" expects its parameters nested under "params"
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict),
                        timeout=timeout or self.timeout_primitive)
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    # cancellation must propagate, never be retried
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        # attempts exhausted: report failure to the caller instead of raising
                        return 'FAILED', str(e)

            return 'COMPLETED', output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            # NOTE(review): returns 'FAIL' here vs 'FAILED' above — callers apparently
            # distinguish (or tolerate) both; confirm before unifying
            return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02003599
3600 async def action(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003601
3602 # Try to lock HA task here
3603 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3604 if not task_is_locked_by_me:
3605 return
3606
tierno59d22d22018-09-25 18:10:19 +02003607 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
3608 self.logger.debug(logging_text + "Enter")
3609 # get all needed from database
3610 db_nsr = None
3611 db_nslcmop = None
tiernoe876f672020-02-13 14:34:48 +00003612 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003613 db_nslcmop_update = {}
3614 nslcmop_operation_state = None
tierno067e04a2020-03-31 12:53:13 +00003615 error_description_nslcmop = None
tierno59d22d22018-09-25 18:10:19 +02003616 exc = None
3617 try:
kuused124bfe2019-06-18 12:09:24 +02003618 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003619 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003620 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
3621
quilesj4cda56b2019-12-05 10:02:20 +00003622 self._write_ns_status(
3623 nsr_id=nsr_id,
3624 ns_state=None,
3625 current_operation="RUNNING ACTION",
3626 current_operation_id=nslcmop_id
3627 )
3628
tierno59d22d22018-09-25 18:10:19 +02003629 step = "Getting information from database"
3630 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3631 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernoda964822019-01-14 15:53:47 +00003632
tiernoe4f7e6c2018-11-27 14:55:30 +00003633 nsr_deployed = db_nsr["_admin"].get("deployed")
tierno1b633412019-02-25 16:48:23 +00003634 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tierno59d22d22018-09-25 18:10:19 +02003635 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003636 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
tiernoe4f7e6c2018-11-27 14:55:30 +00003637 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
tierno067e04a2020-03-31 12:53:13 +00003638 primitive = db_nslcmop["operationParams"]["primitive"]
3639 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
3640 timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)
tierno59d22d22018-09-25 18:10:19 +02003641
tierno1b633412019-02-25 16:48:23 +00003642 if vnf_index:
3643 step = "Getting vnfr from database"
3644 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3645 step = "Getting vnfd from database"
3646 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
3647 else:
tierno067e04a2020-03-31 12:53:13 +00003648 step = "Getting nsd from database"
3649 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
tiernoda964822019-01-14 15:53:47 +00003650
tierno82974b22018-11-27 21:55:36 +00003651 # for backward compatibility
3652 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3653 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3654 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3655 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3656
tiernoda964822019-01-14 15:53:47 +00003657 # look for primitive
tierno4fa7f8e2020-07-08 15:33:55 +00003658 config_primitive_desc = descriptor_configuration = None
tiernoda964822019-01-14 15:53:47 +00003659 if vdu_id:
3660 for vdu in get_iterable(db_vnfd, "vdu"):
3661 if vdu_id == vdu["id"]:
tierno4fa7f8e2020-07-08 15:33:55 +00003662 descriptor_configuration = vdu.get("vdu-configuration")
tierno067e04a2020-03-31 12:53:13 +00003663 break
calvinosanch9f9c6f22019-11-04 13:37:39 +01003664 elif kdu_name:
tierno067e04a2020-03-31 12:53:13 +00003665 for kdu in get_iterable(db_vnfd, "kdu"):
3666 if kdu_name == kdu["name"]:
tierno4fa7f8e2020-07-08 15:33:55 +00003667 descriptor_configuration = kdu.get("kdu-configuration")
tierno067e04a2020-03-31 12:53:13 +00003668 break
tierno1b633412019-02-25 16:48:23 +00003669 elif vnf_index:
tierno4fa7f8e2020-07-08 15:33:55 +00003670 descriptor_configuration = db_vnfd.get("vnf-configuration")
tierno1b633412019-02-25 16:48:23 +00003671 else:
tierno4fa7f8e2020-07-08 15:33:55 +00003672 descriptor_configuration = db_nsd.get("ns-configuration")
3673
3674 if descriptor_configuration and descriptor_configuration.get("config-primitive"):
3675 for config_primitive in descriptor_configuration["config-primitive"]:
tierno1b633412019-02-25 16:48:23 +00003676 if config_primitive["name"] == primitive:
3677 config_primitive_desc = config_primitive
3678 break
tiernoda964822019-01-14 15:53:47 +00003679
garciadeblas003ac802020-07-20 11:05:42 +00003680 if not config_primitive_desc:
3681 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
3682 raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
3683 format(primitive))
3684 primitive_name = primitive
3685 ee_descriptor_id = None
3686 else:
3687 primitive_name = config_primitive_desc.get("execution-environment-primitive", primitive)
3688 ee_descriptor_id = config_primitive_desc.get("execution-environment-ref")
tierno1b633412019-02-25 16:48:23 +00003689
tierno1b633412019-02-25 16:48:23 +00003690 if vnf_index:
tierno626e0152019-11-29 14:16:16 +00003691 if vdu_id:
3692 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
tierno067e04a2020-03-31 12:53:13 +00003693 desc_params = self._format_additional_params(vdur.get("additionalParams"))
3694 elif kdu_name:
3695 kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
3696 desc_params = self._format_additional_params(kdur.get("additionalParams"))
3697 else:
3698 desc_params = self._format_additional_params(db_vnfr.get("additionalParamsForVnf"))
tierno1b633412019-02-25 16:48:23 +00003699 else:
tierno067e04a2020-03-31 12:53:13 +00003700 desc_params = self._format_additional_params(db_nsr.get("additionalParamsForNs"))
tiernoda964822019-01-14 15:53:47 +00003701
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003702 if kdu_name:
3703 kdu_action = True if not deep_get(kdu, ("kdu-configuration", "juju")) else False
3704
tiernoda964822019-01-14 15:53:47 +00003705 # TODO check if ns is in a proper status
tierno4fa7f8e2020-07-08 15:33:55 +00003706 if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action):
tierno067e04a2020-03-31 12:53:13 +00003707 # kdur and desc_params already set from before
3708 if primitive_params:
3709 desc_params.update(primitive_params)
3710 # TODO Check if we will need something at vnf level
3711 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
3712 if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
3713 break
3714 else:
3715 raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))
quilesj7e13aeb2019-10-08 13:34:55 +02003716
tierno067e04a2020-03-31 12:53:13 +00003717 if kdu.get("k8scluster-type") not in self.k8scluster_map:
3718 msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
3719 raise LcmException(msg)
3720
3721 db_dict = {"collection": "nsrs",
3722 "filter": {"_id": nsr_id},
3723 "path": "_admin.deployed.K8s.{}".format(index)}
tierno4fa7f8e2020-07-08 15:33:55 +00003724 self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name))
3725 step = "Executing kdu {}".format(primitive_name)
3726 if primitive_name == "upgrade":
tierno067e04a2020-03-31 12:53:13 +00003727 if desc_params.get("kdu_model"):
3728 kdu_model = desc_params.get("kdu_model")
3729 del desc_params["kdu_model"]
3730 else:
3731 kdu_model = kdu.get("kdu-model")
3732 parts = kdu_model.split(sep=":")
3733 if len(parts) == 2:
3734 kdu_model = parts[0]
3735
3736 detailed_status = await asyncio.wait_for(
3737 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
3738 cluster_uuid=kdu.get("k8scluster-uuid"),
3739 kdu_instance=kdu.get("kdu-instance"),
3740 atomic=True, kdu_model=kdu_model,
3741 params=desc_params, db_dict=db_dict,
3742 timeout=timeout_ns_action),
3743 timeout=timeout_ns_action + 10)
3744 self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
tierno4fa7f8e2020-07-08 15:33:55 +00003745 elif primitive_name == "rollback":
tierno067e04a2020-03-31 12:53:13 +00003746 detailed_status = await asyncio.wait_for(
3747 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
3748 cluster_uuid=kdu.get("k8scluster-uuid"),
3749 kdu_instance=kdu.get("kdu-instance"),
3750 db_dict=db_dict),
3751 timeout=timeout_ns_action)
tierno4fa7f8e2020-07-08 15:33:55 +00003752 elif primitive_name == "status":
tierno067e04a2020-03-31 12:53:13 +00003753 detailed_status = await asyncio.wait_for(
3754 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
3755 cluster_uuid=kdu.get("k8scluster-uuid"),
3756 kdu_instance=kdu.get("kdu-instance")),
3757 timeout=timeout_ns_action)
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003758 else:
3759 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
3760 params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)
3761
3762 detailed_status = await asyncio.wait_for(
3763 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
3764 cluster_uuid=kdu.get("k8scluster-uuid"),
3765 kdu_instance=kdu_instance,
tierno4fa7f8e2020-07-08 15:33:55 +00003766 primitive_name=primitive_name,
Dominik Fleischmann771c32b2020-04-07 12:39:36 +02003767 params=params, db_dict=db_dict,
3768 timeout=timeout_ns_action),
3769 timeout=timeout_ns_action)
tierno067e04a2020-03-31 12:53:13 +00003770
3771 if detailed_status:
3772 nslcmop_operation_state = 'COMPLETED'
3773 else:
3774 detailed_status = ''
3775 nslcmop_operation_state = 'FAILED'
tierno067e04a2020-03-31 12:53:13 +00003776 else:
tierno588547c2020-07-01 15:30:20 +00003777 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
3778 member_vnf_index=vnf_index,
3779 vdu_id=vdu_id,
tierno4fa7f8e2020-07-08 15:33:55 +00003780 vdu_count_index=vdu_count_index,
3781 ee_descriptor_id=ee_descriptor_id)
tierno588547c2020-07-01 15:30:20 +00003782 db_nslcmop_notif = {"collection": "nslcmops",
3783 "filter": {"_id": nslcmop_id},
3784 "path": "admin.VCA"}
tierno067e04a2020-03-31 12:53:13 +00003785 nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
tierno588547c2020-07-01 15:30:20 +00003786 ee_id,
tierno4fa7f8e2020-07-08 15:33:55 +00003787 primitive=primitive_name,
tierno067e04a2020-03-31 12:53:13 +00003788 primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
tierno588547c2020-07-01 15:30:20 +00003789 timeout=timeout_ns_action,
3790 vca_type=vca_type,
3791 db_dict=db_nslcmop_notif)
tierno067e04a2020-03-31 12:53:13 +00003792
3793 db_nslcmop_update["detailed-status"] = detailed_status
3794 error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
3795 self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
3796 detailed_status))
tierno59d22d22018-09-25 18:10:19 +02003797 return # database update is called inside finally
3798
tiernof59ad6c2020-04-08 12:50:52 +00003799 except (DbException, LcmException, N2VCException, K8sException) as e:
tierno59d22d22018-09-25 18:10:19 +02003800 self.logger.error(logging_text + "Exit Exception {}".format(e))
3801 exc = e
3802 except asyncio.CancelledError:
3803 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
3804 exc = "Operation was cancelled"
tierno067e04a2020-03-31 12:53:13 +00003805 except asyncio.TimeoutError:
3806 self.logger.error(logging_text + "Timeout while '{}'".format(step))
3807 exc = "Timeout"
tierno59d22d22018-09-25 18:10:19 +02003808 except Exception as e:
3809 exc = traceback.format_exc()
3810 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
3811 finally:
tierno067e04a2020-03-31 12:53:13 +00003812 if exc:
3813 db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
kuuse0ca67472019-05-13 15:59:27 +02003814 "FAILED {}: {}".format(step, exc)
tierno067e04a2020-03-31 12:53:13 +00003815 nslcmop_operation_state = "FAILED"
3816 if db_nsr:
3817 self._write_ns_status(
3818 nsr_id=nsr_id,
3819 ns_state=db_nsr["nsState"], # TODO check if degraded. For the moment use previous status
3820 current_operation="IDLE",
3821 current_operation_id=None,
3822 # error_description=error_description_nsr,
3823 # error_detail=error_detail,
3824 other_update=db_nsr_update
3825 )
3826
tiernoa17d4f42020-04-28 09:59:23 +00003827 self._write_op_status(
3828 op_id=nslcmop_id,
3829 stage="",
3830 error_message=error_description_nslcmop,
3831 operation_state=nslcmop_operation_state,
3832 other_update=db_nslcmop_update,
3833 )
tierno067e04a2020-03-31 12:53:13 +00003834
tierno59d22d22018-09-25 18:10:19 +02003835 if nslcmop_operation_state:
3836 try:
3837 await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00003838 "operationState": nslcmop_operation_state},
3839 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003840 except Exception as e:
3841 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
3842 self.logger.debug(logging_text + "Exit")
3843 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
tierno067e04a2020-03-31 12:53:13 +00003844 return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02003845
3846 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003847
3848 # Try to lock HA task here
3849 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3850 if not task_is_locked_by_me:
3851 return
3852
tierno59d22d22018-09-25 18:10:19 +02003853 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
3854 self.logger.debug(logging_text + "Enter")
3855 # get all needed from database
3856 db_nsr = None
3857 db_nslcmop = None
3858 db_nslcmop_update = {}
3859 nslcmop_operation_state = None
tiernoe876f672020-02-13 14:34:48 +00003860 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003861 exc = None
tierno9ab95942018-10-10 16:44:22 +02003862 # in case of error, indicates what part of scale was failed to put nsr at error status
3863 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02003864 old_operational_status = ""
3865 old_config_status = ""
tierno59d22d22018-09-25 18:10:19 +02003866 try:
kuused124bfe2019-06-18 12:09:24 +02003867 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003868 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003869 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
tierno47e86b52018-10-10 14:05:55 +02003870
quilesj4cda56b2019-12-05 10:02:20 +00003871 self._write_ns_status(
3872 nsr_id=nsr_id,
3873 ns_state=None,
3874 current_operation="SCALING",
3875 current_operation_id=nslcmop_id
3876 )
3877
ikalyvas02d9e7b2019-05-27 18:16:01 +03003878 step = "Getting nslcmop from database"
ikalyvas02d9e7b2019-05-27 18:16:01 +03003879 self.logger.debug(step + " after having waited for previous tasks to be completed")
3880 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3881 step = "Getting nsr from database"
3882 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3883
3884 old_operational_status = db_nsr["operational-status"]
3885 old_config_status = db_nsr["config-status"]
tierno59d22d22018-09-25 18:10:19 +02003886 step = "Parsing scaling parameters"
tierno9babfda2019-06-07 12:36:50 +00003887 # self.logger.debug(step)
tierno59d22d22018-09-25 18:10:19 +02003888 db_nsr_update["operational-status"] = "scaling"
3889 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00003890 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003891
3892 #######
3893 nsr_deployed = db_nsr["_admin"].get("deployed")
3894 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tiernoda6fb102019-11-23 00:36:52 +00003895 # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3896 # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3897 # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003898 #######
3899
tiernoe4f7e6c2018-11-27 14:55:30 +00003900 RO_nsr_id = nsr_deployed["RO"]["nsr_id"]
tierno59d22d22018-09-25 18:10:19 +02003901 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
3902 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
3903 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
3904 # scaling_policy = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"].get("scaling-policy")
3905
tierno82974b22018-11-27 21:55:36 +00003906 # for backward compatibility
3907 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3908 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3909 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3910 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3911
tierno59d22d22018-09-25 18:10:19 +02003912 step = "Getting vnfr from database"
3913 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3914 step = "Getting vnfd from database"
3915 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03003916
tierno59d22d22018-09-25 18:10:19 +02003917 step = "Getting scaling-group-descriptor"
3918 for scaling_descriptor in db_vnfd["scaling-group-descriptor"]:
3919 if scaling_descriptor["name"] == scaling_group:
3920 break
3921 else:
3922 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
3923 "at vnfd:scaling-group-descriptor".format(scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003924
tierno59d22d22018-09-25 18:10:19 +02003925 # cooldown_time = 0
3926 # for scaling_policy_descriptor in scaling_descriptor.get("scaling-policy", ()):
3927 # cooldown_time = scaling_policy_descriptor.get("cooldown-time", 0)
3928 # if scaling_policy and scaling_policy == scaling_policy_descriptor.get("name"):
3929 # break
3930
3931 # TODO check if ns is in a proper status
tierno15b1cf12019-08-29 13:21:40 +00003932 step = "Sending scale order to VIM"
tierno59d22d22018-09-25 18:10:19 +02003933 nb_scale_op = 0
3934 if not db_nsr["_admin"].get("scaling-group"):
3935 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
3936 admin_scale_index = 0
3937 else:
3938 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
3939 if admin_scale_info["name"] == scaling_group:
3940 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
3941 break
tierno9ab95942018-10-10 16:44:22 +02003942 else: # not found, set index one plus last element and add new entry with the name
3943 admin_scale_index += 1
3944 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
tierno59d22d22018-09-25 18:10:19 +02003945 RO_scaling_info = []
3946 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
3947 if scaling_type == "SCALE_OUT":
3948 # count if max-instance-count is reached
kuuse818d70c2019-08-07 14:43:44 +02003949 max_instance_count = scaling_descriptor.get("max-instance-count", 10)
3950 # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count))
3951 if nb_scale_op >= max_instance_count:
3952 raise LcmException("reached the limit of {} (max-instance-count) "
3953 "scaling-out operations for the "
3954 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
kuuse8b998e42019-07-30 15:22:16 +02003955
ikalyvas02d9e7b2019-05-27 18:16:01 +03003956 nb_scale_op += 1
tierno59d22d22018-09-25 18:10:19 +02003957 vdu_scaling_info["scaling_direction"] = "OUT"
3958 vdu_scaling_info["vdu-create"] = {}
3959 for vdu_scale_info in scaling_descriptor["vdu"]:
3960 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3961 "type": "create", "count": vdu_scale_info.get("count", 1)})
3962 vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
ikalyvas02d9e7b2019-05-27 18:16:01 +03003963
tierno59d22d22018-09-25 18:10:19 +02003964 elif scaling_type == "SCALE_IN":
3965 # count if min-instance-count is reached
tierno27246d82018-09-27 15:59:09 +02003966 min_instance_count = 0
tierno59d22d22018-09-25 18:10:19 +02003967 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
3968 min_instance_count = int(scaling_descriptor["min-instance-count"])
tierno9babfda2019-06-07 12:36:50 +00003969 if nb_scale_op <= min_instance_count:
3970 raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the "
3971 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003972 nb_scale_op -= 1
tierno59d22d22018-09-25 18:10:19 +02003973 vdu_scaling_info["scaling_direction"] = "IN"
3974 vdu_scaling_info["vdu-delete"] = {}
3975 for vdu_scale_info in scaling_descriptor["vdu"]:
3976 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3977 "type": "delete", "count": vdu_scale_info.get("count", 1)})
3978 vdu_scaling_info["vdu-delete"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
3979
3980 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
tierno27246d82018-09-27 15:59:09 +02003981 vdu_create = vdu_scaling_info.get("vdu-create")
3982 vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
tierno59d22d22018-09-25 18:10:19 +02003983 if vdu_scaling_info["scaling_direction"] == "IN":
3984 for vdur in reversed(db_vnfr["vdur"]):
tierno27246d82018-09-27 15:59:09 +02003985 if vdu_delete.get(vdur["vdu-id-ref"]):
3986 vdu_delete[vdur["vdu-id-ref"]] -= 1
tierno59d22d22018-09-25 18:10:19 +02003987 vdu_scaling_info["vdu"].append({
3988 "name": vdur["name"],
3989 "vdu_id": vdur["vdu-id-ref"],
3990 "interface": []
3991 })
3992 for interface in vdur["interfaces"]:
3993 vdu_scaling_info["vdu"][-1]["interface"].append({
3994 "name": interface["name"],
3995 "ip_address": interface["ip-address"],
3996 "mac_address": interface.get("mac-address"),
3997 })
tierno27246d82018-09-27 15:59:09 +02003998 vdu_delete = vdu_scaling_info.pop("vdu-delete")
tierno59d22d22018-09-25 18:10:19 +02003999
kuuseac3a8882019-10-03 10:48:06 +02004000 # PRE-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02004001 step = "Executing pre-scale vnf-config-primitive"
4002 if scaling_descriptor.get("scaling-config-action"):
4003 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02004004 if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
4005 or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02004006 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4007 step = db_nslcmop_update["detailed-status"] = \
4008 "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00004009
tierno59d22d22018-09-25 18:10:19 +02004010 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02004011 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
4012 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02004013 break
4014 else:
4015 raise LcmException(
4016 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
tiernoda964822019-01-14 15:53:47 +00004017 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
tierno4fa7f8e2020-07-08 15:33:55 +00004018 "primitive".format(scaling_group, vnf_config_primitive))
tiernoda964822019-01-14 15:53:47 +00004019
tierno16fedf52019-05-24 08:38:26 +00004020 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00004021 if db_vnfr.get("additionalParamsForVnf"):
4022 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
quilesj7e13aeb2019-10-08 13:34:55 +02004023
tierno9ab95942018-10-10 16:44:22 +02004024 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02004025 db_nsr_update["config-status"] = "configuring pre-scaling"
kuuseac3a8882019-10-03 10:48:06 +02004026 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
4027
tierno7c4e24c2020-05-13 08:41:35 +00004028 # Pre-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004029 op_index = self._check_or_add_scale_suboperation(
4030 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
tierno7c4e24c2020-05-13 08:41:35 +00004031 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004032 # Skip sub-operation
4033 result = 'COMPLETED'
4034 result_detail = 'Done'
4035 self.logger.debug(logging_text +
4036 "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
4037 vnf_config_primitive, result, result_detail))
4038 else:
tierno7c4e24c2020-05-13 08:41:35 +00004039 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004040 # New sub-operation: Get index of this sub-operation
4041 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4042 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4043 format(vnf_config_primitive))
4044 else:
tierno7c4e24c2020-05-13 08:41:35 +00004045 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004046 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4047 vnf_index = op.get('member_vnf_index')
4048 vnf_config_primitive = op.get('primitive')
4049 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00004050 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02004051 format(vnf_config_primitive))
tierno588547c2020-07-01 15:30:20 +00004052 # Execute the primitive, either with new (first-time) or registered (reintent) args
tierno4fa7f8e2020-07-08 15:33:55 +00004053 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4054 primitive_name = config_primitive.get("execution-environment-primitive",
4055 vnf_config_primitive)
tierno588547c2020-07-01 15:30:20 +00004056 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4057 member_vnf_index=vnf_index,
4058 vdu_id=None,
tierno4fa7f8e2020-07-08 15:33:55 +00004059 vdu_count_index=None,
4060 ee_descriptor_id=ee_descriptor_id)
kuuseac3a8882019-10-03 10:48:06 +02004061 result, result_detail = await self._ns_execute_primitive(
tierno4fa7f8e2020-07-08 15:33:55 +00004062 ee_id, primitive_name, primitive_params, vca_type)
kuuseac3a8882019-10-03 10:48:06 +02004063 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4064 vnf_config_primitive, result, result_detail))
4065 # Update operationState = COMPLETED | FAILED
4066 self._update_suboperation_status(
4067 db_nslcmop, op_index, result, result_detail)
4068
tierno59d22d22018-09-25 18:10:19 +02004069 if result == "FAILED":
4070 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02004071 db_nsr_update["config-status"] = old_config_status
4072 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02004073 # PRE-SCALE END
tierno59d22d22018-09-25 18:10:19 +02004074
kuuseac3a8882019-10-03 10:48:06 +02004075 # SCALE RO - BEGIN
4076 # Should this block be skipped if 'RO_nsr_id' == None ?
4077 # if (RO_nsr_id and RO_scaling_info):
tierno59d22d22018-09-25 18:10:19 +02004078 if RO_scaling_info:
tierno9ab95942018-10-10 16:44:22 +02004079 scale_process = "RO"
tierno7c4e24c2020-05-13 08:41:35 +00004080 # Scale RO retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004081 op_index = self._check_or_add_scale_suboperation(
4082 db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
tierno7c4e24c2020-05-13 08:41:35 +00004083 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004084 # Skip sub-operation
4085 result = 'COMPLETED'
4086 result_detail = 'Done'
4087 self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(
4088 result, result_detail))
4089 else:
tierno7c4e24c2020-05-13 08:41:35 +00004090 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004091 # New sub-operation: Get index of this sub-operation
4092 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4093 self.logger.debug(logging_text + "New sub-operation RO")
tierno59d22d22018-09-25 18:10:19 +02004094 else:
tierno7c4e24c2020-05-13 08:41:35 +00004095 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004096 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4097 RO_nsr_id = op.get('RO_nsr_id')
4098 RO_scaling_info = op.get('RO_scaling_info')
tierno7c4e24c2020-05-13 08:41:35 +00004099 self.logger.debug(logging_text + "Sub-operation RO retry for primitive {}".format(
kuuseac3a8882019-10-03 10:48:06 +02004100 vnf_config_primitive))
4101
4102 RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
4103 db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
4104 db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
4105 # wait until ready
4106 RO_nslcmop_id = RO_desc["instance_action_id"]
4107 db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id
4108
4109 RO_task_done = False
tiernodf24ef82020-09-25 12:33:15 +00004110 step = detailed_status = "Waiting for VIM to scale. RO_task_id={}.".format(RO_nslcmop_id)
kuuseac3a8882019-10-03 10:48:06 +02004111 detailed_status_old = None
4112 self.logger.debug(logging_text + step)
4113
4114 deployment_timeout = 1 * 3600 # One hour
4115 while deployment_timeout > 0:
4116 if not RO_task_done:
4117 desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
4118 extra_item_id=RO_nslcmop_id)
quilesj3655ae02019-12-12 16:08:35 +00004119
4120 # deploymentStatus
4121 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4122
kuuseac3a8882019-10-03 10:48:06 +02004123 ns_status, ns_status_info = self.RO.check_action_status(desc)
4124 if ns_status == "ERROR":
4125 raise ROclient.ROClientException(ns_status_info)
4126 elif ns_status == "BUILD":
4127 detailed_status = step + "; {}".format(ns_status_info)
4128 elif ns_status == "ACTIVE":
4129 RO_task_done = True
tiernodf24ef82020-09-25 12:33:15 +00004130 self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
kuuseac3a8882019-10-03 10:48:06 +02004131 step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
4132 self.logger.debug(logging_text + step)
4133 else:
4134 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
tierno59d22d22018-09-25 18:10:19 +02004135 else:
tiernodf24ef82020-09-25 12:33:15 +00004136 desc = await self.RO.show("ns", RO_nsr_id)
4137 ns_status, ns_status_info = self.RO.check_ns_status(desc)
4138 # deploymentStatus
4139 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
quilesj7e13aeb2019-10-08 13:34:55 +02004140
kuuseac3a8882019-10-03 10:48:06 +02004141 if ns_status == "ERROR":
4142 raise ROclient.ROClientException(ns_status_info)
4143 elif ns_status == "BUILD":
4144 detailed_status = step + "; {}".format(ns_status_info)
4145 elif ns_status == "ACTIVE":
4146 step = detailed_status = \
4147 "Waiting for management IP address reported by the VIM. Updating VNFRs"
kuuseac3a8882019-10-03 10:48:06 +02004148 try:
kuuseac3a8882019-10-03 10:48:06 +02004149 # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
4150 self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
4151 break
4152 except LcmExceptionNoMgmtIP:
4153 pass
4154 else:
4155 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
4156 if detailed_status != detailed_status_old:
4157 self._update_suboperation_status(
4158 db_nslcmop, op_index, 'COMPLETED', detailed_status)
4159 detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
4160 self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
tierno59d22d22018-09-25 18:10:19 +02004161
kuuseac3a8882019-10-03 10:48:06 +02004162 await asyncio.sleep(5, loop=self.loop)
4163 deployment_timeout -= 5
4164 if deployment_timeout <= 0:
4165 self._update_suboperation_status(
4166 db_nslcmop, nslcmop_id, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
4167 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tierno59d22d22018-09-25 18:10:19 +02004168
kuuseac3a8882019-10-03 10:48:06 +02004169 # update VDU_SCALING_INFO with the obtained ip_addresses
4170 if vdu_scaling_info["scaling_direction"] == "OUT":
4171 for vdur in reversed(db_vnfr["vdur"]):
4172 if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
4173 vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
4174 vdu_scaling_info["vdu"].append({
4175 "name": vdur["name"],
4176 "vdu_id": vdur["vdu-id-ref"],
4177 "interface": []
tierno59d22d22018-09-25 18:10:19 +02004178 })
kuuseac3a8882019-10-03 10:48:06 +02004179 for interface in vdur["interfaces"]:
4180 vdu_scaling_info["vdu"][-1]["interface"].append({
4181 "name": interface["name"],
4182 "ip_address": interface["ip-address"],
4183 "mac_address": interface.get("mac-address"),
4184 })
4185 del vdu_scaling_info["vdu-create"]
4186
4187 self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
4188 # SCALE RO - END
tierno59d22d22018-09-25 18:10:19 +02004189
tierno9ab95942018-10-10 16:44:22 +02004190 scale_process = None
tierno59d22d22018-09-25 18:10:19 +02004191 if db_nsr_update:
4192 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4193
kuuseac3a8882019-10-03 10:48:06 +02004194 # POST-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02004195 # execute primitive service POST-SCALING
4196 step = "Executing post-scale vnf-config-primitive"
4197 if scaling_descriptor.get("scaling-config-action"):
4198 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02004199 if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
4200 or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02004201 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4202 step = db_nslcmop_update["detailed-status"] = \
4203 "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00004204
tierno589befb2019-05-29 07:06:23 +00004205 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00004206 if db_vnfr.get("additionalParamsForVnf"):
4207 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
4208
tierno59d22d22018-09-25 18:10:19 +02004209 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02004210 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
4211 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02004212 break
4213 else:
tierno4fa7f8e2020-07-08 15:33:55 +00004214 raise LcmException(
4215 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
4216 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
4217 "config-primitive".format(scaling_group, vnf_config_primitive))
tierno9ab95942018-10-10 16:44:22 +02004218 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02004219 db_nsr_update["config-status"] = "configuring post-scaling"
kuuseac3a8882019-10-03 10:48:06 +02004220 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
tiernod6de1992018-10-11 13:05:52 +02004221
tierno7c4e24c2020-05-13 08:41:35 +00004222 # Post-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004223 op_index = self._check_or_add_scale_suboperation(
4224 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
quilesj4cda56b2019-12-05 10:02:20 +00004225 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004226 # Skip sub-operation
4227 result = 'COMPLETED'
4228 result_detail = 'Done'
4229 self.logger.debug(logging_text +
4230 "vnf_config_primitive={} Skipped sub-operation, result {} {}".
4231 format(vnf_config_primitive, result, result_detail))
4232 else:
quilesj4cda56b2019-12-05 10:02:20 +00004233 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004234 # New sub-operation: Get index of this sub-operation
4235 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4236 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4237 format(vnf_config_primitive))
4238 else:
tierno7c4e24c2020-05-13 08:41:35 +00004239 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004240 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4241 vnf_index = op.get('member_vnf_index')
4242 vnf_config_primitive = op.get('primitive')
4243 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00004244 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02004245 format(vnf_config_primitive))
tierno588547c2020-07-01 15:30:20 +00004246 # Execute the primitive, either with new (first-time) or registered (reintent) args
tierno4fa7f8e2020-07-08 15:33:55 +00004247 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4248 primitive_name = config_primitive.get("execution-environment-primitive",
4249 vnf_config_primitive)
tierno588547c2020-07-01 15:30:20 +00004250 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4251 member_vnf_index=vnf_index,
4252 vdu_id=None,
tierno4fa7f8e2020-07-08 15:33:55 +00004253 vdu_count_index=None,
4254 ee_descriptor_id=ee_descriptor_id)
kuuseac3a8882019-10-03 10:48:06 +02004255 result, result_detail = await self._ns_execute_primitive(
tierno4fa7f8e2020-07-08 15:33:55 +00004256 ee_id, primitive_name, primitive_params, vca_type)
kuuseac3a8882019-10-03 10:48:06 +02004257 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4258 vnf_config_primitive, result, result_detail))
4259 # Update operationState = COMPLETED | FAILED
4260 self._update_suboperation_status(
4261 db_nslcmop, op_index, result, result_detail)
4262
tierno59d22d22018-09-25 18:10:19 +02004263 if result == "FAILED":
4264 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02004265 db_nsr_update["config-status"] = old_config_status
4266 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02004267 # POST-SCALE END
tierno59d22d22018-09-25 18:10:19 +02004268
tiernod6de1992018-10-11 13:05:52 +02004269 db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type)
ikalyvas02d9e7b2019-05-27 18:16:01 +03004270 db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
4271 else old_operational_status
tiernod6de1992018-10-11 13:05:52 +02004272 db_nsr_update["config-status"] = old_config_status
tierno59d22d22018-09-25 18:10:19 +02004273 return
4274 except (ROclient.ROClientException, DbException, LcmException) as e:
4275 self.logger.error(logging_text + "Exit Exception {}".format(e))
4276 exc = e
4277 except asyncio.CancelledError:
4278 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
4279 exc = "Operation was cancelled"
4280 except Exception as e:
4281 exc = traceback.format_exc()
4282 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
4283 finally:
quilesj3655ae02019-12-12 16:08:35 +00004284 self._write_ns_status(
4285 nsr_id=nsr_id,
4286 ns_state=None,
4287 current_operation="IDLE",
4288 current_operation_id=None
4289 )
tierno59d22d22018-09-25 18:10:19 +02004290 if exc:
tiernoa17d4f42020-04-28 09:59:23 +00004291 db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
4292 nslcmop_operation_state = "FAILED"
tierno59d22d22018-09-25 18:10:19 +02004293 if db_nsr:
tiernod6de1992018-10-11 13:05:52 +02004294 db_nsr_update["operational-status"] = old_operational_status
4295 db_nsr_update["config-status"] = old_config_status
4296 db_nsr_update["detailed-status"] = ""
4297 if scale_process:
4298 if "VCA" in scale_process:
4299 db_nsr_update["config-status"] = "failed"
4300 if "RO" in scale_process:
4301 db_nsr_update["operational-status"] = "failed"
4302 db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
4303 exc)
tiernoa17d4f42020-04-28 09:59:23 +00004304 else:
4305 error_description_nslcmop = None
4306 nslcmop_operation_state = "COMPLETED"
4307 db_nslcmop_update["detailed-status"] = "Done"
quilesj4cda56b2019-12-05 10:02:20 +00004308
tiernoa17d4f42020-04-28 09:59:23 +00004309 self._write_op_status(
4310 op_id=nslcmop_id,
4311 stage="",
4312 error_message=error_description_nslcmop,
4313 operation_state=nslcmop_operation_state,
4314 other_update=db_nslcmop_update,
4315 )
4316 if db_nsr:
4317 self._write_ns_status(
4318 nsr_id=nsr_id,
4319 ns_state=None,
4320 current_operation="IDLE",
4321 current_operation_id=None,
4322 other_update=db_nsr_update
4323 )
4324
tierno59d22d22018-09-25 18:10:19 +02004325 if nslcmop_operation_state:
4326 try:
4327 await self.msg.aiowrite("ns", "scaled", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00004328 "operationState": nslcmop_operation_state},
4329 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02004330 # if cooldown_time:
tiernod8323042019-08-09 11:32:23 +00004331 # await asyncio.sleep(cooldown_time, loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02004332 # await self.msg.aiowrite("ns","scaled-cooldown-time", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id})
4333 except Exception as e:
4334 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
4335 self.logger.debug(logging_text + "Exit")
4336 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
tierno89f82902020-07-03 14:52:28 +00004337
4338 async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip):
4339 if not self.prometheus:
4340 return
4341 # look if exist a file called 'prometheus*.j2' and
4342 artifact_content = self.fs.dir_ls(artifact_path)
4343 job_file = next((f for f in artifact_content if f.startswith("prometheus") and f.endswith(".j2")), None)
4344 if not job_file:
4345 return
4346 with self.fs.file_open((artifact_path, job_file), "r") as f:
4347 job_data = f.read()
4348
4349 # TODO get_service
4350 _, _, service = ee_id.partition(".") # remove prefix "namespace."
4351 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
4352 host_port = "80"
4353 vnfr_id = vnfr_id.replace("-", "")
4354 variables = {
4355 "JOB_NAME": vnfr_id,
4356 "TARGET_IP": target_ip,
4357 "EXPORTER_POD_IP": host_name,
4358 "EXPORTER_POD_PORT": host_port,
4359 }
4360 job_list = self.prometheus.parse_job(job_data, variables)
4361 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
4362 for job in job_list:
4363 if not isinstance(job.get("job_name"), str) or vnfr_id not in job["job_name"]:
4364 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
4365 job["nsr_id"] = nsr_id
4366 job_dict = {jl["job_name"]: jl for jl in job_list}
4367 if await self.prometheus.update(job_dict):
4368 return list(job_dict.keys())