blob: 24783107924ce24945feeb092f2ff0e7ab830590 [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
20import yaml
21import logging
22import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020023import traceback
David Garciad4816682019-12-09 14:57:43 +010024import json
tierno72ef84f2020-10-06 08:22:07 +000025from jinja2 import Environment, TemplateError, TemplateNotFound, StrictUndefined, UndefinedError
tierno59d22d22018-09-25 18:10:19 +020026
tierno77677d92019-08-22 13:46:35 +000027from osm_lcm import ROclient
tierno69f0d382020-05-07 13:08:09 +000028from osm_lcm.ng_ro import NgRoClient, NgRoException
tierno744303e2020-01-13 16:46:31 +000029from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
calvinosanch9f9c6f22019-11-04 13:37:39 +010030from n2vc.k8s_helm_conn import K8sHelmConnector
lloretgalleg18ebc3a2020-10-22 09:54:51 +000031from n2vc.k8s_helm3_conn import K8sHelm3Connector
Adam Israelbaacc302019-12-01 12:41:39 -050032from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020033
tierno27246d82018-09-27 15:59:09 +020034from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +020035from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +020036
37from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +000038from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +020039
tierno588547c2020-07-01 15:30:20 +000040from osm_lcm.lcm_helm_conn import LCMHelmConn
41
tierno27246d82018-09-27 15:59:09 +020042from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +020043from http import HTTPStatus
44from time import time
tierno27246d82018-09-27 15:59:09 +020045from uuid import uuid4
lloretgalleg7c121132020-07-08 07:53:22 +000046
tiernob996d942020-07-03 14:52:28 +000047from random import randint
tierno59d22d22018-09-25 18:10:19 +020048
tierno69f0d382020-05-07 13:08:09 +000049__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
tierno59d22d22018-09-25 18:10:19 +020050
51
class NsLcm(LcmBase):
    """Network Service (NS) lifecycle manager.

    Drives NS operations by talking to the Resource Orchestrator (RO/NG-RO)
    for the virtual infrastructure and to the VCA connectors (juju charms,
    helm execution environments, k8s clusters) for day-1/day-2 configuration.
    State is persisted in the common database ("nsrs", "vnfrs" collections).
    """

    # Time for a charm from the first time it is seen at blocked/error status
    # until it is marked as failed
    timeout_vca_on_error = 5 * 60
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment of a ns
    timeout_ns_terminate = 1800  # default global timeout for un-deployment of a ns
    timeout_charm_delete = 10 * 60
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_progress_primitive = 10 * 60  # timeout for some progress in a primitive execution

    # Sentinel return values used by the sub-operation lookup helpers
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
    def __init__(self, db, msg, fs, lcm_tasks, config, loop, prometheus=None):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param db: database connection object
        :param msg: message-bus client
        :param fs: filesystem storage client
        :param lcm_tasks: registry of asyncio tasks shared with the LCM main module
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop
        :param prometheus: optional prometheus helper to push metric jobs (may be None)
        :return: None
        """
        super().__init__(
            db=db,
            msg=msg,
            fs=fs,
            logger=logging.getLogger('lcm.ns')
        )

        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        # "ng" flag selects the next-generation RO client below
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local modifications do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (juju charms); DB updates are propagated
        # through the _on_update_n2vc_db callback
        self.n2vc = N2VCJujuConnector(
            db=self.db,
            fs=self.fs,
            log=self.logger,
            loop=self.loop,
            url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
            username=self.vca_config.get('user', None),
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db
        )

        # helm-based execution environment connector (no url/username: it
        # connects per-EE, not to a fixed controller)
        self.conn_helm_ee = LCMHelmConn(
            db=self.db,
            fs=self.fs,
            log=self.logger,
            loop=self.loop,
            url=None,
            username=None,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db
        )

        # k8s cluster connectors: helm v2, helm v3 and juju bundles
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            loop=self.loop,
            on_update_db=None,
            vca_config=self.vca_config,
        )

        # dispatch table: kdu deployment type -> k8s connector.
        # NOTE(review): plain "chart" maps to helm v3, not v2 — confirm intended.
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # dispatch table: VCA (execution environment) type -> connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee
        }

        self.prometheus = prometheus

        # create RO client (next-generation or classic, per configuration)
        if self.ng_ro:
            self.RO = NgRoClient(self.loop, **self.ro_config)
        else:
            self.RO = ROclient.ROClient(self.loop, **self.ro_config)
tierno2357f4e2020-10-19 16:38:59 +0000161 @staticmethod
162 def increment_ip_mac(ip_mac, vm_index=1):
163 if not isinstance(ip_mac, str):
164 return ip_mac
165 try:
166 # try with ipv4 look for last dot
167 i = ip_mac.rfind(".")
168 if i > 0:
169 i += 1
170 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
171 # try with ipv6 or mac look for last colon. Operate in hex
172 i = ip_mac.rfind(":")
173 if i > 0:
174 i += 1
175 # format in hex, len can be 2 for mac or 4 for ipv6
176 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(ip_mac[:i], int(ip_mac[i:], 16) + vm_index)
177 except Exception:
178 pass
179 return None
180
quilesj3655ae02019-12-12 16:08:35 +0000181 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200182
quilesj3655ae02019-12-12 16:08:35 +0000183 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
184
185 try:
186 # TODO filter RO descriptor fields...
187
188 # write to database
189 db_dict = dict()
190 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
191 db_dict['deploymentStatus'] = ro_descriptor
192 self.update_db_2("nsrs", nsrs_id, db_dict)
193
194 except Exception as e:
195 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
196
197 async def _on_update_n2vc_db(self, table, filter, path, updated_data):
198
quilesj69a722c2020-01-09 08:30:17 +0000199 # remove last dot from path (if exists)
200 if path.endswith('.'):
201 path = path[:-1]
202
quilesj3655ae02019-12-12 16:08:35 +0000203 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
204 # .format(table, filter, path, updated_data))
205
206 try:
207
208 nsr_id = filter.get('_id')
209
210 # read ns record from database
211 nsr = self.db.get_one(table='nsrs', q_filter=filter)
212 current_ns_status = nsr.get('nsState')
213
214 # get vca status for NS
quilesj69a722c2020-01-09 08:30:17 +0000215 status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
quilesj3655ae02019-12-12 16:08:35 +0000216
217 # vcaStatus
218 db_dict = dict()
219 db_dict['vcaStatus'] = status_dict
220
221 # update configurationStatus for this VCA
222 try:
223 vca_index = int(path[path.rfind(".")+1:])
224
225 vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
226 vca_status = vca_list[vca_index].get('status')
227
228 configuration_status_list = nsr.get('configurationStatus')
229 config_status = configuration_status_list[vca_index].get('status')
230
231 if config_status == 'BROKEN' and vca_status != 'failed':
232 db_dict['configurationStatus'][vca_index] = 'READY'
233 elif config_status != 'BROKEN' and vca_status == 'failed':
234 db_dict['configurationStatus'][vca_index] = 'BROKEN'
235 except Exception as e:
236 # not update configurationStatus
237 self.logger.debug('Error updating vca_index (ignore): {}'.format(e))
238
239 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
240 # if nsState = 'DEGRADED' check if all is OK
241 is_degraded = False
242 if current_ns_status in ('READY', 'DEGRADED'):
243 error_description = ''
244 # check machines
245 if status_dict.get('machines'):
246 for machine_id in status_dict.get('machines'):
247 machine = status_dict.get('machines').get(machine_id)
248 # check machine agent-status
249 if machine.get('agent-status'):
250 s = machine.get('agent-status').get('status')
251 if s != 'started':
252 is_degraded = True
253 error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
254 # check machine instance status
255 if machine.get('instance-status'):
256 s = machine.get('instance-status').get('status')
257 if s != 'running':
258 is_degraded = True
259 error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
260 # check applications
261 if status_dict.get('applications'):
262 for app_id in status_dict.get('applications'):
263 app = status_dict.get('applications').get(app_id)
264 # check application status
265 if app.get('status'):
266 s = app.get('status').get('status')
267 if s != 'active':
268 is_degraded = True
269 error_description += 'application {} status={} ; '.format(app_id, s)
270
271 if error_description:
272 db_dict['errorDescription'] = error_description
273 if current_ns_status == 'READY' and is_degraded:
274 db_dict['nsState'] = 'DEGRADED'
275 if current_ns_status == 'DEGRADED' and not is_degraded:
276 db_dict['nsState'] = 'READY'
277
278 # write to database
279 self.update_db_2("nsrs", nsr_id, db_dict)
280
tierno51183952020-04-03 15:48:18 +0000281 except (asyncio.CancelledError, asyncio.TimeoutError):
282 raise
quilesj3655ae02019-12-12 16:08:35 +0000283 except Exception as e:
284 self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200285
tierno72ef84f2020-10-06 08:22:07 +0000286 @staticmethod
287 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
288 try:
289 env = Environment(undefined=StrictUndefined)
290 template = env.from_string(cloud_init_text)
291 return template.render(additional_params or {})
292 except UndefinedError as e:
293 raise LcmException("Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
294 "file, must be provided in the instantiation parameters inside the "
295 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id))
296 except (TemplateError, TemplateNotFound) as e:
297 raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
298 format(vnfd_id, vdu_id, e))
299
300 def _get_cloud_init(self, vdu, vnfd):
301 try:
302 cloud_init_content = cloud_init_file = None
303 if vdu.get("cloud-init-file"):
304 base_folder = vnfd["_admin"]["storage"]
305 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
306 vdu["cloud-init-file"])
307 with self.fs.file_open(cloud_init_file, "r") as ci_file:
308 cloud_init_content = ci_file.read()
309 elif vdu.get("cloud-init"):
310 cloud_init_content = vdu["cloud-init"]
311
312 return cloud_init_content
313 except FsException as e:
314 raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
315 format(vnfd["id"], vdu["id"], cloud_init_file, e))
316
317 def _get_osm_params(self, db_vnfr, vdu_id=None, vdu_count_index=0):
318 osm_params = {x.replace("-", "_"): db_vnfr[x] for x in ("ip-address", "vim-account-id", "vnfd-id", "vnfd-ref")
319 if db_vnfr.get(x) is not None}
320 osm_params["ns_id"] = db_vnfr["nsr-id-ref"]
321 osm_params["vnf_id"] = db_vnfr["_id"]
322 osm_params["member_vnf_index"] = db_vnfr["member-vnf-index-ref"]
323 if db_vnfr.get("vdur"):
324 osm_params["vdu"] = {}
325 for vdur in db_vnfr["vdur"]:
326 vdu = {
327 "count_index": vdur["count-index"],
328 "vdu_id": vdur["vdu-id-ref"],
329 "interfaces": {}
330 }
331 if vdur.get("ip-address"):
332 vdu["ip_address"] = vdur["ip-address"]
333 for iface in vdur["interfaces"]:
334 vdu["interfaces"][iface["name"]] = \
335 {x.replace("-", "_"): iface[x] for x in ("mac-address", "ip-address", "vnf-vld-id", "name")
336 if iface.get(x) is not None}
337 vdu_id_index = "{}-{}".format(vdur["vdu-id-ref"], vdur["count-index"])
338 osm_params["vdu"][vdu_id_index] = vdu
339 if vdu_id:
340 osm_params["vdu_id"] = vdu_id
341 osm_params["count_index"] = vdu_count_index
342 return osm_params
343
344 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
345 vdur = next(vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"])
346 additional_params = vdur.get("additionalParams")
347 return self._format_additional_params(additional_params)
348
gcalvino35be9152018-12-20 09:33:12 +0100349 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
tierno59d22d22018-09-25 18:10:19 +0200350 """
351 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
352 :param vnfd: input vnfd
353 :param new_id: overrides vnf id if provided
tierno8a518872018-12-21 13:42:14 +0000354 :param additionalParams: Instantiation params for VNFs provided
gcalvino35be9152018-12-20 09:33:12 +0100355 :param nsrId: Id of the NSR
tierno59d22d22018-09-25 18:10:19 +0200356 :return: copy of vnfd
357 """
tierno72ef84f2020-10-06 08:22:07 +0000358 vnfd_RO = deepcopy(vnfd)
359 # remove unused by RO configuration, monitoring, scaling and internal keys
360 vnfd_RO.pop("_id", None)
361 vnfd_RO.pop("_admin", None)
362 vnfd_RO.pop("vnf-configuration", None)
363 vnfd_RO.pop("monitoring-param", None)
364 vnfd_RO.pop("scaling-group-descriptor", None)
365 vnfd_RO.pop("kdu", None)
366 vnfd_RO.pop("k8s-cluster", None)
367 if new_id:
368 vnfd_RO["id"] = new_id
tierno8a518872018-12-21 13:42:14 +0000369
tierno72ef84f2020-10-06 08:22:07 +0000370 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
371 for vdu in get_iterable(vnfd_RO, "vdu"):
372 vdu.pop("cloud-init-file", None)
373 vdu.pop("cloud-init", None)
374 return vnfd_RO
tierno59d22d22018-09-25 18:10:19 +0200375
tierno2357f4e2020-10-19 16:38:59 +0000376 @staticmethod
377 def ip_profile_2_RO(ip_profile):
378 RO_ip_profile = deepcopy(ip_profile)
379 if "dns-server" in RO_ip_profile:
380 if isinstance(RO_ip_profile["dns-server"], list):
381 RO_ip_profile["dns-address"] = []
382 for ds in RO_ip_profile.pop("dns-server"):
383 RO_ip_profile["dns-address"].append(ds['address'])
384 else:
385 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
386 if RO_ip_profile.get("ip-version") == "ipv4":
387 RO_ip_profile["ip-version"] = "IPv4"
388 if RO_ip_profile.get("ip-version") == "ipv6":
389 RO_ip_profile["ip-version"] = "IPv6"
390 if "dhcp-params" in RO_ip_profile:
391 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
392 return RO_ip_profile
393
    def _ns_params_2_RO(self, ns_params, nsd, vnfd_dict, db_vnfrs, n2vc_key_list):
        """
        Creates a RO ns descriptor from OSM ns_instantiate params
        :param ns_params: OSM instantiate params
        :param nsd: NS descriptor (used to map member-vnf-index to vnfd-id-ref)
        :param vnfd_dict: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index. {member-vnf-index: {vnfr_object}, ...}
        :param n2vc_key_list: ssh public keys to inject on vdus that need VCA access
        :return: The RO ns descriptor, or None if ns_params is empty
        :raises LcmException: on disabled VIM/WIM or inconsistent instantiation parameters
        """
        # caches: vim/wim account uuid -> RO id, to avoid repeated db lookups
        vim_2_RO = {}
        wim_2_RO = {}
        # TODO feature 1417: Check that no instantiation is set over PDU
        # check if PDU forces a concrete vim-network-id and add it
        # check if PDU contains a SDN-assist info (dpid, switch, port) and pass it to RO

        def vim_account_2_RO(vim_account):
            # resolve (and cache) the RO datacenter id of an OSM vim account
            if vim_account in vim_2_RO:
                return vim_2_RO[vim_account]

            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
            if db_vim["_admin"]["operationalState"] != "ENABLED":
                raise LcmException("VIM={} is not available. operationalState={}".format(
                    vim_account, db_vim["_admin"]["operationalState"]))
            RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
            vim_2_RO[vim_account] = RO_vim_id
            return RO_vim_id

        def wim_account_2_RO(wim_account):
            # resolve (and cache) the RO account id of an OSM wim account;
            # non-string values (e.g. None) are passed through unchanged
            if isinstance(wim_account, str):
                if wim_account in wim_2_RO:
                    return wim_2_RO[wim_account]

                db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
                if db_wim["_admin"]["operationalState"] != "ENABLED":
                    raise LcmException("WIM={} is not available. operationalState={}".format(
                        wim_account, db_wim["_admin"]["operationalState"]))
                RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
                wim_2_RO[wim_account] = RO_wim_id
                return RO_wim_id
            else:
                return wim_account

        if not ns_params:
            return None
        RO_ns_params = {
            # "name": ns_params["nsName"],
            # "description": ns_params.get("nsDescription"),
            "datacenter": vim_account_2_RO(ns_params["vimAccountId"]),
            "wim_account": wim_account_2_RO(ns_params.get("wimAccountId")),
            # "scenario": ns_params["nsdId"],
        }
        # set vim_account of each vnf if different from general vim_account.
        # Get this information from <vnfr> database content, key vim-account-id
        # Vim account can be set by placement_engine and it may be different from
        # the instantiate parameters (vnfs.member-vnf-index.datacenter).
        for vnf_index, vnfr in db_vnfrs.items():
            if vnfr.get("vim-account-id") and vnfr["vim-account-id"] != ns_params["vimAccountId"]:
                populate_dict(RO_ns_params, ("vnfs", vnf_index, "datacenter"), vim_account_2_RO(vnfr["vim-account-id"]))

        # ssh key injection: find the vdus that VCA must reach over ssh and
        # attach the n2vc public keys to them
        n2vc_key_list = n2vc_key_list or []
        for vnfd_ref, vnfd in vnfd_dict.items():
            vdu_needed_access = []
            mgmt_cp = None
            if vnfd.get("vnf-configuration"):
                ssh_required = deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required"))
                if ssh_required and vnfd.get("mgmt-interface"):
                    if vnfd["mgmt-interface"].get("vdu-id"):
                        vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
                    elif vnfd["mgmt-interface"].get("cp"):
                        # mgmt interface given as connection point: resolve to a vdu below
                        mgmt_cp = vnfd["mgmt-interface"]["cp"]

            for vdu in vnfd.get("vdu", ()):
                if vdu.get("vdu-configuration"):
                    ssh_required = deep_get(vdu, ("vdu-configuration", "config-access", "ssh-access", "required"))
                    if ssh_required:
                        vdu_needed_access.append(vdu["id"])
                elif mgmt_cp:
                    # first vdu whose interface exposes the mgmt connection point
                    for vdu_interface in vdu.get("interface"):
                        if vdu_interface.get("external-connection-point-ref") and \
                                vdu_interface["external-connection-point-ref"] == mgmt_cp:
                            vdu_needed_access.append(vdu["id"])
                            mgmt_cp = None
                            break

            if vdu_needed_access:
                for vnf_member in nsd.get("constituent-vnfd"):
                    if vnf_member["vnfd-id-ref"] != vnfd_ref:
                        continue
                    for vdu in vdu_needed_access:
                        populate_dict(RO_ns_params,
                                      ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
                                      n2vc_key_list)
            # cloud init: render one entry per vdu replica with its own OSM params
            for vdu in get_iterable(vnfd, "vdu"):
                cloud_init_text = self._get_cloud_init(vdu, vnfd)
                if not cloud_init_text:
                    continue
                for vnf_member in nsd.get("constituent-vnfd"):
                    if vnf_member["vnfd-id-ref"] != vnfd_ref:
                        continue
                    db_vnfr = db_vnfrs[vnf_member["member-vnf-index"]]
                    additional_params = self._get_vdu_additional_params(db_vnfr, vdu["id"]) or {}

                    cloud_init_list = []
                    for vdu_index in range(0, int(vdu.get("count", 1))):
                        additional_params["OSM"] = self._get_osm_params(db_vnfr, vdu["id"], vdu_index)
                        cloud_init_list.append(self._parse_cloud_init(cloud_init_text, additional_params, vnfd["id"],
                                                                      vdu["id"]))
                    populate_dict(RO_ns_params,
                                  ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu["id"], "cloud_init"),
                                  cloud_init_list)

        if ns_params.get("vduImage"):
            RO_ns_params["vduImage"] = ns_params["vduImage"]

        if ns_params.get("ssh_keys"):
            RO_ns_params["cloud-config"] = {"key-pairs": ns_params["ssh_keys"]}
        # per-vnf instantiation parameters (volumes, interface addresses, internal vlds)
        for vnf_params in get_iterable(ns_params, "vnf"):
            for constituent_vnfd in nsd["constituent-vnfd"]:
                if constituent_vnfd["member-vnf-index"] == vnf_params["member-vnf-index"]:
                    vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                    break
            else:
                raise LcmException("Invalid instantiate parameter vnf:member-vnf-index={} is not present at nsd:"
                                   "constituent-vnfd".format(vnf_params["member-vnf-index"]))

            for vdu_params in get_iterable(vnf_params, "vdu"):
                # TODO feature 1417: check that this VDU exist and it is not a PDU
                if vdu_params.get("volume"):
                    for volume_params in vdu_params["volume"]:
                        if volume_params.get("vim-volume-id"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "devices", volume_params["name"], "vim_id"),
                                          volume_params["vim-volume-id"])
                if vdu_params.get("interface"):
                    for interface_params in vdu_params["interface"]:
                        if interface_params.get("ip-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "ip_address"),
                                          interface_params["ip-address"])
                        if interface_params.get("mac-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "mac_address"),
                                          interface_params["mac-address"])
                        if interface_params.get("floating-ip-required"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "floating-ip"),
                                          interface_params["floating-ip-required"])

            for internal_vld_params in get_iterable(vnf_params, "internal-vld"):
                if internal_vld_params.get("vim-network-name"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-name"),
                                  internal_vld_params["vim-network-name"])
                if internal_vld_params.get("vim-network-id"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-id"),
                                  internal_vld_params["vim-network-id"])
                if internal_vld_params.get("ip-profile"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "ip-profile"),
                                  self.ip_profile_2_RO(internal_vld_params["ip-profile"]))
                if internal_vld_params.get("provider-network"):

                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "provider-network"),
                                  internal_vld_params["provider-network"].copy())

                for icp_params in get_iterable(internal_vld_params, "internal-connection-point"):
                    # look for interface
                    iface_found = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for vdu_interface in vdu_descriptor["interface"]:
                            if vdu_interface.get("internal-connection-point-ref") == icp_params["id-ref"]:
                                if icp_params.get("ip-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "ip_address"),
                                                  icp_params["ip-address"])

                                if icp_params.get("mac-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "mac_address"),
                                                  icp_params["mac-address"])
                                iface_found = True
                                break
                        if iface_found:
                            break
                    else:
                        # for/else: no vdu interface referenced this connection point
                        raise LcmException("Invalid instantiate parameter vnf:member-vnf-index[{}]:"
                                           "internal-vld:id-ref={} is not present at vnfd:internal-"
                                           "connection-point".format(vnf_params["member-vnf-index"],
                                                                     icp_params["id-ref"]))

        # ns-level vld parameters (ip profiles, wim, netmaps, connection points)
        for vld_params in get_iterable(ns_params, "vld"):
            if "ip-profile" in vld_params:
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"),
                              self.ip_profile_2_RO(vld_params["ip-profile"]))

            if vld_params.get("provider-network"):

                populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"),
                              vld_params["provider-network"].copy())

            # (trailing comma below makes the statement a discarded 1-tuple; harmless)
            if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None:
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"),
                              wim_account_2_RO(vld_params["wimAccountId"])),
            if vld_params.get("vim-network-name"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-name"], dict):
                    # one netmap entry per vim account
                    for vim_account, vim_net in vld_params["vim-network-name"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)

            if vld_params.get("vim-network-id"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-id"], dict):
                    for vim_account, vim_net in vld_params["vim-network-id"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-id"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
            if vld_params.get("ns-net"):
                if isinstance(vld_params["ns-net"], dict):
                    # NOTE(review): only the last dict entry survives the loop — confirm intended
                    for vld_id, instance_scenario_id in vld_params["ns-net"].items():
                        RO_vld_ns_net = {"instance_scenario_id": instance_scenario_id, "osm_id": vld_id}
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "use-network"), RO_vld_ns_net)
            if "vnfd-connection-point-ref" in vld_params:
                for cp_params in vld_params["vnfd-connection-point-ref"]:
                    # look for interface
                    for constituent_vnfd in nsd["constituent-vnfd"]:
                        if constituent_vnfd["member-vnf-index"] == cp_params["member-vnf-index-ref"]:
                            vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={} "
                            "is not present at nsd:constituent-vnfd".format(cp_params["member-vnf-index-ref"]))
                    match_cp = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for interface_descriptor in vdu_descriptor["interface"]:
                            if interface_descriptor.get("external-connection-point-ref") == \
                                    cp_params["vnfd-connection-point-ref"]:
                                match_cp = True
                                break
                        if match_cp:
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={}:"
                            "vnfd-connection-point-ref={} is not present at vnfd={}".format(
                                cp_params["member-vnf-index-ref"],
                                cp_params["vnfd-connection-point-ref"],
                                vnf_descriptor["id"]))
                    if cp_params.get("ip-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "ip_address"),
                                      cp_params["ip-address"])
                    if cp_params.get("mac-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "mac_address"),
                                      cp_params["mac-address"])
        return RO_ns_params
672
tierno2357f4e2020-10-19 16:38:59 +0000673 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
tierno27246d82018-09-27 15:59:09 +0200674
tierno2357f4e2020-10-19 16:38:59 +0000675 db_vdu_push_list = []
676 db_update = {"_admin.modified": time()}
677 if vdu_create:
678 for vdu_id, vdu_count in vdu_create.items():
679 vdur = next((vdur for vdur in reversed(db_vnfr["vdur"]) if vdur["vdu-id-ref"] == vdu_id), None)
680 if not vdur:
681 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".
682 format(vdu_id))
683
684 for count in range(vdu_count):
685 vdur_copy = deepcopy(vdur)
686 vdur_copy["status"] = "BUILD"
687 vdur_copy["status-detailed"] = None
688 vdur_copy["ip-address"]: None
tierno683eb392020-09-25 12:33:15 +0000689 vdur_copy["_id"] = str(uuid4())
tierno2357f4e2020-10-19 16:38:59 +0000690 vdur_copy["count-index"] += count + 1
691 vdur_copy["id"] = "{}-{}".format(vdur_copy["vdu-id-ref"], vdur_copy["count-index"])
692 vdur_copy.pop("vim_info", None)
693 for iface in vdur_copy["interfaces"]:
694 if iface.get("fixed-ip"):
695 iface["ip-address"] = self.increment_ip_mac(iface["ip-address"], count+1)
696 else:
697 iface.pop("ip-address", None)
698 if iface.get("fixed-mac"):
699 iface["mac-address"] = self.increment_ip_mac(iface["mac-address"], count+1)
700 else:
701 iface.pop("mac-address", None)
702 iface.pop("mgmt_vnf", None) # only first vdu can be managment of vnf
703 db_vdu_push_list.append(vdur_copy)
704 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
tierno27246d82018-09-27 15:59:09 +0200705 if vdu_delete:
tierno2357f4e2020-10-19 16:38:59 +0000706 for vdu_id, vdu_count in vdu_delete.items():
707 if mark_delete:
708 indexes_to_delete = [iv[0] for iv in enumerate(db_vnfr["vdur"]) if iv[1]["vdu-id-ref"] == vdu_id]
709 db_update.update({"vdur.{}.status".format(i): "DELETING" for i in indexes_to_delete[-vdu_count:]})
710 else:
711 # it must be deleted one by one because common.db does not allow otherwise
712 vdus_to_delete = [v for v in reversed(db_vnfr["vdur"]) if v["vdu-id-ref"] == vdu_id]
713 for vdu in vdus_to_delete[:vdu_count]:
714 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, None, pull={"vdur": {"_id": vdu["_id"]}})
715 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
716 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
717 # modify passed dictionary db_vnfr
718 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
719 db_vnfr["vdur"] = db_vnfr_["vdur"]
tierno27246d82018-09-27 15:59:09 +0200720
tiernof578e552018-11-08 19:07:20 +0100721 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
722 """
723 Updates database nsr with the RO info for the created vld
724 :param ns_update_nsr: dictionary to be filled with the updated info
725 :param db_nsr: content of db_nsr. This is also modified
726 :param nsr_desc_RO: nsr descriptor from RO
727 :return: Nothing, LcmException is raised on errors
728 """
729
730 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
731 for net_RO in get_iterable(nsr_desc_RO, "nets"):
732 if vld["id"] != net_RO.get("ns_net_osm_id"):
733 continue
734 vld["vim-id"] = net_RO.get("vim_net_id")
735 vld["name"] = net_RO.get("vim_name")
736 vld["status"] = net_RO.get("status")
737 vld["status-detailed"] = net_RO.get("error_msg")
738 ns_update_nsr["vld.{}".format(vld_index)] = vld
739 break
740 else:
741 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
742
tiernoe876f672020-02-13 14:34:48 +0000743 def set_vnfr_at_error(self, db_vnfrs, error_text):
744 try:
745 for db_vnfr in db_vnfrs.values():
746 vnfr_update = {"status": "ERROR"}
747 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
748 if "status" not in vdur:
749 vdur["status"] = "ERROR"
750 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
751 if error_text:
752 vdur["status-detailed"] = str(error_text)
753 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
754 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
755 except DbException as e:
756 self.logger.error("Cannot update vnf. {}".format(e))
757
tierno59d22d22018-09-25 18:10:19 +0200758 def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
759 """
760 Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
tierno27246d82018-09-27 15:59:09 +0200761 :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
762 :param nsr_desc_RO: nsr descriptor from RO
763 :return: Nothing, LcmException is raised on errors
tierno59d22d22018-09-25 18:10:19 +0200764 """
765 for vnf_index, db_vnfr in db_vnfrs.items():
766 for vnf_RO in nsr_desc_RO["vnfs"]:
tierno27246d82018-09-27 15:59:09 +0200767 if vnf_RO["member_vnf_index"] != vnf_index:
768 continue
769 vnfr_update = {}
tiernof578e552018-11-08 19:07:20 +0100770 if vnf_RO.get("ip_address"):
tierno1674de82019-04-09 13:03:14 +0000771 db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
tiernof578e552018-11-08 19:07:20 +0100772 elif not db_vnfr.get("ip-address"):
tierno0ec0c272020-02-19 17:43:01 +0000773 if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address
774 raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))
tierno59d22d22018-09-25 18:10:19 +0200775
tierno27246d82018-09-27 15:59:09 +0200776 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
777 vdur_RO_count_index = 0
778 if vdur.get("pdu-type"):
779 continue
780 for vdur_RO in get_iterable(vnf_RO, "vms"):
781 if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
782 continue
783 if vdur["count-index"] != vdur_RO_count_index:
784 vdur_RO_count_index += 1
785 continue
786 vdur["vim-id"] = vdur_RO.get("vim_vm_id")
tierno1674de82019-04-09 13:03:14 +0000787 if vdur_RO.get("ip_address"):
788 vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
tierno274ed572019-04-04 13:33:27 +0000789 else:
790 vdur["ip-address"] = None
tierno27246d82018-09-27 15:59:09 +0200791 vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
792 vdur["name"] = vdur_RO.get("vim_name")
793 vdur["status"] = vdur_RO.get("status")
794 vdur["status-detailed"] = vdur_RO.get("error_msg")
795 for ifacer in get_iterable(vdur, "interfaces"):
796 for interface_RO in get_iterable(vdur_RO, "interfaces"):
797 if ifacer["name"] == interface_RO.get("internal_name"):
798 ifacer["ip-address"] = interface_RO.get("ip_address")
799 ifacer["mac-address"] = interface_RO.get("mac_address")
800 break
801 else:
802 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
quilesj7e13aeb2019-10-08 13:34:55 +0200803 "from VIM info"
804 .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
tierno27246d82018-09-27 15:59:09 +0200805 vnfr_update["vdur.{}".format(vdu_index)] = vdur
806 break
807 else:
tierno15b1cf12019-08-29 13:21:40 +0000808 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
809 "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))
tiernof578e552018-11-08 19:07:20 +0100810
811 for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
812 for net_RO in get_iterable(nsr_desc_RO, "nets"):
813 if vld["id"] != net_RO.get("vnf_net_osm_id"):
814 continue
815 vld["vim-id"] = net_RO.get("vim_net_id")
816 vld["name"] = net_RO.get("vim_name")
817 vld["status"] = net_RO.get("status")
818 vld["status-detailed"] = net_RO.get("error_msg")
819 vnfr_update["vld.{}".format(vld_index)] = vld
820 break
821 else:
tierno15b1cf12019-08-29 13:21:40 +0000822 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
tiernof578e552018-11-08 19:07:20 +0100823 vnf_index, vld["id"]))
824
tierno27246d82018-09-27 15:59:09 +0200825 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
826 break
tierno59d22d22018-09-25 18:10:19 +0200827
828 else:
tierno15b1cf12019-08-29 13:21:40 +0000829 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
tierno59d22d22018-09-25 18:10:19 +0200830
tierno5ee02052019-12-05 19:55:02 +0000831 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000832 """
833 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000834 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000835 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
836 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
837 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
838 """
tierno5ee02052019-12-05 19:55:02 +0000839 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
840 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000841 mapping = {}
842 ns_config_info = {"osm-config-mapping": mapping}
843 for vca in vca_deployed_list:
844 if not vca["member-vnf-index"]:
845 continue
846 if not vca["vdu_id"]:
847 mapping[vca["member-vnf-index"]] = vca["application"]
848 else:
849 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
850 vca["application"]
851 return ns_config_info
852
853 @staticmethod
tiernoa278b842020-07-08 15:33:55 +0000854 def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed, ee_descriptor_id):
tiernoc3f2a822019-11-05 13:45:04 +0000855 """
856 Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal
857 primitives as verify-ssh-credentials, or config when needed
858 :param desc_primitive_list: information of the descriptor
859 :param vca_deployed: information of the deployed, needed for known if it is related to an NS, VNF, VDU and if
860 this element contains a ssh public key
tiernoa278b842020-07-08 15:33:55 +0000861 :param ee_descriptor_id: execution environment descriptor id. It is the value of
862 XXX_configuration.execution-environment-list.INDEX.id; it can be None
tiernoc3f2a822019-11-05 13:45:04 +0000863 :return: The modified list. Can ba an empty list, but always a list
864 """
tiernoa278b842020-07-08 15:33:55 +0000865
866 primitive_list = desc_primitive_list or []
867
868 # filter primitives by ee_id
869 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
870
871 # sort by 'seq'
872 if primitive_list:
873 primitive_list.sort(key=lambda val: int(val['seq']))
874
tiernoc3f2a822019-11-05 13:45:04 +0000875 # look for primitive config, and get the position. None if not present
876 config_position = None
877 for index, primitive in enumerate(primitive_list):
878 if primitive["name"] == "config":
879 config_position = index
880 break
881
882 # for NS, add always a config primitive if not present (bug 874)
883 if not vca_deployed["member-vnf-index"] and config_position is None:
884 primitive_list.insert(0, {"name": "config", "parameter": []})
885 config_position = 0
tiernoa278b842020-07-08 15:33:55 +0000886 # TODO revise if needed: for VNF/VDU add verify-ssh-credentials after config
tiernoc3f2a822019-11-05 13:45:04 +0000887 if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"):
888 primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []})
889 return primitive_list
890
tierno69f0d382020-05-07 13:08:09 +0000891 async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
892 n2vc_key_list, stage, start_deploy, timeout_ns_deploy):
tierno2357f4e2020-10-19 16:38:59 +0000893
894 db_vims = {}
895
896 def get_vim_account(vim_account_id):
897 nonlocal db_vims
898 if vim_account_id in db_vims:
899 return db_vims[vim_account_id]
900 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
901 db_vims[vim_account_id] = db_vim
902 return db_vim
903
904 # modify target_vld info with instantiation parameters
905 def parse_vld_instantiation_params(target_vim, target_vld, vld_params, target_sdn):
906 if vld_params.get("ip-profile"):
907 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params["ip-profile"]
908 if vld_params.get("provider-network"):
909 target_vld["vim_info"][target_vim]["provider_network"] = vld_params["provider-network"]
910 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
911 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params["provider-network"]["sdn-ports"]
912 if vld_params.get("wimAccountId"):
913 target_wim = "wim:{}".format(vld_params["wimAccountId"])
914 target_vld["vim_info"][target_wim] = {}
915 for param in ("vim-network-name", "vim-network-id"):
916 if vld_params.get(param):
917 if isinstance(vld_params[param], dict):
918 pass
919 # for vim_account, vim_net in vld_params[param].items():
920 # TODO populate vim_info RO_vld_sites.append({
921 else: # isinstance str
922 target_vld["vim_info"][target_vim][param.replace("-", "_")] = vld_params[param]
923 # TODO if vld_params.get("ns-net"):
924
tierno69f0d382020-05-07 13:08:09 +0000925 nslcmop_id = db_nslcmop["_id"]
926 target = {
927 "name": db_nsr["name"],
928 "ns": {"vld": []},
929 "vnf": [],
930 "image": deepcopy(db_nsr["image"]),
931 "flavor": deepcopy(db_nsr["flavor"]),
932 "action_id": nslcmop_id,
tierno2357f4e2020-10-19 16:38:59 +0000933 "cloud_init_content": {},
tierno69f0d382020-05-07 13:08:09 +0000934 }
935 for image in target["image"]:
tierno2357f4e2020-10-19 16:38:59 +0000936 image["vim_info"] = {}
tierno69f0d382020-05-07 13:08:09 +0000937 for flavor in target["flavor"]:
tierno2357f4e2020-10-19 16:38:59 +0000938 flavor["vim_info"] = {}
tierno69f0d382020-05-07 13:08:09 +0000939
tierno2357f4e2020-10-19 16:38:59 +0000940 if db_nslcmop.get("lcmOperationType") != "instantiate":
941 # get parameters of instantiation:
942 db_nslcmop_instantiate = self.db.get_list("nslcmops", {"nsInstanceId": db_nslcmop["nsInstanceId"],
943 "lcmOperationType": "instantiate"})[-1]
944 ns_params = db_nslcmop_instantiate.get("operationParams")
945 else:
946 ns_params = db_nslcmop.get("operationParams")
tierno69f0d382020-05-07 13:08:09 +0000947 ssh_keys = []
948 if ns_params.get("ssh_keys"):
949 ssh_keys += ns_params.get("ssh_keys")
950 if n2vc_key_list:
951 ssh_keys += n2vc_key_list
952
953 cp2target = {}
tierno2357f4e2020-10-19 16:38:59 +0000954 for vld_index, vld in enumerate(db_nsr.get("vld")):
955 target_vim = "vim:{}".format(ns_params["vimAccountId"])
956 target_vld = {
957 "id": vld["id"],
958 "name": vld["name"],
959 "mgmt-network": vld.get("mgmt-network", False),
960 "type": vld.get("type"),
961 "vim_info": {
962 target_vim: {"vim-network-name": vld.get("vim-network-name")}
963 }
964 }
965 # check if this network needs SDN assist
966 target_sdn = None
967 if vld.get("pci-interfaces"):
968 db_vim = get_vim_account(ns_params["vimAccountId"])
969 sdnc_id = db_vim["config"].get("sdn-controller")
970 if sdnc_id:
971 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
972 target_sdn = "sdn:{}".format(sdnc_id)
973 target_vld["vim_info"][target_sdn] = {
974 "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")}
975
976 nsd_vld = next(v for v in nsd["vld"] if v["id"] == vld["id"])
977 for cp in nsd_vld["vnfd-connection-point-ref"]:
tierno69f0d382020-05-07 13:08:09 +0000978 cp2target["member_vnf:{}.{}".format(cp["member-vnf-index-ref"], cp["vnfd-connection-point-ref"])] = \
979 "nsrs:{}:vld.{}".format(nsr_id, vld_index)
tierno2357f4e2020-10-19 16:38:59 +0000980
981 # check at nsd descriptor, if there is an ip-profile
982 vld_params = {}
983 if nsd_vld.get("ip-profile-ref"):
984 ip_profile = next(ipp for ipp in nsd["ip-profiles"] if ipp["name"] == nsd_vld["ip-profile-ref"])
985 vld_params["ip-profile"] = ip_profile["ip-profile-params"]
986 # update vld_params with instantiation params
987 vld_instantiation_params = next((v for v in get_iterable(ns_params, "vld")
988 if v["name"] in (vld["name"], vld["id"])), None)
989 if vld_instantiation_params:
990 vld_params.update(vld_instantiation_params)
991 parse_vld_instantiation_params(target_vim, target_vld, vld_params, target_sdn)
tierno69f0d382020-05-07 13:08:09 +0000992 target["ns"]["vld"].append(target_vld)
993 for vnfr in db_vnfrs.values():
994 vnfd = db_vnfds_ref[vnfr["vnfd-ref"]]
tierno2357f4e2020-10-19 16:38:59 +0000995 vnf_params = next((v for v in get_iterable(ns_params, "vnf")
996 if v["member-vnf-index"] == vnfr["member-vnf-index-ref"]), None)
tierno69f0d382020-05-07 13:08:09 +0000997 target_vnf = deepcopy(vnfr)
tierno2357f4e2020-10-19 16:38:59 +0000998 target_vim = "vim:{}".format(vnfr["vim-account-id"])
tierno69f0d382020-05-07 13:08:09 +0000999 for vld in target_vnf.get("vld", ()):
tierno2357f4e2020-10-19 16:38:59 +00001000 # check if connected to a ns.vld, to fill target'
tierno69f0d382020-05-07 13:08:09 +00001001 vnf_cp = next((cp for cp in vnfd.get("connection-point", ()) if
1002 cp.get("internal-vld-ref") == vld["id"]), None)
1003 if vnf_cp:
1004 ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"])
1005 if cp2target.get(ns_cp):
1006 vld["target"] = cp2target[ns_cp]
tierno2357f4e2020-10-19 16:38:59 +00001007 vld["vim_info"] = {target_vim: {"vim-network-name": vld.get("vim-network-name")}}
1008 # check if this network needs SDN assist
1009 target_sdn = None
1010 if vld.get("pci-interfaces"):
1011 db_vim = get_vim_account(vnfr["vim-account-id"])
1012 sdnc_id = db_vim["config"].get("sdn-controller")
1013 if sdnc_id:
1014 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1015 target_sdn = "sdn:{}".format(sdnc_id)
1016 vld["vim_info"][target_sdn] = {
1017 "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")}
tierno69f0d382020-05-07 13:08:09 +00001018
tierno2357f4e2020-10-19 16:38:59 +00001019 # check at vnfd descriptor, if there is an ip-profile
1020 vld_params = {}
1021 vnfd_vld = next(v for v in vnfd["internal-vld"] if v["id"] == vld["id"])
1022 if vnfd_vld.get("ip-profile-ref"):
1023 ip_profile = next(ipp for ipp in vnfd["ip-profiles"] if ipp["name"] == vnfd_vld["ip-profile-ref"])
1024 vld_params["ip-profile"] = ip_profile["ip-profile-params"]
1025 # update vld_params with instantiation params
1026 if vnf_params:
1027 vld_instantiation_params = next((v for v in get_iterable(vnf_params, "internal-vld")
1028 if v["name"] == vld["id"]), None)
1029 if vld_instantiation_params:
1030 vld_params.update(vld_instantiation_params)
1031 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1032
1033 vdur_list = []
tierno69f0d382020-05-07 13:08:09 +00001034 for vdur in target_vnf.get("vdur", ()):
tierno2357f4e2020-10-19 16:38:59 +00001035 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1036 continue # This vdu must not be created
1037 vdur["vim_info"] = {target_vim: {}}
tierno69f0d382020-05-07 13:08:09 +00001038 vdud_index, vdud = next(k for k in enumerate(vnfd["vdu"]) if k[1]["id"] == vdur["vdu-id-ref"])
tierno69f0d382020-05-07 13:08:09 +00001039
1040 if ssh_keys:
1041 if deep_get(vdud, ("vdu-configuration", "config-access", "ssh-access", "required")):
1042 vdur["ssh-keys"] = ssh_keys
1043 vdur["ssh-access-required"] = True
1044 elif deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required")) and \
1045 any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]):
1046 vdur["ssh-keys"] = ssh_keys
1047 vdur["ssh-access-required"] = True
1048
1049 # cloud-init
1050 if vdud.get("cloud-init-file"):
1051 vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file"))
tierno2357f4e2020-10-19 16:38:59 +00001052 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1053 if vdur["cloud-init"] not in target["cloud_init_content"]:
1054 base_folder = vnfd["_admin"]["storage"]
1055 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
1056 vdud.get("cloud-init-file"))
1057 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1058 target["cloud_init_content"][vdur["cloud-init"]] = ci_file.read()
tierno69f0d382020-05-07 13:08:09 +00001059 elif vdud.get("cloud-init"):
1060 vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], vdud_index)
tierno2357f4e2020-10-19 16:38:59 +00001061 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1062 target["cloud_init_content"][vdur["cloud-init"]] = vdud["cloud-init"]
1063 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1064 deploy_params_vdu = self._format_additional_params(vdur.get("additionalParams") or {})
1065 deploy_params_vdu["OSM"] = self._get_osm_params(vnfr, vdur["vdu-id-ref"], vdur["count-index"])
1066 vdur["additionalParams"] = deploy_params_vdu
tierno69f0d382020-05-07 13:08:09 +00001067
1068 # flavor
1069 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
tierno2357f4e2020-10-19 16:38:59 +00001070 if target_vim not in ns_flavor["vim_info"]:
1071 ns_flavor["vim_info"][target_vim] = {}
tierno69f0d382020-05-07 13:08:09 +00001072 # image
1073 ns_image = target["image"][int(vdur["ns-image-id"])]
tierno2357f4e2020-10-19 16:38:59 +00001074 if target_vim not in ns_image["vim_info"]:
1075 ns_image["vim_info"][target_vim] = {}
tierno69f0d382020-05-07 13:08:09 +00001076
tierno2357f4e2020-10-19 16:38:59 +00001077 vdur["vim_info"] = {target_vim: {}}
1078 # instantiation parameters
1079 # if vnf_params:
1080 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1081 # vdud["id"]), None)
1082 vdur_list.append(vdur)
1083 target_vnf["vdur"] = vdur_list
tierno69f0d382020-05-07 13:08:09 +00001084 target["vnf"].append(target_vnf)
1085
1086 desc = await self.RO.deploy(nsr_id, target)
1087 action_id = desc["action_id"]
tierno2357f4e2020-10-19 16:38:59 +00001088 await self._wait_ng_ro(nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage)
tierno69f0d382020-05-07 13:08:09 +00001089
1090 # Updating NSR
1091 db_nsr_update = {
1092 "_admin.deployed.RO.operational-status": "running",
1093 "detailed-status": " ".join(stage)
1094 }
1095 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1096 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1097 self._write_op_status(nslcmop_id, stage)
1098 self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id))
1099 return
1100
tierno2357f4e2020-10-19 16:38:59 +00001101 async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id=None, start_time=None, timeout=600, stage=None):
tierno69f0d382020-05-07 13:08:09 +00001102 detailed_status_old = None
1103 db_nsr_update = {}
tierno2357f4e2020-10-19 16:38:59 +00001104 start_time = start_time or time()
tierno69f0d382020-05-07 13:08:09 +00001105 while time() <= start_time + timeout:
1106 desc_status = await self.RO.status(nsr_id, action_id)
1107 if desc_status["status"] == "FAILED":
1108 raise NgRoException(desc_status["details"])
1109 elif desc_status["status"] == "BUILD":
tierno2357f4e2020-10-19 16:38:59 +00001110 if stage:
1111 stage[2] = "VIM: ({})".format(desc_status["details"])
tierno69f0d382020-05-07 13:08:09 +00001112 elif desc_status["status"] == "DONE":
tierno2357f4e2020-10-19 16:38:59 +00001113 if stage:
1114 stage[2] = "Deployed at VIM"
tierno69f0d382020-05-07 13:08:09 +00001115 break
1116 else:
1117 assert False, "ROclient.check_ns_status returns unknown {}".format(desc_status["status"])
tierno2357f4e2020-10-19 16:38:59 +00001118 if stage and nslcmop_id and stage[2] != detailed_status_old:
tierno69f0d382020-05-07 13:08:09 +00001119 detailed_status_old = stage[2]
1120 db_nsr_update["detailed-status"] = " ".join(stage)
1121 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1122 self._write_op_status(nslcmop_id, stage)
1123 await asyncio.sleep(5, loop=self.loop)
1124 else: # timeout_ns_deploy
1125 raise NgRoException("Timeout waiting ns to deploy")
1126
    async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
        """
        Terminates the ns at NG-RO: deploys an empty target (removing everything deployed) and
        then deletes the nsr record at RO. Progress is persisted at database.
        :param logging_text: prefix text to use at logging
        :param nsr_deployed: content of _admin.deployed  # NOTE(review): not referenced by this method
        :param nsr_id: nsr identity
        :param nslcmop_id: ns operation id; used as the RO action_id of the termination
        :param stage: list with 3 items [general stage, tasks, vim_specific]; item 2 is written here
        :return: None. LcmException is raised when the deletion at RO fails
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # deploying an empty target makes NG-RO remove every previously deployed item
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id))

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage)

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            # classify the failure: 404 means already deleted (not an error), 409 is a conflict,
            # anything else is a plain delete error
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id))
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e))
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e))

        # persist the final stage/status even when there were failures
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1178
tiernoe876f672020-02-13 14:34:48 +00001179 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
1180 n2vc_key_list, stage):
tiernoe95ed362020-04-23 08:24:57 +00001181 """
1182 Instantiate at RO
1183 :param logging_text: preffix text to use at logging
1184 :param nsr_id: nsr identity
1185 :param nsd: database content of ns descriptor
1186 :param db_nsr: database content of ns record
1187 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1188 :param db_vnfrs:
1189 :param db_vnfds_ref: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1190 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1191 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1192 :return: None or exception
1193 """
tiernoe876f672020-02-13 14:34:48 +00001194 try:
1195 db_nsr_update = {}
1196 RO_descriptor_number = 0 # number of descriptors created at RO
1197 vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
1198 nslcmop_id = db_nslcmop["_id"]
1199 start_deploy = time()
1200 ns_params = db_nslcmop.get("operationParams")
1201 if ns_params and ns_params.get("timeout_ns_deploy"):
1202 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1203 else:
1204 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001205
tiernoe876f672020-02-13 14:34:48 +00001206 # Check for and optionally request placement optimization. Database will be updated if placement activated
1207 stage[2] = "Waiting for Placement."
tierno8790a3d2020-04-23 22:49:52 +00001208 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1209 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1210 for vnfr in db_vnfrs.values():
1211 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1212 break
1213 else:
1214 ns_params["vimAccountId"] == vnfr["vim-account-id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001215
tierno69f0d382020-05-07 13:08:09 +00001216 if self.ng_ro:
1217 return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs,
1218 db_vnfds_ref, n2vc_key_list, stage, start_deploy,
1219 timeout_ns_deploy)
tiernoe876f672020-02-13 14:34:48 +00001220 # deploy RO
tiernoe876f672020-02-13 14:34:48 +00001221 # get vnfds, instantiate at RO
1222 for c_vnf in nsd.get("constituent-vnfd", ()):
1223 member_vnf_index = c_vnf["member-vnf-index"]
1224 vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
1225 vnfd_ref = vnfd["id"]
quilesj7e13aeb2019-10-08 13:34:55 +02001226
tiernoe876f672020-02-13 14:34:48 +00001227 stage[2] = "Creating vnfd='{}' member_vnf_index='{}' at RO".format(vnfd_ref, member_vnf_index)
1228 db_nsr_update["detailed-status"] = " ".join(stage)
1229 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1230 self._write_op_status(nslcmop_id, stage)
calvinosanch9f9c6f22019-11-04 13:37:39 +01001231
tiernoe876f672020-02-13 14:34:48 +00001232 # self.logger.debug(logging_text + stage[2])
1233 vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, member_vnf_index[:23])
1234 vnf_index_2_RO_id[member_vnf_index] = vnfd_id_RO
1235 RO_descriptor_number += 1
1236
1237 # look position at deployed.RO.vnfd if not present it will be appended at the end
1238 for index, vnf_deployed in enumerate(db_nsr["_admin"]["deployed"]["RO"]["vnfd"]):
1239 if vnf_deployed["member-vnf-index"] == member_vnf_index:
1240 break
1241 else:
1242 index = len(db_nsr["_admin"]["deployed"]["RO"]["vnfd"])
1243 db_nsr["_admin"]["deployed"]["RO"]["vnfd"].append(None)
1244
1245 # look if present
1246 RO_update = {"member-vnf-index": member_vnf_index}
1247 vnfd_list = await self.RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
1248 if vnfd_list:
1249 RO_update["id"] = vnfd_list[0]["uuid"]
1250 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' exists at RO. Using RO_id={}".
1251 format(vnfd_ref, member_vnf_index, vnfd_list[0]["uuid"]))
1252 else:
1253 vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO, db_vnfrs[c_vnf["member-vnf-index"]].
1254 get("additionalParamsForVnf"), nsr_id)
1255 desc = await self.RO.create("vnfd", descriptor=vnfd_RO)
1256 RO_update["id"] = desc["uuid"]
1257 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' created at RO. RO_id={}".format(
1258 vnfd_ref, member_vnf_index, desc["uuid"]))
1259 db_nsr_update["_admin.deployed.RO.vnfd.{}".format(index)] = RO_update
1260 db_nsr["_admin"]["deployed"]["RO"]["vnfd"][index] = RO_update
1261
1262 # create nsd at RO
1263 nsd_ref = nsd["id"]
1264
1265 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1266 db_nsr_update["detailed-status"] = " ".join(stage)
1267 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1268 self._write_op_status(nslcmop_id, stage)
1269
1270 # self.logger.debug(logging_text + stage[2])
1271 RO_osm_nsd_id = "{}.{}.{}".format(nsr_id, RO_descriptor_number, nsd_ref[:23])
tiernod8323042019-08-09 11:32:23 +00001272 RO_descriptor_number += 1
tiernoe876f672020-02-13 14:34:48 +00001273 nsd_list = await self.RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
1274 if nsd_list:
1275 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = nsd_list[0]["uuid"]
1276 self.logger.debug(logging_text + "nsd={} exists at RO. Using RO_id={}".format(
1277 nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001278 else:
tiernoe876f672020-02-13 14:34:48 +00001279 nsd_RO = deepcopy(nsd)
1280 nsd_RO["id"] = RO_osm_nsd_id
1281 nsd_RO.pop("_id", None)
1282 nsd_RO.pop("_admin", None)
1283 for c_vnf in nsd_RO.get("constituent-vnfd", ()):
1284 member_vnf_index = c_vnf["member-vnf-index"]
1285 c_vnf["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
1286 for c_vld in nsd_RO.get("vld", ()):
1287 for cp in c_vld.get("vnfd-connection-point-ref", ()):
1288 member_vnf_index = cp["member-vnf-index-ref"]
1289 cp["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
tiernod8323042019-08-09 11:32:23 +00001290
tiernoe876f672020-02-13 14:34:48 +00001291 desc = await self.RO.create("nsd", descriptor=nsd_RO)
1292 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1293 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = desc["uuid"]
1294 self.logger.debug(logging_text + "nsd={} created at RO. RO_id={}".format(nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001295 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1296
tiernoe876f672020-02-13 14:34:48 +00001297 # Crate ns at RO
1298 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1299 db_nsr_update["detailed-status"] = " ".join(stage)
1300 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1301 self._write_op_status(nslcmop_id, stage)
tiernod8323042019-08-09 11:32:23 +00001302
tiernoe876f672020-02-13 14:34:48 +00001303 # if present use it unless in error status
1304 RO_nsr_id = deep_get(db_nsr, ("_admin", "deployed", "RO", "nsr_id"))
1305 if RO_nsr_id:
1306 try:
1307 stage[2] = "Looking for existing ns at RO"
1308 db_nsr_update["detailed-status"] = " ".join(stage)
1309 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1310 self._write_op_status(nslcmop_id, stage)
1311 # self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1312 desc = await self.RO.show("ns", RO_nsr_id)
tiernod8323042019-08-09 11:32:23 +00001313
tiernoe876f672020-02-13 14:34:48 +00001314 except ROclient.ROClientException as e:
1315 if e.http_code != HTTPStatus.NOT_FOUND:
1316 raise
1317 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1318 if RO_nsr_id:
1319 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1320 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1321 if ns_status == "ERROR":
1322 stage[2] = "Deleting ns at RO. RO_ns_id={}".format(RO_nsr_id)
1323 self.logger.debug(logging_text + stage[2])
1324 await self.RO.delete("ns", RO_nsr_id)
1325 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1326 if not RO_nsr_id:
1327 stage[2] = "Checking dependencies"
1328 db_nsr_update["detailed-status"] = " ".join(stage)
1329 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1330 self._write_op_status(nslcmop_id, stage)
1331 # self.logger.debug(logging_text + stage[2])
tiernod8323042019-08-09 11:32:23 +00001332
tiernoe876f672020-02-13 14:34:48 +00001333 # check if VIM is creating and wait look if previous tasks in process
1334 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account", ns_params["vimAccountId"])
1335 if task_dependency:
1336 stage[2] = "Waiting for related tasks '{}' to be completed".format(task_name)
1337 self.logger.debug(logging_text + stage[2])
1338 await asyncio.wait(task_dependency, timeout=3600)
1339 if ns_params.get("vnf"):
1340 for vnf in ns_params["vnf"]:
1341 if "vimAccountId" in vnf:
1342 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account",
1343 vnf["vimAccountId"])
1344 if task_dependency:
1345 stage[2] = "Waiting for related tasks '{}' to be completed.".format(task_name)
1346 self.logger.debug(logging_text + stage[2])
1347 await asyncio.wait(task_dependency, timeout=3600)
1348
1349 stage[2] = "Checking instantiation parameters."
tiernoe95ed362020-04-23 08:24:57 +00001350 RO_ns_params = self._ns_params_2_RO(ns_params, nsd, db_vnfds_ref, db_vnfrs, n2vc_key_list)
tiernoe876f672020-02-13 14:34:48 +00001351 stage[2] = "Deploying ns at VIM."
1352 db_nsr_update["detailed-status"] = " ".join(stage)
1353 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1354 self._write_op_status(nslcmop_id, stage)
1355
1356 desc = await self.RO.create("ns", descriptor=RO_ns_params, name=db_nsr["name"], scenario=RO_nsd_uuid)
1357 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = desc["uuid"]
1358 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1359 db_nsr_update["_admin.deployed.RO.nsr_status"] = "BUILD"
1360 self.logger.debug(logging_text + "ns created at RO. RO_id={}".format(desc["uuid"]))
1361
1362 # wait until NS is ready
1363 stage[2] = "Waiting VIM to deploy ns."
1364 db_nsr_update["detailed-status"] = " ".join(stage)
1365 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1366 self._write_op_status(nslcmop_id, stage)
1367 detailed_status_old = None
1368 self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1369
1370 old_desc = None
1371 while time() <= start_deploy + timeout_ns_deploy:
tiernod8323042019-08-09 11:32:23 +00001372 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001373
tiernoe876f672020-02-13 14:34:48 +00001374 # deploymentStatus
1375 if desc != old_desc:
1376 # desc has changed => update db
1377 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
1378 old_desc = desc
tiernod8323042019-08-09 11:32:23 +00001379
tiernoe876f672020-02-13 14:34:48 +00001380 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1381 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1382 if ns_status == "ERROR":
1383 raise ROclient.ROClientException(ns_status_info)
1384 elif ns_status == "BUILD":
1385 stage[2] = "VIM: ({})".format(ns_status_info)
1386 elif ns_status == "ACTIVE":
1387 stage[2] = "Waiting for management IP address reported by the VIM. Updating VNFRs."
1388 try:
1389 self.ns_update_vnfr(db_vnfrs, desc)
1390 break
1391 except LcmExceptionNoMgmtIP:
1392 pass
1393 else:
1394 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
1395 if stage[2] != detailed_status_old:
1396 detailed_status_old = stage[2]
1397 db_nsr_update["detailed-status"] = " ".join(stage)
1398 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1399 self._write_op_status(nslcmop_id, stage)
1400 await asyncio.sleep(5, loop=self.loop)
1401 else: # timeout_ns_deploy
1402 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tiernod8323042019-08-09 11:32:23 +00001403
tiernoe876f672020-02-13 14:34:48 +00001404 # Updating NSR
1405 self.ns_update_nsr(db_nsr_update, db_nsr, desc)
tiernod8323042019-08-09 11:32:23 +00001406
tiernoe876f672020-02-13 14:34:48 +00001407 db_nsr_update["_admin.deployed.RO.operational-status"] = "running"
1408 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1409 stage[2] = "Deployed at VIM"
1410 db_nsr_update["detailed-status"] = " ".join(stage)
1411 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1412 self._write_op_status(nslcmop_id, stage)
1413 # await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, "_admin.deployed", db_nsr_update)
1414 # self.logger.debug(logging_text + "Deployed at VIM")
tierno2357f4e2020-10-19 16:38:59 +00001415 except Exception as e:
tierno067e04a2020-03-31 12:53:13 +00001416 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +00001417 self.set_vnfr_at_error(db_vnfrs, str(e))
tierno2357f4e2020-10-19 16:38:59 +00001418 self.logger.error("Error deploying at VIM {}".format(e),
1419 exc_info=not isinstance(e, (ROclient.ROClientException, LcmException, DbException,
1420 NgRoException)))
tiernoe876f672020-02-13 14:34:48 +00001421 raise
quilesj7e13aeb2019-10-08 13:34:55 +02001422
tierno7ecbc342020-09-21 14:05:39 +00001423 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1424 """
1425 Wait for kdu to be up, get ip address
1426 :param logging_text: prefix use for logging
1427 :param nsr_id:
1428 :param vnfr_id:
1429 :param kdu_name:
1430 :return: IP address
1431 """
1432
1433 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1434 nb_tries = 0
1435
1436 while nb_tries < 360:
1437 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
tiernoe5d05972020-10-09 12:03:24 +00001438 kdur = next((x for x in get_iterable(db_vnfr, "kdur") if x.get("kdu-name") == kdu_name), None)
tierno7ecbc342020-09-21 14:05:39 +00001439 if not kdur:
1440 raise LcmException("Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name))
1441 if kdur.get("status"):
1442 if kdur["status"] in ("READY", "ENABLED"):
1443 return kdur.get("ip-address")
1444 else:
1445 raise LcmException("target KDU={} is in error state".format(kdu_name))
1446
1447 await asyncio.sleep(10, loop=self.loop)
1448 nb_tries += 1
1449 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1450
tiernoa5088192019-11-26 16:12:53 +00001451 async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
1452 """
1453 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1454 :param logging_text: prefix use for logging
1455 :param nsr_id:
1456 :param vnfr_id:
1457 :param vdu_id:
1458 :param vdu_index:
1459 :param pub_key: public ssh key to inject, None to skip
1460 :param user: user to apply the public ssh key
1461 :return: IP address
1462 """
quilesj7e13aeb2019-10-08 13:34:55 +02001463
tierno2357f4e2020-10-19 16:38:59 +00001464 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
tiernod8323042019-08-09 11:32:23 +00001465 ro_nsr_id = None
1466 ip_address = None
1467 nb_tries = 0
1468 target_vdu_id = None
quilesj3149f262019-12-03 10:58:10 +00001469 ro_retries = 0
quilesj7e13aeb2019-10-08 13:34:55 +02001470
tiernod8323042019-08-09 11:32:23 +00001471 while True:
quilesj7e13aeb2019-10-08 13:34:55 +02001472
quilesj3149f262019-12-03 10:58:10 +00001473 ro_retries += 1
1474 if ro_retries >= 360: # 1 hour
1475 raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))
1476
tiernod8323042019-08-09 11:32:23 +00001477 await asyncio.sleep(10, loop=self.loop)
quilesj7e13aeb2019-10-08 13:34:55 +02001478
1479 # get ip address
tiernod8323042019-08-09 11:32:23 +00001480 if not target_vdu_id:
1481 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
quilesj3149f262019-12-03 10:58:10 +00001482
1483 if not vdu_id: # for the VNF case
tiernoe876f672020-02-13 14:34:48 +00001484 if db_vnfr.get("status") == "ERROR":
1485 raise LcmException("Cannot inject ssh-key because target VNF is in error state")
tiernod8323042019-08-09 11:32:23 +00001486 ip_address = db_vnfr.get("ip-address")
1487 if not ip_address:
1488 continue
quilesj3149f262019-12-03 10:58:10 +00001489 vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
1490 else: # VDU case
1491 vdur = next((x for x in get_iterable(db_vnfr, "vdur")
1492 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
1493
tierno0e8c3f02020-03-12 17:18:21 +00001494 if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
1495 vdur = db_vnfr["vdur"][0]
quilesj3149f262019-12-03 10:58:10 +00001496 if not vdur:
tierno0e8c3f02020-03-12 17:18:21 +00001497 raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
1498 vdu_index))
tierno2357f4e2020-10-19 16:38:59 +00001499 # New generation RO stores information at "vim_info"
1500 ng_ro_status = None
1501 if vdur.get("vim_info"):
1502 target_vim = next(t for t in vdur["vim_info"]) # there should be only one key
1503 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1504 if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE" or ng_ro_status == "ACTIVE":
quilesj3149f262019-12-03 10:58:10 +00001505 ip_address = vdur.get("ip-address")
1506 if not ip_address:
1507 continue
1508 target_vdu_id = vdur["vdu-id-ref"]
tierno2357f4e2020-10-19 16:38:59 +00001509 elif vdur.get("status") == "ERROR" or vdur["vim_info"][target_vim].get("vim_status") == "ERROR":
quilesj3149f262019-12-03 10:58:10 +00001510 raise LcmException("Cannot inject ssh-key because target VM is in error state")
1511
tiernod8323042019-08-09 11:32:23 +00001512 if not target_vdu_id:
1513 continue
tiernod8323042019-08-09 11:32:23 +00001514
quilesj7e13aeb2019-10-08 13:34:55 +02001515 # inject public key into machine
1516 if pub_key and user:
tierno2357f4e2020-10-19 16:38:59 +00001517 self.logger.debug(logging_text + "Inserting RO key")
tierno0e8c3f02020-03-12 17:18:21 +00001518 if vdur.get("pdu-type"):
1519 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1520 return ip_address
quilesj7e13aeb2019-10-08 13:34:55 +02001521 try:
1522 ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
tierno69f0d382020-05-07 13:08:09 +00001523 if self.ng_ro:
tierno2357f4e2020-10-19 16:38:59 +00001524 self.logger.debug(logging_text + "ALF lanzando orden")
tierno69f0d382020-05-07 13:08:09 +00001525 target = {"action": "inject_ssh_key", "key": pub_key, "user": user,
tierno2357f4e2020-10-19 16:38:59 +00001526 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
tierno69f0d382020-05-07 13:08:09 +00001527 }
tierno2357f4e2020-10-19 16:38:59 +00001528 desc = await self.RO.deploy(nsr_id, target)
1529 action_id = desc["action_id"]
1530 await self._wait_ng_ro(nsr_id, action_id, timeout=600)
1531 break
tierno69f0d382020-05-07 13:08:09 +00001532 else:
tierno2357f4e2020-10-19 16:38:59 +00001533 # wait until NS is deployed at RO
1534 if not ro_nsr_id:
1535 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1536 ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
1537 if not ro_nsr_id:
1538 continue
tierno69f0d382020-05-07 13:08:09 +00001539 result_dict = await self.RO.create_action(
1540 item="ns",
1541 item_id_name=ro_nsr_id,
1542 descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
1543 )
1544 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1545 if not result_dict or not isinstance(result_dict, dict):
1546 raise LcmException("Unknown response from RO when injecting key")
1547 for result in result_dict.values():
1548 if result.get("vim_result") == 200:
1549 break
1550 else:
1551 raise ROclient.ROClientException("error injecting key: {}".format(
1552 result.get("description")))
1553 break
1554 except NgRoException as e:
1555 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001556 except ROclient.ROClientException as e:
tiernoa5088192019-11-26 16:12:53 +00001557 if not nb_tries:
1558 self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
1559 format(e, 20*10))
quilesj7e13aeb2019-10-08 13:34:55 +02001560 nb_tries += 1
tiernoa5088192019-11-26 16:12:53 +00001561 if nb_tries >= 20:
quilesj7e13aeb2019-10-08 13:34:55 +02001562 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001563 else:
quilesj7e13aeb2019-10-08 13:34:55 +02001564 break
1565
1566 return ip_address
1567
tierno5ee02052019-12-05 19:55:02 +00001568 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1569 """
1570 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1571 """
1572 my_vca = vca_deployed_list[vca_index]
1573 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
quilesj3655ae02019-12-12 16:08:35 +00001574 # vdu or kdu: no dependencies
tierno5ee02052019-12-05 19:55:02 +00001575 return
1576 timeout = 300
1577 while timeout >= 0:
quilesj3655ae02019-12-12 16:08:35 +00001578 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1579 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1580 configuration_status_list = db_nsr["configurationStatus"]
1581 for index, vca_deployed in enumerate(configuration_status_list):
tierno5ee02052019-12-05 19:55:02 +00001582 if index == vca_index:
quilesj3655ae02019-12-12 16:08:35 +00001583 # myself
tierno5ee02052019-12-05 19:55:02 +00001584 continue
1585 if not my_vca.get("member-vnf-index") or \
1586 (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
quilesj3655ae02019-12-12 16:08:35 +00001587 internal_status = configuration_status_list[index].get("status")
1588 if internal_status == 'READY':
1589 continue
1590 elif internal_status == 'BROKEN':
tierno5ee02052019-12-05 19:55:02 +00001591 raise LcmException("Configuration aborted because dependent charm/s has failed")
quilesj3655ae02019-12-12 16:08:35 +00001592 else:
1593 break
tierno5ee02052019-12-05 19:55:02 +00001594 else:
quilesj3655ae02019-12-12 16:08:35 +00001595 # no dependencies, return
tierno5ee02052019-12-05 19:55:02 +00001596 return
1597 await asyncio.sleep(10)
1598 timeout -= 1
tierno5ee02052019-12-05 19:55:02 +00001599
1600 raise LcmException("Configuration aborted because dependent charm/s timeout")
1601
tiernoe876f672020-02-13 14:34:48 +00001602 async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
tiernob996d942020-07-03 14:52:28 +00001603 config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name,
1604 ee_config_descriptor):
tiernod8323042019-08-09 11:32:23 +00001605 nsr_id = db_nsr["_id"]
1606 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
tiernoda6fb102019-11-23 00:36:52 +00001607 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernod8323042019-08-09 11:32:23 +00001608 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
tiernob996d942020-07-03 14:52:28 +00001609 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
quilesj7e13aeb2019-10-08 13:34:55 +02001610 db_dict = {
1611 'collection': 'nsrs',
1612 'filter': {'_id': nsr_id},
1613 'path': db_update_entry
1614 }
tiernod8323042019-08-09 11:32:23 +00001615 step = ""
1616 try:
quilesj3655ae02019-12-12 16:08:35 +00001617
1618 element_type = 'NS'
1619 element_under_configuration = nsr_id
1620
tiernod8323042019-08-09 11:32:23 +00001621 vnfr_id = None
1622 if db_vnfr:
1623 vnfr_id = db_vnfr["_id"]
tiernob996d942020-07-03 14:52:28 +00001624 osm_config["osm"]["vnf_id"] = vnfr_id
tiernod8323042019-08-09 11:32:23 +00001625
1626 namespace = "{nsi}.{ns}".format(
1627 nsi=nsi_id if nsi_id else "",
1628 ns=nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001629
tiernod8323042019-08-09 11:32:23 +00001630 if vnfr_id:
quilesj3655ae02019-12-12 16:08:35 +00001631 element_type = 'VNF'
1632 element_under_configuration = vnfr_id
quilesjb8a35dd2020-01-09 15:10:14 +00001633 namespace += ".{}".format(vnfr_id)
tiernod8323042019-08-09 11:32:23 +00001634 if vdu_id:
1635 namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
quilesj3655ae02019-12-12 16:08:35 +00001636 element_type = 'VDU'
quilesjb8a35dd2020-01-09 15:10:14 +00001637 element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
tiernob996d942020-07-03 14:52:28 +00001638 osm_config["osm"]["vdu_id"] = vdu_id
tierno51183952020-04-03 15:48:18 +00001639 elif kdu_name:
1640 namespace += ".{}".format(kdu_name)
1641 element_type = 'KDU'
1642 element_under_configuration = kdu_name
tiernob996d942020-07-03 14:52:28 +00001643 osm_config["osm"]["kdu_name"] = kdu_name
tiernod8323042019-08-09 11:32:23 +00001644
1645 # Get artifact path
tierno588547c2020-07-01 15:30:20 +00001646 artifact_path = "{}/{}/{}/{}".format(
tiernod8323042019-08-09 11:32:23 +00001647 base_folder["folder"],
1648 base_folder["pkg-dir"],
tierno588547c2020-07-01 15:30:20 +00001649 "charms" if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts",
1650 vca_name
tiernod8323042019-08-09 11:32:23 +00001651 )
tiernoa278b842020-07-08 15:33:55 +00001652 # get initial_config_primitive_list that applies to this element
1653 initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
1654
1655 # add config if not present for NS charm
1656 ee_descriptor_id = ee_config_descriptor.get("id")
1657 initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
1658 vca_deployed, ee_descriptor_id)
tiernod8323042019-08-09 11:32:23 +00001659
tierno588547c2020-07-01 15:30:20 +00001660 # n2vc_redesign STEP 3.1
tierno588547c2020-07-01 15:30:20 +00001661 # find old ee_id if exists
1662 ee_id = vca_deployed.get("ee_id")
tiernod8323042019-08-09 11:32:23 +00001663
David Garciaaae391f2020-11-09 11:12:54 +01001664 vim_account_id = (
1665 deep_get(db_vnfr, ("vim-account-id",)) or
1666 deep_get(deploy_params, ("OSM", "vim_account_id"))
1667 )
1668 vca_cloud, vca_cloud_credential = self.get_vca_cloud_and_credentials(vim_account_id)
1669 vca_k8s_cloud, vca_k8s_cloud_credential = self.get_vca_k8s_cloud_and_credentials(vim_account_id)
tierno588547c2020-07-01 15:30:20 +00001670 # create or register execution environment in VCA
lloretgalleg18ebc3a2020-10-22 09:54:51 +00001671 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
quilesj7e13aeb2019-10-08 13:34:55 +02001672
tierno588547c2020-07-01 15:30:20 +00001673 self._write_configuration_status(
1674 nsr_id=nsr_id,
1675 vca_index=vca_index,
1676 status='CREATING',
1677 element_under_configuration=element_under_configuration,
1678 element_type=element_type
1679 )
tiernod8323042019-08-09 11:32:23 +00001680
tierno588547c2020-07-01 15:30:20 +00001681 step = "create execution environment"
1682 self.logger.debug(logging_text + step)
David Garciaaae391f2020-11-09 11:12:54 +01001683
1684 ee_id = None
1685 credentials = None
1686 if vca_type == "k8s_proxy_charm":
1687 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1688 charm_name=artifact_path[artifact_path.rfind("/") + 1:],
1689 namespace=namespace,
1690 artifact_path=artifact_path,
1691 db_dict=db_dict,
1692 cloud_name=vca_k8s_cloud,
1693 credential_name=vca_k8s_cloud_credential,
1694 )
1695 else:
1696 ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
1697 namespace=namespace,
1698 reuse_ee_id=ee_id,
1699 db_dict=db_dict,
1700 config=osm_config,
1701 cloud_name=vca_cloud,
1702 credential_name=vca_cloud_credential,
1703 )
quilesj3655ae02019-12-12 16:08:35 +00001704
tierno588547c2020-07-01 15:30:20 +00001705 elif vca_type == "native_charm":
1706 step = "Waiting to VM being up and getting IP address"
1707 self.logger.debug(logging_text + step)
1708 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1709 user=None, pub_key=None)
1710 credentials = {"hostname": rw_mgmt_ip}
1711 # get username
1712 username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1713 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1714 # merged. Meanwhile let's get username from initial-config-primitive
tiernoa278b842020-07-08 15:33:55 +00001715 if not username and initial_config_primitive_list:
1716 for config_primitive in initial_config_primitive_list:
tierno588547c2020-07-01 15:30:20 +00001717 for param in config_primitive.get("parameter", ()):
1718 if param["name"] == "ssh-username":
1719 username = param["value"]
1720 break
1721 if not username:
tiernoa278b842020-07-08 15:33:55 +00001722 raise LcmException("Cannot determine the username neither with 'initial-config-primitive' nor with "
tierno588547c2020-07-01 15:30:20 +00001723 "'config-access.ssh-access.default-user'")
1724 credentials["username"] = username
1725 # n2vc_redesign STEP 3.2
quilesj3655ae02019-12-12 16:08:35 +00001726
tierno588547c2020-07-01 15:30:20 +00001727 self._write_configuration_status(
1728 nsr_id=nsr_id,
1729 vca_index=vca_index,
1730 status='REGISTERING',
1731 element_under_configuration=element_under_configuration,
1732 element_type=element_type
1733 )
quilesj3655ae02019-12-12 16:08:35 +00001734
tierno588547c2020-07-01 15:30:20 +00001735 step = "register execution environment {}".format(credentials)
1736 self.logger.debug(logging_text + step)
1737 ee_id = await self.vca_map[vca_type].register_execution_environment(
David Garciaaae391f2020-11-09 11:12:54 +01001738 credentials=credentials,
1739 namespace=namespace,
1740 db_dict=db_dict,
1741 cloud_name=vca_cloud,
1742 credential_name=vca_cloud_credential,
1743 )
tierno3bedc9b2019-11-27 15:46:57 +00001744
tierno588547c2020-07-01 15:30:20 +00001745 # for compatibility with MON/POL modules, the need model and application name at database
1746 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1747 ee_id_parts = ee_id.split('.')
1748 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1749 if len(ee_id_parts) >= 2:
1750 model_name = ee_id_parts[0]
1751 application_name = ee_id_parts[1]
1752 db_nsr_update[db_update_entry + "model"] = model_name
1753 db_nsr_update[db_update_entry + "application"] = application_name
tiernod8323042019-08-09 11:32:23 +00001754
1755 # n2vc_redesign STEP 3.3
tiernod8323042019-08-09 11:32:23 +00001756 step = "Install configuration Software"
quilesj3655ae02019-12-12 16:08:35 +00001757
tiernoc231a872020-01-21 08:49:05 +00001758 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001759 nsr_id=nsr_id,
1760 vca_index=vca_index,
1761 status='INSTALLING SW',
1762 element_under_configuration=element_under_configuration,
tierno51183952020-04-03 15:48:18 +00001763 element_type=element_type,
tierno588547c2020-07-01 15:30:20 +00001764 other_update=db_nsr_update
quilesj3655ae02019-12-12 16:08:35 +00001765 )
1766
tierno3bedc9b2019-11-27 15:46:57 +00001767 # TODO check if already done
quilesj7e13aeb2019-10-08 13:34:55 +02001768 self.logger.debug(logging_text + step)
David Garcia18a63322020-04-01 16:14:59 +02001769 config = None
tierno588547c2020-07-01 15:30:20 +00001770 if vca_type == "native_charm":
tiernoa278b842020-07-08 15:33:55 +00001771 config_primitive = next((p for p in initial_config_primitive_list if p["name"] == "config"), None)
1772 if config_primitive:
1773 config = self._map_primitive_params(
1774 config_primitive,
1775 {},
1776 deploy_params
1777 )
tierno588547c2020-07-01 15:30:20 +00001778 num_units = 1
1779 if vca_type == "lxc_proxy_charm":
1780 if element_type == "NS":
1781 num_units = db_nsr.get("config-units") or 1
1782 elif element_type == "VNF":
1783 num_units = db_vnfr.get("config-units") or 1
1784 elif element_type == "VDU":
1785 for v in db_vnfr["vdur"]:
1786 if vdu_id == v["vdu-id-ref"]:
1787 num_units = v.get("config-units") or 1
1788 break
David Garciaaae391f2020-11-09 11:12:54 +01001789 if vca_type != "k8s_proxy_charm":
1790 await self.vca_map[vca_type].install_configuration_sw(
1791 ee_id=ee_id,
1792 artifact_path=artifact_path,
1793 db_dict=db_dict,
1794 config=config,
1795 num_units=num_units,
1796 )
quilesj7e13aeb2019-10-08 13:34:55 +02001797
quilesj63f90042020-01-17 09:53:55 +00001798 # write in db flag of configuration_sw already installed
1799 self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})
1800
1801 # add relations for this VCA (wait for other peers related with this VCA)
tierno588547c2020-07-01 15:30:20 +00001802 await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
1803 vca_index=vca_index, vca_type=vca_type)
quilesj63f90042020-01-17 09:53:55 +00001804
quilesj7e13aeb2019-10-08 13:34:55 +02001805 # if SSH access is required, then get execution environment SSH public
David Garciaa27e20a2020-07-10 13:12:44 +02001806 # if native charm we have waited already to VM be UP
lloretgalleg18ebc3a2020-10-22 09:54:51 +00001807 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
tierno3bedc9b2019-11-27 15:46:57 +00001808 pub_key = None
1809 user = None
tierno588547c2020-07-01 15:30:20 +00001810 # self.logger.debug("get ssh key block")
tierno3bedc9b2019-11-27 15:46:57 +00001811 if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
tierno588547c2020-07-01 15:30:20 +00001812 # self.logger.debug("ssh key needed")
tierno3bedc9b2019-11-27 15:46:57 +00001813 # Needed to inject a ssh key
1814 user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1815 step = "Install configuration Software, getting public ssh key"
tierno588547c2020-07-01 15:30:20 +00001816 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02001817
tiernoacc90452019-12-10 11:06:54 +00001818 step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
tierno3bedc9b2019-11-27 15:46:57 +00001819 else:
tierno588547c2020-07-01 15:30:20 +00001820 # self.logger.debug("no need to get ssh key")
tierno3bedc9b2019-11-27 15:46:57 +00001821 step = "Waiting to VM being up and getting IP address"
1822 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02001823
tierno3bedc9b2019-11-27 15:46:57 +00001824 # n2vc_redesign STEP 5.1
1825 # wait for RO (ip-address) Insert pub_key into VM
tierno5ee02052019-12-05 19:55:02 +00001826 if vnfr_id:
tierno7ecbc342020-09-21 14:05:39 +00001827 if kdu_name:
1828 rw_mgmt_ip = await self.wait_kdu_up(logging_text, nsr_id, vnfr_id, kdu_name)
1829 else:
1830 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id,
1831 vdu_index, user=user, pub_key=pub_key)
tierno5ee02052019-12-05 19:55:02 +00001832 else:
1833 rw_mgmt_ip = None # This is for a NS configuration
tierno3bedc9b2019-11-27 15:46:57 +00001834
1835 self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
quilesj7e13aeb2019-10-08 13:34:55 +02001836
tiernoa5088192019-11-26 16:12:53 +00001837 # store rw_mgmt_ip in deploy params for later replacement
quilesj7e13aeb2019-10-08 13:34:55 +02001838 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
tiernod8323042019-08-09 11:32:23 +00001839
1840 # n2vc_redesign STEP 6 Execute initial config primitive
quilesj7e13aeb2019-10-08 13:34:55 +02001841 step = 'execute initial config primitive'
quilesj3655ae02019-12-12 16:08:35 +00001842
1843 # wait for dependent primitives execution (NS -> VNF -> VDU)
tierno5ee02052019-12-05 19:55:02 +00001844 if initial_config_primitive_list:
1845 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
quilesj3655ae02019-12-12 16:08:35 +00001846
1847 # stage, in function of element type: vdu, kdu, vnf or ns
1848 my_vca = vca_deployed_list[vca_index]
1849 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1850 # VDU or KDU
tiernoe876f672020-02-13 14:34:48 +00001851 stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
quilesj3655ae02019-12-12 16:08:35 +00001852 elif my_vca.get("member-vnf-index"):
1853 # VNF
tiernoe876f672020-02-13 14:34:48 +00001854 stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
quilesj3655ae02019-12-12 16:08:35 +00001855 else:
1856 # NS
tiernoe876f672020-02-13 14:34:48 +00001857 stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'
quilesj3655ae02019-12-12 16:08:35 +00001858
tiernoc231a872020-01-21 08:49:05 +00001859 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001860 nsr_id=nsr_id,
1861 vca_index=vca_index,
1862 status='EXECUTING PRIMITIVE'
1863 )
1864
1865 self._write_op_status(
1866 op_id=nslcmop_id,
1867 stage=stage
1868 )
1869
tiernoe876f672020-02-13 14:34:48 +00001870 check_if_terminated_needed = True
tiernod8323042019-08-09 11:32:23 +00001871 for initial_config_primitive in initial_config_primitive_list:
tiernoda6fb102019-11-23 00:36:52 +00001872 # adding information on the vca_deployed if it is a NS execution environment
1873 if not vca_deployed["member-vnf-index"]:
David Garciad4816682019-12-09 14:57:43 +01001874 deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
tiernod8323042019-08-09 11:32:23 +00001875 # TODO check if already done
1876 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
tierno3bedc9b2019-11-27 15:46:57 +00001877
tiernod8323042019-08-09 11:32:23 +00001878 step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
1879 self.logger.debug(logging_text + step)
tierno588547c2020-07-01 15:30:20 +00001880 await self.vca_map[vca_type].exec_primitive(
quilesj7e13aeb2019-10-08 13:34:55 +02001881 ee_id=ee_id,
1882 primitive_name=initial_config_primitive["name"],
1883 params_dict=primitive_params_,
1884 db_dict=db_dict
1885 )
tiernoe876f672020-02-13 14:34:48 +00001886 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
1887 if check_if_terminated_needed:
1888 if config_descriptor.get('terminate-config-primitive'):
1889 self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
1890 check_if_terminated_needed = False
quilesj3655ae02019-12-12 16:08:35 +00001891
tiernod8323042019-08-09 11:32:23 +00001892 # TODO register in database that primitive is done
quilesj7e13aeb2019-10-08 13:34:55 +02001893
tiernob996d942020-07-03 14:52:28 +00001894 # STEP 7 Configure metrics
lloretgalleg18ebc3a2020-10-22 09:54:51 +00001895 if vca_type == "helm" or vca_type == "helm-v3":
tiernob996d942020-07-03 14:52:28 +00001896 prometheus_jobs = await self.add_prometheus_metrics(
1897 ee_id=ee_id,
1898 artifact_path=artifact_path,
1899 ee_config_descriptor=ee_config_descriptor,
1900 vnfr_id=vnfr_id,
1901 nsr_id=nsr_id,
1902 target_ip=rw_mgmt_ip,
1903 )
1904 if prometheus_jobs:
1905 self.update_db_2("nsrs", nsr_id, {db_update_entry + "prometheus_jobs": prometheus_jobs})
1906
quilesj7e13aeb2019-10-08 13:34:55 +02001907 step = "instantiated at VCA"
1908 self.logger.debug(logging_text + step)
1909
tiernoc231a872020-01-21 08:49:05 +00001910 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001911 nsr_id=nsr_id,
1912 vca_index=vca_index,
1913 status='READY'
1914 )
1915
tiernod8323042019-08-09 11:32:23 +00001916 except Exception as e: # TODO not use Exception but N2VC exception
quilesj3655ae02019-12-12 16:08:35 +00001917 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
tiernoe876f672020-02-13 14:34:48 +00001918 if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
1919 self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
tiernoc231a872020-01-21 08:49:05 +00001920 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001921 nsr_id=nsr_id,
1922 vca_index=vca_index,
1923 status='BROKEN'
1924 )
tiernoe876f672020-02-13 14:34:48 +00001925 raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00001926
quilesj4cda56b2019-12-05 10:02:20 +00001927 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
tiernoa2143262020-03-27 16:20:40 +00001928 error_description: str = None, error_detail: str = None, other_update: dict = None):
tiernoe876f672020-02-13 14:34:48 +00001929 """
1930 Update db_nsr fields.
1931 :param nsr_id:
1932 :param ns_state:
1933 :param current_operation:
1934 :param current_operation_id:
1935 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00001936 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00001937 :param other_update: Other required changes at database if provided, will be cleared
1938 :return:
1939 """
quilesj4cda56b2019-12-05 10:02:20 +00001940 try:
tiernoe876f672020-02-13 14:34:48 +00001941 db_dict = other_update or {}
1942 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1943 db_dict["_admin.current-operation"] = current_operation_id
1944 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
quilesj4cda56b2019-12-05 10:02:20 +00001945 db_dict["currentOperation"] = current_operation
1946 db_dict["currentOperationID"] = current_operation_id
1947 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00001948 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00001949
1950 if ns_state:
1951 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00001952 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001953 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001954 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1955
tiernoe876f672020-02-13 14:34:48 +00001956 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1957 operation_state: str = None, other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001958 try:
tiernoe876f672020-02-13 14:34:48 +00001959 db_dict = other_update or {}
quilesj3655ae02019-12-12 16:08:35 +00001960 db_dict['queuePosition'] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00001961 if isinstance(stage, list):
1962 db_dict['stage'] = stage[0]
1963 db_dict['detailed-status'] = " ".join(stage)
1964 elif stage is not None:
1965 db_dict['stage'] = str(stage)
1966
1967 if error_message is not None:
quilesj3655ae02019-12-12 16:08:35 +00001968 db_dict['errorMessage'] = error_message
tiernoe876f672020-02-13 14:34:48 +00001969 if operation_state is not None:
1970 db_dict['operationState'] = operation_state
1971 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00001972 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001973 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001974 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1975
tierno51183952020-04-03 15:48:18 +00001976 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00001977 try:
tierno51183952020-04-03 15:48:18 +00001978 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00001979 # configurationStatus
1980 config_status = db_nsr.get('configurationStatus')
1981 if config_status:
tierno51183952020-04-03 15:48:18 +00001982 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1983 enumerate(config_status) if v}
quilesj3655ae02019-12-12 16:08:35 +00001984 # update status
tierno51183952020-04-03 15:48:18 +00001985 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001986
tiernoe876f672020-02-13 14:34:48 +00001987 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001988 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1989
quilesj63f90042020-01-17 09:53:55 +00001990 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
tierno51183952020-04-03 15:48:18 +00001991 element_under_configuration: str = None, element_type: str = None,
1992 other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001993
1994 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1995 # .format(vca_index, status))
1996
1997 try:
1998 db_path = 'configurationStatus.{}.'.format(vca_index)
tierno51183952020-04-03 15:48:18 +00001999 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00002000 if status:
2001 db_dict[db_path + 'status'] = status
quilesj3655ae02019-12-12 16:08:35 +00002002 if element_under_configuration:
2003 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
2004 if element_type:
2005 db_dict[db_path + 'elementType'] = element_type
2006 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00002007 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00002008 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
2009 .format(status, nsr_id, vca_index, e))
quilesj4cda56b2019-12-05 10:02:20 +00002010
tierno38089af2020-04-16 07:56:58 +00002011 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2012 """
2013 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2014 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2015 Database is used because the result can be obtained from a different LCM worker in case of HA.
2016 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2017 :param db_nslcmop: database content of nslcmop
2018 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
tierno8790a3d2020-04-23 22:49:52 +00002019 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2020 computed 'vim-account-id'
tierno38089af2020-04-16 07:56:58 +00002021 """
tierno8790a3d2020-04-23 22:49:52 +00002022 modified = False
tierno38089af2020-04-16 07:56:58 +00002023 nslcmop_id = db_nslcmop['_id']
magnussonle9198bb2020-01-21 13:00:51 +01002024 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
2025 if placement_engine == "PLA":
tierno38089af2020-04-16 07:56:58 +00002026 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
2027 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
magnussonle9198bb2020-01-21 13:00:51 +01002028 db_poll_interval = 5
tierno38089af2020-04-16 07:56:58 +00002029 wait = db_poll_interval * 10
magnussonle9198bb2020-01-21 13:00:51 +01002030 pla_result = None
2031 while not pla_result and wait >= 0:
2032 await asyncio.sleep(db_poll_interval)
2033 wait -= db_poll_interval
tierno38089af2020-04-16 07:56:58 +00002034 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
magnussonle9198bb2020-01-21 13:00:51 +01002035 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
2036
2037 if not pla_result:
tierno38089af2020-04-16 07:56:58 +00002038 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
magnussonle9198bb2020-01-21 13:00:51 +01002039
2040 for pla_vnf in pla_result['vnf']:
2041 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
2042 if not pla_vnf.get('vimAccountId') or not vnfr:
2043 continue
tierno8790a3d2020-04-23 22:49:52 +00002044 modified = True
magnussonle9198bb2020-01-21 13:00:51 +01002045 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
tierno38089af2020-04-16 07:56:58 +00002046 # Modifies db_vnfrs
2047 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
tierno8790a3d2020-04-23 22:49:52 +00002048 return modified
magnussonle9198bb2020-01-21 13:00:51 +01002049
2050 def update_nsrs_with_pla_result(self, params):
2051 try:
2052 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
2053 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
2054 except Exception as e:
2055 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
2056
tierno59d22d22018-09-25 18:10:19 +02002057 async def instantiate(self, nsr_id, nslcmop_id):
quilesj7e13aeb2019-10-08 13:34:55 +02002058 """
2059
2060 :param nsr_id: ns instance to deploy
2061 :param nslcmop_id: operation to run
2062 :return:
2063 """
kuused124bfe2019-06-18 12:09:24 +02002064
2065 # Try to lock HA task here
2066 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
2067 if not task_is_locked_by_me:
quilesj3655ae02019-12-12 16:08:35 +00002068 self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
kuused124bfe2019-06-18 12:09:24 +02002069 return
2070
tierno59d22d22018-09-25 18:10:19 +02002071 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2072 self.logger.debug(logging_text + "Enter")
quilesj7e13aeb2019-10-08 13:34:55 +02002073
tierno59d22d22018-09-25 18:10:19 +02002074 # get all needed from database
quilesj7e13aeb2019-10-08 13:34:55 +02002075
2076 # database nsrs record
tierno59d22d22018-09-25 18:10:19 +02002077 db_nsr = None
quilesj7e13aeb2019-10-08 13:34:55 +02002078
2079 # database nslcmops record
tierno59d22d22018-09-25 18:10:19 +02002080 db_nslcmop = None
quilesj7e13aeb2019-10-08 13:34:55 +02002081
2082 # update operation on nsrs
tiernoe876f672020-02-13 14:34:48 +00002083 db_nsr_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02002084 # update operation on nslcmops
tierno59d22d22018-09-25 18:10:19 +02002085 db_nslcmop_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02002086
tierno59d22d22018-09-25 18:10:19 +02002087 nslcmop_operation_state = None
quilesj7e13aeb2019-10-08 13:34:55 +02002088 db_vnfrs = {} # vnf's info indexed by member-index
2089 # n2vc_info = {}
tiernoe876f672020-02-13 14:34:48 +00002090 tasks_dict_info = {} # from task to info text
tierno59d22d22018-09-25 18:10:19 +02002091 exc = None
tiernoe876f672020-02-13 14:34:48 +00002092 error_list = []
2093 stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
2094 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02002095 try:
kuused124bfe2019-06-18 12:09:24 +02002096 # wait for any previous tasks in process
2097 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
2098
tiernob5203912020-08-11 11:20:13 +00002099 stage[1] = "Sync filesystem from database."
tierno21e42212020-07-09 13:51:20 +00002100 self.fs.sync() # TODO, make use of partial sync, only for the needed packages
2101
quilesj7e13aeb2019-10-08 13:34:55 +02002102 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
tiernob5203912020-08-11 11:20:13 +00002103 stage[1] = "Reading from database."
quilesj4cda56b2019-12-05 10:02:20 +00002104 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
tiernoe876f672020-02-13 14:34:48 +00002105 db_nsr_update["detailed-status"] = "creating"
2106 db_nsr_update["operational-status"] = "init"
quilesj4cda56b2019-12-05 10:02:20 +00002107 self._write_ns_status(
2108 nsr_id=nsr_id,
2109 ns_state="BUILDING",
2110 current_operation="INSTANTIATING",
tiernoe876f672020-02-13 14:34:48 +00002111 current_operation_id=nslcmop_id,
2112 other_update=db_nsr_update
2113 )
2114 self._write_op_status(
2115 op_id=nslcmop_id,
2116 stage=stage,
2117 queuePosition=0
quilesj4cda56b2019-12-05 10:02:20 +00002118 )
2119
quilesj7e13aeb2019-10-08 13:34:55 +02002120 # read from db: operation
tiernob5203912020-08-11 11:20:13 +00002121 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
tierno59d22d22018-09-25 18:10:19 +02002122 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
tierno744303e2020-01-13 16:46:31 +00002123 ns_params = db_nslcmop.get("operationParams")
2124 if ns_params and ns_params.get("timeout_ns_deploy"):
2125 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2126 else:
2127 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02002128
2129 # read from db: ns
tiernob5203912020-08-11 11:20:13 +00002130 stage[1] = "Getting nsr={} from db.".format(nsr_id)
tierno59d22d22018-09-25 18:10:19 +02002131 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernob5203912020-08-11 11:20:13 +00002132 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
tiernod732fb82020-05-21 13:18:23 +00002133 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2134 db_nsr["nsd"] = nsd
tiernod8323042019-08-09 11:32:23 +00002135 # nsr_name = db_nsr["name"] # TODO short-name??
tierno47e86b52018-10-10 14:05:55 +02002136
quilesj7e13aeb2019-10-08 13:34:55 +02002137 # read from db: vnf's of this ns
tiernob5203912020-08-11 11:20:13 +00002138 stage[1] = "Getting vnfrs from db."
tiernoe876f672020-02-13 14:34:48 +00002139 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02002140 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
tierno27246d82018-09-27 15:59:09 +02002141
quilesj7e13aeb2019-10-08 13:34:55 +02002142 # read from db: vnfd's for every vnf
2143 db_vnfds_ref = {} # every vnfd data indexed by vnf name
2144 db_vnfds = {} # every vnfd data indexed by vnf id
2145 db_vnfds_index = {} # every vnfd data indexed by vnf member-index
2146
2147 # for each vnf in ns, read vnfd
tierno27246d82018-09-27 15:59:09 +02002148 for vnfr in db_vnfrs_list:
quilesj7e13aeb2019-10-08 13:34:55 +02002149 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr # vnf's dict indexed by member-index: '1', '2', etc
2150 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
2151 vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
lloretgalleg6d488782020-07-22 10:13:46 +00002152
quilesj7e13aeb2019-10-08 13:34:55 +02002153 # if we haven't this vnfd, read it from db
tierno27246d82018-09-27 15:59:09 +02002154 if vnfd_id not in db_vnfds:
quilesj63f90042020-01-17 09:53:55 +00002155 # read from db
tiernob5203912020-08-11 11:20:13 +00002156 stage[1] = "Getting vnfd={} id='{}' from db.".format(vnfd_id, vnfd_ref)
tiernoe876f672020-02-13 14:34:48 +00002157 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02002158 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
tierno27246d82018-09-27 15:59:09 +02002159
quilesj7e13aeb2019-10-08 13:34:55 +02002160 # store vnfd
2161 db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
2162 db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
2163 db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id] # vnfd's indexed by member-index
2164
2165 # Get or generates the _admin.deployed.VCA list
tiernoe4f7e6c2018-11-27 14:55:30 +00002166 vca_deployed_list = None
2167 if db_nsr["_admin"].get("deployed"):
2168 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2169 if vca_deployed_list is None:
2170 vca_deployed_list = []
quilesj3655ae02019-12-12 16:08:35 +00002171 configuration_status_list = []
tiernoe4f7e6c2018-11-27 14:55:30 +00002172 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
quilesj3655ae02019-12-12 16:08:35 +00002173 db_nsr_update["configurationStatus"] = configuration_status_list
quilesj7e13aeb2019-10-08 13:34:55 +02002174 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00002175 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00002176 elif isinstance(vca_deployed_list, dict):
2177 # maintain backward compatibility. Change a dict to list at database
2178 vca_deployed_list = list(vca_deployed_list.values())
2179 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00002180 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00002181
tierno6cf25f52019-09-12 09:33:40 +00002182 if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
tiernoa009e552019-01-30 16:45:44 +00002183 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2184 db_nsr_update["_admin.deployed.RO.vnfd"] = []
tierno59d22d22018-09-25 18:10:19 +02002185
tiernobaa51102018-12-14 13:16:18 +00002186 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2187 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2188 self.update_db_2("nsrs", nsr_id, db_nsr_update)
lloretgalleg6d488782020-07-22 10:13:46 +00002189 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"})
quilesj3655ae02019-12-12 16:08:35 +00002190
2191 # n2vc_redesign STEP 2 Deploy Network Scenario
tiernoe876f672020-02-13 14:34:48 +00002192 stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
quilesj3655ae02019-12-12 16:08:35 +00002193 self._write_op_status(
2194 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00002195 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00002196 )
2197
tiernob5203912020-08-11 11:20:13 +00002198 stage[1] = "Deploying KDUs."
tiernoe876f672020-02-13 14:34:48 +00002199 # self.logger.debug(logging_text + "Before deploy_kdus")
calvinosanch9f9c6f22019-11-04 13:37:39 +01002200 # Call to deploy_kdus in case exists the "vdu:kdu" param
tiernoe876f672020-02-13 14:34:48 +00002201 await self.deploy_kdus(
2202 logging_text=logging_text,
2203 nsr_id=nsr_id,
2204 nslcmop_id=nslcmop_id,
2205 db_vnfrs=db_vnfrs,
2206 db_vnfds=db_vnfds,
2207 task_instantiation_info=tasks_dict_info,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002208 )
tiernoe876f672020-02-13 14:34:48 +00002209
2210 stage[1] = "Getting VCA public key."
tiernod8323042019-08-09 11:32:23 +00002211 # n2vc_redesign STEP 1 Get VCA public ssh-key
2212 # feature 1429. Add n2vc public key to needed VMs
tierno3bedc9b2019-11-27 15:46:57 +00002213 n2vc_key = self.n2vc.get_public_key()
tiernoa5088192019-11-26 16:12:53 +00002214 n2vc_key_list = [n2vc_key]
2215 if self.vca_config.get("public_key"):
2216 n2vc_key_list.append(self.vca_config["public_key"])
tierno98ad6ea2019-05-30 17:16:28 +00002217
tiernoe876f672020-02-13 14:34:48 +00002218 stage[1] = "Deploying NS at VIM."
tiernod8323042019-08-09 11:32:23 +00002219 task_ro = asyncio.ensure_future(
quilesj7e13aeb2019-10-08 13:34:55 +02002220 self.instantiate_RO(
2221 logging_text=logging_text,
2222 nsr_id=nsr_id,
2223 nsd=nsd,
2224 db_nsr=db_nsr,
2225 db_nslcmop=db_nslcmop,
2226 db_vnfrs=db_vnfrs,
2227 db_vnfds_ref=db_vnfds_ref,
tiernoe876f672020-02-13 14:34:48 +00002228 n2vc_key_list=n2vc_key_list,
2229 stage=stage
tierno98ad6ea2019-05-30 17:16:28 +00002230 )
tiernod8323042019-08-09 11:32:23 +00002231 )
2232 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
tiernoa2143262020-03-27 16:20:40 +00002233 tasks_dict_info[task_ro] = "Deploying at VIM"
tierno98ad6ea2019-05-30 17:16:28 +00002234
tiernod8323042019-08-09 11:32:23 +00002235 # n2vc_redesign STEP 3 to 6 Deploy N2VC
tiernoe876f672020-02-13 14:34:48 +00002236 stage[1] = "Deploying Execution Environments."
2237 self.logger.debug(logging_text + stage[1])
tierno98ad6ea2019-05-30 17:16:28 +00002238
tiernod8323042019-08-09 11:32:23 +00002239 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
quilesj7e13aeb2019-10-08 13:34:55 +02002240 # get_iterable() returns a value from a dict or empty tuple if key does not exist
tierno98ad6ea2019-05-30 17:16:28 +00002241 for c_vnf in get_iterable(nsd, "constituent-vnfd"):
2242 vnfd_id = c_vnf["vnfd-id-ref"]
tierno98ad6ea2019-05-30 17:16:28 +00002243 vnfd = db_vnfds_ref[vnfd_id]
tiernod8323042019-08-09 11:32:23 +00002244 member_vnf_index = str(c_vnf["member-vnf-index"])
2245 db_vnfr = db_vnfrs[member_vnf_index]
2246 base_folder = vnfd["_admin"]["storage"]
2247 vdu_id = None
2248 vdu_index = 0
tierno98ad6ea2019-05-30 17:16:28 +00002249 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002250 kdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002251
tierno8a518872018-12-21 13:42:14 +00002252 # Get additional parameters
tierno72ef84f2020-10-06 08:22:07 +00002253 deploy_params = {"OSM": self._get_osm_params(db_vnfr)}
tiernod8323042019-08-09 11:32:23 +00002254 if db_vnfr.get("additionalParamsForVnf"):
tierno72ef84f2020-10-06 08:22:07 +00002255 deploy_params.update(self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy()))
tierno8a518872018-12-21 13:42:14 +00002256
tiernod8323042019-08-09 11:32:23 +00002257 descriptor_config = vnfd.get("vnf-configuration")
tierno588547c2020-07-01 15:30:20 +00002258 if descriptor_config:
quilesj7e13aeb2019-10-08 13:34:55 +02002259 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00002260 logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002261 db_nsr=db_nsr,
2262 db_vnfr=db_vnfr,
2263 nslcmop_id=nslcmop_id,
2264 nsr_id=nsr_id,
2265 nsi_id=nsi_id,
2266 vnfd_id=vnfd_id,
2267 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002268 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002269 member_vnf_index=member_vnf_index,
2270 vdu_index=vdu_index,
2271 vdu_name=vdu_name,
2272 deploy_params=deploy_params,
2273 descriptor_config=descriptor_config,
2274 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00002275 task_instantiation_info=tasks_dict_info,
2276 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002277 )
tierno59d22d22018-09-25 18:10:19 +02002278
2279 # Deploy charms for each VDU that supports one.
tiernod8323042019-08-09 11:32:23 +00002280 for vdud in get_iterable(vnfd, 'vdu'):
2281 vdu_id = vdud["id"]
2282 descriptor_config = vdud.get('vdu-configuration')
tierno626e0152019-11-29 14:16:16 +00002283 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
2284 if vdur.get("additionalParams"):
2285 deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
2286 else:
2287 deploy_params_vdu = deploy_params
tierno72ef84f2020-10-06 08:22:07 +00002288 deploy_params_vdu["OSM"] = self._get_osm_params(db_vnfr, vdu_id, vdu_count_index=0)
tierno588547c2020-07-01 15:30:20 +00002289 if descriptor_config:
tiernod8323042019-08-09 11:32:23 +00002290 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002291 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002292 for vdu_index in range(int(vdud.get("count", 1))):
2293 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
quilesj7e13aeb2019-10-08 13:34:55 +02002294 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00002295 logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2296 member_vnf_index, vdu_id, vdu_index),
quilesj7e13aeb2019-10-08 13:34:55 +02002297 db_nsr=db_nsr,
2298 db_vnfr=db_vnfr,
2299 nslcmop_id=nslcmop_id,
2300 nsr_id=nsr_id,
2301 nsi_id=nsi_id,
2302 vnfd_id=vnfd_id,
2303 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002304 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002305 member_vnf_index=member_vnf_index,
2306 vdu_index=vdu_index,
2307 vdu_name=vdu_name,
tierno626e0152019-11-29 14:16:16 +00002308 deploy_params=deploy_params_vdu,
quilesj7e13aeb2019-10-08 13:34:55 +02002309 descriptor_config=descriptor_config,
2310 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002311 task_instantiation_info=tasks_dict_info,
2312 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002313 )
calvinosanch9f9c6f22019-11-04 13:37:39 +01002314 for kdud in get_iterable(vnfd, 'kdu'):
2315 kdu_name = kdud["name"]
2316 descriptor_config = kdud.get('kdu-configuration')
tierno588547c2020-07-01 15:30:20 +00002317 if descriptor_config:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002318 vdu_id = None
2319 vdu_index = 0
2320 vdu_name = None
tierno72ef84f2020-10-06 08:22:07 +00002321 kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name)
2322 deploy_params_kdu = {"OSM": self._get_osm_params(db_vnfr)}
2323 if kdur.get("additionalParams"):
2324 deploy_params_kdu = self._format_additional_params(kdur["additionalParams"])
tierno59d22d22018-09-25 18:10:19 +02002325
calvinosanch9f9c6f22019-11-04 13:37:39 +01002326 self._deploy_n2vc(
2327 logging_text=logging_text,
2328 db_nsr=db_nsr,
2329 db_vnfr=db_vnfr,
2330 nslcmop_id=nslcmop_id,
2331 nsr_id=nsr_id,
2332 nsi_id=nsi_id,
2333 vnfd_id=vnfd_id,
2334 vdu_id=vdu_id,
2335 kdu_name=kdu_name,
2336 member_vnf_index=member_vnf_index,
2337 vdu_index=vdu_index,
2338 vdu_name=vdu_name,
tierno72ef84f2020-10-06 08:22:07 +00002339 deploy_params=deploy_params_kdu,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002340 descriptor_config=descriptor_config,
2341 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002342 task_instantiation_info=tasks_dict_info,
2343 stage=stage
calvinosanch9f9c6f22019-11-04 13:37:39 +01002344 )
tierno59d22d22018-09-25 18:10:19 +02002345
tierno1b633412019-02-25 16:48:23 +00002346 # Check if this NS has a charm configuration
tiernod8323042019-08-09 11:32:23 +00002347 descriptor_config = nsd.get("ns-configuration")
2348 if descriptor_config and descriptor_config.get("juju"):
2349 vnfd_id = None
2350 db_vnfr = None
2351 member_vnf_index = None
2352 vdu_id = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002353 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002354 vdu_index = 0
2355 vdu_name = None
tierno1b633412019-02-25 16:48:23 +00002356
tiernod8323042019-08-09 11:32:23 +00002357 # Get additional parameters
tierno72ef84f2020-10-06 08:22:07 +00002358 deploy_params = {"OSM": self._get_osm_params(db_vnfr)}
tiernod8323042019-08-09 11:32:23 +00002359 if db_nsr.get("additionalParamsForNs"):
tierno72ef84f2020-10-06 08:22:07 +00002360 deploy_params.update(self._format_additional_params(db_nsr["additionalParamsForNs"].copy()))
tiernod8323042019-08-09 11:32:23 +00002361 base_folder = nsd["_admin"]["storage"]
quilesj7e13aeb2019-10-08 13:34:55 +02002362 self._deploy_n2vc(
2363 logging_text=logging_text,
2364 db_nsr=db_nsr,
2365 db_vnfr=db_vnfr,
2366 nslcmop_id=nslcmop_id,
2367 nsr_id=nsr_id,
2368 nsi_id=nsi_id,
2369 vnfd_id=vnfd_id,
2370 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002371 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002372 member_vnf_index=member_vnf_index,
2373 vdu_index=vdu_index,
2374 vdu_name=vdu_name,
2375 deploy_params=deploy_params,
2376 descriptor_config=descriptor_config,
2377 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002378 task_instantiation_info=tasks_dict_info,
2379 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002380 )
tierno1b633412019-02-25 16:48:23 +00002381
tiernoe876f672020-02-13 14:34:48 +00002382 # rest of staff will be done at finally
tierno1b633412019-02-25 16:48:23 +00002383
tiernoe876f672020-02-13 14:34:48 +00002384 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
2385 self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
tierno59d22d22018-09-25 18:10:19 +02002386 exc = e
2387 except asyncio.CancelledError:
tiernoe876f672020-02-13 14:34:48 +00002388 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
tierno59d22d22018-09-25 18:10:19 +02002389 exc = "Operation was cancelled"
2390 except Exception as e:
2391 exc = traceback.format_exc()
tiernoe876f672020-02-13 14:34:48 +00002392 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
tierno59d22d22018-09-25 18:10:19 +02002393 finally:
2394 if exc:
tiernoe876f672020-02-13 14:34:48 +00002395 error_list.append(str(exc))
tiernobaa51102018-12-14 13:16:18 +00002396 try:
tiernoe876f672020-02-13 14:34:48 +00002397 # wait for pending tasks
2398 if tasks_dict_info:
2399 stage[1] = "Waiting for instantiate pending tasks."
2400 self.logger.debug(logging_text + stage[1])
2401 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
2402 stage, nslcmop_id, nsr_id=nsr_id)
2403 stage[1] = stage[2] = ""
2404 except asyncio.CancelledError:
2405 error_list.append("Cancelled")
2406 # TODO cancel all tasks
2407 except Exception as exc:
2408 error_list.append(str(exc))
quilesj4cda56b2019-12-05 10:02:20 +00002409
tiernoe876f672020-02-13 14:34:48 +00002410 # update operation-status
2411 db_nsr_update["operational-status"] = "running"
2412 # let's begin with VCA 'configured' status (later we can change it)
2413 db_nsr_update["config-status"] = "configured"
2414 for task, task_name in tasks_dict_info.items():
2415 if not task.done() or task.cancelled() or task.exception():
2416 if task_name.startswith(self.task_name_deploy_vca):
2417 # A N2VC task is pending
2418 db_nsr_update["config-status"] = "failed"
quilesj4cda56b2019-12-05 10:02:20 +00002419 else:
tiernoe876f672020-02-13 14:34:48 +00002420 # RO or KDU task is pending
2421 db_nsr_update["operational-status"] = "failed"
quilesj3655ae02019-12-12 16:08:35 +00002422
tiernoe876f672020-02-13 14:34:48 +00002423 # update status at database
2424 if error_list:
tiernoa2143262020-03-27 16:20:40 +00002425 error_detail = ". ".join(error_list)
tiernoe876f672020-02-13 14:34:48 +00002426 self.logger.error(logging_text + error_detail)
tiernob5203912020-08-11 11:20:13 +00002427 error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
2428 error_description_nsr = 'Operation: INSTANTIATING.{}, {}'.format(nslcmop_id, stage[0])
quilesj3655ae02019-12-12 16:08:35 +00002429
tiernoa2143262020-03-27 16:20:40 +00002430 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00002431 db_nslcmop_update["detailed-status"] = error_detail
2432 nslcmop_operation_state = "FAILED"
2433 ns_state = "BROKEN"
2434 else:
tiernoa2143262020-03-27 16:20:40 +00002435 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00002436 error_description_nsr = error_description_nslcmop = None
2437 ns_state = "READY"
2438 db_nsr_update["detailed-status"] = "Done"
2439 db_nslcmop_update["detailed-status"] = "Done"
2440 nslcmop_operation_state = "COMPLETED"
quilesj4cda56b2019-12-05 10:02:20 +00002441
tiernoe876f672020-02-13 14:34:48 +00002442 if db_nsr:
2443 self._write_ns_status(
2444 nsr_id=nsr_id,
2445 ns_state=ns_state,
2446 current_operation="IDLE",
2447 current_operation_id=None,
2448 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00002449 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00002450 other_update=db_nsr_update
2451 )
tiernoa17d4f42020-04-28 09:59:23 +00002452 self._write_op_status(
2453 op_id=nslcmop_id,
2454 stage="",
2455 error_message=error_description_nslcmop,
2456 operation_state=nslcmop_operation_state,
2457 other_update=db_nslcmop_update,
2458 )
quilesj3655ae02019-12-12 16:08:35 +00002459
tierno59d22d22018-09-25 18:10:19 +02002460 if nslcmop_operation_state:
2461 try:
2462 await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00002463 "operationState": nslcmop_operation_state},
2464 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02002465 except Exception as e:
2466 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
2467
2468 self.logger.debug(logging_text + "Exit")
2469 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2470
tierno588547c2020-07-01 15:30:20 +00002471 async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
2472 timeout: int = 3600, vca_type: str = None) -> bool:
quilesj63f90042020-01-17 09:53:55 +00002473
2474 # steps:
2475 # 1. find all relations for this VCA
2476 # 2. wait for other peers related
2477 # 3. add relations
2478
2479 try:
tierno588547c2020-07-01 15:30:20 +00002480 vca_type = vca_type or "lxc_proxy_charm"
quilesj63f90042020-01-17 09:53:55 +00002481
2482 # STEP 1: find all relations for this VCA
2483
2484 # read nsr record
2485 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garcia171f3542020-05-21 16:41:07 +02002486 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
quilesj63f90042020-01-17 09:53:55 +00002487
2488 # this VCA data
2489 my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]
2490
2491 # read all ns-configuration relations
2492 ns_relations = list()
David Garcia171f3542020-05-21 16:41:07 +02002493 db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
quilesj63f90042020-01-17 09:53:55 +00002494 if db_ns_relations:
2495 for r in db_ns_relations:
2496 # check if this VCA is in the relation
2497 if my_vca.get('member-vnf-index') in\
2498 (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2499 ns_relations.append(r)
2500
2501 # read all vnf-configuration relations
2502 vnf_relations = list()
2503 db_vnfd_list = db_nsr.get('vnfd-id')
2504 if db_vnfd_list:
2505 for vnfd in db_vnfd_list:
2506 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
2507 db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
2508 if db_vnf_relations:
2509 for r in db_vnf_relations:
2510 # check if this VCA is in the relation
2511 if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2512 vnf_relations.append(r)
2513
2514 # if no relations, terminate
2515 if not ns_relations and not vnf_relations:
2516 self.logger.debug(logging_text + ' No relations')
2517 return True
2518
2519 self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))
2520
2521 # add all relations
2522 start = time()
2523 while True:
2524 # check timeout
2525 now = time()
2526 if now - start >= timeout:
2527 self.logger.error(logging_text + ' : timeout adding relations')
2528 return False
2529
2530 # reload nsr from database (we need to update record: _admin.deloyed.VCA)
2531 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2532
2533 # for each defined NS relation, find the VCA's related
tierno364c4572020-09-14 12:11:32 +00002534 for r in ns_relations.copy():
quilesj63f90042020-01-17 09:53:55 +00002535 from_vca_ee_id = None
2536 to_vca_ee_id = None
2537 from_vca_endpoint = None
2538 to_vca_endpoint = None
2539 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2540 for vca in vca_list:
2541 if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
2542 and vca.get('config_sw_installed'):
2543 from_vca_ee_id = vca.get('ee_id')
2544 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2545 if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
2546 and vca.get('config_sw_installed'):
2547 to_vca_ee_id = vca.get('ee_id')
2548 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2549 if from_vca_ee_id and to_vca_ee_id:
2550 # add relation
tierno588547c2020-07-01 15:30:20 +00002551 await self.vca_map[vca_type].add_relation(
quilesj63f90042020-01-17 09:53:55 +00002552 ee_id_1=from_vca_ee_id,
2553 ee_id_2=to_vca_ee_id,
2554 endpoint_1=from_vca_endpoint,
2555 endpoint_2=to_vca_endpoint)
2556 # remove entry from relations list
2557 ns_relations.remove(r)
2558 else:
2559 # check failed peers
2560 try:
2561 vca_status_list = db_nsr.get('configurationStatus')
2562 if vca_status_list:
2563 for i in range(len(vca_list)):
2564 vca = vca_list[i]
2565 vca_status = vca_status_list[i]
2566 if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
2567 if vca_status.get('status') == 'BROKEN':
2568 # peer broken: remove relation from list
2569 ns_relations.remove(r)
2570 if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
2571 if vca_status.get('status') == 'BROKEN':
2572 # peer broken: remove relation from list
2573 ns_relations.remove(r)
2574 except Exception:
2575 # ignore
2576 pass
2577
2578 # for each defined VNF relation, find the VCA's related
tierno364c4572020-09-14 12:11:32 +00002579 for r in vnf_relations.copy():
quilesj63f90042020-01-17 09:53:55 +00002580 from_vca_ee_id = None
2581 to_vca_ee_id = None
2582 from_vca_endpoint = None
2583 to_vca_endpoint = None
2584 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2585 for vca in vca_list:
David Garcia97be6832020-09-09 15:40:44 +02002586 key_to_check = "vdu_id"
2587 if vca.get("vdu_id") is None:
2588 key_to_check = "vnfd_id"
2589 if vca.get(key_to_check) == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
quilesj63f90042020-01-17 09:53:55 +00002590 from_vca_ee_id = vca.get('ee_id')
2591 from_vca_endpoint = r.get('entities')[0].get('endpoint')
David Garcia97be6832020-09-09 15:40:44 +02002592 if vca.get(key_to_check) == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
quilesj63f90042020-01-17 09:53:55 +00002593 to_vca_ee_id = vca.get('ee_id')
2594 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2595 if from_vca_ee_id and to_vca_ee_id:
2596 # add relation
tierno588547c2020-07-01 15:30:20 +00002597 await self.vca_map[vca_type].add_relation(
quilesj63f90042020-01-17 09:53:55 +00002598 ee_id_1=from_vca_ee_id,
2599 ee_id_2=to_vca_ee_id,
2600 endpoint_1=from_vca_endpoint,
2601 endpoint_2=to_vca_endpoint)
2602 # remove entry from relations list
2603 vnf_relations.remove(r)
2604 else:
2605 # check failed peers
2606 try:
2607 vca_status_list = db_nsr.get('configurationStatus')
2608 if vca_status_list:
2609 for i in range(len(vca_list)):
2610 vca = vca_list[i]
2611 vca_status = vca_status_list[i]
2612 if vca.get('vdu_id') == r.get('entities')[0].get('id'):
2613 if vca_status.get('status') == 'BROKEN':
2614 # peer broken: remove relation from list
David Garcia092afbd2020-08-25 13:17:25 +02002615 vnf_relations.remove(r)
quilesj63f90042020-01-17 09:53:55 +00002616 if vca.get('vdu_id') == r.get('entities')[1].get('id'):
2617 if vca_status.get('status') == 'BROKEN':
2618 # peer broken: remove relation from list
David Garcia092afbd2020-08-25 13:17:25 +02002619 vnf_relations.remove(r)
quilesj63f90042020-01-17 09:53:55 +00002620 except Exception:
2621 # ignore
2622 pass
2623
2624 # wait for next try
2625 await asyncio.sleep(5.0)
2626
2627 if not ns_relations and not vnf_relations:
2628 self.logger.debug('Relations added')
2629 break
2630
2631 return True
2632
2633 except Exception as e:
2634 self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
2635 return False
2636
tierno7ecbc342020-09-21 14:05:39 +00002637 async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdud: dict,
lloretgalleg7c121132020-07-08 07:53:22 +00002638 vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600):
2639
tiernob9018152020-04-16 14:18:24 +00002640 try:
lloretgalleg7c121132020-07-08 07:53:22 +00002641 k8sclustertype = k8s_instance_info["k8scluster-type"]
2642 # Instantiate kdu
2643 db_dict_install = {"collection": "nsrs",
2644 "filter": {"_id": nsr_id},
2645 "path": nsr_db_path}
2646
2647 kdu_instance = await self.k8scluster_map[k8sclustertype].install(
2648 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2649 kdu_model=k8s_instance_info["kdu-model"],
2650 atomic=True,
2651 params=k8params,
2652 db_dict=db_dict_install,
2653 timeout=timeout,
2654 kdu_name=k8s_instance_info["kdu-name"],
2655 namespace=k8s_instance_info["namespace"])
2656 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})
2657
2658 # Obtain services to obtain management service ip
2659 services = await self.k8scluster_map[k8sclustertype].get_services(
2660 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2661 kdu_instance=kdu_instance,
2662 namespace=k8s_instance_info["namespace"])
2663
2664 # Obtain management service info (if exists)
tierno7ecbc342020-09-21 14:05:39 +00002665 vnfr_update_dict = {}
lloretgalleg7c121132020-07-08 07:53:22 +00002666 if services:
tierno7ecbc342020-09-21 14:05:39 +00002667 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
lloretgalleg7c121132020-07-08 07:53:22 +00002668 mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")]
2669 for mgmt_service in mgmt_services:
2670 for service in services:
2671 if service["name"].startswith(mgmt_service["name"]):
2672 # Mgmt service found, Obtain service ip
2673 ip = service.get("external_ip", service.get("cluster_ip"))
2674 if isinstance(ip, list) and len(ip) == 1:
2675 ip = ip[0]
2676
2677 vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip
2678
2679 # Check if must update also mgmt ip at the vnf
2680 service_external_cp = mgmt_service.get("external-connection-point-ref")
2681 if service_external_cp:
2682 if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp:
2683 vnfr_update_dict["ip-address"] = ip
2684
2685 break
2686 else:
2687 self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"]))
2688
tierno7ecbc342020-09-21 14:05:39 +00002689 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
2690 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
lloretgalleg7c121132020-07-08 07:53:22 +00002691
Dominik Fleischmannc1975dd2020-08-19 12:17:51 +02002692 kdu_config = kdud.get("kdu-configuration")
2693 if kdu_config and kdu_config.get("initial-config-primitive") and kdu_config.get("juju") is None:
2694 initial_config_primitive_list = kdu_config.get("initial-config-primitive")
2695 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
2696
2697 for initial_config_primitive in initial_config_primitive_list:
2698 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, {})
2699
2700 await asyncio.wait_for(
2701 self.k8scluster_map[k8sclustertype].exec_primitive(
2702 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2703 kdu_instance=kdu_instance,
2704 primitive_name=initial_config_primitive["name"],
2705 params=primitive_params_, db_dict={}),
2706 timeout=timeout)
2707
tiernob9018152020-04-16 14:18:24 +00002708 except Exception as e:
lloretgalleg7c121132020-07-08 07:53:22 +00002709 # Prepare update db with error and raise exception
tiernob9018152020-04-16 14:18:24 +00002710 try:
lloretgalleg7c121132020-07-08 07:53:22 +00002711 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)})
tierno7ecbc342020-09-21 14:05:39 +00002712 self.update_db_2("vnfrs", vnfr_data.get("_id"), {"kdur.{}.status".format(kdu_index): "ERROR"})
tiernob9018152020-04-16 14:18:24 +00002713 except Exception:
lloretgalleg7c121132020-07-08 07:53:22 +00002714 # ignore to keep original exception
tiernob9018152020-04-16 14:18:24 +00002715 pass
lloretgalleg7c121132020-07-08 07:53:22 +00002716 # reraise original error
2717 raise
2718
2719 return kdu_instance
tiernob9018152020-04-16 14:18:24 +00002720
tiernoe876f672020-02-13 14:34:48 +00002721 async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
calvinosanch9f9c6f22019-11-04 13:37:39 +01002722 # Launch kdus if present in the descriptor
tierno626e0152019-11-29 14:16:16 +00002723
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002724 k8scluster_id_2_uuic = {"helm-chart-v3": {}, "helm-chart": {}, "juju-bundle": {}}
tierno626e0152019-11-29 14:16:16 +00002725
tierno16f4a4e2020-07-20 09:05:51 +00002726 async def _get_cluster_id(cluster_id, cluster_type):
tierno626e0152019-11-29 14:16:16 +00002727 nonlocal k8scluster_id_2_uuic
2728 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
2729 return k8scluster_id_2_uuic[cluster_type][cluster_id]
2730
tierno16f4a4e2020-07-20 09:05:51 +00002731 # check if K8scluster is creating and wait look if previous tasks in process
2732 task_name, task_dependency = self.lcm_tasks.lookfor_related("k8scluster", cluster_id)
2733 if task_dependency:
2734 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(task_name, cluster_id)
2735 self.logger.debug(logging_text + text)
2736 await asyncio.wait(task_dependency, timeout=3600)
2737
tierno626e0152019-11-29 14:16:16 +00002738 db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
2739 if not db_k8scluster:
2740 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
tierno16f4a4e2020-07-20 09:05:51 +00002741
tierno626e0152019-11-29 14:16:16 +00002742 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
2743 if not k8s_id:
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002744 if cluster_type == "helm-chart-v3":
2745 try:
2746 # backward compatibility for existing clusters that have not been initialized for helm v3
2747 k8s_credentials = yaml.safe_dump(db_k8scluster.get("credentials"))
2748 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(k8s_credentials,
2749 reuse_cluster_uuid=cluster_id)
2750 db_k8scluster_update = {}
2751 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
2752 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
2753 db_k8scluster_update["_admin.helm-chart-v3.created"] = uninstall_sw
2754 db_k8scluster_update["_admin.helm-chart-v3.operationalState"] = "ENABLED"
2755 self.update_db_2("k8sclusters", cluster_id, db_k8scluster_update)
2756 except Exception as e:
2757 self.logger.error(logging_text + "error initializing helm-v3 cluster: {}".format(str(e)))
2758 raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id,
2759 cluster_type))
2760 else:
2761 raise LcmException("K8s cluster '{}' has not been initialized for '{}'".
2762 format(cluster_id, cluster_type))
tierno626e0152019-11-29 14:16:16 +00002763 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
2764 return k8s_id
2765
2766 logging_text += "Deploy kdus: "
tiernoe876f672020-02-13 14:34:48 +00002767 step = ""
calvinosanch9f9c6f22019-11-04 13:37:39 +01002768 try:
tierno626e0152019-11-29 14:16:16 +00002769 db_nsr_update = {"_admin.deployed.K8s": []}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002770 self.update_db_2("nsrs", nsr_id, db_nsr_update)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002771
tierno626e0152019-11-29 14:16:16 +00002772 index = 0
tiernoe876f672020-02-13 14:34:48 +00002773 updated_cluster_list = []
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002774 updated_v3_cluster_list = []
tiernoe876f672020-02-13 14:34:48 +00002775
tierno626e0152019-11-29 14:16:16 +00002776 for vnfr_data in db_vnfrs.values():
lloretgalleg7c121132020-07-08 07:53:22 +00002777 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
2778 # Step 0: Prepare and set parameters
tierno626e0152019-11-29 14:16:16 +00002779 desc_params = self._format_additional_params(kdur.get("additionalParams"))
quilesjacde94f2020-01-23 10:07:08 +00002780 vnfd_id = vnfr_data.get('vnfd-id')
lloretgalleg7c121132020-07-08 07:53:22 +00002781 kdud = next(kdud for kdud in db_vnfds[vnfd_id]["kdu"] if kdud["name"] == kdur["kdu-name"])
tiernode1584f2020-04-07 09:07:33 +00002782 namespace = kdur.get("k8s-namespace")
tierno626e0152019-11-29 14:16:16 +00002783 if kdur.get("helm-chart"):
2784 kdumodel = kdur["helm-chart"]
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002785 # Default version: helm3, if helm-version is v2 assign v2
2786 k8sclustertype = "helm-chart-v3"
2787 self.logger.debug("kdur: {}".format(kdur))
2788 if kdur.get("helm-version") and kdur.get("helm-version") == "v2":
2789 k8sclustertype = "helm-chart"
tierno626e0152019-11-29 14:16:16 +00002790 elif kdur.get("juju-bundle"):
2791 kdumodel = kdur["juju-bundle"]
tiernoe876f672020-02-13 14:34:48 +00002792 k8sclustertype = "juju-bundle"
tierno626e0152019-11-29 14:16:16 +00002793 else:
tiernoe876f672020-02-13 14:34:48 +00002794 raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
2795 "juju-bundle. Maybe an old NBI version is running".
2796 format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
quilesjacde94f2020-01-23 10:07:08 +00002797 # check if kdumodel is a file and exists
2798 try:
tierno51183952020-04-03 15:48:18 +00002799 storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
2800 if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts
2801 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
Dominik Fleischmann010c0e72020-05-18 15:19:11 +02002802 filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
tierno51183952020-04-03 15:48:18 +00002803 kdumodel)
2804 if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
2805 kdumodel = self.fs.path + filename
2806 except (asyncio.TimeoutError, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00002807 raise
2808 except Exception: # it is not a file
quilesjacde94f2020-01-23 10:07:08 +00002809 pass
lloretgallegedc5f332020-02-20 11:50:50 +01002810
tiernoe876f672020-02-13 14:34:48 +00002811 k8s_cluster_id = kdur["k8s-cluster"]["id"]
2812 step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
tierno16f4a4e2020-07-20 09:05:51 +00002813 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
lloretgallegedc5f332020-02-20 11:50:50 +01002814
lloretgalleg7c121132020-07-08 07:53:22 +00002815 # Synchronize repos
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002816 if (k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list)\
2817 or (k8sclustertype == "helm-chart-v3" and cluster_uuid not in updated_v3_cluster_list):
tiernoe876f672020-02-13 14:34:48 +00002818 del_repo_list, added_repo_dict = await asyncio.ensure_future(
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002819 self.k8scluster_map[k8sclustertype].synchronize_repos(cluster_uuid=cluster_uuid))
tiernoe876f672020-02-13 14:34:48 +00002820 if del_repo_list or added_repo_dict:
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002821 if k8sclustertype == "helm-chart":
2822 unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
2823 updated = {'_admin.helm_charts_added.' +
2824 item: name for item, name in added_repo_dict.items()}
2825 updated_cluster_list.append(cluster_uuid)
2826 elif k8sclustertype == "helm-chart-v3":
2827 unset = {'_admin.helm_charts_v3_added.' + item: None for item in del_repo_list}
2828 updated = {'_admin.helm_charts_v3_added.' +
2829 item: name for item, name in added_repo_dict.items()}
2830 updated_v3_cluster_list.append(cluster_uuid)
2831 self.logger.debug(logging_text + "repos synchronized on k8s cluster "
2832 "'{}' to_delete: {}, to_add: {}".
2833 format(k8s_cluster_id, del_repo_list, added_repo_dict))
tiernoe876f672020-02-13 14:34:48 +00002834 self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
lloretgallegedc5f332020-02-20 11:50:50 +01002835
lloretgalleg7c121132020-07-08 07:53:22 +00002836 # Instantiate kdu
tiernoe876f672020-02-13 14:34:48 +00002837 step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
2838 kdur["kdu-name"], k8s_cluster_id)
lloretgalleg7c121132020-07-08 07:53:22 +00002839 k8s_instance_info = {"kdu-instance": None,
2840 "k8scluster-uuid": cluster_uuid,
2841 "k8scluster-type": k8sclustertype,
2842 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
2843 "kdu-name": kdur["kdu-name"],
2844 "kdu-model": kdumodel,
2845 "namespace": namespace}
tiernob9018152020-04-16 14:18:24 +00002846 db_path = "_admin.deployed.K8s.{}".format(index)
lloretgalleg7c121132020-07-08 07:53:22 +00002847 db_nsr_update[db_path] = k8s_instance_info
tierno626e0152019-11-29 14:16:16 +00002848 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tierno626e0152019-11-29 14:16:16 +00002849
tiernoa2143262020-03-27 16:20:40 +00002850 task = asyncio.ensure_future(
tierno7ecbc342020-09-21 14:05:39 +00002851 self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, db_vnfds[vnfd_id],
lloretgalleg7c121132020-07-08 07:53:22 +00002852 k8s_instance_info, k8params=desc_params, timeout=600))
tiernoe876f672020-02-13 14:34:48 +00002853 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
tiernoa2143262020-03-27 16:20:40 +00002854 task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
tiernoe876f672020-02-13 14:34:48 +00002855
tierno626e0152019-11-29 14:16:16 +00002856 index += 1
quilesjdd799ac2020-01-23 16:31:11 +00002857
tiernoe876f672020-02-13 14:34:48 +00002858 except (LcmException, asyncio.CancelledError):
2859 raise
calvinosanch9f9c6f22019-11-04 13:37:39 +01002860 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00002861 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
2862 if isinstance(e, (N2VCException, DbException)):
2863 self.logger.error(logging_text + msg)
2864 else:
2865 self.logger.critical(logging_text + msg, exc_info=True)
quilesjdd799ac2020-01-23 16:31:11 +00002866 raise LcmException(msg)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002867 finally:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002868 if db_nsr_update:
2869 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00002870
quilesj7e13aeb2019-10-08 13:34:55 +02002871 def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002872 kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
tiernoe876f672020-02-13 14:34:48 +00002873 base_folder, task_instantiation_info, stage):
quilesj7e13aeb2019-10-08 13:34:55 +02002874 # launch instantiate_N2VC in a asyncio task and register task object
2875 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
2876 # if not found, create one entry and update database
quilesj7e13aeb2019-10-08 13:34:55 +02002877 # fill db_nsr._admin.deployed.VCA.<index>
tierno588547c2020-07-01 15:30:20 +00002878
2879 self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
2880 if descriptor_config.get("juju"): # There is one execution envioronment of type juju
2881 ee_list = [descriptor_config]
2882 elif descriptor_config.get("execution-environment-list"):
2883 ee_list = descriptor_config.get("execution-environment-list")
2884 else: # other types as script are not supported
2885 ee_list = []
2886
2887 for ee_item in ee_list:
2888 self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'),
2889 ee_item.get("helm-chart")))
tiernoa278b842020-07-08 15:33:55 +00002890 ee_descriptor_id = ee_item.get("id")
tierno588547c2020-07-01 15:30:20 +00002891 if ee_item.get("juju"):
2892 vca_name = ee_item['juju'].get('charm')
2893 vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm"
2894 if ee_item['juju'].get('cloud') == "k8s":
2895 vca_type = "k8s_proxy_charm"
2896 elif ee_item['juju'].get('proxy') is False:
2897 vca_type = "native_charm"
2898 elif ee_item.get("helm-chart"):
2899 vca_name = ee_item['helm-chart']
lloretgalleg18ebc3a2020-10-22 09:54:51 +00002900 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
2901 vca_type = "helm"
2902 else:
2903 vca_type = "helm-v3"
tierno588547c2020-07-01 15:30:20 +00002904 else:
2905 self.logger.debug(logging_text + "skipping non juju neither charm configuration")
quilesj7e13aeb2019-10-08 13:34:55 +02002906 continue
quilesj3655ae02019-12-12 16:08:35 +00002907
tierno588547c2020-07-01 15:30:20 +00002908 vca_index = -1
2909 for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
2910 if not vca_deployed:
2911 continue
2912 if vca_deployed.get("member-vnf-index") == member_vnf_index and \
2913 vca_deployed.get("vdu_id") == vdu_id and \
2914 vca_deployed.get("kdu_name") == kdu_name and \
tiernoa278b842020-07-08 15:33:55 +00002915 vca_deployed.get("vdu_count_index", 0) == vdu_index and \
2916 vca_deployed.get("ee_descriptor_id") == ee_descriptor_id:
tierno588547c2020-07-01 15:30:20 +00002917 break
2918 else:
2919 # not found, create one.
tiernoa278b842020-07-08 15:33:55 +00002920 target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
2921 if vdu_id:
2922 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
2923 elif kdu_name:
2924 target += "/kdu/{}".format(kdu_name)
tierno588547c2020-07-01 15:30:20 +00002925 vca_deployed = {
tiernoa278b842020-07-08 15:33:55 +00002926 "target_element": target,
2927 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
tierno588547c2020-07-01 15:30:20 +00002928 "member-vnf-index": member_vnf_index,
2929 "vdu_id": vdu_id,
2930 "kdu_name": kdu_name,
2931 "vdu_count_index": vdu_index,
2932 "operational-status": "init", # TODO revise
2933 "detailed-status": "", # TODO revise
2934 "step": "initial-deploy", # TODO revise
2935 "vnfd_id": vnfd_id,
2936 "vdu_name": vdu_name,
tiernoa278b842020-07-08 15:33:55 +00002937 "type": vca_type,
2938 "ee_descriptor_id": ee_descriptor_id
tierno588547c2020-07-01 15:30:20 +00002939 }
2940 vca_index += 1
quilesj3655ae02019-12-12 16:08:35 +00002941
tierno588547c2020-07-01 15:30:20 +00002942 # create VCA and configurationStatus in db
2943 db_dict = {
2944 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
2945 "configurationStatus.{}".format(vca_index): dict()
2946 }
2947 self.update_db_2("nsrs", nsr_id, db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02002948
tierno588547c2020-07-01 15:30:20 +00002949 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
2950
2951 # Launch task
2952 task_n2vc = asyncio.ensure_future(
2953 self.instantiate_N2VC(
2954 logging_text=logging_text,
2955 vca_index=vca_index,
2956 nsi_id=nsi_id,
2957 db_nsr=db_nsr,
2958 db_vnfr=db_vnfr,
2959 vdu_id=vdu_id,
2960 kdu_name=kdu_name,
2961 vdu_index=vdu_index,
2962 deploy_params=deploy_params,
2963 config_descriptor=descriptor_config,
2964 base_folder=base_folder,
2965 nslcmop_id=nslcmop_id,
2966 stage=stage,
2967 vca_type=vca_type,
tiernob996d942020-07-03 14:52:28 +00002968 vca_name=vca_name,
2969 ee_config_descriptor=ee_item
tierno588547c2020-07-01 15:30:20 +00002970 )
quilesj7e13aeb2019-10-08 13:34:55 +02002971 )
tierno588547c2020-07-01 15:30:20 +00002972 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
2973 task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
2974 member_vnf_index or "", vdu_id or "")
tiernobaa51102018-12-14 13:16:18 +00002975
tiernoc9556972019-07-05 15:25:25 +00002976 @staticmethod
tiernoa278b842020-07-08 15:33:55 +00002977 def _get_terminate_config_primitive(primitive_list, vca_deployed):
2978 """ Get a sorted terminate config primitive list. In case ee_descriptor_id is present at vca_deployed,
2979 it get only those primitives for this execution envirom"""
2980
2981 primitive_list = primitive_list or []
2982 # filter primitives by ee_descriptor_id
2983 ee_descriptor_id = vca_deployed.get("ee_descriptor_id")
2984 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
2985
2986 if primitive_list:
2987 primitive_list.sort(key=lambda val: int(val['seq']))
2988
2989 return primitive_list
kuuse0ca67472019-05-13 15:59:27 +02002990
2991 @staticmethod
2992 def _create_nslcmop(nsr_id, operation, params):
2993 """
2994 Creates a ns-lcm-opp content to be stored at database.
2995 :param nsr_id: internal id of the instance
2996 :param operation: instantiate, terminate, scale, action, ...
2997 :param params: user parameters for the operation
2998 :return: dictionary following SOL005 format
2999 """
3000 # Raise exception if invalid arguments
3001 if not (nsr_id and operation and params):
3002 raise LcmException(
3003 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
3004 now = time()
3005 _id = str(uuid4())
3006 nslcmop = {
3007 "id": _id,
3008 "_id": _id,
3009 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3010 "operationState": "PROCESSING",
3011 "statusEnteredTime": now,
3012 "nsInstanceId": nsr_id,
3013 "lcmOperationType": operation,
3014 "startTime": now,
3015 "isAutomaticInvocation": False,
3016 "operationParams": params,
3017 "isCancelPending": False,
3018 "links": {
3019 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3020 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3021 }
3022 }
3023 return nslcmop
3024
calvinosanch9f9c6f22019-11-04 13:37:39 +01003025 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00003026 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01003027 for key, value in params.items():
3028 if str(value).startswith("!!yaml "):
3029 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01003030 return params
3031
kuuse8b998e42019-07-30 15:22:16 +02003032 def _get_terminate_primitive_params(self, seq, vnf_index):
3033 primitive = seq.get('name')
3034 primitive_params = {}
3035 params = {
3036 "member_vnf_index": vnf_index,
3037 "primitive": primitive,
3038 "primitive_params": primitive_params,
3039 }
3040 desc_params = {}
3041 return self._map_primitive_params(seq, params, desc_params)
3042
kuuseac3a8882019-10-03 10:48:06 +02003043 # sub-operations
3044
tierno51183952020-04-03 15:48:18 +00003045 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3046 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
3047 if op.get('operationState') == 'COMPLETED':
kuuseac3a8882019-10-03 10:48:06 +02003048 # b. Skip sub-operation
3049 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3050 return self.SUBOPERATION_STATUS_SKIP
3051 else:
tierno7c4e24c2020-05-13 08:41:35 +00003052 # c. retry executing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02003053 # The sub-operation exists, and operationState != 'COMPLETED'
tierno7c4e24c2020-05-13 08:41:35 +00003054 # Update operationState = 'PROCESSING' to indicate a retry.
kuuseac3a8882019-10-03 10:48:06 +02003055 operationState = 'PROCESSING'
3056 detailed_status = 'In progress'
3057 self._update_suboperation_status(
3058 db_nslcmop, op_index, operationState, detailed_status)
3059 # Return the sub-operation index
3060 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3061 # with arguments extracted from the sub-operation
3062 return op_index
3063
3064 # Find a sub-operation where all keys in a matching dictionary must match
3065 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3066 def _find_suboperation(self, db_nslcmop, match):
tierno7c4e24c2020-05-13 08:41:35 +00003067 if db_nslcmop and match:
kuuseac3a8882019-10-03 10:48:06 +02003068 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
3069 for i, op in enumerate(op_list):
3070 if all(op.get(k) == match[k] for k in match):
3071 return i
3072 return self.SUBOPERATION_STATUS_NOT_FOUND
3073
3074 # Update status for a sub-operation given its index
3075 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
3076 # Update DB for HA tasks
3077 q_filter = {'_id': db_nslcmop['_id']}
3078 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
3079 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
3080 self.db.set_one("nslcmops",
3081 q_filter=q_filter,
3082 update_dict=update_dict,
3083 fail_on_empty=False)
3084
3085 # Add sub-operation, return the index of the added sub-operation
3086 # Optionally, set operationState, detailed-status, and operationType
3087 # Status and type are currently set for 'scale' sub-operations:
3088 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3089 # 'detailed-status' : status message
3090 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3091 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
tierno2357f4e2020-10-19 16:38:59 +00003092 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
quilesj7e13aeb2019-10-08 13:34:55 +02003093 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
kuuseac3a8882019-10-03 10:48:06 +02003094 RO_nsr_id=None, RO_scaling_info=None):
tiernoe876f672020-02-13 14:34:48 +00003095 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02003096 return self.SUBOPERATION_STATUS_NOT_FOUND
3097 # Get the "_admin.operations" list, if it exists
3098 db_nslcmop_admin = db_nslcmop.get('_admin', {})
3099 op_list = db_nslcmop_admin.get('operations')
3100 # Create or append to the "_admin.operations" list
kuuse8b998e42019-07-30 15:22:16 +02003101 new_op = {'member_vnf_index': vnf_index,
3102 'vdu_id': vdu_id,
3103 'vdu_count_index': vdu_count_index,
3104 'primitive': primitive,
3105 'primitive_params': mapped_primitive_params}
kuuseac3a8882019-10-03 10:48:06 +02003106 if operationState:
3107 new_op['operationState'] = operationState
3108 if detailed_status:
3109 new_op['detailed-status'] = detailed_status
3110 if operationType:
3111 new_op['lcmOperationType'] = operationType
3112 if RO_nsr_id:
3113 new_op['RO_nsr_id'] = RO_nsr_id
3114 if RO_scaling_info:
3115 new_op['RO_scaling_info'] = RO_scaling_info
3116 if not op_list:
3117 # No existing operations, create key 'operations' with current operation as first list element
3118 db_nslcmop_admin.update({'operations': [new_op]})
3119 op_list = db_nslcmop_admin.get('operations')
3120 else:
3121 # Existing operations, append operation to list
3122 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02003123
kuuseac3a8882019-10-03 10:48:06 +02003124 db_nslcmop_update = {'_admin.operations': op_list}
3125 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
3126 op_index = len(op_list) - 1
3127 return op_index
3128
3129 # Helper methods for scale() sub-operations
3130
3131 # pre-scale/post-scale:
3132 # Check for 3 different cases:
3133 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3134 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
tierno7c4e24c2020-05-13 08:41:35 +00003135 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
quilesj7e13aeb2019-10-08 13:34:55 +02003136 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
3137 operationType, RO_nsr_id=None, RO_scaling_info=None):
kuuseac3a8882019-10-03 10:48:06 +02003138 # Find this sub-operation
tierno7c4e24c2020-05-13 08:41:35 +00003139 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02003140 operationType = 'SCALE-RO'
3141 match = {
3142 'member_vnf_index': vnf_index,
3143 'RO_nsr_id': RO_nsr_id,
3144 'RO_scaling_info': RO_scaling_info,
3145 }
3146 else:
3147 match = {
3148 'member_vnf_index': vnf_index,
3149 'primitive': vnf_config_primitive,
3150 'primitive_params': primitive_params,
3151 'lcmOperationType': operationType
3152 }
3153 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00003154 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02003155 # a. New sub-operation
3156 # The sub-operation does not exist, add it.
3157 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3158 # The following parameters are set to None for all kind of scaling:
3159 vdu_id = None
3160 vdu_count_index = None
3161 vdu_name = None
tierno51183952020-04-03 15:48:18 +00003162 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02003163 vnf_config_primitive = None
3164 primitive_params = None
3165 else:
3166 RO_nsr_id = None
3167 RO_scaling_info = None
3168 # Initial status for sub-operation
3169 operationState = 'PROCESSING'
3170 detailed_status = 'In progress'
3171 # Add sub-operation for pre/post-scaling (zero or more operations)
3172 self._add_suboperation(db_nslcmop,
3173 vnf_index,
3174 vdu_id,
3175 vdu_count_index,
3176 vdu_name,
3177 vnf_config_primitive,
3178 primitive_params,
3179 operationState,
3180 detailed_status,
3181 operationType,
3182 RO_nsr_id,
3183 RO_scaling_info)
3184 return self.SUBOPERATION_STATUS_NEW
3185 else:
3186 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3187 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00003188 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02003189
preethika.pdf7d8e02019-12-10 13:10:48 +00003190 # Function to return execution_environment id
3191
3192 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00003193 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00003194 for vca in vca_deployed_list:
3195 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3196 return vca["ee_id"]
3197
    async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
                           vca_index, destroy_ee=True, exec_primitives=True):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix added to every log message of this task
        :param db_nslcmop: database content of the nslcmop that triggered this termination
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :return: None or exception
        """

        self.logger.debug(
            logging_text + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # fall back to a proxy charm when the deployment record does not state its type
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = self._get_terminate_config_primitive(
                config_descriptor.get("terminate-config-primitive"), vca_deployed)
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # run primitives only while the deployment still carries the needed_terminate flag
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name"))
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get('name')
                    mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)

                    # Add sub-operation
                    self._add_suboperation(db_nslcmop,
                                           vnf_index,
                                           vdu_id,
                                           vdu_count_index,
                                           vdu_name,
                                           primitive,
                                           mapped_primitive_params)
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
                                                                                 mapped_primitive_params,
                                                                                 vca_type=vca_type)
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
                    if result not in result_ok:
                        raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
                                           "error {}".format(seq.get("name"), vnf_index, result_detail))
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
                self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})

        # de-register any prometheus jobs that were created for this VCA
        if vca_deployed.get("prometheus_jobs") and self.prometheus:
            await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"])
kuuse0ca67472019-05-13 15:59:27 +02003268
tierno51183952020-04-03 15:48:18 +00003269 async def _delete_all_N2VC(self, db_nsr: dict):
3270 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
3271 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00003272 try:
3273 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
3274 except N2VCNotFound: # already deleted. Skip
3275 pass
tierno51183952020-04-03 15:48:18 +00003276 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
quilesj3655ae02019-12-12 16:08:35 +00003277
    async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
        """
        Terminates a deployment from RO
        :param logging_text: prefix added to every log message of this task
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: content of db_nsr "_id"
        :param nslcmop_id: content of db_nslcmop "_id"
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
        this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None; raises LcmException when any of the RO deletions failed
        """
        db_nsr_update = {}
        failed_detail = []
        # ids of the RO ns instance and of a delete action possibly left over by a
        # previous (interrupted) termination attempt
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                # request asynchronous deletion; RO returns an action id to poll
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
                                                                                                   ro_delete_action))
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                # poll the delete action every 5 seconds until it completes or times out
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action)

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # deletion finished at the VIM
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
                    # persist progress only when the status text actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if isinstance(e, ROclient.ROClientException) and e.http_code == 404:  # not found
                # already gone at RO: treat as success and clear the stored ids
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
            elif isinstance(e, ROclient.ROClientException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))

        # Delete nsd (only attempted when the ns deletion itself succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if isinstance(e, ROclient.ROClientException) and e.http_code == 404:  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
                elif isinstance(e, ROclient.ROClientException) and e.http_code == 409:  # conflict
                    failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds registered at RO for each deployed member vnf
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id)
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if isinstance(e, ROclient.ROClientException) and e.http_code == 404:  # not found
                        db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                        self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
                    elif isinstance(e, ROclient.ROClientException) and e.http_code == 409:  # conflict
                        failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00003420
3421 async def terminate(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003422 # Try to lock HA task here
3423 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3424 if not task_is_locked_by_me:
3425 return
3426
tierno59d22d22018-09-25 18:10:19 +02003427 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
3428 self.logger.debug(logging_text + "Enter")
tiernoe876f672020-02-13 14:34:48 +00003429 timeout_ns_terminate = self.timeout_ns_terminate
tierno59d22d22018-09-25 18:10:19 +02003430 db_nsr = None
3431 db_nslcmop = None
tiernoa17d4f42020-04-28 09:59:23 +00003432 operation_params = None
tierno59d22d22018-09-25 18:10:19 +02003433 exc = None
tiernoe876f672020-02-13 14:34:48 +00003434 error_list = [] # annotates all failed error messages
tierno59d22d22018-09-25 18:10:19 +02003435 db_nslcmop_update = {}
tiernoc2564fe2019-01-28 16:18:56 +00003436 autoremove = False # autoremove after terminated
tiernoe876f672020-02-13 14:34:48 +00003437 tasks_dict_info = {}
3438 db_nsr_update = {}
3439 stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
3440 # ^ contains [stage, step, VIM-status]
tierno59d22d22018-09-25 18:10:19 +02003441 try:
kuused124bfe2019-06-18 12:09:24 +02003442 # wait for any previous tasks in process
3443 await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)
3444
tiernoe876f672020-02-13 14:34:48 +00003445 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
3446 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3447 operation_params = db_nslcmop.get("operationParams") or {}
3448 if operation_params.get("timeout_ns_terminate"):
3449 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
3450 stage[1] = "Getting nsr={} from db.".format(nsr_id)
3451 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3452
3453 db_nsr_update["operational-status"] = "terminating"
3454 db_nsr_update["config-status"] = "terminating"
quilesj4cda56b2019-12-05 10:02:20 +00003455 self._write_ns_status(
3456 nsr_id=nsr_id,
3457 ns_state="TERMINATING",
3458 current_operation="TERMINATING",
tiernoe876f672020-02-13 14:34:48 +00003459 current_operation_id=nslcmop_id,
3460 other_update=db_nsr_update
quilesj4cda56b2019-12-05 10:02:20 +00003461 )
quilesj3655ae02019-12-12 16:08:35 +00003462 self._write_op_status(
3463 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00003464 queuePosition=0,
3465 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00003466 )
tiernoe876f672020-02-13 14:34:48 +00003467 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
tierno59d22d22018-09-25 18:10:19 +02003468 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
3469 return
tierno59d22d22018-09-25 18:10:19 +02003470
tiernoe876f672020-02-13 14:34:48 +00003471 stage[1] = "Getting vnf descriptors from db."
3472 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
3473 db_vnfds_from_id = {}
3474 db_vnfds_from_member_index = {}
3475 # Loop over VNFRs
3476 for vnfr in db_vnfrs_list:
3477 vnfd_id = vnfr["vnfd-id"]
3478 if vnfd_id not in db_vnfds_from_id:
3479 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
3480 db_vnfds_from_id[vnfd_id] = vnfd
3481 db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]
calvinosanch9f9c6f22019-11-04 13:37:39 +01003482
tiernoe876f672020-02-13 14:34:48 +00003483 # Destroy individual execution environments when there are terminating primitives.
3484 # Rest of EE will be deleted at once
tierno588547c2020-07-01 15:30:20 +00003485 # TODO - check before calling _destroy_N2VC
3486 # if not operation_params.get("skip_terminate_primitives"):#
3487 # or not vca.get("needed_terminate"):
3488 stage[0] = "Stage 2/3 execute terminating primitives."
3489 self.logger.debug(logging_text + stage[0])
3490 stage[1] = "Looking execution environment that needs terminate."
3491 self.logger.debug(logging_text + stage[1])
tiernob996d942020-07-03 14:52:28 +00003492 # self.logger.debug("nsr_deployed: {}".format(nsr_deployed))
tierno588547c2020-07-01 15:30:20 +00003493 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
tierno588547c2020-07-01 15:30:20 +00003494 config_descriptor = None
3495 if not vca or not vca.get("ee_id"):
3496 continue
3497 if not vca.get("member-vnf-index"):
3498 # ns
3499 config_descriptor = db_nsr.get("ns-configuration")
3500 elif vca.get("vdu_id"):
3501 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3502 vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
3503 if vdud:
3504 config_descriptor = vdud.get("vdu-configuration")
3505 elif vca.get("kdu_name"):
3506 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3507 kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
3508 if kdud:
3509 config_descriptor = kdud.get("kdu-configuration")
3510 else:
3511 config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
tierno588547c2020-07-01 15:30:20 +00003512 vca_type = vca.get("type")
3513 exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
3514 vca.get("needed_terminate"))
tiernoaebd7da2020-08-07 06:36:38 +00003515 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
3516 # pending native charms
lloretgalleg18ebc3a2020-10-22 09:54:51 +00003517 destroy_ee = True if vca_type in ("helm", "helm-v3", "native_charm") else False
tierno86e33612020-09-16 14:13:06 +00003518 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
3519 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
tiernob996d942020-07-03 14:52:28 +00003520 task = asyncio.ensure_future(
3521 self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index,
3522 destroy_ee, exec_terminate_primitives))
tierno588547c2020-07-01 15:30:20 +00003523 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
tierno59d22d22018-09-25 18:10:19 +02003524
tierno588547c2020-07-01 15:30:20 +00003525 # wait for pending tasks of terminate primitives
3526 if tasks_dict_info:
tierno86e33612020-09-16 14:13:06 +00003527 self.logger.debug(logging_text + 'Waiting for tasks {}'.format(list(tasks_dict_info.keys())))
tierno588547c2020-07-01 15:30:20 +00003528 error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
3529 min(self.timeout_charm_delete, timeout_ns_terminate),
3530 stage, nslcmop_id)
tierno86e33612020-09-16 14:13:06 +00003531 tasks_dict_info.clear()
tierno588547c2020-07-01 15:30:20 +00003532 if error_list:
3533 return # raise LcmException("; ".join(error_list))
tierno82974b22018-11-27 21:55:36 +00003534
tiernoe876f672020-02-13 14:34:48 +00003535 # remove All execution environments at once
3536 stage[0] = "Stage 3/3 delete all."
quilesj3655ae02019-12-12 16:08:35 +00003537
tierno49676be2020-04-07 16:34:35 +00003538 if nsr_deployed.get("VCA"):
3539 stage[1] = "Deleting all execution environments."
3540 self.logger.debug(logging_text + stage[1])
3541 task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
3542 timeout=self.timeout_charm_delete))
3543 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
3544 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
tierno59d22d22018-09-25 18:10:19 +02003545
tiernoe876f672020-02-13 14:34:48 +00003546 # Delete from k8scluster
3547 stage[1] = "Deleting KDUs."
3548 self.logger.debug(logging_text + stage[1])
3549 # print(nsr_deployed)
3550 for kdu in get_iterable(nsr_deployed, "K8s"):
3551 if not kdu or not kdu.get("kdu-instance"):
3552 continue
3553 kdu_instance = kdu.get("kdu-instance")
tiernoa2143262020-03-27 16:20:40 +00003554 if kdu.get("k8scluster-type") in self.k8scluster_map:
tiernoe876f672020-02-13 14:34:48 +00003555 task_delete_kdu_instance = asyncio.ensure_future(
tiernoa2143262020-03-27 16:20:40 +00003556 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
3557 cluster_uuid=kdu.get("k8scluster-uuid"),
3558 kdu_instance=kdu_instance))
tiernoe876f672020-02-13 14:34:48 +00003559 else:
3560 self.logger.error(logging_text + "Unknown k8s deployment type {}".
3561 format(kdu.get("k8scluster-type")))
3562 continue
3563 tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
tierno59d22d22018-09-25 18:10:19 +02003564
3565 # remove from RO
tiernoe876f672020-02-13 14:34:48 +00003566 stage[1] = "Deleting ns from VIM."
tierno69f0d382020-05-07 13:08:09 +00003567 if self.ng_ro:
3568 task_delete_ro = asyncio.ensure_future(
3569 self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
3570 else:
3571 task_delete_ro = asyncio.ensure_future(
3572 self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
tiernoe876f672020-02-13 14:34:48 +00003573 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
tierno59d22d22018-09-25 18:10:19 +02003574
tiernoe876f672020-02-13 14:34:48 +00003575 # rest of staff will be done at finally
3576
3577 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
3578 self.logger.error(logging_text + "Exit Exception {}".format(e))
3579 exc = e
3580 except asyncio.CancelledError:
3581 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
3582 exc = "Operation was cancelled"
3583 except Exception as e:
3584 exc = traceback.format_exc()
3585 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
3586 finally:
3587 if exc:
3588 error_list.append(str(exc))
tierno59d22d22018-09-25 18:10:19 +02003589 try:
tiernoe876f672020-02-13 14:34:48 +00003590 # wait for pending tasks
3591 if tasks_dict_info:
3592 stage[1] = "Waiting for terminate pending tasks."
3593 self.logger.debug(logging_text + stage[1])
3594 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
3595 stage, nslcmop_id)
3596 stage[1] = stage[2] = ""
3597 except asyncio.CancelledError:
3598 error_list.append("Cancelled")
3599 # TODO cancell all tasks
3600 except Exception as exc:
3601 error_list.append(str(exc))
3602 # update status at database
3603 if error_list:
3604 error_detail = "; ".join(error_list)
3605 # self.logger.error(logging_text + error_detail)
tiernob5203912020-08-11 11:20:13 +00003606 error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
3607 error_description_nsr = 'Operation: TERMINATING.{}, {}.'.format(nslcmop_id, stage[0])
tierno59d22d22018-09-25 18:10:19 +02003608
tierno59d22d22018-09-25 18:10:19 +02003609 db_nsr_update["operational-status"] = "failed"
tiernoa2143262020-03-27 16:20:40 +00003610 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00003611 db_nslcmop_update["detailed-status"] = error_detail
3612 nslcmop_operation_state = "FAILED"
3613 ns_state = "BROKEN"
tierno59d22d22018-09-25 18:10:19 +02003614 else:
tiernoa2143262020-03-27 16:20:40 +00003615 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00003616 error_description_nsr = error_description_nslcmop = None
3617 ns_state = "NOT_INSTANTIATED"
tierno59d22d22018-09-25 18:10:19 +02003618 db_nsr_update["operational-status"] = "terminated"
3619 db_nsr_update["detailed-status"] = "Done"
3620 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
3621 db_nslcmop_update["detailed-status"] = "Done"
tiernoe876f672020-02-13 14:34:48 +00003622 nslcmop_operation_state = "COMPLETED"
tierno59d22d22018-09-25 18:10:19 +02003623
tiernoe876f672020-02-13 14:34:48 +00003624 if db_nsr:
3625 self._write_ns_status(
3626 nsr_id=nsr_id,
3627 ns_state=ns_state,
3628 current_operation="IDLE",
3629 current_operation_id=None,
3630 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00003631 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00003632 other_update=db_nsr_update
3633 )
tiernoa17d4f42020-04-28 09:59:23 +00003634 self._write_op_status(
3635 op_id=nslcmop_id,
3636 stage="",
3637 error_message=error_description_nslcmop,
3638 operation_state=nslcmop_operation_state,
3639 other_update=db_nslcmop_update,
3640 )
lloretgalleg6d488782020-07-22 10:13:46 +00003641 if ns_state == "NOT_INSTANTIATED":
3642 try:
3643 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "NOT_INSTANTIATED"})
3644 except DbException as e:
3645 self.logger.warn(logging_text + 'Error writing VNFR status for nsr-id-ref: {} -> {}'.
3646 format(nsr_id, e))
tiernoa17d4f42020-04-28 09:59:23 +00003647 if operation_params:
tiernoe876f672020-02-13 14:34:48 +00003648 autoremove = operation_params.get("autoremove", False)
tierno59d22d22018-09-25 18:10:19 +02003649 if nslcmop_operation_state:
3650 try:
3651 await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tiernoc2564fe2019-01-28 16:18:56 +00003652 "operationState": nslcmop_operation_state,
3653 "autoremove": autoremove},
tierno8a518872018-12-21 13:42:14 +00003654 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003655 except Exception as e:
3656 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02003657
tierno59d22d22018-09-25 18:10:19 +02003658 self.logger.debug(logging_text + "Exit")
3659 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
3660
    async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
        """
        Wait for a group of asyncio tasks to finish, reporting progress and collecting errors.

        :param logging_text: prefix prepended to every log message
        :param created_tasks_info: dict mapping each asyncio task -> human readable description of the task
        :param timeout: global timeout in seconds for the whole group of tasks
        :param stage: 3-item list of stage texts; stage[1] is updated in place with "done/total" progress
            (and appended error summaries) and persisted via _write_op_status
        :param nslcmop_id: operation id used to persist progress at the nslcmops collection
        :param nsr_id: if provided, error summaries are also written to this nsrs database record
        :return: list of error detail strings (empty list if every task finished without error)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout for this wait round
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
                                                     return_when=asyncio.FIRST_COMPLETED)
            num_done += len(done)
            if not done:  # global timeout expired: mark every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/controlled exception types get a one-line error; anything else logs the full traceback
                    if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException,
                                        K8sException, NgRoException)):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
                        self.logger.error(logging_text + created_tasks_info[task] + " " + exc_traceback)
                else:
                    self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr with the accumulated error information
                    self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
                                                      "errorDetail": ". ".join(error_detail_list)})
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00003709
tiernoda1ff8c2020-10-22 14:12:46 +00003710 @staticmethod
3711 def _map_primitive_params(primitive_desc, params, instantiation_params):
tiernoda964822019-01-14 15:53:47 +00003712 """
3713 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
3714 The default-value is used. If it is between < > it look for a value at instantiation_params
3715 :param primitive_desc: portion of VNFD/NSD that describes primitive
3716 :param params: Params provided by user
3717 :param instantiation_params: Instantiation params provided by user
3718 :return: a dictionary with the calculated params
3719 """
3720 calculated_params = {}
3721 for parameter in primitive_desc.get("parameter", ()):
3722 param_name = parameter["name"]
3723 if param_name in params:
3724 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00003725 elif "default-value" in parameter or "value" in parameter:
3726 if "value" in parameter:
3727 calculated_params[param_name] = parameter["value"]
3728 else:
3729 calculated_params[param_name] = parameter["default-value"]
3730 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
3731 and calculated_params[param_name].endswith(">"):
3732 if calculated_params[param_name][1:-1] in instantiation_params:
3733 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
tiernoda964822019-01-14 15:53:47 +00003734 else:
3735 raise LcmException("Parameter {} needed to execute primitive {} not provided".
tiernod8323042019-08-09 11:32:23 +00003736 format(calculated_params[param_name], primitive_desc["name"]))
tiernoda964822019-01-14 15:53:47 +00003737 else:
3738 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3739 format(param_name, primitive_desc["name"]))
tierno59d22d22018-09-25 18:10:19 +02003740
tiernoda964822019-01-14 15:53:47 +00003741 if isinstance(calculated_params[param_name], (dict, list, tuple)):
3742 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], default_flow_style=True,
3743 width=256)
3744 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
3745 calculated_params[param_name] = calculated_params[param_name][7:]
tiernofa40e692020-10-14 14:59:36 +00003746 if parameter.get("data-type") == "INTEGER":
3747 try:
3748 calculated_params[param_name] = int(calculated_params[param_name])
3749 except ValueError: # error converting string to int
3750 raise LcmException(
3751 "Parameter {} of primitive {} must be integer".format(param_name, primitive_desc["name"]))
3752 elif parameter.get("data-type") == "BOOLEAN":
3753 calculated_params[param_name] = not ((str(calculated_params[param_name])).lower() == 'false')
tiernoc3f2a822019-11-05 13:45:04 +00003754
3755 # add always ns_config_info if primitive name is config
3756 if primitive_desc["name"] == "config":
3757 if "ns_config_info" in instantiation_params:
3758 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
tiernoda964822019-01-14 15:53:47 +00003759 return calculated_params
3760
tiernoa278b842020-07-08 15:33:55 +00003761 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None,
3762 ee_descriptor_id=None):
tiernoe876f672020-02-13 14:34:48 +00003763 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
3764 for vca in deployed_vca:
3765 if not vca:
3766 continue
3767 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3768 continue
tiernoe876f672020-02-13 14:34:48 +00003769 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3770 continue
3771 if kdu_name and kdu_name != vca["kdu_name"]:
3772 continue
tiernoa278b842020-07-08 15:33:55 +00003773 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
3774 continue
tiernoe876f672020-02-13 14:34:48 +00003775 break
3776 else:
3777 # vca_deployed not found
tiernoa278b842020-07-08 15:33:55 +00003778 raise LcmException("charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
3779 " is not deployed".format(member_vnf_index, vdu_id, vdu_count_index, kdu_name,
3780 ee_descriptor_id))
quilesj7e13aeb2019-10-08 13:34:55 +02003781
tiernoe876f672020-02-13 14:34:48 +00003782 # get ee_id
3783 ee_id = vca.get("ee_id")
tierno588547c2020-07-01 15:30:20 +00003784 vca_type = vca.get("type", "lxc_proxy_charm") # default value for backward compatibility - proxy charm
tiernoe876f672020-02-13 14:34:48 +00003785 if not ee_id:
tierno067e04a2020-03-31 12:53:13 +00003786 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
tiernoe876f672020-02-13 14:34:48 +00003787 "execution environment"
tierno067e04a2020-03-31 12:53:13 +00003788 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
tierno588547c2020-07-01 15:30:20 +00003789 return ee_id, vca_type
tiernoe876f672020-02-13 14:34:48 +00003790
    async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0,
                                    retries_interval=30, timeout=None,
                                    vca_type=None, db_dict=None) -> (str, str):
        """
        Execute a primitive at the VCA execution environment, optionally retrying on failure.

        :param ee_id: execution environment id where the primitive is executed
        :param primitive: primitive name; for "config" the params are wrapped as {"params": primitive_params}
        :param primitive_params: already-calculated params dict to pass to the charm
        :param retries: number of extra attempts after a failed execution (0 = single attempt)
        :param retries_interval: seconds to sleep between attempts
        :param timeout: per-attempt timeout in seconds; defaults to self.timeout_primitive
        :param vca_type: key into self.vca_map selecting the connector; defaults to "lxc_proxy_charm"
        :param db_dict: database location where the connector reports execution progress
        :return: tuple (operation_state, detailed_output):
            ('COMPLETED', output) on success, ('FAILED', error) when retries are exhausted,
            ('FAIL', error) on unexpected exceptions outside the retry loop
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict),
                        timeout=timeout or self.timeout_primitive)
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    # cancellation must propagate; it is not a retryable failure
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        # retries exhausted: report failure instead of raising
                        return 'FAILED', str(e)

            return 'COMPLETED', output
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02003832
    async def action(self, nsr_id, nslcmop_id):
        """
        Execute a user-requested action (primitive) over a deployed NS / VNF / VDU / KDU.

        Reads the operation parameters from the nslcmops record, locates the primitive in the
        corresponding descriptor configuration, executes it either through the K8s connectors
        (for KDUs) or through the VCA execution environment, and persists the result.

        :param nsr_id: NS record id (nsrs collection)
        :param nslcmop_id: operation record id (nslcmops collection) carrying operationParams
        :return: (nslcmop_operation_state, detailed_status) — returned from the finally clause,
            which overrides the bare `return` of the success path
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
            else:
                # NS-level action: only the NSD is needed
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # for backward compatibility: old records stored VCA as a dict instead of a list
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive at the most specific configuration level: vdu > kdu > vnf > ns
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                for vdu in get_iterable(db_vnfd, "vdu"):
                    if vdu_id == vdu["id"]:
                        descriptor_configuration = vdu.get("vdu-configuration")
                        break
            elif kdu_name:
                for kdu in get_iterable(db_vnfd, "kdu"):
                    if kdu_name == kdu["name"]:
                        descriptor_configuration = kdu.get("kdu-configuration")
                        break
            elif vnf_index:
                descriptor_configuration = db_vnfd.get("vnf-configuration")
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get("config-primitive"):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # only the built-in kdu operations may run without a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
                                       format(primitive))
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get("execution-environment-primitive", primitive)
                ee_descriptor_id = config_primitive_desc.get("execution-environment-ref")

            # gather the additional params declared at the target level
            if vnf_index:
                if vdu_id:
                    vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
                    desc_params = self._format_additional_params(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
                    desc_params = self._format_additional_params(kdur.get("additionalParams"))
                else:
                    desc_params = self._format_additional_params(db_vnfr.get("additionalParamsForVnf"))
            else:
                desc_params = self._format_additional_params(db_nsr.get("additionalParamsForNs"))

            if kdu_name:
                # NOTE(review): 'kdu' is the descriptor matched in the lookup loop above; if kdu_name was
                # not found there this raises NameError — confirm callers guarantee a matching kdu entry
                kdu_action = True if not deep_get(kdu, ("kdu-configuration", "juju")) else False

            # TODO check if ns is in a proper status
            if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                # 'kdu' is rebound here to the deployed K8s record (index kept for the db path below)
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
                        break
                else:
                    raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
                    raise LcmException(msg)

                db_dict = {"collection": "nsrs",
                           "filter": {"_id": nsr_id},
                           "path": "_admin.deployed.K8s.{}".format(index)}
                self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name))
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # explicit kdu_model param wins; otherwise use deployed model, dropping a ":version" suffix
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True, kdu_model=kdu_model,
                            params=desc_params, db_dict=db_dict,
                            timeout=timeout_ns_action),
                        timeout=timeout_ns_action + 10)
                    self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict),
                        timeout=timeout_ns_action)
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance")),
                        timeout=timeout_ns_action)
                else:
                    # generic kdu primitive executed through the k8s connector
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
                    params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params, db_dict=db_dict,
                            timeout=timeout_ns_action),
                        timeout=timeout_ns_action)

                if detailed_status:
                    nslcmop_operation_state = 'COMPLETED'
                else:
                    detailed_status = ''
                    nslcmop_operation_state = 'FAILED'
            else:
                # charm primitive: find the execution environment and run it through the VCA
                ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
                                                              member_vnf_index=vnf_index,
                                                              vdu_id=vdu_id,
                                                              vdu_count_index=vdu_count_index,
                                                              ee_descriptor_id=ee_descriptor_id)
                # NOTE(review): path "admin.VCA" (not "_admin.VCA") — confirm this is the intended location
                db_nslcmop_notif = {"collection": "nslcmops",
                                    "filter": {"_id": nslcmop_id},
                                    "path": "admin.VCA"}
                nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_nslcmop_notif)

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
            self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
                                                                                   detailed_status))
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            # persist operation/ns status, notify kafka and release the task, both on success and on error
            if exc:
                db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
                    "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr["nsState"],   # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                               "operationState": nslcmop_operation_state},
                                            loop=self.loop)
                except Exception as e:
                    self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            # this return (inside finally) overrides the bare return of the success path
            return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02004078
4079 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02004080
4081 # Try to lock HA task here
4082 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
4083 if not task_is_locked_by_me:
4084 return
4085
tierno59d22d22018-09-25 18:10:19 +02004086 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
tierno2357f4e2020-10-19 16:38:59 +00004087 stage = ['', '', '']
4088 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02004089 self.logger.debug(logging_text + "Enter")
4090 # get all needed from database
4091 db_nsr = None
4092 db_nslcmop = None
4093 db_nslcmop_update = {}
4094 nslcmop_operation_state = None
tiernoe876f672020-02-13 14:34:48 +00004095 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02004096 exc = None
tierno9ab95942018-10-10 16:44:22 +02004097 # in case of error, indicates what part of scale was failed to put nsr at error status
4098 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02004099 old_operational_status = ""
4100 old_config_status = ""
tierno59d22d22018-09-25 18:10:19 +02004101 try:
kuused124bfe2019-06-18 12:09:24 +02004102 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00004103 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02004104 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
tierno47e86b52018-10-10 14:05:55 +02004105
quilesj4cda56b2019-12-05 10:02:20 +00004106 self._write_ns_status(
4107 nsr_id=nsr_id,
4108 ns_state=None,
4109 current_operation="SCALING",
4110 current_operation_id=nslcmop_id
4111 )
4112
ikalyvas02d9e7b2019-05-27 18:16:01 +03004113 step = "Getting nslcmop from database"
ikalyvas02d9e7b2019-05-27 18:16:01 +03004114 self.logger.debug(step + " after having waited for previous tasks to be completed")
4115 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4116 step = "Getting nsr from database"
4117 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4118
4119 old_operational_status = db_nsr["operational-status"]
4120 old_config_status = db_nsr["config-status"]
tierno59d22d22018-09-25 18:10:19 +02004121 step = "Parsing scaling parameters"
tierno9babfda2019-06-07 12:36:50 +00004122 # self.logger.debug(step)
tierno59d22d22018-09-25 18:10:19 +02004123 db_nsr_update["operational-status"] = "scaling"
4124 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00004125 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01004126
4127 #######
4128 nsr_deployed = db_nsr["_admin"].get("deployed")
4129 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tiernoda6fb102019-11-23 00:36:52 +00004130 # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4131 # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4132 # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
calvinosanch9f9c6f22019-11-04 13:37:39 +01004133 #######
4134
tierno2357f4e2020-10-19 16:38:59 +00004135 RO_nsr_id = nsr_deployed["RO"].get("nsr_id")
tierno59d22d22018-09-25 18:10:19 +02004136 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
4137 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
4138 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
4139 # scaling_policy = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"].get("scaling-policy")
4140
tierno82974b22018-11-27 21:55:36 +00004141 # for backward compatibility
4142 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4143 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4144 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4145 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4146
tierno59d22d22018-09-25 18:10:19 +02004147 step = "Getting vnfr from database"
4148 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
4149 step = "Getting vnfd from database"
4150 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03004151
tierno59d22d22018-09-25 18:10:19 +02004152 step = "Getting scaling-group-descriptor"
4153 for scaling_descriptor in db_vnfd["scaling-group-descriptor"]:
4154 if scaling_descriptor["name"] == scaling_group:
4155 break
4156 else:
4157 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
4158 "at vnfd:scaling-group-descriptor".format(scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03004159
tierno59d22d22018-09-25 18:10:19 +02004160 # cooldown_time = 0
4161 # for scaling_policy_descriptor in scaling_descriptor.get("scaling-policy", ()):
4162 # cooldown_time = scaling_policy_descriptor.get("cooldown-time", 0)
4163 # if scaling_policy and scaling_policy == scaling_policy_descriptor.get("name"):
4164 # break
4165
4166 # TODO check if ns is in a proper status
tierno15b1cf12019-08-29 13:21:40 +00004167 step = "Sending scale order to VIM"
tierno59d22d22018-09-25 18:10:19 +02004168 nb_scale_op = 0
4169 if not db_nsr["_admin"].get("scaling-group"):
4170 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
4171 admin_scale_index = 0
4172 else:
4173 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
4174 if admin_scale_info["name"] == scaling_group:
4175 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
4176 break
tierno9ab95942018-10-10 16:44:22 +02004177 else: # not found, set index one plus last element and add new entry with the name
4178 admin_scale_index += 1
4179 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
tierno59d22d22018-09-25 18:10:19 +02004180 RO_scaling_info = []
4181 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
4182 if scaling_type == "SCALE_OUT":
4183 # count if max-instance-count is reached
kuuse818d70c2019-08-07 14:43:44 +02004184 max_instance_count = scaling_descriptor.get("max-instance-count", 10)
4185 # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count))
4186 if nb_scale_op >= max_instance_count:
4187 raise LcmException("reached the limit of {} (max-instance-count) "
4188 "scaling-out operations for the "
4189 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
kuuse8b998e42019-07-30 15:22:16 +02004190
ikalyvas02d9e7b2019-05-27 18:16:01 +03004191 nb_scale_op += 1
tierno59d22d22018-09-25 18:10:19 +02004192 vdu_scaling_info["scaling_direction"] = "OUT"
4193 vdu_scaling_info["vdu-create"] = {}
4194 for vdu_scale_info in scaling_descriptor["vdu"]:
tierno72ef84f2020-10-06 08:22:07 +00004195 vdud = next(vdu for vdu in db_vnfd.get("vdu") if vdu["id"] == vdu_scale_info["vdu-id-ref"])
4196 vdu_index = len([x for x in db_vnfr.get("vdur", ())
4197 if x.get("vdu-id-ref") == vdu_scale_info["vdu-id-ref"] and
4198 x.get("member-vnf-index-ref") == vnf_index])
4199 cloud_init_text = self._get_cloud_init(vdud, db_vnfd)
4200 if cloud_init_text:
4201 additional_params = self._get_vdu_additional_params(db_vnfr, vdud["id"]) or {}
4202 cloud_init_list = []
4203 for x in range(vdu_scale_info.get("count", 1)):
4204 if cloud_init_text:
4205 # TODO Information of its own ip is not available because db_vnfr is not updated.
4206 additional_params["OSM"] = self._get_osm_params(db_vnfr, vdu_scale_info["vdu-id-ref"],
4207 vdu_index + x)
4208 cloud_init_list.append(self._parse_cloud_init(cloud_init_text, additional_params,
4209 db_vnfd["id"], vdud["id"]))
tierno59d22d22018-09-25 18:10:19 +02004210 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
4211 "type": "create", "count": vdu_scale_info.get("count", 1)})
tierno72ef84f2020-10-06 08:22:07 +00004212 if cloud_init_list:
4213 RO_scaling_info[-1]["cloud_init"] = cloud_init_list
tierno59d22d22018-09-25 18:10:19 +02004214 vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
ikalyvas02d9e7b2019-05-27 18:16:01 +03004215
tierno59d22d22018-09-25 18:10:19 +02004216 elif scaling_type == "SCALE_IN":
4217 # count if min-instance-count is reached
tierno27246d82018-09-27 15:59:09 +02004218 min_instance_count = 0
tierno59d22d22018-09-25 18:10:19 +02004219 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
4220 min_instance_count = int(scaling_descriptor["min-instance-count"])
tierno9babfda2019-06-07 12:36:50 +00004221 if nb_scale_op <= min_instance_count:
4222 raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the "
4223 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03004224 nb_scale_op -= 1
tierno59d22d22018-09-25 18:10:19 +02004225 vdu_scaling_info["scaling_direction"] = "IN"
4226 vdu_scaling_info["vdu-delete"] = {}
4227 for vdu_scale_info in scaling_descriptor["vdu"]:
4228 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
4229 "type": "delete", "count": vdu_scale_info.get("count", 1)})
4230 vdu_scaling_info["vdu-delete"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
4231
4232 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
tierno27246d82018-09-27 15:59:09 +02004233 vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
tierno59d22d22018-09-25 18:10:19 +02004234 if vdu_scaling_info["scaling_direction"] == "IN":
4235 for vdur in reversed(db_vnfr["vdur"]):
tierno27246d82018-09-27 15:59:09 +02004236 if vdu_delete.get(vdur["vdu-id-ref"]):
4237 vdu_delete[vdur["vdu-id-ref"]] -= 1
tierno59d22d22018-09-25 18:10:19 +02004238 vdu_scaling_info["vdu"].append({
tierno2357f4e2020-10-19 16:38:59 +00004239 "name": vdur.get("name") or vdur.get("vdu-name"),
tierno59d22d22018-09-25 18:10:19 +02004240 "vdu_id": vdur["vdu-id-ref"],
4241 "interface": []
4242 })
4243 for interface in vdur["interfaces"]:
4244 vdu_scaling_info["vdu"][-1]["interface"].append({
4245 "name": interface["name"],
4246 "ip_address": interface["ip-address"],
4247 "mac_address": interface.get("mac-address"),
4248 })
tierno2357f4e2020-10-19 16:38:59 +00004249 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
tierno59d22d22018-09-25 18:10:19 +02004250
kuuseac3a8882019-10-03 10:48:06 +02004251 # PRE-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02004252 step = "Executing pre-scale vnf-config-primitive"
4253 if scaling_descriptor.get("scaling-config-action"):
4254 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02004255 if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
4256 or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02004257 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4258 step = db_nslcmop_update["detailed-status"] = \
4259 "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00004260
tierno59d22d22018-09-25 18:10:19 +02004261 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02004262 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
4263 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02004264 break
4265 else:
4266 raise LcmException(
4267 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
tiernoda964822019-01-14 15:53:47 +00004268 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
tiernoa278b842020-07-08 15:33:55 +00004269 "primitive".format(scaling_group, vnf_config_primitive))
tiernoda964822019-01-14 15:53:47 +00004270
tierno16fedf52019-05-24 08:38:26 +00004271 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00004272 if db_vnfr.get("additionalParamsForVnf"):
4273 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
quilesj7e13aeb2019-10-08 13:34:55 +02004274
tierno9ab95942018-10-10 16:44:22 +02004275 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02004276 db_nsr_update["config-status"] = "configuring pre-scaling"
kuuseac3a8882019-10-03 10:48:06 +02004277 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
4278
tierno7c4e24c2020-05-13 08:41:35 +00004279 # Pre-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004280 op_index = self._check_or_add_scale_suboperation(
4281 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
tierno7c4e24c2020-05-13 08:41:35 +00004282 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004283 # Skip sub-operation
4284 result = 'COMPLETED'
4285 result_detail = 'Done'
4286 self.logger.debug(logging_text +
4287 "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
4288 vnf_config_primitive, result, result_detail))
4289 else:
tierno7c4e24c2020-05-13 08:41:35 +00004290 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004291 # New sub-operation: Get index of this sub-operation
4292 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4293 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4294 format(vnf_config_primitive))
4295 else:
tierno7c4e24c2020-05-13 08:41:35 +00004296 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004297 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4298 vnf_index = op.get('member_vnf_index')
4299 vnf_config_primitive = op.get('primitive')
4300 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00004301 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02004302 format(vnf_config_primitive))
tierno588547c2020-07-01 15:30:20 +00004303 # Execute the primitive, either with new (first-time) or registered (reintent) args
tiernoa278b842020-07-08 15:33:55 +00004304 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4305 primitive_name = config_primitive.get("execution-environment-primitive",
4306 vnf_config_primitive)
tierno588547c2020-07-01 15:30:20 +00004307 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4308 member_vnf_index=vnf_index,
4309 vdu_id=None,
tiernoa278b842020-07-08 15:33:55 +00004310 vdu_count_index=None,
4311 ee_descriptor_id=ee_descriptor_id)
kuuseac3a8882019-10-03 10:48:06 +02004312 result, result_detail = await self._ns_execute_primitive(
tiernoa278b842020-07-08 15:33:55 +00004313 ee_id, primitive_name, primitive_params, vca_type)
kuuseac3a8882019-10-03 10:48:06 +02004314 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4315 vnf_config_primitive, result, result_detail))
4316 # Update operationState = COMPLETED | FAILED
4317 self._update_suboperation_status(
4318 db_nslcmop, op_index, result, result_detail)
4319
tierno59d22d22018-09-25 18:10:19 +02004320 if result == "FAILED":
4321 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02004322 db_nsr_update["config-status"] = old_config_status
4323 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02004324 # PRE-SCALE END
tierno59d22d22018-09-25 18:10:19 +02004325
tierno2357f4e2020-10-19 16:38:59 +00004326 db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
4327 db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
4328
kuuseac3a8882019-10-03 10:48:06 +02004329 # SCALE RO - BEGIN
tierno59d22d22018-09-25 18:10:19 +02004330 if RO_scaling_info:
tierno9ab95942018-10-10 16:44:22 +02004331 scale_process = "RO"
tierno2357f4e2020-10-19 16:38:59 +00004332 if self.ro_config.get("ng"):
4333 await self._scale_ng_ro(logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage)
kuuseac3a8882019-10-03 10:48:06 +02004334 else:
tierno2357f4e2020-10-19 16:38:59 +00004335 await self._RO_scale(logging_text, RO_nsr_id, RO_scaling_info, db_nslcmop, db_vnfr,
4336 db_nslcmop_update, vdu_scaling_info)
4337 vdu_scaling_info.pop("vdu-create", None)
4338 vdu_scaling_info.pop("vdu-delete", None)
tierno59d22d22018-09-25 18:10:19 +02004339
tierno9ab95942018-10-10 16:44:22 +02004340 scale_process = None
tierno59d22d22018-09-25 18:10:19 +02004341 if db_nsr_update:
4342 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4343
kuuseac3a8882019-10-03 10:48:06 +02004344 # POST-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02004345 # execute primitive service POST-SCALING
4346 step = "Executing post-scale vnf-config-primitive"
4347 if scaling_descriptor.get("scaling-config-action"):
4348 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02004349 if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
4350 or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02004351 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4352 step = db_nslcmop_update["detailed-status"] = \
4353 "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00004354
tierno589befb2019-05-29 07:06:23 +00004355 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00004356 if db_vnfr.get("additionalParamsForVnf"):
4357 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
4358
tierno59d22d22018-09-25 18:10:19 +02004359 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02004360 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
4361 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02004362 break
4363 else:
tiernoa278b842020-07-08 15:33:55 +00004364 raise LcmException(
4365 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
4366 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
4367 "config-primitive".format(scaling_group, vnf_config_primitive))
tierno9ab95942018-10-10 16:44:22 +02004368 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02004369 db_nsr_update["config-status"] = "configuring post-scaling"
kuuseac3a8882019-10-03 10:48:06 +02004370 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
tiernod6de1992018-10-11 13:05:52 +02004371
tierno7c4e24c2020-05-13 08:41:35 +00004372 # Post-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02004373 op_index = self._check_or_add_scale_suboperation(
4374 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
quilesj4cda56b2019-12-05 10:02:20 +00004375 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02004376 # Skip sub-operation
4377 result = 'COMPLETED'
4378 result_detail = 'Done'
4379 self.logger.debug(logging_text +
4380 "vnf_config_primitive={} Skipped sub-operation, result {} {}".
4381 format(vnf_config_primitive, result, result_detail))
4382 else:
quilesj4cda56b2019-12-05 10:02:20 +00004383 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02004384 # New sub-operation: Get index of this sub-operation
4385 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4386 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4387 format(vnf_config_primitive))
4388 else:
tierno7c4e24c2020-05-13 08:41:35 +00004389 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02004390 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4391 vnf_index = op.get('member_vnf_index')
4392 vnf_config_primitive = op.get('primitive')
4393 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00004394 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02004395 format(vnf_config_primitive))
tierno588547c2020-07-01 15:30:20 +00004396 # Execute the primitive, either with new (first-time) or registered (reintent) args
tiernoa278b842020-07-08 15:33:55 +00004397 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4398 primitive_name = config_primitive.get("execution-environment-primitive",
4399 vnf_config_primitive)
tierno588547c2020-07-01 15:30:20 +00004400 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4401 member_vnf_index=vnf_index,
4402 vdu_id=None,
tiernoa278b842020-07-08 15:33:55 +00004403 vdu_count_index=None,
4404 ee_descriptor_id=ee_descriptor_id)
kuuseac3a8882019-10-03 10:48:06 +02004405 result, result_detail = await self._ns_execute_primitive(
tiernoa278b842020-07-08 15:33:55 +00004406 ee_id, primitive_name, primitive_params, vca_type)
kuuseac3a8882019-10-03 10:48:06 +02004407 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4408 vnf_config_primitive, result, result_detail))
4409 # Update operationState = COMPLETED | FAILED
4410 self._update_suboperation_status(
4411 db_nslcmop, op_index, result, result_detail)
4412
tierno59d22d22018-09-25 18:10:19 +02004413 if result == "FAILED":
4414 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02004415 db_nsr_update["config-status"] = old_config_status
4416 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02004417 # POST-SCALE END
tierno59d22d22018-09-25 18:10:19 +02004418
tiernod6de1992018-10-11 13:05:52 +02004419 db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type)
ikalyvas02d9e7b2019-05-27 18:16:01 +03004420 db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
4421 else old_operational_status
tiernod6de1992018-10-11 13:05:52 +02004422 db_nsr_update["config-status"] = old_config_status
tierno59d22d22018-09-25 18:10:19 +02004423 return
tierno2357f4e2020-10-19 16:38:59 +00004424 except (ROclient.ROClientException, DbException, LcmException, NgRoException) as e:
tierno59d22d22018-09-25 18:10:19 +02004425 self.logger.error(logging_text + "Exit Exception {}".format(e))
4426 exc = e
4427 except asyncio.CancelledError:
4428 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
4429 exc = "Operation was cancelled"
4430 except Exception as e:
4431 exc = traceback.format_exc()
4432 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
4433 finally:
quilesj3655ae02019-12-12 16:08:35 +00004434 self._write_ns_status(
4435 nsr_id=nsr_id,
4436 ns_state=None,
4437 current_operation="IDLE",
4438 current_operation_id=None
4439 )
tierno59d22d22018-09-25 18:10:19 +02004440 if exc:
tiernoa17d4f42020-04-28 09:59:23 +00004441 db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
4442 nslcmop_operation_state = "FAILED"
tierno59d22d22018-09-25 18:10:19 +02004443 if db_nsr:
tiernod6de1992018-10-11 13:05:52 +02004444 db_nsr_update["operational-status"] = old_operational_status
4445 db_nsr_update["config-status"] = old_config_status
4446 db_nsr_update["detailed-status"] = ""
4447 if scale_process:
4448 if "VCA" in scale_process:
4449 db_nsr_update["config-status"] = "failed"
4450 if "RO" in scale_process:
4451 db_nsr_update["operational-status"] = "failed"
4452 db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
4453 exc)
tiernoa17d4f42020-04-28 09:59:23 +00004454 else:
4455 error_description_nslcmop = None
4456 nslcmop_operation_state = "COMPLETED"
4457 db_nslcmop_update["detailed-status"] = "Done"
quilesj4cda56b2019-12-05 10:02:20 +00004458
tiernoa17d4f42020-04-28 09:59:23 +00004459 self._write_op_status(
4460 op_id=nslcmop_id,
4461 stage="",
4462 error_message=error_description_nslcmop,
4463 operation_state=nslcmop_operation_state,
4464 other_update=db_nslcmop_update,
4465 )
4466 if db_nsr:
4467 self._write_ns_status(
4468 nsr_id=nsr_id,
4469 ns_state=None,
4470 current_operation="IDLE",
4471 current_operation_id=None,
4472 other_update=db_nsr_update
4473 )
4474
tierno59d22d22018-09-25 18:10:19 +02004475 if nslcmop_operation_state:
4476 try:
4477 await self.msg.aiowrite("ns", "scaled", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00004478 "operationState": nslcmop_operation_state},
4479 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02004480 # if cooldown_time:
tiernod8323042019-08-09 11:32:23 +00004481 # await asyncio.sleep(cooldown_time, loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02004482 # await self.msg.aiowrite("ns","scaled-cooldown-time", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id})
4483 except Exception as e:
4484 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
4485 self.logger.debug(logging_text + "Exit")
4486 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
tiernob996d942020-07-03 14:52:28 +00004487
tierno2357f4e2020-10-19 16:38:59 +00004488 async def _scale_ng_ro(self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage):
4489 nsr_id = db_nslcmop["nsInstanceId"]
4490 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4491 db_vnfrs = {}
4492
4493 # read from db: vnfd's for every vnf
4494 db_vnfds = {} # every vnfd data indexed by vnf id
4495 db_vnfds_ref = {} # every vnfd data indexed by vnfd id
4496 db_vnfds = {}
4497
4498 # for each vnf in ns, read vnfd
4499 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
4500 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
4501 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
4502 vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
4503 # if we haven't this vnfd, read it from db
4504 if vnfd_id not in db_vnfds:
4505 # read from db
4506 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4507 db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
4508 db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
4509 n2vc_key = self.n2vc.get_public_key()
4510 n2vc_key_list = [n2vc_key]
4511 self.scale_vnfr(db_vnfr, vdu_scaling_info.get("vdu-create"), vdu_scaling_info.get("vdu-delete"),
4512 mark_delete=True)
4513 # db_vnfr has been updated, update db_vnfrs to use it
4514 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
4515 await self._instantiate_ng_ro(logging_text, nsr_id, db_nsd, db_nsr, db_nslcmop, db_vnfrs,
4516 db_vnfds_ref, n2vc_key_list, stage=stage, start_deploy=time(),
4517 timeout_ns_deploy=self.timeout_ns_deploy)
4518 if vdu_scaling_info.get("vdu-delete"):
4519 self.scale_vnfr(db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False)
4520
4521 async def _RO_scale(self, logging_text, RO_nsr_id, RO_scaling_info, db_nslcmop, db_vnfr, db_nslcmop_update,
4522 vdu_scaling_info):
4523 nslcmop_id = db_nslcmop["_id"]
4524 nsr_id = db_nslcmop["nsInstanceId"]
4525 vdu_create = vdu_scaling_info.get("vdu-create")
4526 vdu_delete = vdu_scaling_info.get("vdu-delete")
4527 # Scale RO retry check: Check if this sub-operation has been executed before
4528 op_index = self._check_or_add_scale_suboperation(
4529 db_nslcmop, db_vnfr["member-vnf-index-ref"], None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
4530 if op_index == self.SUBOPERATION_STATUS_SKIP:
4531 # Skip sub-operation
4532 result = 'COMPLETED'
4533 result_detail = 'Done'
4534 self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(result, result_detail))
4535 else:
4536 if op_index == self.SUBOPERATION_STATUS_NEW:
4537 # New sub-operation: Get index of this sub-operation
4538 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4539 self.logger.debug(logging_text + "New sub-operation RO")
4540 else:
4541 # retry: Get registered params for this existing sub-operation
4542 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4543 RO_nsr_id = op.get('RO_nsr_id')
4544 RO_scaling_info = op.get('RO_scaling_info')
4545 self.logger.debug(logging_text + "Sub-operation RO retry")
4546
4547 RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
4548 # wait until ready
4549 RO_nslcmop_id = RO_desc["instance_action_id"]
4550 db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id
4551
4552 RO_task_done = False
4553 step = detailed_status = "Waiting for VIM to scale. RO_task_id={}.".format(RO_nslcmop_id)
4554 detailed_status_old = None
4555 self.logger.debug(logging_text + step)
4556
4557 deployment_timeout = 1 * 3600 # One hour
4558 while deployment_timeout > 0:
4559 if not RO_task_done:
4560 desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
4561 extra_item_id=RO_nslcmop_id)
4562
4563 # deploymentStatus
4564 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4565
4566 ns_status, ns_status_info = self.RO.check_action_status(desc)
4567 if ns_status == "ERROR":
4568 raise ROclient.ROClientException(ns_status_info)
4569 elif ns_status == "BUILD":
4570 detailed_status = step + "; {}".format(ns_status_info)
4571 elif ns_status == "ACTIVE":
4572 RO_task_done = True
4573 self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
4574 step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
4575 self.logger.debug(logging_text + step)
4576 else:
4577 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
4578 else:
4579 desc = await self.RO.show("ns", RO_nsr_id)
4580 ns_status, ns_status_info = self.RO.check_ns_status(desc)
4581 # deploymentStatus
4582 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4583
4584 if ns_status == "ERROR":
4585 raise ROclient.ROClientException(ns_status_info)
4586 elif ns_status == "BUILD":
4587 detailed_status = step + "; {}".format(ns_status_info)
4588 elif ns_status == "ACTIVE":
4589 step = detailed_status = \
4590 "Waiting for management IP address reported by the VIM. Updating VNFRs"
4591 try:
4592 # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
4593 self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
4594 break
4595 except LcmExceptionNoMgmtIP:
4596 pass
4597 else:
4598 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
4599 if detailed_status != detailed_status_old:
4600 self._update_suboperation_status(
4601 db_nslcmop, op_index, 'COMPLETED', detailed_status)
4602 detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
4603 self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
4604
4605 await asyncio.sleep(5, loop=self.loop)
4606 deployment_timeout -= 5
4607 if deployment_timeout <= 0:
4608 self._update_suboperation_status(
4609 db_nslcmop, nslcmop_id, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
4610 raise ROclient.ROClientException("Timeout waiting ns to be ready")
4611
4612 # update VDU_SCALING_INFO with the obtained ip_addresses
4613 if vdu_scaling_info["scaling_direction"] == "OUT":
4614 for vdur in reversed(db_vnfr["vdur"]):
4615 if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
4616 vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
4617 vdu_scaling_info["vdu"].append({
4618 "name": vdur["name"] or vdur.get("vdu-name"),
4619 "vdu_id": vdur["vdu-id-ref"],
4620 "interface": []
4621 })
4622 for interface in vdur["interfaces"]:
4623 vdu_scaling_info["vdu"][-1]["interface"].append({
4624 "name": interface["name"],
4625 "ip_address": interface["ip-address"],
4626 "mac_address": interface.get("mac-address"),
4627 })
4628 self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
4629
tiernob996d942020-07-03 14:52:28 +00004630 async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip):
4631 if not self.prometheus:
4632 return
4633 # look if exist a file called 'prometheus*.j2' and
4634 artifact_content = self.fs.dir_ls(artifact_path)
4635 job_file = next((f for f in artifact_content if f.startswith("prometheus") and f.endswith(".j2")), None)
4636 if not job_file:
4637 return
4638 with self.fs.file_open((artifact_path, job_file), "r") as f:
4639 job_data = f.read()
4640
4641 # TODO get_service
4642 _, _, service = ee_id.partition(".") # remove prefix "namespace."
4643 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
4644 host_port = "80"
4645 vnfr_id = vnfr_id.replace("-", "")
4646 variables = {
4647 "JOB_NAME": vnfr_id,
4648 "TARGET_IP": target_ip,
4649 "EXPORTER_POD_IP": host_name,
4650 "EXPORTER_POD_PORT": host_port,
4651 }
4652 job_list = self.prometheus.parse_job(job_data, variables)
4653 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
4654 for job in job_list:
4655 if not isinstance(job.get("job_name"), str) or vnfr_id not in job["job_name"]:
4656 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
4657 job["nsr_id"] = nsr_id
4658 job_dict = {jl["job_name"]: jl for jl in job_list}
4659 if await self.prometheus.update(job_dict):
4660 return list(job_dict.keys())
David Garciaaae391f2020-11-09 11:12:54 +01004661
4662 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
4663 """
4664 Get VCA Cloud and VCA Cloud Credentials for the VIM account
4665
4666 :param: vim_account_id: VIM Account ID
4667
4668 :return: (cloud_name, cloud_credential)
4669 """
4670 config = self.get_vim_account_config(vim_account_id)
4671 return config.get("vca_cloud"), config.get("vca_cloud_credential")
4672
4673 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
4674 """
4675 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
4676
4677 :param: vim_account_id: VIM Account ID
4678
4679 :return: (cloud_name, cloud_credential)
4680 """
4681 config = self.get_vim_account_config(vim_account_id)
4682 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
4683
4684 def get_vim_account_config(self, vim_account_id: str) -> dict:
4685 """
4686 Get VIM Account config from the OSM Database
4687
4688 :param: vim_account_id: VIM Account ID
4689
4690 :return: Dictionary with the config of the vim account
4691 """
4692 vim_account = self.db.get_one(table="vim_accounts", q_filter={"_id": vim_account_id}, fail_on_empty=False)
4693 return vim_account.get("config", {}) if vim_account else {}