blob: 1fbe84512e1677513a907a193d428fe6dccae675 [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
20import yaml
21import logging
22import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020023import traceback
David Garciad4816682019-12-09 14:57:43 +010024import json
gcalvino35be9152018-12-20 09:33:12 +010025from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError
tierno59d22d22018-09-25 18:10:19 +020026
tierno77677d92019-08-22 13:46:35 +000027from osm_lcm import ROclient
tierno69f0d382020-05-07 13:08:09 +000028from osm_lcm.ng_ro import NgRoClient, NgRoException
tierno744303e2020-01-13 16:46:31 +000029from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
calvinosanch9f9c6f22019-11-04 13:37:39 +010030from n2vc.k8s_helm_conn import K8sHelmConnector
Adam Israelbaacc302019-12-01 12:41:39 -050031from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020032
tierno27246d82018-09-27 15:59:09 +020033from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +020034from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +020035
36from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +000037from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +020038
tierno27246d82018-09-27 15:59:09 +020039from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +020040from http import HTTPStatus
41from time import time
tierno27246d82018-09-27 15:59:09 +020042from uuid import uuid4
tiernob9018152020-04-16 14:18:24 +000043from functools import partial
tierno59d22d22018-09-25 18:10:19 +020044
tierno69f0d382020-05-07 13:08:09 +000045__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
tierno59d22d22018-09-25 18:10:19 +020046
47
class NsLcm(LcmBase):
    """
    Network Service lifecycle manager.

    Converts OSM descriptors/instantiation parameters to RO format, drives
    deployments through the RO client and the N2VC/K8s connectors, and keeps
    the "nsrs"/"vnfrs" database collections updated with the results.
    """

    # time (seconds) a charm may stay in blocked/error status before it is marked failed
    timeout_vca_on_error = 5 * 60
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60  # timeout (seconds) for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_progress_primitive = 10 * 60  # timeout for some progress in a primitive execution

    # sentinel return values used when searching for a suboperation
    # (negative so they cannot collide with a real suboperation index)
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
kuuseac3a8882019-10-03 10:48:06 +020060
tierno744303e2020-01-13 16:46:31 +000061 def __init__(self, db, msg, fs, lcm_tasks, config, loop):
tierno59d22d22018-09-25 18:10:19 +020062 """
63 Init, Connect to database, filesystem storage, and messaging
64 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
65 :return: None
66 """
quilesj7e13aeb2019-10-08 13:34:55 +020067 super().__init__(
68 db=db,
69 msg=msg,
70 fs=fs,
71 logger=logging.getLogger('lcm.ns')
72 )
73
tierno59d22d22018-09-25 18:10:19 +020074 self.loop = loop
75 self.lcm_tasks = lcm_tasks
tierno744303e2020-01-13 16:46:31 +000076 self.timeout = config["timeout"]
77 self.ro_config = config["ro_config"]
tierno69f0d382020-05-07 13:08:09 +000078 self.ng_ro = config["ro_config"].get("ng")
tierno744303e2020-01-13 16:46:31 +000079 self.vca_config = config["VCA"].copy()
tierno59d22d22018-09-25 18:10:19 +020080
quilesj7e13aeb2019-10-08 13:34:55 +020081 # create N2VC connector
82 self.n2vc = N2VCJujuConnector(
83 db=self.db,
84 fs=self.fs,
tierno59d22d22018-09-25 18:10:19 +020085 log=self.logger,
quilesj7e13aeb2019-10-08 13:34:55 +020086 loop=self.loop,
87 url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
88 username=self.vca_config.get('user', None),
89 vca_config=self.vca_config,
quilesj3655ae02019-12-12 16:08:35 +000090 on_update_db=self._on_update_n2vc_db
tierno59d22d22018-09-25 18:10:19 +020091 )
quilesj7e13aeb2019-10-08 13:34:55 +020092
calvinosanch9f9c6f22019-11-04 13:37:39 +010093 self.k8sclusterhelm = K8sHelmConnector(
94 kubectl_command=self.vca_config.get("kubectlpath"),
95 helm_command=self.vca_config.get("helmpath"),
96 fs=self.fs,
97 log=self.logger,
98 db=self.db,
99 on_update_db=None,
100 )
101
Adam Israelbaacc302019-12-01 12:41:39 -0500102 self.k8sclusterjuju = K8sJujuConnector(
103 kubectl_command=self.vca_config.get("kubectlpath"),
104 juju_command=self.vca_config.get("jujupath"),
105 fs=self.fs,
106 log=self.logger,
107 db=self.db,
108 on_update_db=None,
109 )
110
tiernoa2143262020-03-27 16:20:40 +0000111 self.k8scluster_map = {
112 "helm-chart": self.k8sclusterhelm,
113 "chart": self.k8sclusterhelm,
114 "juju-bundle": self.k8sclusterjuju,
115 "juju": self.k8sclusterjuju,
116 }
quilesj7e13aeb2019-10-08 13:34:55 +0200117 # create RO client
tierno69f0d382020-05-07 13:08:09 +0000118 if self.ng_ro:
119 self.RO = NgRoClient(self.loop, **self.ro_config)
120 else:
121 self.RO = ROclient.ROClient(self.loop, **self.ro_config)
tierno59d22d22018-09-25 18:10:19 +0200122
quilesj3655ae02019-12-12 16:08:35 +0000123 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200124
quilesj3655ae02019-12-12 16:08:35 +0000125 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
126
127 try:
128 # TODO filter RO descriptor fields...
129
130 # write to database
131 db_dict = dict()
132 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
133 db_dict['deploymentStatus'] = ro_descriptor
134 self.update_db_2("nsrs", nsrs_id, db_dict)
135
136 except Exception as e:
137 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
138
139 async def _on_update_n2vc_db(self, table, filter, path, updated_data):
140
quilesj69a722c2020-01-09 08:30:17 +0000141 # remove last dot from path (if exists)
142 if path.endswith('.'):
143 path = path[:-1]
144
quilesj3655ae02019-12-12 16:08:35 +0000145 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
146 # .format(table, filter, path, updated_data))
147
148 try:
149
150 nsr_id = filter.get('_id')
151
152 # read ns record from database
153 nsr = self.db.get_one(table='nsrs', q_filter=filter)
154 current_ns_status = nsr.get('nsState')
155
156 # get vca status for NS
quilesj69a722c2020-01-09 08:30:17 +0000157 status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
quilesj3655ae02019-12-12 16:08:35 +0000158
159 # vcaStatus
160 db_dict = dict()
161 db_dict['vcaStatus'] = status_dict
162
163 # update configurationStatus for this VCA
164 try:
165 vca_index = int(path[path.rfind(".")+1:])
166
167 vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
168 vca_status = vca_list[vca_index].get('status')
169
170 configuration_status_list = nsr.get('configurationStatus')
171 config_status = configuration_status_list[vca_index].get('status')
172
173 if config_status == 'BROKEN' and vca_status != 'failed':
174 db_dict['configurationStatus'][vca_index] = 'READY'
175 elif config_status != 'BROKEN' and vca_status == 'failed':
176 db_dict['configurationStatus'][vca_index] = 'BROKEN'
177 except Exception as e:
178 # not update configurationStatus
179 self.logger.debug('Error updating vca_index (ignore): {}'.format(e))
180
181 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
182 # if nsState = 'DEGRADED' check if all is OK
183 is_degraded = False
184 if current_ns_status in ('READY', 'DEGRADED'):
185 error_description = ''
186 # check machines
187 if status_dict.get('machines'):
188 for machine_id in status_dict.get('machines'):
189 machine = status_dict.get('machines').get(machine_id)
190 # check machine agent-status
191 if machine.get('agent-status'):
192 s = machine.get('agent-status').get('status')
193 if s != 'started':
194 is_degraded = True
195 error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
196 # check machine instance status
197 if machine.get('instance-status'):
198 s = machine.get('instance-status').get('status')
199 if s != 'running':
200 is_degraded = True
201 error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
202 # check applications
203 if status_dict.get('applications'):
204 for app_id in status_dict.get('applications'):
205 app = status_dict.get('applications').get(app_id)
206 # check application status
207 if app.get('status'):
208 s = app.get('status').get('status')
209 if s != 'active':
210 is_degraded = True
211 error_description += 'application {} status={} ; '.format(app_id, s)
212
213 if error_description:
214 db_dict['errorDescription'] = error_description
215 if current_ns_status == 'READY' and is_degraded:
216 db_dict['nsState'] = 'DEGRADED'
217 if current_ns_status == 'DEGRADED' and not is_degraded:
218 db_dict['nsState'] = 'READY'
219
220 # write to database
221 self.update_db_2("nsrs", nsr_id, db_dict)
222
tierno51183952020-04-03 15:48:18 +0000223 except (asyncio.CancelledError, asyncio.TimeoutError):
224 raise
quilesj3655ae02019-12-12 16:08:35 +0000225 except Exception as e:
226 self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200227
    def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
        """
        Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
        :param vnfd: input vnfd
        :param new_id: overrides vnf id if provided
        :param additionalParams: Instantiation params for VNFs provided
        :param nsrId: Id of the NSR (currently unused in this body — kept for interface compatibility)
        :return: copy of vnfd
        :raises LcmException: on cloud-init file read errors, on invalid Jinja2
            content, or when a template variable is missing from additionalParams
        """
        try:
            # never mutate the caller's descriptor
            vnfd_RO = deepcopy(vnfd)
            # remove unused by RO configuration, monitoring, scaling and internal keys
            vnfd_RO.pop("_id", None)
            vnfd_RO.pop("_admin", None)
            vnfd_RO.pop("vnf-configuration", None)
            vnfd_RO.pop("monitoring-param", None)
            vnfd_RO.pop("scaling-group-descriptor", None)
            vnfd_RO.pop("kdu", None)
            vnfd_RO.pop("k8s-cluster", None)
            if new_id:
                vnfd_RO["id"] = new_id

            # parse cloud-init or cloud-init-file with the provided variables using Jinja2
            for vdu in get_iterable(vnfd_RO, "vdu"):
                cloud_init_file = None
                if vdu.get("cloud-init-file"):
                    # cloud-init delivered as a file inside the VNF package: read it from storage
                    base_folder = vnfd["_admin"]["storage"]
                    cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
                                                                   vdu["cloud-init-file"])
                    with self.fs.file_open(cloud_init_file, "r") as ci_file:
                        cloud_init_content = ci_file.read()
                    vdu.pop("cloud-init-file", None)
                elif vdu.get("cloud-init"):
                    # cloud-init given inline in the descriptor
                    cloud_init_content = vdu["cloud-init"]
                else:
                    continue

                # every undeclared Jinja2 variable must be supplied via additionalParamsForVnf
                env = Environment()
                ast = env.parse(cloud_init_content)
                mandatory_vars = meta.find_undeclared_variables(ast)
                if mandatory_vars:
                    for var in mandatory_vars:
                        if not additionalParams or var not in additionalParams.keys():
                            raise LcmException("Variable '{}' defined at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
                                               "file, must be provided in the instantiation parameters inside the "
                                               "'additionalParamsForVnf' block".format(var, vnfd["id"], vdu["id"]))
                template = Template(cloud_init_content)
                cloud_init_content = template.render(additionalParams or {})
                vdu["cloud-init"] = cloud_init_content

            return vnfd_RO
        # NOTE: both handlers reference the loop variables vdu/cloud_init_file; the
        # guarded exceptions can only be raised inside the loop, where they are bound
        except FsException as e:
            raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
                               format(vnfd["id"], vdu["id"], cloud_init_file, e))
        except (TemplateError, TemplateNotFound, TemplateSyntaxError) as e:
            raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
                               format(vnfd["id"], vdu["id"], e))
tierno59d22d22018-09-25 18:10:19 +0200285
    def _ns_params_2_RO(self, ns_params, nsd, vnfd_dict, db_vnfrs, n2vc_key_list):
        """
        Creates a RO ns descriptor from OSM ns_instantiate params
        :param ns_params: OSM instantiate params
        :param nsd: NS descriptor (used to resolve member-vnf-index references)
        :param vnfd_dict: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index. {member-vnf-index: {vnfr_object}, ...}
        :param n2vc_key_list: ssh public keys to inject on vdus that need management access
        :return: The RO ns descriptor
        :raises LcmException: on inconsistent instantiation parameters or disabled VIM/WIM
        """
        # memoization caches for the two closures below (one db lookup per account)
        vim_2_RO = {}
        wim_2_RO = {}
        # TODO feature 1417: Check that no instantiation is set over PDU
        # check if PDU forces a concrete vim-network-id and add it
        # check if PDU contains a SDN-assist info (dpid, switch, port) and pass it to RO

        def vim_account_2_RO(vim_account):
            # translate an OSM vim_account id to the RO datacenter id (cached)
            if vim_account in vim_2_RO:
                return vim_2_RO[vim_account]

            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
            if db_vim["_admin"]["operationalState"] != "ENABLED":
                raise LcmException("VIM={} is not available. operationalState={}".format(
                    vim_account, db_vim["_admin"]["operationalState"]))
            RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
            vim_2_RO[vim_account] = RO_vim_id
            return RO_vim_id

        def wim_account_2_RO(wim_account):
            # translate an OSM wim_account id to the RO account id (cached);
            # non-string values (e.g. None or False) are passed through unchanged
            if isinstance(wim_account, str):
                if wim_account in wim_2_RO:
                    return wim_2_RO[wim_account]

                db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
                if db_wim["_admin"]["operationalState"] != "ENABLED":
                    raise LcmException("WIM={} is not available. operationalState={}".format(
                        wim_account, db_wim["_admin"]["operationalState"]))
                RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
                wim_2_RO[wim_account] = RO_wim_id
                return RO_wim_id
            else:
                return wim_account

        def ip_profile_2_RO(ip_profile):
            # rename/reshape OSM ip-profile keys into the ones RO understands
            RO_ip_profile = deepcopy((ip_profile))
            if "dns-server" in RO_ip_profile:
                if isinstance(RO_ip_profile["dns-server"], list):
                    RO_ip_profile["dns-address"] = []
                    for ds in RO_ip_profile.pop("dns-server"):
                        RO_ip_profile["dns-address"].append(ds['address'])
                else:
                    RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
            if RO_ip_profile.get("ip-version") == "ipv4":
                RO_ip_profile["ip-version"] = "IPv4"
            if RO_ip_profile.get("ip-version") == "ipv6":
                RO_ip_profile["ip-version"] = "IPv6"
            if "dhcp-params" in RO_ip_profile:
                RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
            return RO_ip_profile

        if not ns_params:
            return None
        RO_ns_params = {
            # "name": ns_params["nsName"],
            # "description": ns_params.get("nsDescription"),
            "datacenter": vim_account_2_RO(ns_params["vimAccountId"]),
            "wim_account": wim_account_2_RO(ns_params.get("wimAccountId")),
            # "scenario": ns_params["nsdId"],
        }
        # set vim_account of each vnf if different from general vim_account.
        # Get this information from <vnfr> database content, key vim-account-id
        # Vim account can be set by placement_engine and it may be different from
        # the instantiate parameters (vnfs.member-vnf-index.datacenter).
        for vnf_index, vnfr in db_vnfrs.items():
            if vnfr.get("vim-account-id") and vnfr["vim-account-id"] != ns_params["vimAccountId"]:
                populate_dict(RO_ns_params, ("vnfs", vnf_index, "datacenter"), vim_account_2_RO(vnfr["vim-account-id"]))

        # determine which vdus need the n2vc ssh keys injected (mgmt_keys):
        # any vdu reachable through the management interface when ssh access is required
        n2vc_key_list = n2vc_key_list or []
        for vnfd_ref, vnfd in vnfd_dict.items():
            vdu_needed_access = []
            mgmt_cp = None
            if vnfd.get("vnf-configuration"):
                ssh_required = deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required"))
                if ssh_required and vnfd.get("mgmt-interface"):
                    if vnfd["mgmt-interface"].get("vdu-id"):
                        vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
                    elif vnfd["mgmt-interface"].get("cp"):
                        # mgmt interface points at a connection point: resolve the vdu below
                        mgmt_cp = vnfd["mgmt-interface"]["cp"]

            for vdu in vnfd.get("vdu", ()):
                if vdu.get("vdu-configuration"):
                    ssh_required = deep_get(vdu, ("vdu-configuration", "config-access", "ssh-access", "required"))
                    if ssh_required:
                        vdu_needed_access.append(vdu["id"])
                elif mgmt_cp:
                    # first vdu exposing the mgmt connection point gets the keys
                    for vdu_interface in vdu.get("interface"):
                        if vdu_interface.get("external-connection-point-ref") and \
                                vdu_interface["external-connection-point-ref"] == mgmt_cp:
                            vdu_needed_access.append(vdu["id"])
                            mgmt_cp = None
                            break

            if vdu_needed_access:
                for vnf_member in nsd.get("constituent-vnfd"):
                    if vnf_member["vnfd-id-ref"] != vnfd_ref:
                        continue
                    for vdu in vdu_needed_access:
                        populate_dict(RO_ns_params,
                                      ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
                                      n2vc_key_list)

        if ns_params.get("vduImage"):
            RO_ns_params["vduImage"] = ns_params["vduImage"]

        if ns_params.get("ssh_keys"):
            RO_ns_params["cloud-config"] = {"key-pairs": ns_params["ssh_keys"]}
        # per-vnf instantiation overrides (volumes, interface addresses, internal vlds)
        for vnf_params in get_iterable(ns_params, "vnf"):
            for constituent_vnfd in nsd["constituent-vnfd"]:
                if constituent_vnfd["member-vnf-index"] == vnf_params["member-vnf-index"]:
                    vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                    break
            else:
                raise LcmException("Invalid instantiate parameter vnf:member-vnf-index={} is not present at nsd:"
                                   "constituent-vnfd".format(vnf_params["member-vnf-index"]))

            for vdu_params in get_iterable(vnf_params, "vdu"):
                # TODO feature 1417: check that this VDU exist and it is not a PDU
                if vdu_params.get("volume"):
                    for volume_params in vdu_params["volume"]:
                        if volume_params.get("vim-volume-id"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "devices", volume_params["name"], "vim_id"),
                                          volume_params["vim-volume-id"])
                if vdu_params.get("interface"):
                    for interface_params in vdu_params["interface"]:
                        if interface_params.get("ip-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "ip_address"),
                                          interface_params["ip-address"])
                        if interface_params.get("mac-address"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "mac_address"),
                                          interface_params["mac-address"])
                        if interface_params.get("floating-ip-required"):
                            populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                         vdu_params["id"], "interfaces", interface_params["name"],
                                                         "floating-ip"),
                                          interface_params["floating-ip-required"])

            for internal_vld_params in get_iterable(vnf_params, "internal-vld"):
                if internal_vld_params.get("vim-network-name"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-name"),
                                  internal_vld_params["vim-network-name"])
                if internal_vld_params.get("vim-network-id"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "vim-network-id"),
                                  internal_vld_params["vim-network-id"])
                if internal_vld_params.get("ip-profile"):
                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "ip-profile"),
                                  ip_profile_2_RO(internal_vld_params["ip-profile"]))
                if internal_vld_params.get("provider-network"):

                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
                                                 internal_vld_params["name"], "provider-network"),
                                  internal_vld_params["provider-network"].copy())

                for icp_params in get_iterable(internal_vld_params, "internal-connection-point"):
                    # look for interface
                    iface_found = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for vdu_interface in vdu_descriptor["interface"]:
                            if vdu_interface.get("internal-connection-point-ref") == icp_params["id-ref"]:
                                if icp_params.get("ip-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "ip_address"),
                                                  icp_params["ip-address"])

                                if icp_params.get("mac-address"):
                                    populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
                                                                 vdu_descriptor["id"], "interfaces",
                                                                 vdu_interface["name"], "mac_address"),
                                                  icp_params["mac-address"])
                                iface_found = True
                                break
                        if iface_found:
                            break
                    else:
                        raise LcmException("Invalid instantiate parameter vnf:member-vnf-index[{}]:"
                                           "internal-vld:id-ref={} is not present at vnfd:internal-"
                                           "connection-point".format(vnf_params["member-vnf-index"],
                                                                     icp_params["id-ref"]))

        # ns-level vld overrides (ip profiles, wim accounts, netmaps, connection points)
        for vld_params in get_iterable(ns_params, "vld"):
            if "ip-profile" in vld_params:
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"),
                              ip_profile_2_RO(vld_params["ip-profile"]))

            if vld_params.get("provider-network"):

                populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"),
                              vld_params["provider-network"].copy())

            if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None:
                # NOTE(review): trailing comma below makes the statement a one-element
                # tuple expression; harmless at runtime but likely unintended
                populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"),
                              wim_account_2_RO(vld_params["wimAccountId"])),
            if vld_params.get("vim-network-name"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-name"], dict):
                    for vim_account, vim_net in vld_params["vim-network-name"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)

            if vld_params.get("vim-network-id"):
                RO_vld_sites = []
                if isinstance(vld_params["vim-network-id"], dict):
                    for vim_account, vim_net in vld_params["vim-network-id"].items():
                        RO_vld_sites.append({
                            "netmap-use": vim_net,
                            "datacenter": vim_account_2_RO(vim_account)
                        })
                else:  # isinstance str
                    RO_vld_sites.append({"netmap-use": vld_params["vim-network-id"]})
                if RO_vld_sites:
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
            if vld_params.get("ns-net"):
                if isinstance(vld_params["ns-net"], dict):
                    # NOTE(review): only the last item of the dict survives, and if the
                    # dict is empty RO_vld_ns_net is unbound (NameError) — confirm intent
                    for vld_id, instance_scenario_id in vld_params["ns-net"].items():
                        RO_vld_ns_net = {"instance_scenario_id": instance_scenario_id, "osm_id": vld_id}
                    populate_dict(RO_ns_params, ("networks", vld_params["name"], "use-network"), RO_vld_ns_net)
            if "vnfd-connection-point-ref" in vld_params:
                for cp_params in vld_params["vnfd-connection-point-ref"]:
                    # look for interface
                    for constituent_vnfd in nsd["constituent-vnfd"]:
                        if constituent_vnfd["member-vnf-index"] == cp_params["member-vnf-index-ref"]:
                            vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={} "
                            "is not present at nsd:constituent-vnfd".format(cp_params["member-vnf-index-ref"]))
                    match_cp = False
                    for vdu_descriptor in vnf_descriptor["vdu"]:
                        for interface_descriptor in vdu_descriptor["interface"]:
                            if interface_descriptor.get("external-connection-point-ref") == \
                                    cp_params["vnfd-connection-point-ref"]:
                                match_cp = True
                                break
                        if match_cp:
                            break
                    else:
                        raise LcmException(
                            "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={}:"
                            "vnfd-connection-point-ref={} is not present at vnfd={}".format(
                                cp_params["member-vnf-index-ref"],
                                cp_params["vnfd-connection-point-ref"],
                                vnf_descriptor["id"]))
                    if cp_params.get("ip-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "ip_address"),
                                      cp_params["ip-address"])
                    if cp_params.get("mac-address"):
                        populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
                                                     vdu_descriptor["id"], "interfaces",
                                                     interface_descriptor["name"], "mac_address"),
                                      cp_params["mac-address"])
        return RO_ns_params
562
tierno27246d82018-09-27 15:59:09 +0200563 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None):
564 # make a copy to do not change
565 vdu_create = copy(vdu_create)
566 vdu_delete = copy(vdu_delete)
567
568 vdurs = db_vnfr.get("vdur")
569 if vdurs is None:
570 vdurs = []
571 vdu_index = len(vdurs)
572 while vdu_index:
573 vdu_index -= 1
574 vdur = vdurs[vdu_index]
575 if vdur.get("pdu-type"):
576 continue
577 vdu_id_ref = vdur["vdu-id-ref"]
578 if vdu_create and vdu_create.get(vdu_id_ref):
579 for index in range(0, vdu_create[vdu_id_ref]):
580 vdur = deepcopy(vdur)
581 vdur["_id"] = str(uuid4())
582 vdur["count-index"] += 1
583 vdurs.insert(vdu_index+1+index, vdur)
584 del vdu_create[vdu_id_ref]
585 if vdu_delete and vdu_delete.get(vdu_id_ref):
586 del vdurs[vdu_index]
587 vdu_delete[vdu_id_ref] -= 1
588 if not vdu_delete[vdu_id_ref]:
589 del vdu_delete[vdu_id_ref]
590 # check all operations are done
591 if vdu_create or vdu_delete:
592 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
593 vdu_create))
594 if vdu_delete:
595 raise LcmException("Error scaling IN VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
596 vdu_delete))
597
598 vnfr_update = {"vdur": vdurs}
599 db_vnfr["vdur"] = vdurs
600 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
601
tiernof578e552018-11-08 19:07:20 +0100602 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
603 """
604 Updates database nsr with the RO info for the created vld
605 :param ns_update_nsr: dictionary to be filled with the updated info
606 :param db_nsr: content of db_nsr. This is also modified
607 :param nsr_desc_RO: nsr descriptor from RO
608 :return: Nothing, LcmException is raised on errors
609 """
610
611 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
612 for net_RO in get_iterable(nsr_desc_RO, "nets"):
613 if vld["id"] != net_RO.get("ns_net_osm_id"):
614 continue
615 vld["vim-id"] = net_RO.get("vim_net_id")
616 vld["name"] = net_RO.get("vim_name")
617 vld["status"] = net_RO.get("status")
618 vld["status-detailed"] = net_RO.get("error_msg")
619 ns_update_nsr["vld.{}".format(vld_index)] = vld
620 break
621 else:
622 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
623
tiernoe876f672020-02-13 14:34:48 +0000624 def set_vnfr_at_error(self, db_vnfrs, error_text):
625 try:
626 for db_vnfr in db_vnfrs.values():
627 vnfr_update = {"status": "ERROR"}
628 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
629 if "status" not in vdur:
630 vdur["status"] = "ERROR"
631 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
632 if error_text:
633 vdur["status-detailed"] = str(error_text)
634 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
635 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
636 except DbException as e:
637 self.logger.error("Cannot update vnf. {}".format(e))
638
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf entry for this member-vnf-index; the for/else raises when missing
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                # dot-notation keys accumulated here are written to database at the end
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may return several addresses joined with ';'; keep only the first one
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by RO: nothing to update for this vdu
                        continue
                    # find the RO vm entry matching this vdu id and replica (count-index)
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            # same vdu id but a lower replica: skip and advance the replica counter
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            # keep only the first of the ';'-separated addresses
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy ip/mac of every interface, matched by the RO internal_name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get("ip_address")
                                    ifacer["mac-address"] = interface_RO.get("mac_address")
                                    break
                            else:
                                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                                   "from VIM info"
                                                   .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                                           "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))

                # update internal vld info (vim network id, status...) matched by osm id
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]))

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
tierno5ee02052019-12-05 19:55:02 +0000712 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000713 """
714 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000715 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000716 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
717 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
718 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
719 """
tierno5ee02052019-12-05 19:55:02 +0000720 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
721 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000722 mapping = {}
723 ns_config_info = {"osm-config-mapping": mapping}
724 for vca in vca_deployed_list:
725 if not vca["member-vnf-index"]:
726 continue
727 if not vca["vdu_id"]:
728 mapping[vca["member-vnf-index"]] = vca["application"]
729 else:
730 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
731 vca["application"]
732 return ns_config_info
733
734 @staticmethod
735 def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed):
736 """
737 Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal
738 primitives as verify-ssh-credentials, or config when needed
739 :param desc_primitive_list: information of the descriptor
740 :param vca_deployed: information of the deployed, needed for known if it is related to an NS, VNF, VDU and if
741 this element contains a ssh public key
742 :return: The modified list. Can ba an empty list, but always a list
743 """
744 if desc_primitive_list:
745 primitive_list = desc_primitive_list.copy()
746 else:
747 primitive_list = []
748 # look for primitive config, and get the position. None if not present
749 config_position = None
750 for index, primitive in enumerate(primitive_list):
751 if primitive["name"] == "config":
752 config_position = index
753 break
754
755 # for NS, add always a config primitive if not present (bug 874)
756 if not vca_deployed["member-vnf-index"] and config_position is None:
757 primitive_list.insert(0, {"name": "config", "parameter": []})
758 config_position = 0
759 # for VNF/VDU add verify-ssh-credentials after config
760 if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"):
761 primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []})
762 return primitive_list
763
tierno69f0d382020-05-07 13:08:09 +0000764 async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
765 n2vc_key_list, stage, start_deploy, timeout_ns_deploy):
766 nslcmop_id = db_nslcmop["_id"]
767 target = {
768 "name": db_nsr["name"],
769 "ns": {"vld": []},
770 "vnf": [],
771 "image": deepcopy(db_nsr["image"]),
772 "flavor": deepcopy(db_nsr["flavor"]),
773 "action_id": nslcmop_id,
774 }
775 for image in target["image"]:
776 image["vim_info"] = []
777 for flavor in target["flavor"]:
778 flavor["vim_info"] = []
779
780 ns_params = db_nslcmop.get("operationParams")
781 ssh_keys = []
782 if ns_params.get("ssh_keys"):
783 ssh_keys += ns_params.get("ssh_keys")
784 if n2vc_key_list:
785 ssh_keys += n2vc_key_list
786
787 cp2target = {}
788 for vld_index, vld in enumerate(nsd.get("vld")):
789 target_vld = {"id": vld["id"],
790 "name": vld["name"],
791 "mgmt-network": vld.get("mgmt-network", False),
792 "type": vld.get("type"),
793 "vim_info": [{"vim-network-name": vld.get("vim-network-name"),
794 "vim_account_id": ns_params["vimAccountId"]}],
795 }
796 for cp in vld["vnfd-connection-point-ref"]:
797 cp2target["member_vnf:{}.{}".format(cp["member-vnf-index-ref"], cp["vnfd-connection-point-ref"])] = \
798 "nsrs:{}:vld.{}".format(nsr_id, vld_index)
799 target["ns"]["vld"].append(target_vld)
800 for vnfr in db_vnfrs.values():
801 vnfd = db_vnfds_ref[vnfr["vnfd-ref"]]
802 target_vnf = deepcopy(vnfr)
803 for vld in target_vnf.get("vld", ()):
804 # check if connected to a ns.vld
805 vnf_cp = next((cp for cp in vnfd.get("connection-point", ()) if
806 cp.get("internal-vld-ref") == vld["id"]), None)
807 if vnf_cp:
808 ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"])
809 if cp2target.get(ns_cp):
810 vld["target"] = cp2target[ns_cp]
811 vld["vim_info"] = [{"vim-network-name": vld.get("vim-network-name"),
812 "vim_account_id": vnfr["vim-account-id"]}]
813
814 for vdur in target_vnf.get("vdur", ()):
815 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
816 vdud_index, vdud = next(k for k in enumerate(vnfd["vdu"]) if k[1]["id"] == vdur["vdu-id-ref"])
817 # vdur["additionalParams"] = vnfr.get("additionalParamsForVnf") # TODO additional params for VDU
818
819 if ssh_keys:
820 if deep_get(vdud, ("vdu-configuration", "config-access", "ssh-access", "required")):
821 vdur["ssh-keys"] = ssh_keys
822 vdur["ssh-access-required"] = True
823 elif deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required")) and \
824 any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]):
825 vdur["ssh-keys"] = ssh_keys
826 vdur["ssh-access-required"] = True
827
828 # cloud-init
829 if vdud.get("cloud-init-file"):
830 vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file"))
831 elif vdud.get("cloud-init"):
832 vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], vdud_index)
833
834 # flavor
835 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
836 if not next((vi for vi in ns_flavor["vim_info"] if
837 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
838 ns_flavor["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
839 # image
840 ns_image = target["image"][int(vdur["ns-image-id"])]
841 if not next((vi for vi in ns_image["vim_info"] if
842 vi and vi.get("vim_account_id") == vnfr["vim-account-id"]), None):
843 ns_image["vim_info"].append({"vim_account_id": vnfr["vim-account-id"]})
844
845 vdur["vim_info"] = [{"vim_account_id": vnfr["vim-account-id"]}]
846 target["vnf"].append(target_vnf)
847
848 desc = await self.RO.deploy(nsr_id, target)
849 action_id = desc["action_id"]
850 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage)
851
852 # Updating NSR
853 db_nsr_update = {
854 "_admin.deployed.RO.operational-status": "running",
855 "detailed-status": " ".join(stage)
856 }
857 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
858 self.update_db_2("nsrs", nsr_id, db_nsr_update)
859 self._write_op_status(nslcmop_id, stage)
860 self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id))
861 return
862
863 async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_time, timeout, stage):
864 detailed_status_old = None
865 db_nsr_update = {}
866 while time() <= start_time + timeout:
867 desc_status = await self.RO.status(nsr_id, action_id)
868 if desc_status["status"] == "FAILED":
869 raise NgRoException(desc_status["details"])
870 elif desc_status["status"] == "BUILD":
871 stage[2] = "VIM: ({})".format(desc_status["details"])
872 elif desc_status["status"] == "DONE":
873 stage[2] = "Deployed at VIM"
874 break
875 else:
876 assert False, "ROclient.check_ns_status returns unknown {}".format(desc_status["status"])
877 if stage[2] != detailed_status_old:
878 detailed_status_old = stage[2]
879 db_nsr_update["detailed-status"] = " ".join(stage)
880 self.update_db_2("nsrs", nsr_id, db_nsr_update)
881 self._write_op_status(nslcmop_id, stage)
882 await asyncio.sleep(5, loop=self.loop)
883 else: # timeout_ns_deploy
884 raise NgRoException("Timeout waiting ns to deploy")
885
886 async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
887 db_nsr_update = {}
888 failed_detail = []
889 action_id = None
890 start_deploy = time()
891 try:
892 target = {
893 "ns": {"vld": []},
894 "vnf": [],
895 "image": [],
896 "flavor": [],
897 }
898 desc = await self.RO.deploy(nsr_id, target)
899 action_id = desc["action_id"]
900 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
901 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
902 self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id))
903
904 # wait until done
905 delete_timeout = 20 * 60 # 20 minutes
906 await self._wait_ng_ro(self, nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage)
907
908 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
909 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
910 # delete all nsr
911 await self.RO.delete(nsr_id)
912 except Exception as e:
913 if isinstance(e, NgRoException) and e.http_code == 404: # not found
914 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
915 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
916 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
917 self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id))
918 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
919 failed_detail.append("delete conflict: {}".format(e))
920 self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e))
921 else:
922 failed_detail.append("delete error: {}".format(e))
923 self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e))
924
925 if failed_detail:
926 stage[2] = "Error deleting from VIM"
927 else:
928 stage[2] = "Deleted from VIM"
929 db_nsr_update["detailed-status"] = " ".join(stage)
930 self.update_db_2("nsrs", nsr_id, db_nsr_update)
931 self._write_op_status(nslcmop_id, stage)
932
933 if failed_detail:
934 raise LcmException("; ".join(failed_detail))
935 return
936
tiernoe876f672020-02-13 14:34:48 +0000937 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
938 n2vc_key_list, stage):
tiernoe95ed362020-04-23 08:24:57 +0000939 """
940 Instantiate at RO
941 :param logging_text: preffix text to use at logging
942 :param nsr_id: nsr identity
943 :param nsd: database content of ns descriptor
944 :param db_nsr: database content of ns record
945 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
946 :param db_vnfrs:
947 :param db_vnfds_ref: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
948 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
949 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
950 :return: None or exception
951 """
tiernoe876f672020-02-13 14:34:48 +0000952 try:
953 db_nsr_update = {}
954 RO_descriptor_number = 0 # number of descriptors created at RO
955 vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
956 nslcmop_id = db_nslcmop["_id"]
957 start_deploy = time()
958 ns_params = db_nslcmop.get("operationParams")
959 if ns_params and ns_params.get("timeout_ns_deploy"):
960 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
961 else:
962 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +0200963
tiernoe876f672020-02-13 14:34:48 +0000964 # Check for and optionally request placement optimization. Database will be updated if placement activated
965 stage[2] = "Waiting for Placement."
tierno8790a3d2020-04-23 22:49:52 +0000966 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
967 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
968 for vnfr in db_vnfrs.values():
969 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
970 break
971 else:
972 ns_params["vimAccountId"] == vnfr["vim-account-id"]
quilesj7e13aeb2019-10-08 13:34:55 +0200973
tierno69f0d382020-05-07 13:08:09 +0000974 if self.ng_ro:
975 return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs,
976 db_vnfds_ref, n2vc_key_list, stage, start_deploy,
977 timeout_ns_deploy)
tiernoe876f672020-02-13 14:34:48 +0000978 # deploy RO
tiernoe876f672020-02-13 14:34:48 +0000979 # get vnfds, instantiate at RO
980 for c_vnf in nsd.get("constituent-vnfd", ()):
981 member_vnf_index = c_vnf["member-vnf-index"]
982 vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
983 vnfd_ref = vnfd["id"]
quilesj7e13aeb2019-10-08 13:34:55 +0200984
tiernoe876f672020-02-13 14:34:48 +0000985 stage[2] = "Creating vnfd='{}' member_vnf_index='{}' at RO".format(vnfd_ref, member_vnf_index)
986 db_nsr_update["detailed-status"] = " ".join(stage)
987 self.update_db_2("nsrs", nsr_id, db_nsr_update)
988 self._write_op_status(nslcmop_id, stage)
calvinosanch9f9c6f22019-11-04 13:37:39 +0100989
tiernoe876f672020-02-13 14:34:48 +0000990 # self.logger.debug(logging_text + stage[2])
991 vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, member_vnf_index[:23])
992 vnf_index_2_RO_id[member_vnf_index] = vnfd_id_RO
993 RO_descriptor_number += 1
994
995 # look position at deployed.RO.vnfd if not present it will be appended at the end
996 for index, vnf_deployed in enumerate(db_nsr["_admin"]["deployed"]["RO"]["vnfd"]):
997 if vnf_deployed["member-vnf-index"] == member_vnf_index:
998 break
999 else:
1000 index = len(db_nsr["_admin"]["deployed"]["RO"]["vnfd"])
1001 db_nsr["_admin"]["deployed"]["RO"]["vnfd"].append(None)
1002
1003 # look if present
1004 RO_update = {"member-vnf-index": member_vnf_index}
1005 vnfd_list = await self.RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
1006 if vnfd_list:
1007 RO_update["id"] = vnfd_list[0]["uuid"]
1008 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' exists at RO. Using RO_id={}".
1009 format(vnfd_ref, member_vnf_index, vnfd_list[0]["uuid"]))
1010 else:
1011 vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO, db_vnfrs[c_vnf["member-vnf-index"]].
1012 get("additionalParamsForVnf"), nsr_id)
1013 desc = await self.RO.create("vnfd", descriptor=vnfd_RO)
1014 RO_update["id"] = desc["uuid"]
1015 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' created at RO. RO_id={}".format(
1016 vnfd_ref, member_vnf_index, desc["uuid"]))
1017 db_nsr_update["_admin.deployed.RO.vnfd.{}".format(index)] = RO_update
1018 db_nsr["_admin"]["deployed"]["RO"]["vnfd"][index] = RO_update
1019
1020 # create nsd at RO
1021 nsd_ref = nsd["id"]
1022
1023 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1024 db_nsr_update["detailed-status"] = " ".join(stage)
1025 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1026 self._write_op_status(nslcmop_id, stage)
1027
1028 # self.logger.debug(logging_text + stage[2])
1029 RO_osm_nsd_id = "{}.{}.{}".format(nsr_id, RO_descriptor_number, nsd_ref[:23])
tiernod8323042019-08-09 11:32:23 +00001030 RO_descriptor_number += 1
tiernoe876f672020-02-13 14:34:48 +00001031 nsd_list = await self.RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
1032 if nsd_list:
1033 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = nsd_list[0]["uuid"]
1034 self.logger.debug(logging_text + "nsd={} exists at RO. Using RO_id={}".format(
1035 nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001036 else:
tiernoe876f672020-02-13 14:34:48 +00001037 nsd_RO = deepcopy(nsd)
1038 nsd_RO["id"] = RO_osm_nsd_id
1039 nsd_RO.pop("_id", None)
1040 nsd_RO.pop("_admin", None)
1041 for c_vnf in nsd_RO.get("constituent-vnfd", ()):
1042 member_vnf_index = c_vnf["member-vnf-index"]
1043 c_vnf["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
1044 for c_vld in nsd_RO.get("vld", ()):
1045 for cp in c_vld.get("vnfd-connection-point-ref", ()):
1046 member_vnf_index = cp["member-vnf-index-ref"]
1047 cp["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
tiernod8323042019-08-09 11:32:23 +00001048
tiernoe876f672020-02-13 14:34:48 +00001049 desc = await self.RO.create("nsd", descriptor=nsd_RO)
1050 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1051 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = desc["uuid"]
1052 self.logger.debug(logging_text + "nsd={} created at RO. RO_id={}".format(nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +00001053 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1054
tiernoe876f672020-02-13 14:34:48 +00001055 # Crate ns at RO
1056 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1057 db_nsr_update["detailed-status"] = " ".join(stage)
1058 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1059 self._write_op_status(nslcmop_id, stage)
tiernod8323042019-08-09 11:32:23 +00001060
tiernoe876f672020-02-13 14:34:48 +00001061 # if present use it unless in error status
1062 RO_nsr_id = deep_get(db_nsr, ("_admin", "deployed", "RO", "nsr_id"))
1063 if RO_nsr_id:
1064 try:
1065 stage[2] = "Looking for existing ns at RO"
1066 db_nsr_update["detailed-status"] = " ".join(stage)
1067 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1068 self._write_op_status(nslcmop_id, stage)
1069 # self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1070 desc = await self.RO.show("ns", RO_nsr_id)
tiernod8323042019-08-09 11:32:23 +00001071
tiernoe876f672020-02-13 14:34:48 +00001072 except ROclient.ROClientException as e:
1073 if e.http_code != HTTPStatus.NOT_FOUND:
1074 raise
1075 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1076 if RO_nsr_id:
1077 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1078 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1079 if ns_status == "ERROR":
1080 stage[2] = "Deleting ns at RO. RO_ns_id={}".format(RO_nsr_id)
1081 self.logger.debug(logging_text + stage[2])
1082 await self.RO.delete("ns", RO_nsr_id)
1083 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1084 if not RO_nsr_id:
1085 stage[2] = "Checking dependencies"
1086 db_nsr_update["detailed-status"] = " ".join(stage)
1087 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1088 self._write_op_status(nslcmop_id, stage)
1089 # self.logger.debug(logging_text + stage[2])
tiernod8323042019-08-09 11:32:23 +00001090
tiernoe876f672020-02-13 14:34:48 +00001091 # check if VIM is creating and wait look if previous tasks in process
1092 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account", ns_params["vimAccountId"])
1093 if task_dependency:
1094 stage[2] = "Waiting for related tasks '{}' to be completed".format(task_name)
1095 self.logger.debug(logging_text + stage[2])
1096 await asyncio.wait(task_dependency, timeout=3600)
1097 if ns_params.get("vnf"):
1098 for vnf in ns_params["vnf"]:
1099 if "vimAccountId" in vnf:
1100 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account",
1101 vnf["vimAccountId"])
1102 if task_dependency:
1103 stage[2] = "Waiting for related tasks '{}' to be completed.".format(task_name)
1104 self.logger.debug(logging_text + stage[2])
1105 await asyncio.wait(task_dependency, timeout=3600)
1106
1107 stage[2] = "Checking instantiation parameters."
tiernoe95ed362020-04-23 08:24:57 +00001108 RO_ns_params = self._ns_params_2_RO(ns_params, nsd, db_vnfds_ref, db_vnfrs, n2vc_key_list)
tiernoe876f672020-02-13 14:34:48 +00001109 stage[2] = "Deploying ns at VIM."
1110 db_nsr_update["detailed-status"] = " ".join(stage)
1111 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1112 self._write_op_status(nslcmop_id, stage)
1113
1114 desc = await self.RO.create("ns", descriptor=RO_ns_params, name=db_nsr["name"], scenario=RO_nsd_uuid)
1115 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = desc["uuid"]
1116 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1117 db_nsr_update["_admin.deployed.RO.nsr_status"] = "BUILD"
1118 self.logger.debug(logging_text + "ns created at RO. RO_id={}".format(desc["uuid"]))
1119
1120 # wait until NS is ready
1121 stage[2] = "Waiting VIM to deploy ns."
1122 db_nsr_update["detailed-status"] = " ".join(stage)
1123 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1124 self._write_op_status(nslcmop_id, stage)
1125 detailed_status_old = None
1126 self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1127
1128 old_desc = None
1129 while time() <= start_deploy + timeout_ns_deploy:
tiernod8323042019-08-09 11:32:23 +00001130 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001131
tiernoe876f672020-02-13 14:34:48 +00001132 # deploymentStatus
1133 if desc != old_desc:
1134 # desc has changed => update db
1135 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
1136 old_desc = desc
tiernod8323042019-08-09 11:32:23 +00001137
tiernoe876f672020-02-13 14:34:48 +00001138 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1139 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1140 if ns_status == "ERROR":
1141 raise ROclient.ROClientException(ns_status_info)
1142 elif ns_status == "BUILD":
1143 stage[2] = "VIM: ({})".format(ns_status_info)
1144 elif ns_status == "ACTIVE":
1145 stage[2] = "Waiting for management IP address reported by the VIM. Updating VNFRs."
1146 try:
1147 self.ns_update_vnfr(db_vnfrs, desc)
1148 break
1149 except LcmExceptionNoMgmtIP:
1150 pass
1151 else:
1152 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
1153 if stage[2] != detailed_status_old:
1154 detailed_status_old = stage[2]
1155 db_nsr_update["detailed-status"] = " ".join(stage)
1156 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1157 self._write_op_status(nslcmop_id, stage)
1158 await asyncio.sleep(5, loop=self.loop)
1159 else: # timeout_ns_deploy
1160 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tiernod8323042019-08-09 11:32:23 +00001161
tiernoe876f672020-02-13 14:34:48 +00001162 # Updating NSR
1163 self.ns_update_nsr(db_nsr_update, db_nsr, desc)
tiernod8323042019-08-09 11:32:23 +00001164
tiernoe876f672020-02-13 14:34:48 +00001165 db_nsr_update["_admin.deployed.RO.operational-status"] = "running"
1166 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1167 stage[2] = "Deployed at VIM"
1168 db_nsr_update["detailed-status"] = " ".join(stage)
1169 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1170 self._write_op_status(nslcmop_id, stage)
1171 # await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, "_admin.deployed", db_nsr_update)
1172 # self.logger.debug(logging_text + "Deployed at VIM")
tierno69f0d382020-05-07 13:08:09 +00001173 except (ROclient.ROClientException, LcmException, DbException, NgRoException) as e:
tierno067e04a2020-03-31 12:53:13 +00001174 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +00001175 self.set_vnfr_at_error(db_vnfrs, str(e))
1176 raise
quilesj7e13aeb2019-10-08 13:34:55 +02001177
tiernoa5088192019-11-26 16:12:53 +00001178 async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
1179 """
1180 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1181 :param logging_text: prefix use for logging
1182 :param nsr_id:
1183 :param vnfr_id:
1184 :param vdu_id:
1185 :param vdu_index:
1186 :param pub_key: public ssh key to inject, None to skip
1187 :param user: user to apply the public ssh key
1188 :return: IP address
1189 """
quilesj7e13aeb2019-10-08 13:34:55 +02001190
tiernoa5088192019-11-26 16:12:53 +00001191 # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
tiernod8323042019-08-09 11:32:23 +00001192 ro_nsr_id = None
1193 ip_address = None
1194 nb_tries = 0
1195 target_vdu_id = None
quilesj3149f262019-12-03 10:58:10 +00001196 ro_retries = 0
quilesj7e13aeb2019-10-08 13:34:55 +02001197
tiernod8323042019-08-09 11:32:23 +00001198 while True:
quilesj7e13aeb2019-10-08 13:34:55 +02001199
quilesj3149f262019-12-03 10:58:10 +00001200 ro_retries += 1
1201 if ro_retries >= 360: # 1 hour
1202 raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))
1203
tiernod8323042019-08-09 11:32:23 +00001204 await asyncio.sleep(10, loop=self.loop)
quilesj7e13aeb2019-10-08 13:34:55 +02001205
1206 # get ip address
tiernod8323042019-08-09 11:32:23 +00001207 if not target_vdu_id:
1208 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
quilesj3149f262019-12-03 10:58:10 +00001209
1210 if not vdu_id: # for the VNF case
tiernoe876f672020-02-13 14:34:48 +00001211 if db_vnfr.get("status") == "ERROR":
1212 raise LcmException("Cannot inject ssh-key because target VNF is in error state")
tiernod8323042019-08-09 11:32:23 +00001213 ip_address = db_vnfr.get("ip-address")
1214 if not ip_address:
1215 continue
quilesj3149f262019-12-03 10:58:10 +00001216 vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
1217 else: # VDU case
1218 vdur = next((x for x in get_iterable(db_vnfr, "vdur")
1219 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
1220
tierno0e8c3f02020-03-12 17:18:21 +00001221 if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
1222 vdur = db_vnfr["vdur"][0]
quilesj3149f262019-12-03 10:58:10 +00001223 if not vdur:
tierno0e8c3f02020-03-12 17:18:21 +00001224 raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
1225 vdu_index))
quilesj7e13aeb2019-10-08 13:34:55 +02001226
tierno0e8c3f02020-03-12 17:18:21 +00001227 if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE":
quilesj3149f262019-12-03 10:58:10 +00001228 ip_address = vdur.get("ip-address")
1229 if not ip_address:
1230 continue
1231 target_vdu_id = vdur["vdu-id-ref"]
1232 elif vdur.get("status") == "ERROR":
1233 raise LcmException("Cannot inject ssh-key because target VM is in error state")
1234
tiernod8323042019-08-09 11:32:23 +00001235 if not target_vdu_id:
1236 continue
tiernod8323042019-08-09 11:32:23 +00001237
quilesj7e13aeb2019-10-08 13:34:55 +02001238 # inject public key into machine
1239 if pub_key and user:
tiernoe876f672020-02-13 14:34:48 +00001240 # wait until NS is deployed at RO
1241 if not ro_nsr_id:
1242 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1243 ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
1244 if not ro_nsr_id:
1245 continue
1246
tiernoa5088192019-11-26 16:12:53 +00001247 # self.logger.debug(logging_text + "Inserting RO key")
tierno0e8c3f02020-03-12 17:18:21 +00001248 if vdur.get("pdu-type"):
1249 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1250 return ip_address
quilesj7e13aeb2019-10-08 13:34:55 +02001251 try:
1252 ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
tierno69f0d382020-05-07 13:08:09 +00001253 if self.ng_ro:
1254 target = {"action": "inject_ssh_key", "key": pub_key, "user": user,
1255 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdu_id}]}],
1256 }
1257 await self.RO.deploy(nsr_id, target)
1258 else:
1259 result_dict = await self.RO.create_action(
1260 item="ns",
1261 item_id_name=ro_nsr_id,
1262 descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
1263 )
1264 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1265 if not result_dict or not isinstance(result_dict, dict):
1266 raise LcmException("Unknown response from RO when injecting key")
1267 for result in result_dict.values():
1268 if result.get("vim_result") == 200:
1269 break
1270 else:
1271 raise ROclient.ROClientException("error injecting key: {}".format(
1272 result.get("description")))
1273 break
1274 except NgRoException as e:
1275 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001276 except ROclient.ROClientException as e:
tiernoa5088192019-11-26 16:12:53 +00001277 if not nb_tries:
1278 self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
1279 format(e, 20*10))
quilesj7e13aeb2019-10-08 13:34:55 +02001280 nb_tries += 1
tiernoa5088192019-11-26 16:12:53 +00001281 if nb_tries >= 20:
quilesj7e13aeb2019-10-08 13:34:55 +02001282 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001283 else:
quilesj7e13aeb2019-10-08 13:34:55 +02001284 break
1285
1286 return ip_address
1287
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: nsr identity, used to re-read the current configurationStatus from database
        :param vca_deployed_list: _admin.deployed.VCA list; only the entry at vca_index is used
        :param vca_index: position of the VCA whose dependencies must be waited for
        :raises LcmException: when a dependent charm is BROKEN or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): each iteration sleeps 10s, so 300 iterations allow up to ~50 minutes,
        # not 300 seconds — confirm this is the intended timeout
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # an NS-level VCA (no member-vnf-index) depends on every other VCA; a VNF-level
                # VCA depends only on VCAs with the same member-vnf-index
                if not my_vca.get("member-vnf-index") or \
                        (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == 'READY':
                        continue
                    elif internal_status == 'BROKEN':
                        raise LcmException("Configuration aborted because dependent charm/s has failed")
                    else:
                        # dependency still in progress: stop scanning and poll again after sleeping
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1321
tiernoe876f672020-02-13 14:34:48 +00001322 async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
1323 config_descriptor, deploy_params, base_folder, nslcmop_id, stage):
tiernod8323042019-08-09 11:32:23 +00001324 nsr_id = db_nsr["_id"]
1325 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
tiernoda6fb102019-11-23 00:36:52 +00001326 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernod8323042019-08-09 11:32:23 +00001327 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
quilesj7e13aeb2019-10-08 13:34:55 +02001328 db_dict = {
1329 'collection': 'nsrs',
1330 'filter': {'_id': nsr_id},
1331 'path': db_update_entry
1332 }
tiernod8323042019-08-09 11:32:23 +00001333 step = ""
1334 try:
quilesj3655ae02019-12-12 16:08:35 +00001335
1336 element_type = 'NS'
1337 element_under_configuration = nsr_id
1338
tiernod8323042019-08-09 11:32:23 +00001339 vnfr_id = None
1340 if db_vnfr:
1341 vnfr_id = db_vnfr["_id"]
1342
1343 namespace = "{nsi}.{ns}".format(
1344 nsi=nsi_id if nsi_id else "",
1345 ns=nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001346
tiernod8323042019-08-09 11:32:23 +00001347 if vnfr_id:
quilesj3655ae02019-12-12 16:08:35 +00001348 element_type = 'VNF'
1349 element_under_configuration = vnfr_id
quilesjb8a35dd2020-01-09 15:10:14 +00001350 namespace += ".{}".format(vnfr_id)
tiernod8323042019-08-09 11:32:23 +00001351 if vdu_id:
1352 namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
quilesj3655ae02019-12-12 16:08:35 +00001353 element_type = 'VDU'
quilesjb8a35dd2020-01-09 15:10:14 +00001354 element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
tierno51183952020-04-03 15:48:18 +00001355 elif kdu_name:
1356 namespace += ".{}".format(kdu_name)
1357 element_type = 'KDU'
1358 element_under_configuration = kdu_name
tiernod8323042019-08-09 11:32:23 +00001359
1360 # Get artifact path
David Garcia485b2912019-12-04 14:01:50 +01001361 artifact_path = "{}/{}/charms/{}".format(
tiernod8323042019-08-09 11:32:23 +00001362 base_folder["folder"],
1363 base_folder["pkg-dir"],
1364 config_descriptor["juju"]["charm"]
1365 )
1366
quilesj7e13aeb2019-10-08 13:34:55 +02001367 is_proxy_charm = deep_get(config_descriptor, ('juju', 'charm')) is not None
1368 if deep_get(config_descriptor, ('juju', 'proxy')) is False:
tiernod8323042019-08-09 11:32:23 +00001369 is_proxy_charm = False
1370
1371 # n2vc_redesign STEP 3.1
quilesj7e13aeb2019-10-08 13:34:55 +02001372
1373 # find old ee_id if exists
tiernod8323042019-08-09 11:32:23 +00001374 ee_id = vca_deployed.get("ee_id")
tiernod8323042019-08-09 11:32:23 +00001375
quilesj7e13aeb2019-10-08 13:34:55 +02001376 # create or register execution environment in VCA
1377 if is_proxy_charm:
quilesj3655ae02019-12-12 16:08:35 +00001378
tiernoc231a872020-01-21 08:49:05 +00001379 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001380 nsr_id=nsr_id,
1381 vca_index=vca_index,
1382 status='CREATING',
1383 element_under_configuration=element_under_configuration,
1384 element_type=element_type
1385 )
1386
quilesj7e13aeb2019-10-08 13:34:55 +02001387 step = "create execution environment"
1388 self.logger.debug(logging_text + step)
tierno3bedc9b2019-11-27 15:46:57 +00001389 ee_id, credentials = await self.n2vc.create_execution_environment(namespace=namespace,
1390 reuse_ee_id=ee_id,
1391 db_dict=db_dict)
quilesj3655ae02019-12-12 16:08:35 +00001392
quilesj7e13aeb2019-10-08 13:34:55 +02001393 else:
tierno3bedc9b2019-11-27 15:46:57 +00001394 step = "Waiting to VM being up and getting IP address"
1395 self.logger.debug(logging_text + step)
1396 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1397 user=None, pub_key=None)
1398 credentials = {"hostname": rw_mgmt_ip}
quilesj7e13aeb2019-10-08 13:34:55 +02001399 # get username
tierno3bedc9b2019-11-27 15:46:57 +00001400 username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
quilesj7e13aeb2019-10-08 13:34:55 +02001401 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1402 # merged. Meanwhile let's get username from initial-config-primitive
tierno3bedc9b2019-11-27 15:46:57 +00001403 if not username and config_descriptor.get("initial-config-primitive"):
1404 for config_primitive in config_descriptor["initial-config-primitive"]:
1405 for param in config_primitive.get("parameter", ()):
1406 if param["name"] == "ssh-username":
1407 username = param["value"]
1408 break
1409 if not username:
1410 raise LcmException("Cannot determine the username neither with 'initial-config-promitive' nor with "
1411 "'config-access.ssh-access.default-user'")
1412 credentials["username"] = username
quilesj7e13aeb2019-10-08 13:34:55 +02001413 # n2vc_redesign STEP 3.2
tierno3bedc9b2019-11-27 15:46:57 +00001414
tiernoc231a872020-01-21 08:49:05 +00001415 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001416 nsr_id=nsr_id,
1417 vca_index=vca_index,
1418 status='REGISTERING',
1419 element_under_configuration=element_under_configuration,
1420 element_type=element_type
1421 )
1422
tierno3bedc9b2019-11-27 15:46:57 +00001423 step = "register execution environment {}".format(credentials)
quilesj7e13aeb2019-10-08 13:34:55 +02001424 self.logger.debug(logging_text + step)
tierno3bedc9b2019-11-27 15:46:57 +00001425 ee_id = await self.n2vc.register_execution_environment(credentials=credentials, namespace=namespace,
1426 db_dict=db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02001427
1428 # for compatibility with MON/POL modules, the need model and application name at database
1429 # TODO ask to N2VC instead of assuming the format "model_name.application_name"
1430 ee_id_parts = ee_id.split('.')
1431 model_name = ee_id_parts[0]
1432 application_name = ee_id_parts[1]
tierno51183952020-04-03 15:48:18 +00001433 db_nsr_update = {db_update_entry + "model": model_name,
1434 db_update_entry + "application": application_name,
1435 db_update_entry + "ee_id": ee_id}
tiernod8323042019-08-09 11:32:23 +00001436
1437 # n2vc_redesign STEP 3.3
tierno3bedc9b2019-11-27 15:46:57 +00001438
tiernod8323042019-08-09 11:32:23 +00001439 step = "Install configuration Software"
quilesj3655ae02019-12-12 16:08:35 +00001440
tiernoc231a872020-01-21 08:49:05 +00001441 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001442 nsr_id=nsr_id,
1443 vca_index=vca_index,
1444 status='INSTALLING SW',
1445 element_under_configuration=element_under_configuration,
tierno51183952020-04-03 15:48:18 +00001446 element_type=element_type,
1447 other_update=db_nsr_update
quilesj3655ae02019-12-12 16:08:35 +00001448 )
1449
tierno3bedc9b2019-11-27 15:46:57 +00001450 # TODO check if already done
quilesj7e13aeb2019-10-08 13:34:55 +02001451 self.logger.debug(logging_text + step)
David Garcia18a63322020-04-01 16:14:59 +02001452 config = None
1453 if not is_proxy_charm:
1454 initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
1455 if initial_config_primitive_list:
1456 for primitive in initial_config_primitive_list:
1457 if primitive["name"] == "config":
1458 config = self._map_primitive_params(
1459 primitive,
1460 {},
1461 deploy_params
1462 )
1463 break
David Garcia06a11f22020-03-25 18:21:37 +01001464 num_units = 1
1465 if is_proxy_charm:
1466 if element_type == "NS":
1467 num_units = db_nsr.get("config-units") or 1
1468 elif element_type == "VNF":
1469 num_units = db_vnfr.get("config-units") or 1
1470 elif element_type == "VDU":
1471 for v in db_vnfr["vdur"]:
1472 if vdu_id == v["vdu-id-ref"]:
1473 num_units = v.get("config-units") or 1
1474 break
1475
David Garcia18a63322020-04-01 16:14:59 +02001476 await self.n2vc.install_configuration_sw(
1477 ee_id=ee_id,
1478 artifact_path=artifact_path,
1479 db_dict=db_dict,
David Garcia06a11f22020-03-25 18:21:37 +01001480 config=config,
1481 num_units=num_units
David Garcia18a63322020-04-01 16:14:59 +02001482 )
quilesj7e13aeb2019-10-08 13:34:55 +02001483
quilesj63f90042020-01-17 09:53:55 +00001484 # write in db flag of configuration_sw already installed
1485 self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})
1486
1487 # add relations for this VCA (wait for other peers related with this VCA)
1488 await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id, vca_index=vca_index)
1489
quilesj7e13aeb2019-10-08 13:34:55 +02001490 # if SSH access is required, then get execution environment SSH public
tierno3bedc9b2019-11-27 15:46:57 +00001491 if is_proxy_charm: # if native charm we have waited already to VM be UP
1492 pub_key = None
1493 user = None
1494 if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
1495 # Needed to inject a ssh key
1496 user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1497 step = "Install configuration Software, getting public ssh key"
1498 pub_key = await self.n2vc.get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02001499
tiernoacc90452019-12-10 11:06:54 +00001500 step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
tierno3bedc9b2019-11-27 15:46:57 +00001501 else:
1502 step = "Waiting to VM being up and getting IP address"
1503 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02001504
tierno3bedc9b2019-11-27 15:46:57 +00001505 # n2vc_redesign STEP 5.1
1506 # wait for RO (ip-address) Insert pub_key into VM
tierno5ee02052019-12-05 19:55:02 +00001507 if vnfr_id:
1508 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1509 user=user, pub_key=pub_key)
1510 else:
1511 rw_mgmt_ip = None # This is for a NS configuration
tierno3bedc9b2019-11-27 15:46:57 +00001512
1513 self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
quilesj7e13aeb2019-10-08 13:34:55 +02001514
tiernoa5088192019-11-26 16:12:53 +00001515 # store rw_mgmt_ip in deploy params for later replacement
quilesj7e13aeb2019-10-08 13:34:55 +02001516 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
tiernod8323042019-08-09 11:32:23 +00001517
1518 # n2vc_redesign STEP 6 Execute initial config primitive
quilesj7e13aeb2019-10-08 13:34:55 +02001519 step = 'execute initial config primitive'
tiernoa5088192019-11-26 16:12:53 +00001520 initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
quilesj7e13aeb2019-10-08 13:34:55 +02001521
1522 # sort initial config primitives by 'seq'
quilesj63f90042020-01-17 09:53:55 +00001523 if initial_config_primitive_list:
1524 try:
1525 initial_config_primitive_list.sort(key=lambda val: int(val['seq']))
1526 except Exception as e:
1527 self.logger.error(logging_text + step + ": " + str(e))
1528 else:
1529 self.logger.debug(logging_text + step + ": No initial-config-primitive")
quilesj7e13aeb2019-10-08 13:34:55 +02001530
tiernoda6fb102019-11-23 00:36:52 +00001531 # add config if not present for NS charm
1532 initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
1533 vca_deployed)
quilesj3655ae02019-12-12 16:08:35 +00001534
1535 # wait for dependent primitives execution (NS -> VNF -> VDU)
tierno5ee02052019-12-05 19:55:02 +00001536 if initial_config_primitive_list:
1537 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
quilesj3655ae02019-12-12 16:08:35 +00001538
1539 # stage, in function of element type: vdu, kdu, vnf or ns
1540 my_vca = vca_deployed_list[vca_index]
1541 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1542 # VDU or KDU
tiernoe876f672020-02-13 14:34:48 +00001543 stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
quilesj3655ae02019-12-12 16:08:35 +00001544 elif my_vca.get("member-vnf-index"):
1545 # VNF
tiernoe876f672020-02-13 14:34:48 +00001546 stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
quilesj3655ae02019-12-12 16:08:35 +00001547 else:
1548 # NS
tiernoe876f672020-02-13 14:34:48 +00001549 stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'
quilesj3655ae02019-12-12 16:08:35 +00001550
tiernoc231a872020-01-21 08:49:05 +00001551 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001552 nsr_id=nsr_id,
1553 vca_index=vca_index,
1554 status='EXECUTING PRIMITIVE'
1555 )
1556
1557 self._write_op_status(
1558 op_id=nslcmop_id,
1559 stage=stage
1560 )
1561
tiernoe876f672020-02-13 14:34:48 +00001562 check_if_terminated_needed = True
tiernod8323042019-08-09 11:32:23 +00001563 for initial_config_primitive in initial_config_primitive_list:
tiernoda6fb102019-11-23 00:36:52 +00001564 # adding information on the vca_deployed if it is a NS execution environment
1565 if not vca_deployed["member-vnf-index"]:
David Garciad4816682019-12-09 14:57:43 +01001566 deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
tiernod8323042019-08-09 11:32:23 +00001567 # TODO check if already done
1568 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
tierno3bedc9b2019-11-27 15:46:57 +00001569
tiernod8323042019-08-09 11:32:23 +00001570 step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
1571 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02001572 await self.n2vc.exec_primitive(
1573 ee_id=ee_id,
1574 primitive_name=initial_config_primitive["name"],
1575 params_dict=primitive_params_,
1576 db_dict=db_dict
1577 )
tiernoe876f672020-02-13 14:34:48 +00001578 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
1579 if check_if_terminated_needed:
1580 if config_descriptor.get('terminate-config-primitive'):
1581 self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
1582 check_if_terminated_needed = False
quilesj3655ae02019-12-12 16:08:35 +00001583
tiernod8323042019-08-09 11:32:23 +00001584 # TODO register in database that primitive is done
quilesj7e13aeb2019-10-08 13:34:55 +02001585
1586 step = "instantiated at VCA"
1587 self.logger.debug(logging_text + step)
1588
tiernoc231a872020-01-21 08:49:05 +00001589 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001590 nsr_id=nsr_id,
1591 vca_index=vca_index,
1592 status='READY'
1593 )
1594
tiernod8323042019-08-09 11:32:23 +00001595 except Exception as e: # TODO not use Exception but N2VC exception
quilesj3655ae02019-12-12 16:08:35 +00001596 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
tiernoe876f672020-02-13 14:34:48 +00001597 if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
1598 self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
tiernoc231a872020-01-21 08:49:05 +00001599 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001600 nsr_id=nsr_id,
1601 vca_index=vca_index,
1602 status='BROKEN'
1603 )
tiernoe876f672020-02-13 14:34:48 +00001604 raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00001605
quilesj4cda56b2019-12-05 10:02:20 +00001606 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
tiernoa2143262020-03-27 16:20:40 +00001607 error_description: str = None, error_detail: str = None, other_update: dict = None):
tiernoe876f672020-02-13 14:34:48 +00001608 """
1609 Update db_nsr fields.
1610 :param nsr_id:
1611 :param ns_state:
1612 :param current_operation:
1613 :param current_operation_id:
1614 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00001615 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00001616 :param other_update: Other required changes at database if provided, will be cleared
1617 :return:
1618 """
quilesj4cda56b2019-12-05 10:02:20 +00001619 try:
tiernoe876f672020-02-13 14:34:48 +00001620 db_dict = other_update or {}
1621 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1622 db_dict["_admin.current-operation"] = current_operation_id
1623 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
quilesj4cda56b2019-12-05 10:02:20 +00001624 db_dict["currentOperation"] = current_operation
1625 db_dict["currentOperationID"] = current_operation_id
1626 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00001627 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00001628
1629 if ns_state:
1630 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00001631 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001632 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001633 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1634
tiernoe876f672020-02-13 14:34:48 +00001635 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1636 operation_state: str = None, other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001637 try:
tiernoe876f672020-02-13 14:34:48 +00001638 db_dict = other_update or {}
quilesj3655ae02019-12-12 16:08:35 +00001639 db_dict['queuePosition'] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00001640 if isinstance(stage, list):
1641 db_dict['stage'] = stage[0]
1642 db_dict['detailed-status'] = " ".join(stage)
1643 elif stage is not None:
1644 db_dict['stage'] = str(stage)
1645
1646 if error_message is not None:
quilesj3655ae02019-12-12 16:08:35 +00001647 db_dict['errorMessage'] = error_message
tiernoe876f672020-02-13 14:34:48 +00001648 if operation_state is not None:
1649 db_dict['operationState'] = operation_state
1650 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00001651 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001652 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001653 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1654
tierno51183952020-04-03 15:48:18 +00001655 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00001656 try:
tierno51183952020-04-03 15:48:18 +00001657 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00001658 # configurationStatus
1659 config_status = db_nsr.get('configurationStatus')
1660 if config_status:
tierno51183952020-04-03 15:48:18 +00001661 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1662 enumerate(config_status) if v}
quilesj3655ae02019-12-12 16:08:35 +00001663 # update status
tierno51183952020-04-03 15:48:18 +00001664 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001665
tiernoe876f672020-02-13 14:34:48 +00001666 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001667 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1668
quilesj63f90042020-01-17 09:53:55 +00001669 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
tierno51183952020-04-03 15:48:18 +00001670 element_under_configuration: str = None, element_type: str = None,
1671 other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001672
1673 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1674 # .format(vca_index, status))
1675
1676 try:
1677 db_path = 'configurationStatus.{}.'.format(vca_index)
tierno51183952020-04-03 15:48:18 +00001678 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00001679 if status:
1680 db_dict[db_path + 'status'] = status
quilesj3655ae02019-12-12 16:08:35 +00001681 if element_under_configuration:
1682 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
1683 if element_type:
1684 db_dict[db_path + 'elementType'] = element_type
1685 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001686 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001687 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
1688 .format(status, nsr_id, vca_index, e))
quilesj4cda56b2019-12-05 10:02:20 +00001689
tierno38089af2020-04-16 07:56:58 +00001690 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
1691 """
1692 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
1693 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
1694 Database is used because the result can be obtained from a different LCM worker in case of HA.
1695 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
1696 :param db_nslcmop: database content of nslcmop
1697 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
tierno8790a3d2020-04-23 22:49:52 +00001698 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
1699 computed 'vim-account-id'
tierno38089af2020-04-16 07:56:58 +00001700 """
tierno8790a3d2020-04-23 22:49:52 +00001701 modified = False
tierno38089af2020-04-16 07:56:58 +00001702 nslcmop_id = db_nslcmop['_id']
magnussonle9198bb2020-01-21 13:00:51 +01001703 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
1704 if placement_engine == "PLA":
tierno38089af2020-04-16 07:56:58 +00001705 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
1706 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
magnussonle9198bb2020-01-21 13:00:51 +01001707 db_poll_interval = 5
tierno38089af2020-04-16 07:56:58 +00001708 wait = db_poll_interval * 10
magnussonle9198bb2020-01-21 13:00:51 +01001709 pla_result = None
1710 while not pla_result and wait >= 0:
1711 await asyncio.sleep(db_poll_interval)
1712 wait -= db_poll_interval
tierno38089af2020-04-16 07:56:58 +00001713 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
magnussonle9198bb2020-01-21 13:00:51 +01001714 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
1715
1716 if not pla_result:
tierno38089af2020-04-16 07:56:58 +00001717 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
magnussonle9198bb2020-01-21 13:00:51 +01001718
1719 for pla_vnf in pla_result['vnf']:
1720 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
1721 if not pla_vnf.get('vimAccountId') or not vnfr:
1722 continue
tierno8790a3d2020-04-23 22:49:52 +00001723 modified = True
magnussonle9198bb2020-01-21 13:00:51 +01001724 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
tierno38089af2020-04-16 07:56:58 +00001725 # Modifies db_vnfrs
1726 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
tierno8790a3d2020-04-23 22:49:52 +00001727 return modified
magnussonle9198bb2020-01-21 13:00:51 +01001728
1729 def update_nsrs_with_pla_result(self, params):
1730 try:
1731 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
1732 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
1733 except Exception as e:
1734 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
1735
tierno59d22d22018-09-25 18:10:19 +02001736 async def instantiate(self, nsr_id, nslcmop_id):
quilesj7e13aeb2019-10-08 13:34:55 +02001737 """
1738
1739 :param nsr_id: ns instance to deploy
1740 :param nslcmop_id: operation to run
1741 :return:
1742 """
kuused124bfe2019-06-18 12:09:24 +02001743
1744 # Try to lock HA task here
1745 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
1746 if not task_is_locked_by_me:
quilesj3655ae02019-12-12 16:08:35 +00001747 self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
kuused124bfe2019-06-18 12:09:24 +02001748 return
1749
tierno59d22d22018-09-25 18:10:19 +02001750 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
1751 self.logger.debug(logging_text + "Enter")
quilesj7e13aeb2019-10-08 13:34:55 +02001752
sousaedua0deb2d2020-04-21 12:08:14 +01001753 # Sync from FSMongo
1754 self.fs.sync()
1755
tierno59d22d22018-09-25 18:10:19 +02001756 # get all needed from database
quilesj7e13aeb2019-10-08 13:34:55 +02001757
1758 # database nsrs record
tierno59d22d22018-09-25 18:10:19 +02001759 db_nsr = None
quilesj7e13aeb2019-10-08 13:34:55 +02001760
1761 # database nslcmops record
tierno59d22d22018-09-25 18:10:19 +02001762 db_nslcmop = None
quilesj7e13aeb2019-10-08 13:34:55 +02001763
1764 # update operation on nsrs
tiernoe876f672020-02-13 14:34:48 +00001765 db_nsr_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001766 # update operation on nslcmops
tierno59d22d22018-09-25 18:10:19 +02001767 db_nslcmop_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001768
tierno59d22d22018-09-25 18:10:19 +02001769 nslcmop_operation_state = None
quilesj7e13aeb2019-10-08 13:34:55 +02001770 db_vnfrs = {} # vnf's info indexed by member-index
1771 # n2vc_info = {}
tiernoe876f672020-02-13 14:34:48 +00001772 tasks_dict_info = {} # from task to info text
tierno59d22d22018-09-25 18:10:19 +02001773 exc = None
tiernoe876f672020-02-13 14:34:48 +00001774 error_list = []
1775 stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
1776 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02001777 try:
kuused124bfe2019-06-18 12:09:24 +02001778 # wait for any previous tasks in process
1779 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
1780
quilesj7e13aeb2019-10-08 13:34:55 +02001781 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
tiernoe876f672020-02-13 14:34:48 +00001782 stage[1] = "Reading from database,"
quilesj4cda56b2019-12-05 10:02:20 +00001783 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
tiernoe876f672020-02-13 14:34:48 +00001784 db_nsr_update["detailed-status"] = "creating"
1785 db_nsr_update["operational-status"] = "init"
quilesj4cda56b2019-12-05 10:02:20 +00001786 self._write_ns_status(
1787 nsr_id=nsr_id,
1788 ns_state="BUILDING",
1789 current_operation="INSTANTIATING",
tiernoe876f672020-02-13 14:34:48 +00001790 current_operation_id=nslcmop_id,
1791 other_update=db_nsr_update
1792 )
1793 self._write_op_status(
1794 op_id=nslcmop_id,
1795 stage=stage,
1796 queuePosition=0
quilesj4cda56b2019-12-05 10:02:20 +00001797 )
1798
quilesj7e13aeb2019-10-08 13:34:55 +02001799 # read from db: operation
tiernoe876f672020-02-13 14:34:48 +00001800 stage[1] = "Getting nslcmop={} from db".format(nslcmop_id)
tierno59d22d22018-09-25 18:10:19 +02001801 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
tierno744303e2020-01-13 16:46:31 +00001802 ns_params = db_nslcmop.get("operationParams")
1803 if ns_params and ns_params.get("timeout_ns_deploy"):
1804 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1805 else:
1806 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001807
1808 # read from db: ns
tiernoe876f672020-02-13 14:34:48 +00001809 stage[1] = "Getting nsr={} from db".format(nsr_id)
tierno59d22d22018-09-25 18:10:19 +02001810 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernod732fb82020-05-21 13:18:23 +00001811 stage[1] = "Getting nsd={} from db".format(db_nsr["nsd-id"])
1812 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
1813 db_nsr["nsd"] = nsd
tiernod8323042019-08-09 11:32:23 +00001814 # nsr_name = db_nsr["name"] # TODO short-name??
tierno47e86b52018-10-10 14:05:55 +02001815
quilesj7e13aeb2019-10-08 13:34:55 +02001816 # read from db: vnf's of this ns
tiernoe876f672020-02-13 14:34:48 +00001817 stage[1] = "Getting vnfrs from db"
1818 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001819 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
tierno27246d82018-09-27 15:59:09 +02001820
quilesj7e13aeb2019-10-08 13:34:55 +02001821 # read from db: vnfd's for every vnf
1822 db_vnfds_ref = {} # every vnfd data indexed by vnf name
1823 db_vnfds = {} # every vnfd data indexed by vnf id
1824 db_vnfds_index = {} # every vnfd data indexed by vnf member-index
1825
1826 # for each vnf in ns, read vnfd
tierno27246d82018-09-27 15:59:09 +02001827 for vnfr in db_vnfrs_list:
quilesj7e13aeb2019-10-08 13:34:55 +02001828 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr # vnf's dict indexed by member-index: '1', '2', etc
1829 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
1830 vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
1831 # if we haven't this vnfd, read it from db
tierno27246d82018-09-27 15:59:09 +02001832 if vnfd_id not in db_vnfds:
quilesj63f90042020-01-17 09:53:55 +00001833 # read from db
tiernoe876f672020-02-13 14:34:48 +00001834 stage[1] = "Getting vnfd={} id='{}' from db".format(vnfd_id, vnfd_ref)
1835 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001836 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
tierno27246d82018-09-27 15:59:09 +02001837
quilesj7e13aeb2019-10-08 13:34:55 +02001838 # store vnfd
1839 db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
1840 db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
1841 db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id] # vnfd's indexed by member-index
1842
1843 # Get or generates the _admin.deployed.VCA list
tiernoe4f7e6c2018-11-27 14:55:30 +00001844 vca_deployed_list = None
1845 if db_nsr["_admin"].get("deployed"):
1846 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
1847 if vca_deployed_list is None:
1848 vca_deployed_list = []
quilesj3655ae02019-12-12 16:08:35 +00001849 configuration_status_list = []
tiernoe4f7e6c2018-11-27 14:55:30 +00001850 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
quilesj3655ae02019-12-12 16:08:35 +00001851 db_nsr_update["configurationStatus"] = configuration_status_list
quilesj7e13aeb2019-10-08 13:34:55 +02001852 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00001853 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00001854 elif isinstance(vca_deployed_list, dict):
1855 # maintain backward compatibility. Change a dict to list at database
1856 vca_deployed_list = list(vca_deployed_list.values())
1857 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00001858 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00001859
tierno6cf25f52019-09-12 09:33:40 +00001860 if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
tiernoa009e552019-01-30 16:45:44 +00001861 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
1862 db_nsr_update["_admin.deployed.RO.vnfd"] = []
tierno59d22d22018-09-25 18:10:19 +02001863
tiernobaa51102018-12-14 13:16:18 +00001864 # set state to INSTANTIATED. When instantiated NBI will not delete directly
1865 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1866 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001867
1868 # n2vc_redesign STEP 2 Deploy Network Scenario
tiernoe876f672020-02-13 14:34:48 +00001869 stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
quilesj3655ae02019-12-12 16:08:35 +00001870 self._write_op_status(
1871 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00001872 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00001873 )
1874
tiernoe876f672020-02-13 14:34:48 +00001875 stage[1] = "Deploying KDUs,"
1876 # self.logger.debug(logging_text + "Before deploy_kdus")
calvinosanch9f9c6f22019-11-04 13:37:39 +01001877 # Call to deploy_kdus in case exists the "vdu:kdu" param
tiernoe876f672020-02-13 14:34:48 +00001878 await self.deploy_kdus(
1879 logging_text=logging_text,
1880 nsr_id=nsr_id,
1881 nslcmop_id=nslcmop_id,
1882 db_vnfrs=db_vnfrs,
1883 db_vnfds=db_vnfds,
1884 task_instantiation_info=tasks_dict_info,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001885 )
tiernoe876f672020-02-13 14:34:48 +00001886
1887 stage[1] = "Getting VCA public key."
tiernod8323042019-08-09 11:32:23 +00001888 # n2vc_redesign STEP 1 Get VCA public ssh-key
1889 # feature 1429. Add n2vc public key to needed VMs
tierno3bedc9b2019-11-27 15:46:57 +00001890 n2vc_key = self.n2vc.get_public_key()
tiernoa5088192019-11-26 16:12:53 +00001891 n2vc_key_list = [n2vc_key]
1892 if self.vca_config.get("public_key"):
1893 n2vc_key_list.append(self.vca_config["public_key"])
tierno98ad6ea2019-05-30 17:16:28 +00001894
tiernoe876f672020-02-13 14:34:48 +00001895 stage[1] = "Deploying NS at VIM."
tiernod8323042019-08-09 11:32:23 +00001896 task_ro = asyncio.ensure_future(
quilesj7e13aeb2019-10-08 13:34:55 +02001897 self.instantiate_RO(
1898 logging_text=logging_text,
1899 nsr_id=nsr_id,
1900 nsd=nsd,
1901 db_nsr=db_nsr,
1902 db_nslcmop=db_nslcmop,
1903 db_vnfrs=db_vnfrs,
1904 db_vnfds_ref=db_vnfds_ref,
tiernoe876f672020-02-13 14:34:48 +00001905 n2vc_key_list=n2vc_key_list,
1906 stage=stage
tierno98ad6ea2019-05-30 17:16:28 +00001907 )
tiernod8323042019-08-09 11:32:23 +00001908 )
1909 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
tiernoa2143262020-03-27 16:20:40 +00001910 tasks_dict_info[task_ro] = "Deploying at VIM"
tierno98ad6ea2019-05-30 17:16:28 +00001911
tiernod8323042019-08-09 11:32:23 +00001912 # n2vc_redesign STEP 3 to 6 Deploy N2VC
tiernoe876f672020-02-13 14:34:48 +00001913 stage[1] = "Deploying Execution Environments."
1914 self.logger.debug(logging_text + stage[1])
tierno98ad6ea2019-05-30 17:16:28 +00001915
tiernod8323042019-08-09 11:32:23 +00001916 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
quilesj7e13aeb2019-10-08 13:34:55 +02001917 # get_iterable() returns a value from a dict or empty tuple if key does not exist
tierno98ad6ea2019-05-30 17:16:28 +00001918 for c_vnf in get_iterable(nsd, "constituent-vnfd"):
1919 vnfd_id = c_vnf["vnfd-id-ref"]
tierno98ad6ea2019-05-30 17:16:28 +00001920 vnfd = db_vnfds_ref[vnfd_id]
tiernod8323042019-08-09 11:32:23 +00001921 member_vnf_index = str(c_vnf["member-vnf-index"])
1922 db_vnfr = db_vnfrs[member_vnf_index]
1923 base_folder = vnfd["_admin"]["storage"]
1924 vdu_id = None
1925 vdu_index = 0
tierno98ad6ea2019-05-30 17:16:28 +00001926 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01001927 kdu_name = None
tierno59d22d22018-09-25 18:10:19 +02001928
tierno8a518872018-12-21 13:42:14 +00001929 # Get additional parameters
tiernod8323042019-08-09 11:32:23 +00001930 deploy_params = {}
1931 if db_vnfr.get("additionalParamsForVnf"):
tierno626e0152019-11-29 14:16:16 +00001932 deploy_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy())
tierno8a518872018-12-21 13:42:14 +00001933
tiernod8323042019-08-09 11:32:23 +00001934 descriptor_config = vnfd.get("vnf-configuration")
1935 if descriptor_config and descriptor_config.get("juju"):
quilesj7e13aeb2019-10-08 13:34:55 +02001936 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00001937 logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
quilesj7e13aeb2019-10-08 13:34:55 +02001938 db_nsr=db_nsr,
1939 db_vnfr=db_vnfr,
1940 nslcmop_id=nslcmop_id,
1941 nsr_id=nsr_id,
1942 nsi_id=nsi_id,
1943 vnfd_id=vnfd_id,
1944 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001945 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02001946 member_vnf_index=member_vnf_index,
1947 vdu_index=vdu_index,
1948 vdu_name=vdu_name,
1949 deploy_params=deploy_params,
1950 descriptor_config=descriptor_config,
1951 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00001952 task_instantiation_info=tasks_dict_info,
1953 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02001954 )
tierno59d22d22018-09-25 18:10:19 +02001955
1956 # Deploy charms for each VDU that supports one.
tiernod8323042019-08-09 11:32:23 +00001957 for vdud in get_iterable(vnfd, 'vdu'):
1958 vdu_id = vdud["id"]
1959 descriptor_config = vdud.get('vdu-configuration')
tierno626e0152019-11-29 14:16:16 +00001960 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
1961 if vdur.get("additionalParams"):
1962 deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
1963 else:
1964 deploy_params_vdu = deploy_params
tiernod8323042019-08-09 11:32:23 +00001965 if descriptor_config and descriptor_config.get("juju"):
1966 # look for vdu index in the db_vnfr["vdu"] section
1967 # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
1968 # if vdur["vdu-id-ref"] == vdu_id:
1969 # break
1970 # else:
1971 # raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
1972 # "member_vnf_index={}".format(vdu_id, member_vnf_index))
1973 # vdu_name = vdur.get("name")
1974 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01001975 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00001976 for vdu_index in range(int(vdud.get("count", 1))):
1977 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
quilesj7e13aeb2019-10-08 13:34:55 +02001978 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00001979 logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
1980 member_vnf_index, vdu_id, vdu_index),
quilesj7e13aeb2019-10-08 13:34:55 +02001981 db_nsr=db_nsr,
1982 db_vnfr=db_vnfr,
1983 nslcmop_id=nslcmop_id,
1984 nsr_id=nsr_id,
1985 nsi_id=nsi_id,
1986 vnfd_id=vnfd_id,
1987 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001988 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02001989 member_vnf_index=member_vnf_index,
1990 vdu_index=vdu_index,
1991 vdu_name=vdu_name,
tierno626e0152019-11-29 14:16:16 +00001992 deploy_params=deploy_params_vdu,
quilesj7e13aeb2019-10-08 13:34:55 +02001993 descriptor_config=descriptor_config,
1994 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00001995 task_instantiation_info=tasks_dict_info,
1996 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02001997 )
calvinosanch9f9c6f22019-11-04 13:37:39 +01001998 for kdud in get_iterable(vnfd, 'kdu'):
1999 kdu_name = kdud["name"]
2000 descriptor_config = kdud.get('kdu-configuration')
2001 if descriptor_config and descriptor_config.get("juju"):
2002 vdu_id = None
2003 vdu_index = 0
2004 vdu_name = None
2005 # look for vdu index in the db_vnfr["vdu"] section
2006 # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
2007 # if vdur["vdu-id-ref"] == vdu_id:
2008 # break
2009 # else:
2010 # raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
2011 # "member_vnf_index={}".format(vdu_id, member_vnf_index))
2012 # vdu_name = vdur.get("name")
2013 # vdu_name = None
tierno59d22d22018-09-25 18:10:19 +02002014
calvinosanch9f9c6f22019-11-04 13:37:39 +01002015 self._deploy_n2vc(
2016 logging_text=logging_text,
2017 db_nsr=db_nsr,
2018 db_vnfr=db_vnfr,
2019 nslcmop_id=nslcmop_id,
2020 nsr_id=nsr_id,
2021 nsi_id=nsi_id,
2022 vnfd_id=vnfd_id,
2023 vdu_id=vdu_id,
2024 kdu_name=kdu_name,
2025 member_vnf_index=member_vnf_index,
2026 vdu_index=vdu_index,
2027 vdu_name=vdu_name,
2028 deploy_params=deploy_params,
2029 descriptor_config=descriptor_config,
2030 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002031 task_instantiation_info=tasks_dict_info,
2032 stage=stage
calvinosanch9f9c6f22019-11-04 13:37:39 +01002033 )
tierno59d22d22018-09-25 18:10:19 +02002034
tierno1b633412019-02-25 16:48:23 +00002035 # Check if this NS has a charm configuration
tiernod8323042019-08-09 11:32:23 +00002036 descriptor_config = nsd.get("ns-configuration")
2037 if descriptor_config and descriptor_config.get("juju"):
2038 vnfd_id = None
2039 db_vnfr = None
2040 member_vnf_index = None
2041 vdu_id = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01002042 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00002043 vdu_index = 0
2044 vdu_name = None
tierno1b633412019-02-25 16:48:23 +00002045
tiernod8323042019-08-09 11:32:23 +00002046 # Get additional parameters
2047 deploy_params = {}
2048 if db_nsr.get("additionalParamsForNs"):
tierno626e0152019-11-29 14:16:16 +00002049 deploy_params = self._format_additional_params(db_nsr["additionalParamsForNs"].copy())
tiernod8323042019-08-09 11:32:23 +00002050 base_folder = nsd["_admin"]["storage"]
quilesj7e13aeb2019-10-08 13:34:55 +02002051 self._deploy_n2vc(
2052 logging_text=logging_text,
2053 db_nsr=db_nsr,
2054 db_vnfr=db_vnfr,
2055 nslcmop_id=nslcmop_id,
2056 nsr_id=nsr_id,
2057 nsi_id=nsi_id,
2058 vnfd_id=vnfd_id,
2059 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002060 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002061 member_vnf_index=member_vnf_index,
2062 vdu_index=vdu_index,
2063 vdu_name=vdu_name,
2064 deploy_params=deploy_params,
2065 descriptor_config=descriptor_config,
2066 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00002067 task_instantiation_info=tasks_dict_info,
2068 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002069 )
tierno1b633412019-02-25 16:48:23 +00002070
tiernoe876f672020-02-13 14:34:48 +00002071 # rest of staff will be done at finally
tierno1b633412019-02-25 16:48:23 +00002072
tiernoe876f672020-02-13 14:34:48 +00002073 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
2074 self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
tierno59d22d22018-09-25 18:10:19 +02002075 exc = e
2076 except asyncio.CancelledError:
tiernoe876f672020-02-13 14:34:48 +00002077 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
tierno59d22d22018-09-25 18:10:19 +02002078 exc = "Operation was cancelled"
2079 except Exception as e:
2080 exc = traceback.format_exc()
tiernoe876f672020-02-13 14:34:48 +00002081 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
tierno59d22d22018-09-25 18:10:19 +02002082 finally:
2083 if exc:
tiernoe876f672020-02-13 14:34:48 +00002084 error_list.append(str(exc))
tiernobaa51102018-12-14 13:16:18 +00002085 try:
tiernoe876f672020-02-13 14:34:48 +00002086 # wait for pending tasks
2087 if tasks_dict_info:
2088 stage[1] = "Waiting for instantiate pending tasks."
2089 self.logger.debug(logging_text + stage[1])
2090 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
2091 stage, nslcmop_id, nsr_id=nsr_id)
2092 stage[1] = stage[2] = ""
2093 except asyncio.CancelledError:
2094 error_list.append("Cancelled")
2095 # TODO cancel all tasks
2096 except Exception as exc:
2097 error_list.append(str(exc))
quilesj4cda56b2019-12-05 10:02:20 +00002098
tiernoe876f672020-02-13 14:34:48 +00002099 # update operation-status
2100 db_nsr_update["operational-status"] = "running"
2101 # let's begin with VCA 'configured' status (later we can change it)
2102 db_nsr_update["config-status"] = "configured"
2103 for task, task_name in tasks_dict_info.items():
2104 if not task.done() or task.cancelled() or task.exception():
2105 if task_name.startswith(self.task_name_deploy_vca):
2106 # A N2VC task is pending
2107 db_nsr_update["config-status"] = "failed"
quilesj4cda56b2019-12-05 10:02:20 +00002108 else:
tiernoe876f672020-02-13 14:34:48 +00002109 # RO or KDU task is pending
2110 db_nsr_update["operational-status"] = "failed"
quilesj3655ae02019-12-12 16:08:35 +00002111
tiernoe876f672020-02-13 14:34:48 +00002112 # update status at database
2113 if error_list:
tiernoa2143262020-03-27 16:20:40 +00002114 error_detail = ". ".join(error_list)
tiernoe876f672020-02-13 14:34:48 +00002115 self.logger.error(logging_text + error_detail)
tiernoa2143262020-03-27 16:20:40 +00002116 error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
2117 error_description_nsr = 'Operation: INSTANTIATING.{}, Stage {}'.format(nslcmop_id, stage[0])
quilesj3655ae02019-12-12 16:08:35 +00002118
tiernoa2143262020-03-27 16:20:40 +00002119 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00002120 db_nslcmop_update["detailed-status"] = error_detail
2121 nslcmop_operation_state = "FAILED"
2122 ns_state = "BROKEN"
2123 else:
tiernoa2143262020-03-27 16:20:40 +00002124 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00002125 error_description_nsr = error_description_nslcmop = None
2126 ns_state = "READY"
2127 db_nsr_update["detailed-status"] = "Done"
2128 db_nslcmop_update["detailed-status"] = "Done"
2129 nslcmop_operation_state = "COMPLETED"
quilesj4cda56b2019-12-05 10:02:20 +00002130
tiernoe876f672020-02-13 14:34:48 +00002131 if db_nsr:
2132 self._write_ns_status(
2133 nsr_id=nsr_id,
2134 ns_state=ns_state,
2135 current_operation="IDLE",
2136 current_operation_id=None,
2137 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00002138 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00002139 other_update=db_nsr_update
2140 )
tiernoa17d4f42020-04-28 09:59:23 +00002141 self._write_op_status(
2142 op_id=nslcmop_id,
2143 stage="",
2144 error_message=error_description_nslcmop,
2145 operation_state=nslcmop_operation_state,
2146 other_update=db_nslcmop_update,
2147 )
quilesj3655ae02019-12-12 16:08:35 +00002148
tierno59d22d22018-09-25 18:10:19 +02002149 if nslcmop_operation_state:
2150 try:
2151 await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00002152 "operationState": nslcmop_operation_state},
2153 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02002154 except Exception as e:
2155 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
2156
2157 self.logger.debug(logging_text + "Exit")
2158 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2159
quilesj63f90042020-01-17 09:53:55 +00002160 async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int, timeout: int = 3600) -> bool:
2161
2162 # steps:
2163 # 1. find all relations for this VCA
2164 # 2. wait for other peers related
2165 # 3. add relations
2166
2167 try:
2168
2169 # STEP 1: find all relations for this VCA
2170
2171 # read nsr record
2172 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
David Garcia171f3542020-05-21 16:41:07 +02002173 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
quilesj63f90042020-01-17 09:53:55 +00002174
2175 # this VCA data
2176 my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]
2177
2178 # read all ns-configuration relations
2179 ns_relations = list()
David Garcia171f3542020-05-21 16:41:07 +02002180 db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
quilesj63f90042020-01-17 09:53:55 +00002181 if db_ns_relations:
2182 for r in db_ns_relations:
2183 # check if this VCA is in the relation
2184 if my_vca.get('member-vnf-index') in\
2185 (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2186 ns_relations.append(r)
2187
2188 # read all vnf-configuration relations
2189 vnf_relations = list()
2190 db_vnfd_list = db_nsr.get('vnfd-id')
2191 if db_vnfd_list:
2192 for vnfd in db_vnfd_list:
2193 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
2194 db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
2195 if db_vnf_relations:
2196 for r in db_vnf_relations:
2197 # check if this VCA is in the relation
2198 if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2199 vnf_relations.append(r)
2200
2201 # if no relations, terminate
2202 if not ns_relations and not vnf_relations:
2203 self.logger.debug(logging_text + ' No relations')
2204 return True
2205
2206 self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))
2207
2208 # add all relations
2209 start = time()
2210 while True:
2211 # check timeout
2212 now = time()
2213 if now - start >= timeout:
2214 self.logger.error(logging_text + ' : timeout adding relations')
2215 return False
2216
2217 # reload nsr from database (we need to update record: _admin.deloyed.VCA)
2218 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2219
2220 # for each defined NS relation, find the VCA's related
2221 for r in ns_relations:
2222 from_vca_ee_id = None
2223 to_vca_ee_id = None
2224 from_vca_endpoint = None
2225 to_vca_endpoint = None
2226 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2227 for vca in vca_list:
2228 if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
2229 and vca.get('config_sw_installed'):
2230 from_vca_ee_id = vca.get('ee_id')
2231 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2232 if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
2233 and vca.get('config_sw_installed'):
2234 to_vca_ee_id = vca.get('ee_id')
2235 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2236 if from_vca_ee_id and to_vca_ee_id:
2237 # add relation
2238 await self.n2vc.add_relation(
2239 ee_id_1=from_vca_ee_id,
2240 ee_id_2=to_vca_ee_id,
2241 endpoint_1=from_vca_endpoint,
2242 endpoint_2=to_vca_endpoint)
2243 # remove entry from relations list
2244 ns_relations.remove(r)
2245 else:
2246 # check failed peers
2247 try:
2248 vca_status_list = db_nsr.get('configurationStatus')
2249 if vca_status_list:
2250 for i in range(len(vca_list)):
2251 vca = vca_list[i]
2252 vca_status = vca_status_list[i]
2253 if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
2254 if vca_status.get('status') == 'BROKEN':
2255 # peer broken: remove relation from list
2256 ns_relations.remove(r)
2257 if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
2258 if vca_status.get('status') == 'BROKEN':
2259 # peer broken: remove relation from list
2260 ns_relations.remove(r)
2261 except Exception:
2262 # ignore
2263 pass
2264
2265 # for each defined VNF relation, find the VCA's related
2266 for r in vnf_relations:
2267 from_vca_ee_id = None
2268 to_vca_ee_id = None
2269 from_vca_endpoint = None
2270 to_vca_endpoint = None
2271 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2272 for vca in vca_list:
2273 if vca.get('vdu_id') == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
2274 from_vca_ee_id = vca.get('ee_id')
2275 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2276 if vca.get('vdu_id') == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
2277 to_vca_ee_id = vca.get('ee_id')
2278 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2279 if from_vca_ee_id and to_vca_ee_id:
2280 # add relation
2281 await self.n2vc.add_relation(
2282 ee_id_1=from_vca_ee_id,
2283 ee_id_2=to_vca_ee_id,
2284 endpoint_1=from_vca_endpoint,
2285 endpoint_2=to_vca_endpoint)
2286 # remove entry from relations list
2287 vnf_relations.remove(r)
2288 else:
2289 # check failed peers
2290 try:
2291 vca_status_list = db_nsr.get('configurationStatus')
2292 if vca_status_list:
2293 for i in range(len(vca_list)):
2294 vca = vca_list[i]
2295 vca_status = vca_status_list[i]
2296 if vca.get('vdu_id') == r.get('entities')[0].get('id'):
2297 if vca_status.get('status') == 'BROKEN':
2298 # peer broken: remove relation from list
2299 ns_relations.remove(r)
2300 if vca.get('vdu_id') == r.get('entities')[1].get('id'):
2301 if vca_status.get('status') == 'BROKEN':
2302 # peer broken: remove relation from list
2303 ns_relations.remove(r)
2304 except Exception:
2305 # ignore
2306 pass
2307
2308 # wait for next try
2309 await asyncio.sleep(5.0)
2310
2311 if not ns_relations and not vnf_relations:
2312 self.logger.debug('Relations added')
2313 break
2314
2315 return True
2316
2317 except Exception as e:
2318 self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
2319 return False
2320
tiernob9018152020-04-16 14:18:24 +00002321 def _write_db_callback(self, task, item, _id, on_done=None, on_exc=None):
2322 """
2323 callback for kdu install intended to store the returned kdu_instance at database
2324 :return: None
2325 """
2326 db_update = {}
2327 try:
2328 result = task.result()
2329 if on_done:
2330 db_update[on_done] = str(result)
2331 except Exception as e:
2332 if on_exc:
2333 db_update[on_exc] = str(e)
2334 if db_update:
2335 try:
2336 self.update_db_2(item, _id, db_update)
2337 except Exception:
2338 pass
2339
tiernoe876f672020-02-13 14:34:48 +00002340 async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
calvinosanch9f9c6f22019-11-04 13:37:39 +01002341 # Launch kdus if present in the descriptor
tierno626e0152019-11-29 14:16:16 +00002342
2343 k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}}
2344
2345 def _get_cluster_id(cluster_id, cluster_type):
2346 nonlocal k8scluster_id_2_uuic
2347 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
2348 return k8scluster_id_2_uuic[cluster_type][cluster_id]
2349
2350 db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
2351 if not db_k8scluster:
2352 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
2353 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
2354 if not k8s_id:
2355 raise LcmException("K8s cluster '{}' has not been initilized for '{}'".format(cluster_id, cluster_type))
2356 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
2357 return k8s_id
2358
2359 logging_text += "Deploy kdus: "
tiernoe876f672020-02-13 14:34:48 +00002360 step = ""
calvinosanch9f9c6f22019-11-04 13:37:39 +01002361 try:
tierno626e0152019-11-29 14:16:16 +00002362 db_nsr_update = {"_admin.deployed.K8s": []}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002363 self.update_db_2("nsrs", nsr_id, db_nsr_update)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002364
tierno626e0152019-11-29 14:16:16 +00002365 index = 0
tiernoe876f672020-02-13 14:34:48 +00002366 updated_cluster_list = []
2367
tierno626e0152019-11-29 14:16:16 +00002368 for vnfr_data in db_vnfrs.values():
2369 for kdur in get_iterable(vnfr_data, "kdur"):
2370 desc_params = self._format_additional_params(kdur.get("additionalParams"))
quilesjacde94f2020-01-23 10:07:08 +00002371 vnfd_id = vnfr_data.get('vnfd-id')
tiernode1584f2020-04-07 09:07:33 +00002372 namespace = kdur.get("k8s-namespace")
tierno626e0152019-11-29 14:16:16 +00002373 if kdur.get("helm-chart"):
2374 kdumodel = kdur["helm-chart"]
tiernoe876f672020-02-13 14:34:48 +00002375 k8sclustertype = "helm-chart"
tierno626e0152019-11-29 14:16:16 +00002376 elif kdur.get("juju-bundle"):
2377 kdumodel = kdur["juju-bundle"]
tiernoe876f672020-02-13 14:34:48 +00002378 k8sclustertype = "juju-bundle"
tierno626e0152019-11-29 14:16:16 +00002379 else:
tiernoe876f672020-02-13 14:34:48 +00002380 raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
2381 "juju-bundle. Maybe an old NBI version is running".
2382 format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
quilesjacde94f2020-01-23 10:07:08 +00002383 # check if kdumodel is a file and exists
2384 try:
tierno51183952020-04-03 15:48:18 +00002385 storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
2386 if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts
2387 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
Dominik Fleischmann010c0e72020-05-18 15:19:11 +02002388 filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
tierno51183952020-04-03 15:48:18 +00002389 kdumodel)
2390 if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
2391 kdumodel = self.fs.path + filename
2392 except (asyncio.TimeoutError, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00002393 raise
2394 except Exception: # it is not a file
quilesjacde94f2020-01-23 10:07:08 +00002395 pass
lloretgallegedc5f332020-02-20 11:50:50 +01002396
tiernoe876f672020-02-13 14:34:48 +00002397 k8s_cluster_id = kdur["k8s-cluster"]["id"]
2398 step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
2399 cluster_uuid = _get_cluster_id(k8s_cluster_id, k8sclustertype)
lloretgallegedc5f332020-02-20 11:50:50 +01002400
tiernoe876f672020-02-13 14:34:48 +00002401 if k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list:
2402 del_repo_list, added_repo_dict = await asyncio.ensure_future(
2403 self.k8sclusterhelm.synchronize_repos(cluster_uuid=cluster_uuid))
2404 if del_repo_list or added_repo_dict:
2405 unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
2406 updated = {'_admin.helm_charts_added.' +
2407 item: name for item, name in added_repo_dict.items()}
2408 self.logger.debug(logging_text + "repos synchronized on k8s cluster '{}' to_delete: {}, "
2409 "to_add: {}".format(k8s_cluster_id, del_repo_list,
2410 added_repo_dict))
2411 self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
2412 updated_cluster_list.append(cluster_uuid)
lloretgallegedc5f332020-02-20 11:50:50 +01002413
tiernoe876f672020-02-13 14:34:48 +00002414 step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
2415 kdur["kdu-name"], k8s_cluster_id)
tierno626e0152019-11-29 14:16:16 +00002416
tierno067e04a2020-03-31 12:53:13 +00002417 k8s_instace_info = {"kdu-instance": None,
2418 "k8scluster-uuid": cluster_uuid,
tierno626e0152019-11-29 14:16:16 +00002419 "k8scluster-type": k8sclustertype,
tierno067e04a2020-03-31 12:53:13 +00002420 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
2421 "kdu-name": kdur["kdu-name"],
tiernode1584f2020-04-07 09:07:33 +00002422 "kdu-model": kdumodel,
2423 "namespace": namespace}
tiernob9018152020-04-16 14:18:24 +00002424 db_path = "_admin.deployed.K8s.{}".format(index)
2425 db_nsr_update[db_path] = k8s_instace_info
tierno626e0152019-11-29 14:16:16 +00002426 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tierno626e0152019-11-29 14:16:16 +00002427
tiernoe876f672020-02-13 14:34:48 +00002428 db_dict = {"collection": "nsrs",
2429 "filter": {"_id": nsr_id},
tiernob9018152020-04-16 14:18:24 +00002430 "path": db_path}
lloretgallegedc5f332020-02-20 11:50:50 +01002431
tiernoa2143262020-03-27 16:20:40 +00002432 task = asyncio.ensure_future(
2433 self.k8scluster_map[k8sclustertype].install(cluster_uuid=cluster_uuid, kdu_model=kdumodel,
2434 atomic=True, params=desc_params,
2435 db_dict=db_dict, timeout=600,
tiernode1584f2020-04-07 09:07:33 +00002436 kdu_name=kdur["kdu-name"], namespace=namespace))
Adam Israelbaacc302019-12-01 12:41:39 -05002437
tiernob9018152020-04-16 14:18:24 +00002438 task.add_done_callback(partial(self._write_db_callback, item="nsrs", _id=nsr_id,
2439 on_done=db_path + ".kdu-instance",
2440 on_exc=db_path + ".detailed-status"))
tiernoe876f672020-02-13 14:34:48 +00002441 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
tiernoa2143262020-03-27 16:20:40 +00002442 task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
tiernoe876f672020-02-13 14:34:48 +00002443
tierno626e0152019-11-29 14:16:16 +00002444 index += 1
quilesjdd799ac2020-01-23 16:31:11 +00002445
tiernoe876f672020-02-13 14:34:48 +00002446 except (LcmException, asyncio.CancelledError):
2447 raise
calvinosanch9f9c6f22019-11-04 13:37:39 +01002448 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00002449 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
2450 if isinstance(e, (N2VCException, DbException)):
2451 self.logger.error(logging_text + msg)
2452 else:
2453 self.logger.critical(logging_text + msg, exc_info=True)
quilesjdd799ac2020-01-23 16:31:11 +00002454 raise LcmException(msg)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002455 finally:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002456 if db_nsr_update:
2457 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00002458
quilesj7e13aeb2019-10-08 13:34:55 +02002459 def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002460 kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
tiernoe876f672020-02-13 14:34:48 +00002461 base_folder, task_instantiation_info, stage):
quilesj7e13aeb2019-10-08 13:34:55 +02002462 # launch instantiate_N2VC in a asyncio task and register task object
2463 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
2464 # if not found, create one entry and update database
tiernobaa51102018-12-14 13:16:18 +00002465
quilesj7e13aeb2019-10-08 13:34:55 +02002466 # fill db_nsr._admin.deployed.VCA.<index>
2467 vca_index = -1
2468 for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
2469 if not vca_deployed:
2470 continue
2471 if vca_deployed.get("member-vnf-index") == member_vnf_index and \
2472 vca_deployed.get("vdu_id") == vdu_id and \
calvinosanch9f9c6f22019-11-04 13:37:39 +01002473 vca_deployed.get("kdu_name") == kdu_name and \
quilesj7e13aeb2019-10-08 13:34:55 +02002474 vca_deployed.get("vdu_count_index", 0) == vdu_index:
2475 break
2476 else:
2477 # not found, create one.
2478 vca_deployed = {
2479 "member-vnf-index": member_vnf_index,
2480 "vdu_id": vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002481 "kdu_name": kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002482 "vdu_count_index": vdu_index,
2483 "operational-status": "init", # TODO revise
2484 "detailed-status": "", # TODO revise
2485 "step": "initial-deploy", # TODO revise
2486 "vnfd_id": vnfd_id,
2487 "vdu_name": vdu_name,
2488 }
2489 vca_index += 1
quilesj3655ae02019-12-12 16:08:35 +00002490
2491 # create VCA and configurationStatus in db
2492 db_dict = {
2493 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
2494 "configurationStatus.{}".format(vca_index): dict()
2495 }
2496 self.update_db_2("nsrs", nsr_id, db_dict)
2497
quilesj7e13aeb2019-10-08 13:34:55 +02002498 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
2499
2500 # Launch task
2501 task_n2vc = asyncio.ensure_future(
2502 self.instantiate_N2VC(
2503 logging_text=logging_text,
2504 vca_index=vca_index,
2505 nsi_id=nsi_id,
2506 db_nsr=db_nsr,
2507 db_vnfr=db_vnfr,
2508 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002509 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002510 vdu_index=vdu_index,
2511 deploy_params=deploy_params,
2512 config_descriptor=descriptor_config,
2513 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00002514 nslcmop_id=nslcmop_id,
2515 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002516 )
2517 )
2518 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
tiernoe876f672020-02-13 14:34:48 +00002519 task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
2520 member_vnf_index or "", vdu_id or "")
tiernobaa51102018-12-14 13:16:18 +00002521
kuuse0ca67472019-05-13 15:59:27 +02002522 # Check if this VNFD has a configured terminate action
2523 def _has_terminate_config_primitive(self, vnfd):
2524 vnf_config = vnfd.get("vnf-configuration")
2525 if vnf_config and vnf_config.get("terminate-config-primitive"):
2526 return True
2527 else:
2528 return False
2529
tiernoc9556972019-07-05 15:25:25 +00002530 @staticmethod
2531 def _get_terminate_config_primitive_seq_list(vnfd):
2532 """ Get a numerically sorted list of the sequences for this VNFD's terminate action """
kuuse0ca67472019-05-13 15:59:27 +02002533 # No need to check for existing primitive twice, already done before
2534 vnf_config = vnfd.get("vnf-configuration")
2535 seq_list = vnf_config.get("terminate-config-primitive")
2536 # Get all 'seq' tags in seq_list, order sequences numerically, ascending.
2537 seq_list_sorted = sorted(seq_list, key=lambda x: int(x['seq']))
2538 return seq_list_sorted
2539
2540 @staticmethod
2541 def _create_nslcmop(nsr_id, operation, params):
2542 """
2543 Creates a ns-lcm-opp content to be stored at database.
2544 :param nsr_id: internal id of the instance
2545 :param operation: instantiate, terminate, scale, action, ...
2546 :param params: user parameters for the operation
2547 :return: dictionary following SOL005 format
2548 """
2549 # Raise exception if invalid arguments
2550 if not (nsr_id and operation and params):
2551 raise LcmException(
2552 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
2553 now = time()
2554 _id = str(uuid4())
2555 nslcmop = {
2556 "id": _id,
2557 "_id": _id,
2558 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
2559 "operationState": "PROCESSING",
2560 "statusEnteredTime": now,
2561 "nsInstanceId": nsr_id,
2562 "lcmOperationType": operation,
2563 "startTime": now,
2564 "isAutomaticInvocation": False,
2565 "operationParams": params,
2566 "isCancelPending": False,
2567 "links": {
2568 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
2569 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
2570 }
2571 }
2572 return nslcmop
2573
calvinosanch9f9c6f22019-11-04 13:37:39 +01002574 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00002575 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002576 for key, value in params.items():
2577 if str(value).startswith("!!yaml "):
2578 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01002579 return params
2580
kuuse8b998e42019-07-30 15:22:16 +02002581 def _get_terminate_primitive_params(self, seq, vnf_index):
2582 primitive = seq.get('name')
2583 primitive_params = {}
2584 params = {
2585 "member_vnf_index": vnf_index,
2586 "primitive": primitive,
2587 "primitive_params": primitive_params,
2588 }
2589 desc_params = {}
2590 return self._map_primitive_params(seq, params, desc_params)
2591
kuuseac3a8882019-10-03 10:48:06 +02002592 # sub-operations
2593
tierno51183952020-04-03 15:48:18 +00002594 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
2595 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
2596 if op.get('operationState') == 'COMPLETED':
kuuseac3a8882019-10-03 10:48:06 +02002597 # b. Skip sub-operation
2598 # _ns_execute_primitive() or RO.create_action() will NOT be executed
2599 return self.SUBOPERATION_STATUS_SKIP
2600 else:
tierno7c4e24c2020-05-13 08:41:35 +00002601 # c. retry executing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02002602 # The sub-operation exists, and operationState != 'COMPLETED'
tierno7c4e24c2020-05-13 08:41:35 +00002603 # Update operationState = 'PROCESSING' to indicate a retry.
kuuseac3a8882019-10-03 10:48:06 +02002604 operationState = 'PROCESSING'
2605 detailed_status = 'In progress'
2606 self._update_suboperation_status(
2607 db_nslcmop, op_index, operationState, detailed_status)
2608 # Return the sub-operation index
2609 # _ns_execute_primitive() or RO.create_action() will be called from scale()
2610 # with arguments extracted from the sub-operation
2611 return op_index
2612
2613 # Find a sub-operation where all keys in a matching dictionary must match
2614 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
2615 def _find_suboperation(self, db_nslcmop, match):
tierno7c4e24c2020-05-13 08:41:35 +00002616 if db_nslcmop and match:
kuuseac3a8882019-10-03 10:48:06 +02002617 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
2618 for i, op in enumerate(op_list):
2619 if all(op.get(k) == match[k] for k in match):
2620 return i
2621 return self.SUBOPERATION_STATUS_NOT_FOUND
2622
2623 # Update status for a sub-operation given its index
2624 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
2625 # Update DB for HA tasks
2626 q_filter = {'_id': db_nslcmop['_id']}
2627 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
2628 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
2629 self.db.set_one("nslcmops",
2630 q_filter=q_filter,
2631 update_dict=update_dict,
2632 fail_on_empty=False)
2633
2634 # Add sub-operation, return the index of the added sub-operation
2635 # Optionally, set operationState, detailed-status, and operationType
2636 # Status and type are currently set for 'scale' sub-operations:
2637 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
2638 # 'detailed-status' : status message
2639 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
2640 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
quilesj7e13aeb2019-10-08 13:34:55 +02002641 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
2642 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
kuuseac3a8882019-10-03 10:48:06 +02002643 RO_nsr_id=None, RO_scaling_info=None):
tiernoe876f672020-02-13 14:34:48 +00002644 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02002645 return self.SUBOPERATION_STATUS_NOT_FOUND
2646 # Get the "_admin.operations" list, if it exists
2647 db_nslcmop_admin = db_nslcmop.get('_admin', {})
2648 op_list = db_nslcmop_admin.get('operations')
2649 # Create or append to the "_admin.operations" list
kuuse8b998e42019-07-30 15:22:16 +02002650 new_op = {'member_vnf_index': vnf_index,
2651 'vdu_id': vdu_id,
2652 'vdu_count_index': vdu_count_index,
2653 'primitive': primitive,
2654 'primitive_params': mapped_primitive_params}
kuuseac3a8882019-10-03 10:48:06 +02002655 if operationState:
2656 new_op['operationState'] = operationState
2657 if detailed_status:
2658 new_op['detailed-status'] = detailed_status
2659 if operationType:
2660 new_op['lcmOperationType'] = operationType
2661 if RO_nsr_id:
2662 new_op['RO_nsr_id'] = RO_nsr_id
2663 if RO_scaling_info:
2664 new_op['RO_scaling_info'] = RO_scaling_info
2665 if not op_list:
2666 # No existing operations, create key 'operations' with current operation as first list element
2667 db_nslcmop_admin.update({'operations': [new_op]})
2668 op_list = db_nslcmop_admin.get('operations')
2669 else:
2670 # Existing operations, append operation to list
2671 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02002672
kuuseac3a8882019-10-03 10:48:06 +02002673 db_nslcmop_update = {'_admin.operations': op_list}
2674 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
2675 op_index = len(op_list) - 1
2676 return op_index
2677
2678 # Helper methods for scale() sub-operations
2679
2680 # pre-scale/post-scale:
2681 # Check for 3 different cases:
2682 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
2683 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
tierno7c4e24c2020-05-13 08:41:35 +00002684 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
quilesj7e13aeb2019-10-08 13:34:55 +02002685 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
2686 operationType, RO_nsr_id=None, RO_scaling_info=None):
kuuseac3a8882019-10-03 10:48:06 +02002687 # Find this sub-operation
tierno7c4e24c2020-05-13 08:41:35 +00002688 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002689 operationType = 'SCALE-RO'
2690 match = {
2691 'member_vnf_index': vnf_index,
2692 'RO_nsr_id': RO_nsr_id,
2693 'RO_scaling_info': RO_scaling_info,
2694 }
2695 else:
2696 match = {
2697 'member_vnf_index': vnf_index,
2698 'primitive': vnf_config_primitive,
2699 'primitive_params': primitive_params,
2700 'lcmOperationType': operationType
2701 }
2702 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00002703 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02002704 # a. New sub-operation
2705 # The sub-operation does not exist, add it.
2706 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
2707 # The following parameters are set to None for all kind of scaling:
2708 vdu_id = None
2709 vdu_count_index = None
2710 vdu_name = None
tierno51183952020-04-03 15:48:18 +00002711 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002712 vnf_config_primitive = None
2713 primitive_params = None
2714 else:
2715 RO_nsr_id = None
2716 RO_scaling_info = None
2717 # Initial status for sub-operation
2718 operationState = 'PROCESSING'
2719 detailed_status = 'In progress'
2720 # Add sub-operation for pre/post-scaling (zero or more operations)
2721 self._add_suboperation(db_nslcmop,
2722 vnf_index,
2723 vdu_id,
2724 vdu_count_index,
2725 vdu_name,
2726 vnf_config_primitive,
2727 primitive_params,
2728 operationState,
2729 detailed_status,
2730 operationType,
2731 RO_nsr_id,
2732 RO_scaling_info)
2733 return self.SUBOPERATION_STATUS_NEW
2734 else:
2735 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
2736 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00002737 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02002738
preethika.pdf7d8e02019-12-10 13:10:48 +00002739 # Function to return execution_environment id
2740
2741 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00002742 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00002743 for vca in vca_deployed_list:
2744 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
2745 return vca["ee_id"]
2746
tiernoe876f672020-02-13 14:34:48 +00002747 async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor, vca_index, destroy_ee=True):
2748 """
2749 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
2750 :param logging_text:
2751 :param db_nslcmop:
2752 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
2753 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
2754 :param vca_index: index in the database _admin.deployed.VCA
2755 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
2756 :return: None or exception
2757 """
2758 # execute terminate_primitives
2759 terminate_primitives = config_descriptor.get("terminate-config-primitive")
2760 vdu_id = vca_deployed.get("vdu_id")
2761 vdu_count_index = vca_deployed.get("vdu_count_index")
2762 vdu_name = vca_deployed.get("vdu_name")
2763 vnf_index = vca_deployed.get("member-vnf-index")
2764 if terminate_primitives and vca_deployed.get("needed_terminate"):
2765 # Get all 'seq' tags in seq_list, order sequences numerically, ascending.
2766 terminate_primitives = sorted(terminate_primitives, key=lambda x: int(x['seq']))
2767 for seq in terminate_primitives:
kuuse8b998e42019-07-30 15:22:16 +02002768 # For each sequence in list, get primitive and call _ns_execute_primitive()
kuuse0ca67472019-05-13 15:59:27 +02002769 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
2770 vnf_index, seq.get("name"))
2771 self.logger.debug(logging_text + step)
kuuse8b998e42019-07-30 15:22:16 +02002772 # Create the primitive for each sequence, i.e. "primitive": "touch"
kuuse0ca67472019-05-13 15:59:27 +02002773 primitive = seq.get('name')
kuuse8b998e42019-07-30 15:22:16 +02002774 mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)
2775 # The following 3 parameters are currently set to None for 'terminate':
2776 # vdu_id, vdu_count_index, vdu_name
tiernoe876f672020-02-13 14:34:48 +00002777
kuuseac3a8882019-10-03 10:48:06 +02002778 # Add sub-operation
kuuse8b998e42019-07-30 15:22:16 +02002779 self._add_suboperation(db_nslcmop,
kuuse8b998e42019-07-30 15:22:16 +02002780 vnf_index,
2781 vdu_id,
2782 vdu_count_index,
2783 vdu_name,
2784 primitive,
2785 mapped_primitive_params)
kuuseac3a8882019-10-03 10:48:06 +02002786 # Sub-operations: Call _ns_execute_primitive() instead of action()
quilesj7e13aeb2019-10-08 13:34:55 +02002787 try:
tiernoe876f672020-02-13 14:34:48 +00002788 result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
2789 mapped_primitive_params)
2790 except LcmException:
2791 # this happens when VCA is not deployed. In this case it is not needed to terminate
2792 continue
2793 result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
2794 if result not in result_ok:
2795 raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
2796 "error {}".format(seq.get("name"), vnf_index, result_detail))
2797 # set that this VCA do not need terminated
2798 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
2799 self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})
2800
2801 if destroy_ee:
2802 await self.n2vc.delete_execution_environment(vca_deployed["ee_id"])
kuuse0ca67472019-05-13 15:59:27 +02002803
tierno51183952020-04-03 15:48:18 +00002804 async def _delete_all_N2VC(self, db_nsr: dict):
2805 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
2806 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00002807 try:
2808 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
2809 except N2VCNotFound: # already deleted. Skip
2810 pass
tierno51183952020-04-03 15:48:18 +00002811 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
quilesj3655ae02019-12-12 16:08:35 +00002812
tiernoe876f672020-02-13 14:34:48 +00002813 async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
2814 """
2815 Terminates a deployment from RO
2816 :param logging_text:
2817 :param nsr_deployed: db_nsr._admin.deployed
2818 :param nsr_id:
2819 :param nslcmop_id:
2820 :param stage: list of string with the content to write on db_nslcmop.detailed-status.
2821 this method will update only the index 2, but it will write on database the concatenated content of the list
2822 :return:
2823 """
2824 db_nsr_update = {}
2825 failed_detail = []
2826 ro_nsr_id = ro_delete_action = None
2827 if nsr_deployed and nsr_deployed.get("RO"):
2828 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
2829 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
2830 try:
2831 if ro_nsr_id:
2832 stage[2] = "Deleting ns from VIM."
2833 db_nsr_update["detailed-status"] = " ".join(stage)
2834 self._write_op_status(nslcmop_id, stage)
2835 self.logger.debug(logging_text + stage[2])
2836 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2837 self._write_op_status(nslcmop_id, stage)
2838 desc = await self.RO.delete("ns", ro_nsr_id)
2839 ro_delete_action = desc["action_id"]
2840 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
2841 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
2842 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2843 if ro_delete_action:
2844 # wait until NS is deleted from VIM
2845 stage[2] = "Waiting ns deleted from VIM."
2846 detailed_status_old = None
2847 self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
2848 ro_delete_action))
2849 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2850 self._write_op_status(nslcmop_id, stage)
kuused124bfe2019-06-18 12:09:24 +02002851
tiernoe876f672020-02-13 14:34:48 +00002852 delete_timeout = 20 * 60 # 20 minutes
2853 while delete_timeout > 0:
2854 desc = await self.RO.show(
2855 "ns",
2856 item_id_name=ro_nsr_id,
2857 extra_item="action",
2858 extra_item_id=ro_delete_action)
2859
2860 # deploymentStatus
2861 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
2862
2863 ns_status, ns_status_info = self.RO.check_action_status(desc)
2864 if ns_status == "ERROR":
2865 raise ROclient.ROClientException(ns_status_info)
2866 elif ns_status == "BUILD":
2867 stage[2] = "Deleting from VIM {}".format(ns_status_info)
2868 elif ns_status == "ACTIVE":
2869 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
2870 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2871 break
2872 else:
2873 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
2874 if stage[2] != detailed_status_old:
2875 detailed_status_old = stage[2]
2876 db_nsr_update["detailed-status"] = " ".join(stage)
2877 self._write_op_status(nslcmop_id, stage)
2878 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2879 await asyncio.sleep(5, loop=self.loop)
2880 delete_timeout -= 5
2881 else: # delete_timeout <= 0:
2882 raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")
2883
2884 except Exception as e:
2885 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2886 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2887 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
2888 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2889 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
2890 self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
2891 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
tiernoa2143262020-03-27 16:20:40 +00002892 failed_detail.append("delete conflict: {}".format(e))
2893 self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00002894 else:
tiernoa2143262020-03-27 16:20:40 +00002895 failed_detail.append("delete error: {}".format(e))
2896 self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00002897
2898 # Delete nsd
2899 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
2900 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
2901 try:
2902 stage[2] = "Deleting nsd from RO."
2903 db_nsr_update["detailed-status"] = " ".join(stage)
2904 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2905 self._write_op_status(nslcmop_id, stage)
2906 await self.RO.delete("nsd", ro_nsd_id)
2907 self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
2908 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
2909 except Exception as e:
2910 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2911 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
2912 self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
2913 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
2914 failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
2915 self.logger.debug(logging_text + failed_detail[-1])
2916 else:
2917 failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
2918 self.logger.error(logging_text + failed_detail[-1])
2919
2920 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
2921 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
2922 if not vnf_deployed or not vnf_deployed["id"]:
2923 continue
2924 try:
2925 ro_vnfd_id = vnf_deployed["id"]
2926 stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
2927 vnf_deployed["member-vnf-index"], ro_vnfd_id)
2928 db_nsr_update["detailed-status"] = " ".join(stage)
2929 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2930 self._write_op_status(nslcmop_id, stage)
2931 await self.RO.delete("vnfd", ro_vnfd_id)
2932 self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
2933 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
2934 except Exception as e:
2935 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2936 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
2937 self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
2938 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
2939 failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
2940 self.logger.debug(logging_text + failed_detail[-1])
2941 else:
2942 failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
2943 self.logger.error(logging_text + failed_detail[-1])
2944
tiernoa2143262020-03-27 16:20:40 +00002945 if failed_detail:
2946 stage[2] = "Error deleting from VIM"
2947 else:
2948 stage[2] = "Deleted from VIM"
tiernoe876f672020-02-13 14:34:48 +00002949 db_nsr_update["detailed-status"] = " ".join(stage)
2950 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2951 self._write_op_status(nslcmop_id, stage)
2952
2953 if failed_detail:
tiernoa2143262020-03-27 16:20:40 +00002954 raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00002955
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a NS instance: run terminate config primitives, delete the VCA
        execution environments and KDU instances, and remove the deployment from RO/VIM.
        :param nsr_id: ns instance id (nsrs collection)
        :param nslcmop_id: operation id (nslcmops collection)
        :return: None. Progress, result and errors are written to the database.
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []   # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human readable description
        db_nsr_update = {}
        stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                # user-provided timeout overrides the configured default
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update
            )
            self._write_op_status(
                op_id=nslcmop_id,
                queuePosition=0,
                stage=stage
            )
            # work on a copy so later db updates do not interfere with the iteration below
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was deployed; finally-block will still mark the op as done
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs: fetch each distinct VNFD once, index it by id and by member index
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            if not operation_params.get("skip_terminate_primitives"):
                stage[0] = "Stage 2/3 execute terminating primitives."
                stage[1] = "Looking execution environment that needs terminate."
                self.logger.debug(logging_text + stage[1])
                for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                    config_descriptor = None
                    if not vca or not vca.get("ee_id") or not vca.get("needed_terminate"):
                        continue
                    # pick the configuration descriptor matching the VCA scope (ns/vdu/kdu/vnf)
                    if not vca.get("member-vnf-index"):
                        # ns
                        config_descriptor = db_nsr.get("ns-configuration")
                    elif vca.get("vdu_id"):
                        db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                        vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
                        if vdud:
                            config_descriptor = vdud.get("vdu-configuration")
                    elif vca.get("kdu_name"):
                        db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                        kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
                        if kdud:
                            config_descriptor = kdud.get("kdu-configuration")
                    else:
                        config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
                    # destroy_ee=False: execution environments are removed all at once in stage 3
                    task = asyncio.ensure_future(self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor,
                                                                   vca_index, False))
                    tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(logging_text + 'Waiting for terminate primitive pending tasks...')
                error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
                                                        min(self.timeout_charm_delete, timeout_ns_terminate),
                                                        stage, nslcmop_id)
                if error_list:
                    return   # raise LcmException("; ".join(error_list))
                tasks_dict_info.clear()

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
                                                                        timeout=self.timeout_charm_delete))
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                # dispatch to the proper connector (helm/juju) by cluster type
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance))
                else:
                    self.logger.error(logging_text + "Unknown k8s deployment type {}".
                                      format(kdu.get("k8scluster-type")))
                    continue
                tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                # new-generation RO client
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
                                                             stage, nslcmop_id)
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                # NOTE(review): this 'exc' shadows the outer variable; harmless here
                # because the outer value was already appended to error_list above
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
                error_description_nsr = 'Operation: TERMINATING.{}, Stage {}.'.format(nslcmop_id, stage[0])

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify via kafka so NBI can autoremove the nsr if requested
                    await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                                 "operationState": nslcmop_operation_state,
                                                                 "autoremove": autoremove},
                                            loop=self.loop)
                except Exception as e:
                    self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
3176
tiernoe876f672020-02-13 14:34:48 +00003177 async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
3178 time_start = time()
tiernoa2143262020-03-27 16:20:40 +00003179 error_detail_list = []
tiernoe876f672020-02-13 14:34:48 +00003180 error_list = []
3181 pending_tasks = list(created_tasks_info.keys())
3182 num_tasks = len(pending_tasks)
3183 num_done = 0
3184 stage[1] = "{}/{}.".format(num_done, num_tasks)
3185 self._write_op_status(nslcmop_id, stage)
tiernoe876f672020-02-13 14:34:48 +00003186 while pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00003187 new_error = None
tiernoe876f672020-02-13 14:34:48 +00003188 _timeout = timeout + time_start - time()
3189 done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
3190 return_when=asyncio.FIRST_COMPLETED)
3191 num_done += len(done)
3192 if not done: # Timeout
3193 for task in pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00003194 new_error = created_tasks_info[task] + ": Timeout"
3195 error_detail_list.append(new_error)
3196 error_list.append(new_error)
tiernoe876f672020-02-13 14:34:48 +00003197 break
3198 for task in done:
3199 if task.cancelled():
tierno067e04a2020-03-31 12:53:13 +00003200 exc = "Cancelled"
tiernoe876f672020-02-13 14:34:48 +00003201 else:
3202 exc = task.exception()
tierno067e04a2020-03-31 12:53:13 +00003203 if exc:
3204 if isinstance(exc, asyncio.TimeoutError):
3205 exc = "Timeout"
3206 new_error = created_tasks_info[task] + ": {}".format(exc)
3207 error_list.append(created_tasks_info[task])
3208 error_detail_list.append(new_error)
tierno28c63da2020-04-20 16:28:56 +00003209 if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException,
3210 K8sException)):
tierno067e04a2020-03-31 12:53:13 +00003211 self.logger.error(logging_text + new_error)
tiernoe876f672020-02-13 14:34:48 +00003212 else:
tierno067e04a2020-03-31 12:53:13 +00003213 exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
3214 self.logger.error(logging_text + created_tasks_info[task] + exc_traceback)
3215 else:
3216 self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
tiernoe876f672020-02-13 14:34:48 +00003217 stage[1] = "{}/{}.".format(num_done, num_tasks)
3218 if new_error:
tiernoa2143262020-03-27 16:20:40 +00003219 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
tiernoe876f672020-02-13 14:34:48 +00003220 if nsr_id: # update also nsr
tiernoa2143262020-03-27 16:20:40 +00003221 self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
3222 "errorDetail": ". ".join(error_detail_list)})
tiernoe876f672020-02-13 14:34:48 +00003223 self._write_op_status(nslcmop_id, stage)
tiernoa2143262020-03-27 16:20:40 +00003224 return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00003225
tiernoda964822019-01-14 15:53:47 +00003226 @staticmethod
3227 def _map_primitive_params(primitive_desc, params, instantiation_params):
3228 """
3229 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
3230 The default-value is used. If it is between < > it look for a value at instantiation_params
3231 :param primitive_desc: portion of VNFD/NSD that describes primitive
3232 :param params: Params provided by user
3233 :param instantiation_params: Instantiation params provided by user
3234 :return: a dictionary with the calculated params
3235 """
3236 calculated_params = {}
3237 for parameter in primitive_desc.get("parameter", ()):
3238 param_name = parameter["name"]
3239 if param_name in params:
3240 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00003241 elif "default-value" in parameter or "value" in parameter:
3242 if "value" in parameter:
3243 calculated_params[param_name] = parameter["value"]
3244 else:
3245 calculated_params[param_name] = parameter["default-value"]
3246 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
3247 and calculated_params[param_name].endswith(">"):
3248 if calculated_params[param_name][1:-1] in instantiation_params:
3249 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
tiernoda964822019-01-14 15:53:47 +00003250 else:
3251 raise LcmException("Parameter {} needed to execute primitive {} not provided".
tiernod8323042019-08-09 11:32:23 +00003252 format(calculated_params[param_name], primitive_desc["name"]))
tiernoda964822019-01-14 15:53:47 +00003253 else:
3254 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3255 format(param_name, primitive_desc["name"]))
tierno59d22d22018-09-25 18:10:19 +02003256
tiernoda964822019-01-14 15:53:47 +00003257 if isinstance(calculated_params[param_name], (dict, list, tuple)):
3258 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], default_flow_style=True,
3259 width=256)
3260 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
3261 calculated_params[param_name] = calculated_params[param_name][7:]
tiernoc3f2a822019-11-05 13:45:04 +00003262
3263 # add always ns_config_info if primitive name is config
3264 if primitive_desc["name"] == "config":
3265 if "ns_config_info" in instantiation_params:
3266 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
tiernoda964822019-01-14 15:53:47 +00003267 return calculated_params
3268
tierno067e04a2020-03-31 12:53:13 +00003269 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None):
tiernoe876f672020-02-13 14:34:48 +00003270 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
3271 for vca in deployed_vca:
3272 if not vca:
3273 continue
3274 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3275 continue
tiernoe876f672020-02-13 14:34:48 +00003276 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3277 continue
3278 if kdu_name and kdu_name != vca["kdu_name"]:
3279 continue
3280 break
3281 else:
3282 # vca_deployed not found
tierno067e04a2020-03-31 12:53:13 +00003283 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} is not "
3284 "deployed".format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
quilesj7e13aeb2019-10-08 13:34:55 +02003285
tiernoe876f672020-02-13 14:34:48 +00003286 # get ee_id
3287 ee_id = vca.get("ee_id")
3288 if not ee_id:
tierno067e04a2020-03-31 12:53:13 +00003289 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
tiernoe876f672020-02-13 14:34:48 +00003290 "execution environment"
tierno067e04a2020-03-31 12:53:13 +00003291 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
tiernoe876f672020-02-13 14:34:48 +00003292 return ee_id
3293
3294 async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0,
tierno067e04a2020-03-31 12:53:13 +00003295 retries_interval=30, timeout=None) -> (str, str):
tiernoda964822019-01-14 15:53:47 +00003296 try:
tierno98ad6ea2019-05-30 17:16:28 +00003297 if primitive == "config":
3298 primitive_params = {"params": primitive_params}
tierno2fc7ce52019-06-11 22:50:01 +00003299
quilesj7e13aeb2019-10-08 13:34:55 +02003300 while retries >= 0:
3301 try:
tierno067e04a2020-03-31 12:53:13 +00003302 output = await asyncio.wait_for(
3303 self.n2vc.exec_primitive(
3304 ee_id=ee_id,
3305 primitive_name=primitive,
3306 params_dict=primitive_params,
3307 progress_timeout=self.timeout_progress_primitive,
3308 total_timeout=self.timeout_primitive),
3309 timeout=timeout or self.timeout_primitive)
quilesj7e13aeb2019-10-08 13:34:55 +02003310 # execution was OK
3311 break
tierno067e04a2020-03-31 12:53:13 +00003312 except asyncio.CancelledError:
3313 raise
3314 except Exception as e: # asyncio.TimeoutError
3315 if isinstance(e, asyncio.TimeoutError):
3316 e = "Timeout"
quilesj7e13aeb2019-10-08 13:34:55 +02003317 retries -= 1
3318 if retries >= 0:
tierno73d8bd02019-11-18 17:33:27 +00003319 self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +02003320 # wait and retry
3321 await asyncio.sleep(retries_interval, loop=self.loop)
tierno73d8bd02019-11-18 17:33:27 +00003322 else:
tierno067e04a2020-03-31 12:53:13 +00003323 return 'FAILED', str(e)
quilesj7e13aeb2019-10-08 13:34:55 +02003324
tiernoe876f672020-02-13 14:34:48 +00003325 return 'COMPLETED', output
quilesj7e13aeb2019-10-08 13:34:55 +02003326
tierno067e04a2020-03-31 12:53:13 +00003327 except (LcmException, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00003328 raise
quilesj7e13aeb2019-10-08 13:34:55 +02003329 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00003330 return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02003331
    async def action(self, nsr_id, nslcmop_id):
        """
        NS action task. Executes a primitive over a deployed NS: either a k8s
        operation (upgrade/rollback/status, or an action on a non-juju kdu) through
        the k8s connectors, or a charm primitive through N2VC. The result is written
        to the nslcmops/nsrs database records and notified via kafka.

        :param nsr_id: nsrs database record _id
        :param nslcmop_id: nslcmops database record _id describing the operation
        :return: tuple (nslcmop_operation_state, detailed_status)
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)

            # the action is addressed to a vnf/vdu/kdu when member_vnf_index is
            # provided; to the whole ns otherwise
            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive at the [vdu|kdu|vnf|ns]-configuration of the
            # corresponding descriptor, depending on the addressed target
            config_primitive_desc = None
            if vdu_id:
                for vdu in get_iterable(db_vnfd, "vdu"):
                    if vdu_id == vdu["id"]:
                        for config_primitive in deep_get(vdu, ("vdu-configuration", "config-primitive"), ()):
                            if config_primitive["name"] == primitive:
                                config_primitive_desc = config_primitive
                                break
                        break
            elif kdu_name:
                for kdu in get_iterable(db_vnfd, "kdu"):
                    if kdu_name == kdu["name"]:
                        for config_primitive in deep_get(kdu, ("kdu-configuration", "config-primitive"), ()):
                            if config_primitive["name"] == primitive:
                                config_primitive_desc = config_primitive
                                break
                        break
            elif vnf_index:
                for config_primitive in deep_get(db_vnfd, ("vnf-configuration", "config-primitive"), ()):
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break
            else:
                for config_primitive in deep_get(db_nsd, ("ns-configuration", "config-primitive"), ()):
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            # kdu built-in operations (upgrade/rollback/status) do not need a descriptor entry
            if not config_primitive_desc and not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
                                   format(primitive))

            # collect the additional params of the addressed target to resolve primitive params
            if vnf_index:
                if vdu_id:
                    vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
                    desc_params = self._format_additional_params(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
                    desc_params = self._format_additional_params(kdur.get("additionalParams"))
                else:
                    desc_params = self._format_additional_params(db_vnfr.get("additionalParamsForVnf"))
            else:
                desc_params = self._format_additional_params(db_nsr.get("additionalParamsForNs"))

            if kdu_name:
                # kdu_action: the kdu descriptor has no juju configuration, so the
                # action is run through the k8s connector instead of a charm
                kdu_action = True if not deep_get(kdu, ("kdu-configuration", "juju")) else False

            # TODO check if ns is in a proper status
            if kdu_name and (primitive in ("upgrade", "rollback", "status") or kdu_action):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                # locate the deployed K8s record; rebinds 'kdu' and sets its 'index'
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
                        break
                else:
                    raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
                    raise LcmException(msg)

                # db_dict tells the k8s connector where to report operation progress
                db_dict = {"collection": "nsrs",
                           "filter": {"_id": nsr_id},
                           "path": "_admin.deployed.K8s.{}".format(index)}
                self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive, vnf_index, kdu_name))
                step = "Executing kdu {}".format(primitive)
                if primitive == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        # strip a ":version" suffix from the stored kdu-model, if present
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True, kdu_model=kdu_model,
                            params=desc_params, db_dict=db_dict,
                            timeout=timeout_ns_action),
                        timeout=timeout_ns_action + 10)
                    self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
                elif primitive == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict),
                        timeout=timeout_ns_action)
                elif primitive == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance")),
                        timeout=timeout_ns_action)
                else:
                    # generic action on a non-juju kdu, run through the k8s connector
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
                    params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive,
                            params=params, db_dict=db_dict,
                            timeout=timeout_ns_action),
                        timeout=timeout_ns_action)

                if detailed_status:
                    nslcmop_operation_state = 'COMPLETED'
                else:
                    detailed_status = ''
                    nslcmop_operation_state = 'FAILED'
            else:
                # charm primitive: resolve the execution environment and run through N2VC
                nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
                    self._look_for_deployed_vca(nsr_deployed["VCA"],
                                                member_vnf_index=vnf_index,
                                                vdu_id=vdu_id,
                                                vdu_count_index=vdu_count_index),
                    primitive=primitive,
                    primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
                    timeout=timeout_ns_action)

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
            self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
                                                                                  detailed_status))
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            # write the final status to nsrs/nslcmops and notify via kafka, also on error
            if exc:
                db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
                    "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr["nsState"],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                               "operationState": nslcmop_operation_state},
                                            loop=self.loop)
                except Exception as e:
                    self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02003570
3571 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003572
3573 # Try to lock HA task here
3574 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3575 if not task_is_locked_by_me:
3576 return
3577
tierno59d22d22018-09-25 18:10:19 +02003578 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
3579 self.logger.debug(logging_text + "Enter")
3580 # get all needed from database
3581 db_nsr = None
3582 db_nslcmop = None
3583 db_nslcmop_update = {}
3584 nslcmop_operation_state = None
tiernoe876f672020-02-13 14:34:48 +00003585 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003586 exc = None
tierno9ab95942018-10-10 16:44:22 +02003587 # in case of error, indicates what part of scale was failed to put nsr at error status
3588 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02003589 old_operational_status = ""
3590 old_config_status = ""
tiernof578e552018-11-08 19:07:20 +01003591 vnfr_scaled = False
tierno59d22d22018-09-25 18:10:19 +02003592 try:
kuused124bfe2019-06-18 12:09:24 +02003593 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003594 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003595 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
tierno47e86b52018-10-10 14:05:55 +02003596
quilesj4cda56b2019-12-05 10:02:20 +00003597 self._write_ns_status(
3598 nsr_id=nsr_id,
3599 ns_state=None,
3600 current_operation="SCALING",
3601 current_operation_id=nslcmop_id
3602 )
3603
ikalyvas02d9e7b2019-05-27 18:16:01 +03003604 step = "Getting nslcmop from database"
ikalyvas02d9e7b2019-05-27 18:16:01 +03003605 self.logger.debug(step + " after having waited for previous tasks to be completed")
3606 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3607 step = "Getting nsr from database"
3608 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3609
3610 old_operational_status = db_nsr["operational-status"]
3611 old_config_status = db_nsr["config-status"]
tierno59d22d22018-09-25 18:10:19 +02003612 step = "Parsing scaling parameters"
tierno9babfda2019-06-07 12:36:50 +00003613 # self.logger.debug(step)
tierno59d22d22018-09-25 18:10:19 +02003614 db_nsr_update["operational-status"] = "scaling"
3615 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00003616 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003617
3618 #######
3619 nsr_deployed = db_nsr["_admin"].get("deployed")
3620 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tiernoda6fb102019-11-23 00:36:52 +00003621 # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3622 # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3623 # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003624 #######
3625
tiernoe4f7e6c2018-11-27 14:55:30 +00003626 RO_nsr_id = nsr_deployed["RO"]["nsr_id"]
tierno59d22d22018-09-25 18:10:19 +02003627 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
3628 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
3629 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
3630 # scaling_policy = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"].get("scaling-policy")
3631
tierno82974b22018-11-27 21:55:36 +00003632 # for backward compatibility
3633 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3634 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3635 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3636 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3637
tierno59d22d22018-09-25 18:10:19 +02003638 step = "Getting vnfr from database"
3639 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3640 step = "Getting vnfd from database"
3641 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03003642
tierno59d22d22018-09-25 18:10:19 +02003643 step = "Getting scaling-group-descriptor"
3644 for scaling_descriptor in db_vnfd["scaling-group-descriptor"]:
3645 if scaling_descriptor["name"] == scaling_group:
3646 break
3647 else:
3648 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
3649 "at vnfd:scaling-group-descriptor".format(scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003650
tierno59d22d22018-09-25 18:10:19 +02003651 # cooldown_time = 0
3652 # for scaling_policy_descriptor in scaling_descriptor.get("scaling-policy", ()):
3653 # cooldown_time = scaling_policy_descriptor.get("cooldown-time", 0)
3654 # if scaling_policy and scaling_policy == scaling_policy_descriptor.get("name"):
3655 # break
3656
3657 # TODO check if ns is in a proper status
tierno15b1cf12019-08-29 13:21:40 +00003658 step = "Sending scale order to VIM"
tierno59d22d22018-09-25 18:10:19 +02003659 nb_scale_op = 0
3660 if not db_nsr["_admin"].get("scaling-group"):
3661 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
3662 admin_scale_index = 0
3663 else:
3664 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
3665 if admin_scale_info["name"] == scaling_group:
3666 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
3667 break
tierno9ab95942018-10-10 16:44:22 +02003668 else: # not found, set index one plus last element and add new entry with the name
3669 admin_scale_index += 1
3670 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
tierno59d22d22018-09-25 18:10:19 +02003671 RO_scaling_info = []
3672 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
3673 if scaling_type == "SCALE_OUT":
3674 # count if max-instance-count is reached
kuuse818d70c2019-08-07 14:43:44 +02003675 max_instance_count = scaling_descriptor.get("max-instance-count", 10)
3676 # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count))
3677 if nb_scale_op >= max_instance_count:
3678 raise LcmException("reached the limit of {} (max-instance-count) "
3679 "scaling-out operations for the "
3680 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
kuuse8b998e42019-07-30 15:22:16 +02003681
ikalyvas02d9e7b2019-05-27 18:16:01 +03003682 nb_scale_op += 1
tierno59d22d22018-09-25 18:10:19 +02003683 vdu_scaling_info["scaling_direction"] = "OUT"
3684 vdu_scaling_info["vdu-create"] = {}
3685 for vdu_scale_info in scaling_descriptor["vdu"]:
3686 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3687 "type": "create", "count": vdu_scale_info.get("count", 1)})
3688 vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
ikalyvas02d9e7b2019-05-27 18:16:01 +03003689
tierno59d22d22018-09-25 18:10:19 +02003690 elif scaling_type == "SCALE_IN":
3691 # count if min-instance-count is reached
tierno27246d82018-09-27 15:59:09 +02003692 min_instance_count = 0
tierno59d22d22018-09-25 18:10:19 +02003693 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
3694 min_instance_count = int(scaling_descriptor["min-instance-count"])
tierno9babfda2019-06-07 12:36:50 +00003695 if nb_scale_op <= min_instance_count:
3696 raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the "
3697 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003698 nb_scale_op -= 1
tierno59d22d22018-09-25 18:10:19 +02003699 vdu_scaling_info["scaling_direction"] = "IN"
3700 vdu_scaling_info["vdu-delete"] = {}
3701 for vdu_scale_info in scaling_descriptor["vdu"]:
3702 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3703 "type": "delete", "count": vdu_scale_info.get("count", 1)})
3704 vdu_scaling_info["vdu-delete"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
3705
3706 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
tierno27246d82018-09-27 15:59:09 +02003707 vdu_create = vdu_scaling_info.get("vdu-create")
3708 vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
tierno59d22d22018-09-25 18:10:19 +02003709 if vdu_scaling_info["scaling_direction"] == "IN":
3710 for vdur in reversed(db_vnfr["vdur"]):
tierno27246d82018-09-27 15:59:09 +02003711 if vdu_delete.get(vdur["vdu-id-ref"]):
3712 vdu_delete[vdur["vdu-id-ref"]] -= 1
tierno59d22d22018-09-25 18:10:19 +02003713 vdu_scaling_info["vdu"].append({
3714 "name": vdur["name"],
3715 "vdu_id": vdur["vdu-id-ref"],
3716 "interface": []
3717 })
3718 for interface in vdur["interfaces"]:
3719 vdu_scaling_info["vdu"][-1]["interface"].append({
3720 "name": interface["name"],
3721 "ip_address": interface["ip-address"],
3722 "mac_address": interface.get("mac-address"),
3723 })
tierno27246d82018-09-27 15:59:09 +02003724 vdu_delete = vdu_scaling_info.pop("vdu-delete")
tierno59d22d22018-09-25 18:10:19 +02003725
kuuseac3a8882019-10-03 10:48:06 +02003726 # PRE-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02003727 step = "Executing pre-scale vnf-config-primitive"
3728 if scaling_descriptor.get("scaling-config-action"):
3729 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02003730 if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
3731 or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02003732 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
3733 step = db_nslcmop_update["detailed-status"] = \
3734 "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00003735
tierno59d22d22018-09-25 18:10:19 +02003736 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02003737 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
3738 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02003739 break
3740 else:
3741 raise LcmException(
3742 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
tiernoda964822019-01-14 15:53:47 +00003743 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
tierno59d22d22018-09-25 18:10:19 +02003744 "primitive".format(scaling_group, config_primitive))
tiernoda964822019-01-14 15:53:47 +00003745
tierno16fedf52019-05-24 08:38:26 +00003746 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00003747 if db_vnfr.get("additionalParamsForVnf"):
3748 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
quilesj7e13aeb2019-10-08 13:34:55 +02003749
tierno9ab95942018-10-10 16:44:22 +02003750 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02003751 db_nsr_update["config-status"] = "configuring pre-scaling"
kuuseac3a8882019-10-03 10:48:06 +02003752 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
3753
tierno7c4e24c2020-05-13 08:41:35 +00003754 # Pre-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02003755 op_index = self._check_or_add_scale_suboperation(
3756 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
tierno7c4e24c2020-05-13 08:41:35 +00003757 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02003758 # Skip sub-operation
3759 result = 'COMPLETED'
3760 result_detail = 'Done'
3761 self.logger.debug(logging_text +
3762 "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
3763 vnf_config_primitive, result, result_detail))
3764 else:
tierno7c4e24c2020-05-13 08:41:35 +00003765 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02003766 # New sub-operation: Get index of this sub-operation
3767 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3768 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
3769 format(vnf_config_primitive))
3770 else:
tierno7c4e24c2020-05-13 08:41:35 +00003771 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02003772 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3773 vnf_index = op.get('member_vnf_index')
3774 vnf_config_primitive = op.get('primitive')
3775 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00003776 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02003777 format(vnf_config_primitive))
tierno7c4e24c2020-05-13 08:41:35 +00003778 # Execute the primitive, either with new (first-time) or registered (retry) args
kuuseac3a8882019-10-03 10:48:06 +02003779 result, result_detail = await self._ns_execute_primitive(
tiernoe876f672020-02-13 14:34:48 +00003780 self._look_for_deployed_vca(nsr_deployed["VCA"],
3781 member_vnf_index=vnf_index,
3782 vdu_id=None,
tiernoe876f672020-02-13 14:34:48 +00003783 vdu_count_index=None),
3784 vnf_config_primitive, primitive_params)
kuuseac3a8882019-10-03 10:48:06 +02003785 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
3786 vnf_config_primitive, result, result_detail))
3787 # Update operationState = COMPLETED | FAILED
3788 self._update_suboperation_status(
3789 db_nslcmop, op_index, result, result_detail)
3790
tierno59d22d22018-09-25 18:10:19 +02003791 if result == "FAILED":
3792 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02003793 db_nsr_update["config-status"] = old_config_status
3794 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02003795 # PRE-SCALE END
tierno59d22d22018-09-25 18:10:19 +02003796
kuuseac3a8882019-10-03 10:48:06 +02003797 # SCALE RO - BEGIN
3798 # Should this block be skipped if 'RO_nsr_id' == None ?
3799 # if (RO_nsr_id and RO_scaling_info):
tierno59d22d22018-09-25 18:10:19 +02003800 if RO_scaling_info:
tierno9ab95942018-10-10 16:44:22 +02003801 scale_process = "RO"
tierno7c4e24c2020-05-13 08:41:35 +00003802 # Scale RO retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02003803 op_index = self._check_or_add_scale_suboperation(
3804 db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
tierno7c4e24c2020-05-13 08:41:35 +00003805 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02003806 # Skip sub-operation
3807 result = 'COMPLETED'
3808 result_detail = 'Done'
3809 self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(
3810 result, result_detail))
3811 else:
tierno7c4e24c2020-05-13 08:41:35 +00003812 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02003813 # New sub-operation: Get index of this sub-operation
3814 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3815 self.logger.debug(logging_text + "New sub-operation RO")
tierno59d22d22018-09-25 18:10:19 +02003816 else:
tierno7c4e24c2020-05-13 08:41:35 +00003817 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02003818 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3819 RO_nsr_id = op.get('RO_nsr_id')
3820 RO_scaling_info = op.get('RO_scaling_info')
tierno7c4e24c2020-05-13 08:41:35 +00003821 self.logger.debug(logging_text + "Sub-operation RO retry for primitive {}".format(
kuuseac3a8882019-10-03 10:48:06 +02003822 vnf_config_primitive))
3823
3824 RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
3825 db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
3826 db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
3827 # wait until ready
3828 RO_nslcmop_id = RO_desc["instance_action_id"]
3829 db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id
3830
3831 RO_task_done = False
3832 step = detailed_status = "Waiting RO_task_id={} to complete the scale action.".format(RO_nslcmop_id)
3833 detailed_status_old = None
3834 self.logger.debug(logging_text + step)
3835
3836 deployment_timeout = 1 * 3600 # One hour
3837 while deployment_timeout > 0:
3838 if not RO_task_done:
3839 desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
3840 extra_item_id=RO_nslcmop_id)
quilesj3655ae02019-12-12 16:08:35 +00003841
3842 # deploymentStatus
3843 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
3844
kuuseac3a8882019-10-03 10:48:06 +02003845 ns_status, ns_status_info = self.RO.check_action_status(desc)
3846 if ns_status == "ERROR":
3847 raise ROclient.ROClientException(ns_status_info)
3848 elif ns_status == "BUILD":
3849 detailed_status = step + "; {}".format(ns_status_info)
3850 elif ns_status == "ACTIVE":
3851 RO_task_done = True
3852 step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
3853 self.logger.debug(logging_text + step)
3854 else:
3855 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
tierno59d22d22018-09-25 18:10:19 +02003856 else:
quilesj7e13aeb2019-10-08 13:34:55 +02003857
kuuseac3a8882019-10-03 10:48:06 +02003858 if ns_status == "ERROR":
3859 raise ROclient.ROClientException(ns_status_info)
3860 elif ns_status == "BUILD":
3861 detailed_status = step + "; {}".format(ns_status_info)
3862 elif ns_status == "ACTIVE":
3863 step = detailed_status = \
3864 "Waiting for management IP address reported by the VIM. Updating VNFRs"
3865 if not vnfr_scaled:
3866 self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
3867 vnfr_scaled = True
3868 try:
3869 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00003870
3871 # deploymentStatus
3872 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
3873
kuuseac3a8882019-10-03 10:48:06 +02003874 # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
3875 self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
3876 break
3877 except LcmExceptionNoMgmtIP:
3878 pass
3879 else:
3880 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
3881 if detailed_status != detailed_status_old:
3882 self._update_suboperation_status(
3883 db_nslcmop, op_index, 'COMPLETED', detailed_status)
3884 detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
3885 self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
tierno59d22d22018-09-25 18:10:19 +02003886
kuuseac3a8882019-10-03 10:48:06 +02003887 await asyncio.sleep(5, loop=self.loop)
3888 deployment_timeout -= 5
3889 if deployment_timeout <= 0:
3890 self._update_suboperation_status(
3891 db_nslcmop, nslcmop_id, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
3892 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tierno59d22d22018-09-25 18:10:19 +02003893
kuuseac3a8882019-10-03 10:48:06 +02003894 # update VDU_SCALING_INFO with the obtained ip_addresses
3895 if vdu_scaling_info["scaling_direction"] == "OUT":
3896 for vdur in reversed(db_vnfr["vdur"]):
3897 if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
3898 vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
3899 vdu_scaling_info["vdu"].append({
3900 "name": vdur["name"],
3901 "vdu_id": vdur["vdu-id-ref"],
3902 "interface": []
tierno59d22d22018-09-25 18:10:19 +02003903 })
kuuseac3a8882019-10-03 10:48:06 +02003904 for interface in vdur["interfaces"]:
3905 vdu_scaling_info["vdu"][-1]["interface"].append({
3906 "name": interface["name"],
3907 "ip_address": interface["ip-address"],
3908 "mac_address": interface.get("mac-address"),
3909 })
3910 del vdu_scaling_info["vdu-create"]
3911
3912 self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
3913 # SCALE RO - END
tierno59d22d22018-09-25 18:10:19 +02003914
tierno9ab95942018-10-10 16:44:22 +02003915 scale_process = None
tierno59d22d22018-09-25 18:10:19 +02003916 if db_nsr_update:
3917 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3918
kuuseac3a8882019-10-03 10:48:06 +02003919 # POST-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02003920 # execute primitive service POST-SCALING
3921 step = "Executing post-scale vnf-config-primitive"
3922 if scaling_descriptor.get("scaling-config-action"):
3923 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02003924 if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
3925 or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02003926 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
3927 step = db_nslcmop_update["detailed-status"] = \
3928 "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00003929
tierno589befb2019-05-29 07:06:23 +00003930 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00003931 if db_vnfr.get("additionalParamsForVnf"):
3932 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
3933
tierno59d22d22018-09-25 18:10:19 +02003934 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02003935 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
3936 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02003937 break
3938 else:
3939 raise LcmException("Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:"
3940 "scaling-config-action[vnf-config-primitive-name-ref='{}'] does not "
tierno47e86b52018-10-10 14:05:55 +02003941 "match any vnf-configuration:config-primitive".format(scaling_group,
3942 config_primitive))
tierno9ab95942018-10-10 16:44:22 +02003943 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02003944 db_nsr_update["config-status"] = "configuring post-scaling"
kuuseac3a8882019-10-03 10:48:06 +02003945 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
tiernod6de1992018-10-11 13:05:52 +02003946
tierno7c4e24c2020-05-13 08:41:35 +00003947 # Post-scale retry check: Check if this sub-operation has been executed before
kuuseac3a8882019-10-03 10:48:06 +02003948 op_index = self._check_or_add_scale_suboperation(
3949 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
quilesj4cda56b2019-12-05 10:02:20 +00003950 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02003951 # Skip sub-operation
3952 result = 'COMPLETED'
3953 result_detail = 'Done'
3954 self.logger.debug(logging_text +
3955 "vnf_config_primitive={} Skipped sub-operation, result {} {}".
3956 format(vnf_config_primitive, result, result_detail))
3957 else:
quilesj4cda56b2019-12-05 10:02:20 +00003958 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02003959 # New sub-operation: Get index of this sub-operation
3960 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3961 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
3962 format(vnf_config_primitive))
3963 else:
tierno7c4e24c2020-05-13 08:41:35 +00003964 # retry: Get registered params for this existing sub-operation
kuuseac3a8882019-10-03 10:48:06 +02003965 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3966 vnf_index = op.get('member_vnf_index')
3967 vnf_config_primitive = op.get('primitive')
3968 primitive_params = op.get('primitive_params')
tierno7c4e24c2020-05-13 08:41:35 +00003969 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
kuuseac3a8882019-10-03 10:48:06 +02003970 format(vnf_config_primitive))
tierno7c4e24c2020-05-13 08:41:35 +00003971 # Execute the primitive, either with new (first-time) or registered (retry) args
kuuseac3a8882019-10-03 10:48:06 +02003972 result, result_detail = await self._ns_execute_primitive(
tiernoe876f672020-02-13 14:34:48 +00003973 self._look_for_deployed_vca(nsr_deployed["VCA"],
3974 member_vnf_index=vnf_index,
3975 vdu_id=None,
tiernoe876f672020-02-13 14:34:48 +00003976 vdu_count_index=None),
3977 vnf_config_primitive, primitive_params)
kuuseac3a8882019-10-03 10:48:06 +02003978 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
3979 vnf_config_primitive, result, result_detail))
3980 # Update operationState = COMPLETED | FAILED
3981 self._update_suboperation_status(
3982 db_nslcmop, op_index, result, result_detail)
3983
tierno59d22d22018-09-25 18:10:19 +02003984 if result == "FAILED":
3985 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02003986 db_nsr_update["config-status"] = old_config_status
3987 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02003988 # POST-SCALE END
tierno59d22d22018-09-25 18:10:19 +02003989
            db_nsr_update["detailed-status"] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
            # a previously failed NS is considered recovered once scaling completes
            db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
                else old_operational_status
            db_nsr_update["config-status"] = old_config_status
            return
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep the full traceback in the operation status
            exc = traceback.format_exc()
            self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            # always leave the NS marked as idle, whatever the outcome
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None
            )
            if exc:
                # 'step' holds the last stage reached, giving context to the failure message
                db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore pre-operation status, then mark whichever stage was in progress as failed
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update["detailed-status"] = ""
                    if scale_process:
                        # scale_process is "VCA" (config primitives) or "RO" (resource scaling)
                        if "VCA" in scale_process:
                            db_nsr_update["config-status"] = "failed"
                        if "RO" in scale_process:
                            db_nsr_update["operational-status"] = "failed"
                        db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
                                                                                                     exc)
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"

            # persist the operation result, and the nsr status changes when an nsr was loaded
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update
                )

            if nslcmop_operation_state:
                try:
                    # notify subscribers on the message bus that the scale operation finished
                    await self.msg.aiowrite("ns", "scaled", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                             "operationState": nslcmop_operation_state},
                                            loop=self.loop)
                    # if cooldown_time:
                    #     await asyncio.sleep(cooldown_time, loop=self.loop)
                    # await self.msg.aiowrite("ns","scaled-cooldown-time", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id})
                except Exception as e:
                    # best-effort notification: log and continue with task cleanup
                    self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
            self.logger.debug(logging_text + "Exit")
            # unregister this task from the LCM task registry
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")