1 |
|
# -*- coding: utf-8 -*- |
2 |
|
|
3 |
|
# Licensed under the Apache License, Version 2.0 (the "License"); |
4 |
|
# you may not use this file except in compliance with the License. |
5 |
|
# You may obtain a copy of the License at |
6 |
|
# |
7 |
|
# http://www.apache.org/licenses/LICENSE-2.0 |
8 |
|
# |
9 |
|
# Unless required by applicable law or agreed to in writing, software |
10 |
|
# distributed under the License is distributed on an "AS IS" BASIS, |
11 |
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
12 |
|
# implied. |
13 |
|
# See the License for the specific language governing permissions and |
14 |
|
# limitations under the License. |
15 |
|
|
16 |
|
# import logging |
17 |
1 |
import json |
18 |
1 |
from uuid import uuid4 |
19 |
1 |
from http import HTTPStatus |
20 |
1 |
from time import time |
21 |
1 |
from copy import copy, deepcopy |
22 |
1 |
from osm_nbi.validation import ( |
23 |
|
validate_input, |
24 |
|
ValidationError, |
25 |
|
ns_instantiate, |
26 |
|
ns_terminate, |
27 |
|
ns_action, |
28 |
|
ns_scale, |
29 |
|
ns_update, |
30 |
|
ns_heal, |
31 |
|
nsi_instantiate, |
32 |
|
ns_migrate, |
33 |
|
ns_verticalscale, |
34 |
|
) |
35 |
1 |
from osm_nbi.base_topic import ( |
36 |
|
BaseTopic, |
37 |
|
EngineException, |
38 |
|
get_iterable, |
39 |
|
deep_get, |
40 |
|
increment_ip_mac, |
41 |
|
update_descriptor_usage_state, |
42 |
|
) |
43 |
1 |
from yaml import safe_dump |
44 |
1 |
from osm_common.dbbase import DbException |
45 |
1 |
from osm_common.msgbase import MsgException |
46 |
1 |
from osm_common.fsbase import FsException |
47 |
1 |
from osm_nbi import utils |
48 |
1 |
from re import ( |
49 |
|
match, |
50 |
|
) # For checking that additional parameter names are valid Jinja2 identifiers |
51 |
|
|
52 |
1 |
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>" |
53 |
|
|
54 |
|
|
55 |
1 |
class NsrTopic(BaseTopic):
    """REST topic for NS instance records (the "nsrs" database collection).

    Handles creation/deletion of NS records and of their constituent VNF
    records, delegating common CRUD plumbing to BaseTopic.
    """

    # database collection backing this topic
    topic = "nsrs"
    # message-bus topic used when notifying about NS operations
    topic_msg = "ns"
    # quota counter checked on creation (see check_quota in BaseTopic)
    quota_name = "ns_instances"
    # input-validation schema applied to new (instantiation) requests
    schema_new = ns_instantiate
60 |
|
|
61 |
1 |
def __init__(self, db, fs, msg, auth): |
62 |
1 |
BaseTopic.__init__(self, db, fs, msg, auth) |
63 |
|
|
64 |
1 |
def _check_descriptor_dependencies(self, session, descriptor): |
65 |
|
""" |
66 |
|
Check that the dependent descriptors exist on a new descriptor or edition |
67 |
|
:param session: client session information |
68 |
|
:param descriptor: descriptor to be inserted or edit |
69 |
|
:return: None or raises exception |
70 |
|
""" |
71 |
0 |
if not descriptor.get("nsdId"): |
72 |
0 |
return |
73 |
0 |
nsd_id = descriptor["nsdId"] |
74 |
0 |
if not self.get_item_list(session, "nsds", {"id": nsd_id}): |
75 |
0 |
raise EngineException( |
76 |
|
"Descriptor error at nsdId='{}' references a non exist nsd".format( |
77 |
|
nsd_id |
78 |
|
), |
79 |
|
http_code=HTTPStatus.CONFLICT, |
80 |
|
) |
81 |
|
|
82 |
1 |
@staticmethod |
83 |
1 |
def format_on_new(content, project_id=None, make_public=False): |
84 |
1 |
BaseTopic.format_on_new(content, project_id=project_id, make_public=make_public) |
85 |
1 |
content["_admin"]["nsState"] = "NOT_INSTANTIATED" |
86 |
1 |
return None |
87 |
|
|
88 |
1 |
def check_conflict_on_del(self, session, _id, db_content): |
89 |
|
""" |
90 |
|
Check that NSR is not instantiated |
91 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
92 |
|
:param _id: nsr internal id |
93 |
|
:param db_content: The database content of the nsr |
94 |
|
:return: None or raises EngineException with the conflict |
95 |
|
""" |
96 |
1 |
if session["force"]: |
97 |
1 |
return |
98 |
1 |
nsr = db_content |
99 |
1 |
if nsr["_admin"].get("nsState") == "INSTANTIATED": |
100 |
1 |
raise EngineException( |
101 |
|
"nsr '{}' cannot be deleted because it is in 'INSTANTIATED' state. " |
102 |
|
"Launch 'terminate' operation first; or force deletion".format(_id), |
103 |
|
http_code=HTTPStatus.CONFLICT, |
104 |
|
) |
105 |
|
|
106 |
1 |
    def delete_extra(self, session, _id, db_content, not_send_msg=None):
        """
        Deletes associated nslcmops and vnfrs from database. Deletes associated filesystem.
        Set usageState of pdu, vnfd, nsd.

        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param _id: server internal id
        :param db_content: The database content of the descriptor
        :param not_send_msg: To not send message (False) or store content (list) instead
            (NOTE(review): not used inside this method — presumably consumed by callers/overrides)
        :return: None if ok or raises EngineException with the problem
        """
        # remove the NS temporal folder and all dependent records first
        self.fs.file_delete(_id, ignore_non_exist=True)
        self.db.del_list("nslcmops", {"nsInstanceId": _id})
        self.db.del_list("vnfrs", {"nsr-id-ref": _id})

        # set all used pdus as free
        self.db.set_list(
            "pdus",
            {"_admin.usage.nsr_id": _id},
            {"_admin.usageState": "NOT_IN_USE", "_admin.usage": None},
        )

        # Set NSD usageState: release it only if no other NSR still uses it.
        # NOTE: at this point this NSR is being deleted, so any hit means another user.
        nsr = db_content
        used_nsd_id = nsr.get("nsd-id")
        if used_nsd_id:
            # check if used by another NSR
            nsrs_list = self.db.get_one(
                "nsrs", {"nsd-id": used_nsd_id}, fail_on_empty=False, fail_on_more=False
            )
            if not nsrs_list:
                self.db.set_one(
                    "nsds", {"_id": used_nsd_id}, {"_admin.usageState": "NOT_IN_USE"}
                )

        # Set VNFD usageState: same release logic, per constituent VNFD
        used_vnfd_id_list = nsr.get("vnfd-id")
        if used_vnfd_id_list:
            for used_vnfd_id in used_vnfd_id_list:
                # check if used by another NSR
                nsrs_list = self.db.get_one(
                    "nsrs",
                    {"vnfd-id": used_vnfd_id},
                    fail_on_empty=False,
                    fail_on_more=False,
                )
                if not nsrs_list:
                    self.db.set_one(
                        "vnfds",
                        {"_id": used_vnfd_id},
                        {"_admin.usageState": "NOT_IN_USE"},
                    )

        # delete extra ro_nsrs used for internal RO module
        self.db.del_one("ro_nsrs", q_filter={"_id": _id}, fail_on_empty=False)
160 |
|
|
161 |
1 |
@staticmethod |
162 |
1 |
def _format_ns_request(ns_request): |
163 |
1 |
formated_request = copy(ns_request) |
164 |
1 |
formated_request.pop("additionalParamsForNs", None) |
165 |
1 |
formated_request.pop("additionalParamsForVnf", None) |
166 |
1 |
return formated_request |
167 |
|
|
168 |
1 |
@staticmethod |
169 |
1 |
def _format_additional_params( |
170 |
|
ns_request, member_vnf_index=None, vdu_id=None, kdu_name=None, descriptor=None |
171 |
|
): |
172 |
|
""" |
173 |
|
Get and format user additional params for NS or VNF. |
174 |
|
The vdu_id and kdu_name params are mutually exclusive! If none of them are given, then the method will |
175 |
|
exclusively search for the VNF/NS LCM additional params. |
176 |
|
|
177 |
|
:param ns_request: User instantiation additional parameters |
178 |
|
:param member_vnf_index: None for extract NS params, or member_vnf_index to extract VNF params |
179 |
|
:vdu_id: VDU's ID against which we want to format the additional params |
180 |
|
:kdu_name: KDU's name against which we want to format the additional params |
181 |
|
:param descriptor: If not None it check that needed parameters of descriptor are supplied |
182 |
|
:return: tuple with a formatted copy of additional params or None if not supplied, plus other parameters |
183 |
|
""" |
184 |
1 |
additional_params = None |
185 |
1 |
other_params = None |
186 |
1 |
if not member_vnf_index: |
187 |
1 |
additional_params = copy(ns_request.get("additionalParamsForNs")) |
188 |
1 |
where_ = "additionalParamsForNs" |
189 |
1 |
elif ns_request.get("additionalParamsForVnf"): |
190 |
1 |
where_ = "additionalParamsForVnf[member-vnf-index={}]".format( |
191 |
|
member_vnf_index |
192 |
|
) |
193 |
1 |
item = next( |
194 |
|
( |
195 |
|
x |
196 |
|
for x in ns_request["additionalParamsForVnf"] |
197 |
|
if x["member-vnf-index"] == member_vnf_index |
198 |
|
), |
199 |
|
None, |
200 |
|
) |
201 |
1 |
if item: |
202 |
1 |
if not vdu_id and not kdu_name: |
203 |
1 |
other_params = item |
204 |
1 |
additional_params = copy(item.get("additionalParams")) or {} |
205 |
1 |
if vdu_id and item.get("additionalParamsForVdu"): |
206 |
0 |
item_vdu = next( |
207 |
|
( |
208 |
|
x |
209 |
|
for x in item["additionalParamsForVdu"] |
210 |
|
if x["vdu_id"] == vdu_id |
211 |
|
), |
212 |
|
None, |
213 |
|
) |
214 |
0 |
other_params = item_vdu |
215 |
0 |
if item_vdu and item_vdu.get("additionalParams"): |
216 |
0 |
where_ += ".additionalParamsForVdu[vdu_id={}]".format(vdu_id) |
217 |
0 |
additional_params = item_vdu["additionalParams"] |
218 |
1 |
if kdu_name: |
219 |
0 |
additional_params = {} |
220 |
0 |
if item.get("additionalParamsForKdu"): |
221 |
0 |
item_kdu = next( |
222 |
|
( |
223 |
|
x |
224 |
|
for x in item["additionalParamsForKdu"] |
225 |
|
if x["kdu_name"] == kdu_name |
226 |
|
), |
227 |
|
None, |
228 |
|
) |
229 |
0 |
other_params = item_kdu |
230 |
0 |
if item_kdu and item_kdu.get("additionalParams"): |
231 |
0 |
where_ += ".additionalParamsForKdu[kdu_name={}]".format( |
232 |
|
kdu_name |
233 |
|
) |
234 |
0 |
additional_params = item_kdu["additionalParams"] |
235 |
|
|
236 |
1 |
if additional_params: |
237 |
1 |
for k, v in additional_params.items(): |
238 |
|
# BEGIN Check that additional parameter names are valid Jinja2 identifiers if target is not Kdu |
239 |
1 |
if not kdu_name and not match("^[a-zA-Z_][a-zA-Z0-9_]*$", k): |
240 |
0 |
raise EngineException( |
241 |
|
"Invalid param name at {}:{}. Must contain only alphanumeric characters " |
242 |
|
"and underscores, and cannot start with a digit".format( |
243 |
|
where_, k |
244 |
|
) |
245 |
|
) |
246 |
|
# END Check that additional parameter names are valid Jinja2 identifiers |
247 |
1 |
if not isinstance(k, str): |
248 |
0 |
raise EngineException( |
249 |
|
"Invalid param at {}:{}. Only string keys are allowed".format( |
250 |
|
where_, k |
251 |
|
) |
252 |
|
) |
253 |
1 |
if "$" in k: |
254 |
0 |
raise EngineException( |
255 |
|
"Invalid param at {}:{}. Keys must not contain $ symbol".format( |
256 |
|
where_, k |
257 |
|
) |
258 |
|
) |
259 |
1 |
if isinstance(v, (dict, tuple, list)): |
260 |
0 |
additional_params[k] = "!!yaml " + safe_dump(v) |
261 |
1 |
if kdu_name: |
262 |
0 |
additional_params = json.dumps(additional_params) |
263 |
|
|
264 |
|
# Select the VDU ID, KDU name or NS/VNF ID, depending on the method's call intent |
265 |
1 |
selector = vdu_id if vdu_id else kdu_name if kdu_name else descriptor.get("id") |
266 |
|
|
267 |
1 |
if descriptor: |
268 |
1 |
for df in descriptor.get("df", []): |
269 |
|
# check that enough parameters are supplied for the initial-config-primitive |
270 |
|
# TODO: check for cloud-init |
271 |
1 |
if member_vnf_index: |
272 |
1 |
initial_primitives = [] |
273 |
1 |
if ( |
274 |
|
"lcm-operations-configuration" in df |
275 |
|
and "operate-vnf-op-config" |
276 |
|
in df["lcm-operations-configuration"] |
277 |
|
): |
278 |
1 |
for config in df["lcm-operations-configuration"][ |
279 |
|
"operate-vnf-op-config" |
280 |
|
].get("day1-2", []): |
281 |
|
# Verify the target object (VNF|NS|VDU|KDU) where we need to populate |
282 |
|
# the params with the additional ones given by the user |
283 |
1 |
if config.get("id") == selector: |
284 |
1 |
for primitive in get_iterable( |
285 |
|
config.get("initial-config-primitive") |
286 |
|
): |
287 |
1 |
initial_primitives.append(primitive) |
288 |
|
else: |
289 |
1 |
initial_primitives = deep_get( |
290 |
|
descriptor, ("ns-configuration", "initial-config-primitive") |
291 |
|
) |
292 |
|
|
293 |
1 |
for initial_primitive in get_iterable(initial_primitives): |
294 |
1 |
for param in get_iterable(initial_primitive.get("parameter")): |
295 |
1 |
if param["value"].startswith("<") and param["value"].endswith( |
296 |
|
">" |
297 |
|
): |
298 |
1 |
if param["value"] in ( |
299 |
|
"<rw_mgmt_ip>", |
300 |
|
"<VDU_SCALE_INFO>", |
301 |
|
"<ns_config_info>", |
302 |
|
"<OSM>" |
303 |
|
): |
304 |
1 |
continue |
305 |
1 |
if ( |
306 |
|
not additional_params |
307 |
|
or param["value"][1:-1] not in additional_params |
308 |
|
): |
309 |
1 |
raise EngineException( |
310 |
|
"Parameter '{}' needed for vnfd[id={}]:day1-2 configuration:" |
311 |
|
"initial-config-primitive[name={}] not supplied".format( |
312 |
|
param["value"], |
313 |
|
descriptor["id"], |
314 |
|
initial_primitive["name"], |
315 |
|
) |
316 |
|
) |
317 |
|
|
318 |
1 |
return additional_params or None, other_params or None |
319 |
|
|
320 |
1 |
    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        """
        Creates a new nsr into database. It also creates needed vnfrs.

        :param rollback: list to append the created items at database in case a rollback must be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: params to be used for the nsr
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :return: tuple (nsr _id, None). Or an exception of type
            EngineException, ValidationError, DbException, FsException, MsgException.
            Note: Exceptions are not captured on purpose. They should be captured at called
        """
        try:
            # "step" tracks progress so the except clause below can report
            # exactly where the failure happened
            step = "checking quotas"
            self.check_quota(session)

            step = "validating input parameters"
            ns_request = self._remove_envelop(indata)
            self._update_input_with_kwargs(ns_request, kwargs)
            ns_request = self._validate_input_new(ns_request, session["force"])

            step = "getting nsd id='{}' from database".format(ns_request.get("nsdId"))
            nsd = self._get_nsd_from_db(ns_request["nsdId"], session)
            ns_k8s_namespace = self._get_ns_k8s_namespace(nsd, ns_request, session)

            step = "checking nsdOperationalState"
            self._check_nsd_operational_state(nsd, ns_request)

            step = "filling nsr from input data"
            nsr_id = str(uuid4())
            nsr_descriptor = self._create_nsr_descriptor_from_nsd(
                nsd, ns_request, nsr_id, session
            )

            # Create VNFRs — one per vnf-profile of the NSD's (first) df.
            # needed_vnfds caches VNFDs so each is fetched from the db only once.
            needed_vnfds = {}
            # TODO: Change for multiple df support
            vnf_profiles = nsd.get("df", [{}])[0].get("vnf-profile", ())
            for vnfp in vnf_profiles:
                vnfd_id = vnfp.get("vnfd-id")
                vnf_index = vnfp.get("id")
                step = (
                    "getting vnfd id='{}' constituent-vnfd='{}' from database".format(
                        vnfd_id, vnf_index
                    )
                )
                if vnfd_id not in needed_vnfds:
                    vnfd = self._get_vnfd_from_db(vnfd_id, session)
                    # keep the revision visible after _admin is dropped below
                    if "revision" in vnfd["_admin"]:
                        vnfd["revision"] = vnfd["_admin"]["revision"]
                    vnfd.pop("_admin")
                    needed_vnfds[vnfd_id] = vnfd
                    nsr_descriptor["vnfd-id"].append(vnfd["_id"])
                else:
                    vnfd = needed_vnfds[vnfd_id]

                step = "filling vnfr vnfd-id='{}' constituent-vnfd='{}'".format(
                    vnfd_id, vnf_index
                )
                vnfr_descriptor = self._create_vnfr_descriptor_from_vnfd(
                    nsd,
                    vnfd,
                    vnfd_id,
                    vnf_index,
                    nsr_descriptor,
                    ns_request,
                    ns_k8s_namespace,
                )

                step = "creating vnfr vnfd-id='{}' constituent-vnfd='{}' at database".format(
                    vnfd_id, vnf_index
                )
                self._add_vnfr_to_db(vnfr_descriptor, rollback, session)
                nsr_descriptor["constituent-vnfr-ref"].append(vnfr_descriptor["id"])
                step = "Updating VNFD usageState"
                # NOTE(review): vnfd has had _admin popped by this point — confirm
                # update_descriptor_usage_state tolerates that
                update_descriptor_usage_state(vnfd, "vnfds", self.db)

            step = "creating nsr at database"
            self._add_nsr_to_db(nsr_descriptor, rollback, session)
            step = "Updating NSD usageState"
            update_descriptor_usage_state(nsd, "nsds", self.db)

            step = "creating nsr temporal folder"
            self.fs.mkdir(nsr_id)

            # second element (operation id) is intentionally None here
            return nsr_id, None
        except (
            ValidationError,
            EngineException,
            DbException,
            MsgException,
            FsException,
        ) as e:
            # re-raise the same exception type, annotated with the failing step
            raise type(e)("{} while '{}'".format(e, step), http_code=e.http_code)
414 |
|
|
415 |
1 |
def _get_nsd_from_db(self, nsd_id, session): |
416 |
1 |
_filter = self._get_project_filter(session) |
417 |
1 |
_filter["_id"] = nsd_id |
418 |
1 |
return self.db.get_one("nsds", _filter) |
419 |
|
|
420 |
1 |
def _get_vnfd_from_db(self, vnfd_id, session): |
421 |
1 |
_filter = self._get_project_filter(session) |
422 |
1 |
_filter["id"] = vnfd_id |
423 |
1 |
vnfd = self.db.get_one("vnfds", _filter, fail_on_empty=True, fail_on_more=True) |
424 |
1 |
return vnfd |
425 |
|
|
426 |
1 |
def _add_nsr_to_db(self, nsr_descriptor, rollback, session): |
427 |
1 |
self.format_on_new( |
428 |
|
nsr_descriptor, session["project_id"], make_public=session["public"] |
429 |
|
) |
430 |
1 |
self.db.create("nsrs", nsr_descriptor) |
431 |
1 |
rollback.append({"topic": "nsrs", "_id": nsr_descriptor["id"]}) |
432 |
|
|
433 |
1 |
def _add_vnfr_to_db(self, vnfr_descriptor, rollback, session): |
434 |
1 |
self.format_on_new( |
435 |
|
vnfr_descriptor, session["project_id"], make_public=session["public"] |
436 |
|
) |
437 |
1 |
self.db.create("vnfrs", vnfr_descriptor) |
438 |
1 |
rollback.append({"topic": "vnfrs", "_id": vnfr_descriptor["id"]}) |
439 |
|
|
440 |
1 |
def _check_nsd_operational_state(self, nsd, ns_request): |
441 |
1 |
if nsd["_admin"]["operationalState"] == "DISABLED": |
442 |
0 |
raise EngineException( |
443 |
|
"nsd with id '{}' is DISABLED, and thus cannot be used to create " |
444 |
|
"a network service".format(ns_request["nsdId"]), |
445 |
|
http_code=HTTPStatus.CONFLICT, |
446 |
|
) |
447 |
|
|
448 |
1 |
def _get_ns_k8s_namespace(self, nsd, ns_request, session): |
449 |
1 |
additional_params, _ = self._format_additional_params( |
450 |
|
ns_request, descriptor=nsd |
451 |
|
) |
452 |
|
# use for k8s-namespace from ns_request or additionalParamsForNs. By default, the project_id |
453 |
1 |
ns_k8s_namespace = session["project_id"][0] if session["project_id"] else None |
454 |
1 |
if ns_request and ns_request.get("k8s-namespace"): |
455 |
0 |
ns_k8s_namespace = ns_request["k8s-namespace"] |
456 |
1 |
if additional_params and additional_params.get("k8s-namespace"): |
457 |
0 |
ns_k8s_namespace = additional_params["k8s-namespace"] |
458 |
|
|
459 |
1 |
return ns_k8s_namespace |
460 |
|
|
461 |
1 |
def _add_flavor_to_nsr(self, vdu, vnfd, nsr_descriptor, member_vnf_index, revision=None): |
462 |
1 |
flavor_data = {} |
463 |
1 |
guest_epa = {} |
464 |
|
# Find this vdu compute and storage descriptors |
465 |
1 |
vdu_virtual_compute = {} |
466 |
1 |
vdu_virtual_storage = {} |
467 |
1 |
for vcd in vnfd.get("virtual-compute-desc", ()): |
468 |
1 |
if vcd.get("id") == vdu.get("virtual-compute-desc"): |
469 |
1 |
vdu_virtual_compute = vcd |
470 |
1 |
for vsd in vnfd.get("virtual-storage-desc", ()): |
471 |
1 |
if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]: |
472 |
1 |
vdu_virtual_storage = vsd |
473 |
|
# Get this vdu vcpus, memory and storage info for flavor_data |
474 |
1 |
if vdu_virtual_compute.get("virtual-cpu", {}).get( |
475 |
|
"num-virtual-cpu" |
476 |
|
): |
477 |
1 |
flavor_data["vcpu-count"] = vdu_virtual_compute["virtual-cpu"][ |
478 |
|
"num-virtual-cpu" |
479 |
|
] |
480 |
1 |
if vdu_virtual_compute.get("virtual-memory", {}).get("size"): |
481 |
1 |
flavor_data["memory-mb"] = ( |
482 |
|
float(vdu_virtual_compute["virtual-memory"]["size"]) |
483 |
|
* 1024.0 |
484 |
|
) |
485 |
1 |
if vdu_virtual_storage.get("size-of-storage"): |
486 |
1 |
flavor_data["storage-gb"] = vdu_virtual_storage[ |
487 |
|
"size-of-storage" |
488 |
|
] |
489 |
|
# Get this vdu EPA info for guest_epa |
490 |
1 |
if vdu_virtual_compute.get("virtual-cpu", {}).get("cpu-quota"): |
491 |
0 |
guest_epa["cpu-quota"] = vdu_virtual_compute["virtual-cpu"][ |
492 |
|
"cpu-quota" |
493 |
|
] |
494 |
1 |
if vdu_virtual_compute.get("virtual-cpu", {}).get("pinning"): |
495 |
0 |
vcpu_pinning = vdu_virtual_compute["virtual-cpu"]["pinning"] |
496 |
0 |
if vcpu_pinning.get("thread-policy"): |
497 |
0 |
guest_epa["cpu-thread-pinning-policy"] = vcpu_pinning[ |
498 |
|
"thread-policy" |
499 |
|
] |
500 |
0 |
if vcpu_pinning.get("policy"): |
501 |
0 |
cpu_policy = ( |
502 |
|
"SHARED" |
503 |
|
if vcpu_pinning["policy"] == "dynamic" |
504 |
|
else "DEDICATED" |
505 |
|
) |
506 |
0 |
guest_epa["cpu-pinning-policy"] = cpu_policy |
507 |
1 |
if vdu_virtual_compute.get("virtual-memory", {}).get("mem-quota"): |
508 |
0 |
guest_epa["mem-quota"] = vdu_virtual_compute["virtual-memory"][ |
509 |
|
"mem-quota" |
510 |
|
] |
511 |
1 |
if vdu_virtual_compute.get("virtual-memory", {}).get( |
512 |
|
"mempage-size" |
513 |
|
): |
514 |
0 |
guest_epa["mempage-size"] = vdu_virtual_compute[ |
515 |
|
"virtual-memory" |
516 |
|
]["mempage-size"] |
517 |
1 |
if vdu_virtual_compute.get("virtual-memory", {}).get( |
518 |
|
"numa-node-policy" |
519 |
|
): |
520 |
0 |
guest_epa["numa-node-policy"] = vdu_virtual_compute[ |
521 |
|
"virtual-memory" |
522 |
|
]["numa-node-policy"] |
523 |
1 |
if vdu_virtual_storage.get("disk-io-quota"): |
524 |
0 |
guest_epa["disk-io-quota"] = vdu_virtual_storage[ |
525 |
|
"disk-io-quota" |
526 |
|
] |
527 |
|
|
528 |
1 |
if guest_epa: |
529 |
0 |
flavor_data["guest-epa"] = guest_epa |
530 |
|
|
531 |
1 |
revision = revision if revision is not None else 1 |
532 |
1 |
flavor_data["name"] = vdu["id"][:56] + "-" + member_vnf_index + "-" + str(revision) + "-flv" |
533 |
1 |
flavor_data["id"] = str(len(nsr_descriptor["flavor"])) |
534 |
1 |
nsr_descriptor["flavor"].append(flavor_data) |
535 |
|
|
536 |
1 |
    def _create_nsr_descriptor_from_nsd(self, nsd, ns_request, nsr_id, session):
        """Build the initial NSR record from the NSD and the instantiation request.

        Fills the bookkeeping/status fields, copies the NSD inline, derives the
        vld list (with vnfd-connection-point-ref data), and collects per-vdu
        flavors, images and affinity groups from each constituent VNFD.
        Note: also mutates ns_request by adding "nsr_id".

        :param nsd: NSD document (with _admin) as read from the database
        :param ns_request: validated instantiation request
        :param nsr_id: pre-generated uuid for the new NSR
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :return: the NSR descriptor dict (not yet stored in the database)
        """
        now = time()
        additional_params, _ = self._format_additional_params(
            ns_request, descriptor=nsd
        )

        nsr_descriptor = {
            "name": ns_request["nsName"],
            "name-ref": ns_request["nsName"],
            "short-name": ns_request["nsName"],
            "admin-status": "ENABLED",
            "nsState": "NOT_INSTANTIATED",
            "currentOperation": "IDLE",
            "currentOperationID": None,
            "errorDescription": None,
            "errorDetail": None,
            "deploymentStatus": None,
            "configurationStatus": None,
            "vcaStatus": None,
            # inline shallow copy of the NSD
            "nsd": {k: v for k, v in nsd.items()},
            "datacenter": ns_request["vimAccountId"],
            "resource-orchestrator": "osmopenmano",
            "description": ns_request.get("nsDescription", ""),
            "constituent-vnfr-ref": [],
            "operational-status": "init",  # typedef ns-operational-
            "config-status": "init",  # typedef config-states
            "detailed-status": "scheduled",
            "orchestration-progress": {},
            "create-time": now,
            "nsd-name-ref": nsd["name"],
            "operational-events": [],  # "id", "timestamp", "description", "event",
            "nsd-ref": nsd["id"],
            "nsd-id": nsd["_id"],
            "vnfd-id": [],
            # request without the additional-params sections
            "instantiate_params": self._format_ns_request(ns_request),
            "additionalParamsForNs": additional_params,
            "ns-instance-config-ref": nsr_id,
            "id": nsr_id,
            "_id": nsr_id,
            "ssh-authorized-key": ns_request.get("ssh_keys"),  # TODO remove
            "flavor": [],
            "image": [],
            "affinity-or-anti-affinity-group": [],
        }
        if "revision" in nsd["_admin"]:
            nsr_descriptor["revision"] = nsd["_admin"]["revision"]

        ns_request["nsr_id"] = nsr_id
        if ns_request and ns_request.get("config-units"):
            nsr_descriptor["config-units"] = ns_request["config-units"]
        # Create vld
        if nsd.get("virtual-link-desc"):
            nsr_vld = deepcopy(nsd.get("virtual-link-desc", []))
            # Fill each vld with vnfd-connection-point-ref data
            # TODO: Change for multiple df support
            all_vld_connection_point_data = {vld.get("id"): [] for vld in nsr_vld}
            vnf_profiles = nsd.get("df", [[]])[0].get("vnf-profile", ())
            for vnf_profile in vnf_profiles:
                # map each constituent cpd onto its virtual-link-profile bucket
                for vlc in vnf_profile.get("virtual-link-connectivity", ()):
                    for cpd in vlc.get("constituent-cpd-id", ()):
                        all_vld_connection_point_data[
                            vlc.get("virtual-link-profile-id")
                        ].append(
                            {
                                "member-vnf-index-ref": cpd.get(
                                    "constituent-base-element-id"
                                ),
                                "vnfd-connection-point-ref": cpd.get(
                                    "constituent-cpd-id"
                                ),
                                "vnfd-id-ref": vnf_profile.get("vnfd-id"),
                            }
                        )

                vnfd = self._get_vnfd_from_db(vnf_profile.get("vnfd-id"), session)
                vnfd.pop("_admin")

                # per-vdu flavor and image collection
                for vdu in vnfd.get("vdu", ()):
                    member_vnf_index = vnf_profile.get("id")
                    self._add_flavor_to_nsr(vdu, vnfd, nsr_descriptor, member_vnf_index)
                    sw_image_id = vdu.get("sw-image-desc")
                    if sw_image_id:
                        image_data = self._get_image_data_from_vnfd(vnfd, sw_image_id)
                        self._add_image_to_nsr(nsr_descriptor, image_data)

                    # also add alternative images to the list of images
                    for alt_image in vdu.get("alternative-sw-image-desc", ()):
                        image_data = self._get_image_data_from_vnfd(vnfd, alt_image)
                        self._add_image_to_nsr(nsr_descriptor, image_data)

                # Add Affinity or Anti-affinity group information to NSR
                vdu_profiles = vnfd.get("df", [[]])[0].get("vdu-profile", ())
                affinity_group_prefix_name = "{}-{}".format(
                    nsr_descriptor["name"][:16], vnf_profile.get("id")[:16]
                )

                for vdu_profile in vdu_profiles:
                    affinity_group_data = {}
                    for affinity_group in vdu_profile.get(
                        "affinity-or-anti-affinity-group", ()
                    ):
                        affinity_group_data = (
                            self._get_affinity_or_anti_affinity_group_data_from_vnfd(
                                vnfd, affinity_group["id"]
                            )
                        )
                        affinity_group_data["member-vnf-index"] = vnf_profile.get("id")
                        self._add_affinity_or_anti_affinity_group_to_nsr(
                            nsr_descriptor,
                            affinity_group_data,
                            affinity_group_prefix_name,
                        )

            # attach the gathered connection-point data to each vld
            for vld in nsr_vld:
                vld["vnfd-connection-point-ref"] = all_vld_connection_point_data.get(
                    vld.get("id"), []
                )
                vld["name"] = vld["id"]
            nsr_descriptor["vld"] = nsr_vld

        return nsr_descriptor
657 |
|
|
658 |
1 |
def _get_affinity_or_anti_affinity_group_data_from_vnfd( |
659 |
|
self, vnfd, affinity_group_id |
660 |
|
): |
661 |
|
""" |
662 |
|
Gets affinity-or-anti-affinity-group info from df and returns the desired affinity group |
663 |
|
""" |
664 |
0 |
affinity_group = utils.find_in_list( |
665 |
|
vnfd.get("df", [[]])[0].get("affinity-or-anti-affinity-group", ()), |
666 |
|
lambda ag: ag["id"] == affinity_group_id, |
667 |
|
) |
668 |
0 |
affinity_group_data = {} |
669 |
0 |
if affinity_group: |
670 |
0 |
if affinity_group.get("id"): |
671 |
0 |
affinity_group_data["ag-id"] = affinity_group["id"] |
672 |
0 |
if affinity_group.get("type"): |
673 |
0 |
affinity_group_data["type"] = affinity_group["type"] |
674 |
0 |
if affinity_group.get("scope"): |
675 |
0 |
affinity_group_data["scope"] = affinity_group["scope"] |
676 |
0 |
return affinity_group_data |
677 |
|
|
678 |
1 |
def _add_affinity_or_anti_affinity_group_to_nsr( |
679 |
|
self, nsr_descriptor, affinity_group_data, affinity_group_prefix_name |
680 |
|
): |
681 |
|
""" |
682 |
|
Adds affinity-or-anti-affinity-group to nsr checking first it is not already added |
683 |
|
""" |
684 |
0 |
affinity_group = next( |
685 |
|
( |
686 |
|
f |
687 |
|
for f in nsr_descriptor["affinity-or-anti-affinity-group"] |
688 |
|
if all(f.get(k) == affinity_group_data[k] for k in affinity_group_data) |
689 |
|
), |
690 |
|
None, |
691 |
|
) |
692 |
0 |
if not affinity_group: |
693 |
0 |
affinity_group_data["id"] = str( |
694 |
|
len(nsr_descriptor["affinity-or-anti-affinity-group"]) |
695 |
|
) |
696 |
0 |
affinity_group_data["name"] = "{}-{}".format( |
697 |
|
affinity_group_prefix_name, affinity_group_data["ag-id"][:32] |
698 |
|
) |
699 |
0 |
nsr_descriptor["affinity-or-anti-affinity-group"].append( |
700 |
|
affinity_group_data |
701 |
|
) |
702 |
|
|
703 |
1 |
def _get_image_data_from_vnfd(self, vnfd, sw_image_id): |
704 |
1 |
sw_image_desc = utils.find_in_list( |
705 |
|
vnfd.get("sw-image-desc", ()), lambda sw: sw["id"] == sw_image_id |
706 |
|
) |
707 |
1 |
image_data = {} |
708 |
1 |
if sw_image_desc.get("image"): |
709 |
1 |
image_data["image"] = sw_image_desc["image"] |
710 |
1 |
if sw_image_desc.get("checksum"): |
711 |
0 |
image_data["image_checksum"] = sw_image_desc["checksum"]["hash"] |
712 |
1 |
if sw_image_desc.get("vim-type"): |
713 |
1 |
image_data["vim-type"] = sw_image_desc["vim-type"] |
714 |
1 |
return image_data |
715 |
|
|
716 |
1 |
def _add_image_to_nsr(self, nsr_descriptor, image_data): |
717 |
|
""" |
718 |
|
Adds image to nsr checking first it is not already added |
719 |
|
""" |
720 |
1 |
img = next( |
721 |
|
( |
722 |
|
f |
723 |
|
for f in nsr_descriptor["image"] |
724 |
|
if all(f.get(k) == image_data[k] for k in image_data) |
725 |
|
), |
726 |
|
None, |
727 |
|
) |
728 |
1 |
if not img: |
729 |
1 |
image_data["id"] = str(len(nsr_descriptor["image"])) |
730 |
1 |
nsr_descriptor["image"].append(image_data) |
731 |
|
|
732 |
1 |
    def _create_vnfr_descriptor_from_vnfd(
        self,
        nsd,
        vnfd,
        vnfd_id,
        vnf_index,
        nsr_descriptor,
        ns_request,
        ns_k8s_namespace,
        revision=None,
    ):
        """Build a vnfr (VNF record) for one member index from its VNFD.

        :param nsd: NS descriptor, used to map external connection points to
            ns vld ids via df/vnf-profile/virtual-link-connectivity
        :param vnfd: VNF descriptor (possibly a specific revision) to expand
        :param vnfd_id: value stored at "vnfd-ref"
        :param vnf_index: member-vnf-index of this VNF inside the NS
        :param nsr_descriptor: ns record already holding "image", "flavor" and
            "affinity-or-anti-affinity-group" lists; it is also updated here
            ("pci-interfaces" flags, "vim-affinity-group-id")
        :param ns_request: instantiation request; source of additional params
        :param ns_k8s_namespace: default k8s namespace for this VNF's kdus
        :param revision: VNFD revision used to compose flavor names; defaults to 1
        :return: the new vnfr descriptor (not stored in the database here)
        """
        vnfr_id = str(uuid4())
        nsr_id = nsr_descriptor["id"]
        now = time()
        # vnf_params are the per-member-vnf-index instantiation parameters
        additional_params, vnf_params = self._format_additional_params(
            ns_request, vnf_index, descriptor=vnfd
        )

        vnfr_descriptor = {
            "id": vnfr_id,
            "_id": vnfr_id,
            "nsr-id-ref": nsr_id,
            "member-vnf-index-ref": vnf_index,
            "additionalParamsForVnf": additional_params,
            "created-time": now,
            # "vnfd": vnfd,  # at OSM model, but removed to avoid data duplication. TODO: revise
            "vnfd-ref": vnfd_id,
            "vnfd-id": vnfd["_id"],  # not at OSM model, but useful
            "vim-account-id": None,
            "vca-id": None,
            "vdur": [],
            "connection-point": [],
            "ip-address": None,  # mgmt-interface filled by LCM
        }

        # Revision backwards compatibility. Only specify the revision in the record if
        # the original VNFD has a revision.
        if "revision" in vnfd:
            vnfr_descriptor["revision"] = vnfd["revision"]

        # instantiation params may override the NS-level k8s namespace per VNF
        vnf_k8s_namespace = ns_k8s_namespace
        if vnf_params:
            if vnf_params.get("k8s-namespace"):
                vnf_k8s_namespace = vnf_params["k8s-namespace"]
            if vnf_params.get("config-units"):
                vnfr_descriptor["config-units"] = vnf_params["config-units"]

        # Create vld: shallow per-entry copy of each int-virtual-link-desc
        if vnfd.get("int-virtual-link-desc"):
            vnfr_descriptor["vld"] = []
            for vnfd_vld in vnfd.get("int-virtual-link-desc"):
                vnfr_descriptor["vld"].append({key: vnfd_vld[key] for key in vnfd_vld})

        # one connection-point record per VNFD ext-cpd
        for cp in vnfd.get("ext-cpd", ()):
            vnf_cp = {
                "name": cp.get("id"),
                "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                "id": cp.get("id"),
                # "ip-address", "mac-address" # filled by LCM
                # vim-id # TODO it would be nice having a vim port id
            }
            vnfr_descriptor["connection-point"].append(vnf_cp)

        # Create k8s-cluster information
        # TODO: Validate if a k8s-cluster net can have more than one ext-cpd ?
        if vnfd.get("k8s-cluster"):
            vnfr_descriptor["k8s-cluster"] = vnfd["k8s-cluster"]
            # map k8s-cluster-net name -> ext-cpd id, then annotate each net
            all_k8s_cluster_nets_cpds = {}
            for cpd in get_iterable(vnfd.get("ext-cpd")):
                if cpd.get("k8s-cluster-net"):
                    all_k8s_cluster_nets_cpds[cpd.get("k8s-cluster-net")] = cpd.get(
                        "id"
                    )
            for net in get_iterable(vnfr_descriptor["k8s-cluster"].get("nets")):
                if net.get("id") in all_k8s_cluster_nets_cpds:
                    net["external-connection-point-ref"] = all_k8s_cluster_nets_cpds[
                        net.get("id")
                    ]

        # update kdus: one kdur per VNFD kdu, with per-kdu instantiation params
        for kdu in get_iterable(vnfd.get("kdu")):
            additional_params, kdu_params = self._format_additional_params(
                ns_request, vnf_index, kdu_name=kdu["name"], descriptor=vnfd
            )
            kdu_k8s_namespace = vnf_k8s_namespace
            kdu_model = kdu_params.get("kdu_model") if kdu_params else None
            if kdu_params and kdu_params.get("k8s-namespace"):
                kdu_k8s_namespace = kdu_params["k8s-namespace"]

            kdu_deployment_name = ""
            if kdu_params and kdu_params.get("kdu-deployment-name"):
                kdu_deployment_name = kdu_params.get("kdu-deployment-name")

            kdur = {
                "additionalParams": additional_params,
                "k8s-namespace": kdu_k8s_namespace,
                "kdu-deployment-name": kdu_deployment_name,
                "kdu-name": kdu["name"],
                # TODO "name": "" Name of the VDU in the VIM
                "ip-address": None,  # mgmt-interface filled by LCM
                "k8s-cluster": {},
            }
            if kdu_params and kdu_params.get("config-units"):
                kdur["config-units"] = kdu_params["config-units"]
            if kdu.get("helm-version"):
                kdur["helm-version"] = kdu["helm-version"]
            # kdu_model from the request overrides the descriptor's chart/bundle
            for k8s_type in ("helm-chart", "juju-bundle"):
                if kdu.get(k8s_type):
                    kdur[k8s_type] = kdu_model or kdu[k8s_type]
            if not vnfr_descriptor.get("kdur"):
                vnfr_descriptor["kdur"] = []
            vnfr_descriptor["kdur"].append(kdur)

        vnfd_mgmt_cp = vnfd.get("mgmt-cp")

        for vdu in vnfd.get("vdu", ()):
            vdu_mgmt_cp = []
            # day1-2 config of this vdu; broad except: any missing df/key means "no config"
            try:
                configs = vnfd.get("df")[0]["lcm-operations-configuration"][
                    "operate-vnf-op-config"
                ]["day1-2"]
                vdu_config = utils.find_in_list(
                    configs, lambda config: config["id"] == vdu["id"]
                )
            except Exception:
                vdu_config = None

            # number-of-instances comes from the first instantiation-level of the first df
            try:
                vdu_instantiation_level = utils.find_in_list(
                    vnfd.get("df")[0]["instantiation-level"][0]["vdu-level"],
                    lambda a_vdu_profile: a_vdu_profile["vdu-id"] == vdu["id"],
                )
            except Exception:
                vdu_instantiation_level = None

            # collect the external cps referenced by execution environments: used
            # later to mark "mgmt-interface" on matching interfaces
            if vdu_config:
                external_connection_ee = utils.filter_in_list(
                    vdu_config.get("execution-environment-list", []),
                    lambda ee: "external-connection-point-ref" in ee,
                )
                for ee in external_connection_ee:
                    vdu_mgmt_cp.append(ee["external-connection-point-ref"])

            additional_params, vdu_params = self._format_additional_params(
                ns_request, vnf_index, vdu_id=vdu["id"], descriptor=vnfd
            )

            # storage descriptors used by this vdu; empty on any lookup failure
            try:
                vdu_virtual_storage_descriptors = utils.filter_in_list(
                    vnfd.get("virtual-storage-desc", []),
                    lambda stg_desc: stg_desc["id"] in vdu["virtual-storage-desc"]
                )
            except Exception:
                vdu_virtual_storage_descriptors = []
            vdur = {
                "vdu-id-ref": vdu["id"],
                # TODO "name": "" Name of the VDU in the VIM
                "ip-address": None,  # mgmt-interface filled by LCM
                # "vim-id", "flavor-id", "image-id", "management-ip" # filled by LCM
                "internal-connection-point": [],
                "interfaces": [],
                "additionalParams": additional_params,
                "vdu-name": vdu["name"],
                "virtual-storages": vdu_virtual_storage_descriptors
            }
            if vdu_params and vdu_params.get("config-units"):
                vdur["config-units"] = vdu_params["config-units"]
            if deep_get(vdu, ("supplemental-boot-data", "boot-data-drive")):
                vdur["boot-data-drive"] = vdu["supplemental-boot-data"][
                    "boot-data-drive"
                ]
            if vdu.get("pdu-type"):
                vdur["pdu-type"] = vdu["pdu-type"]
                vdur["name"] = vdu["pdu-type"]
            # TODO volumes: name, volume-id
            for icp in vdu.get("int-cpd", ()):
                vdu_icp = {
                    "id": icp["id"],
                    "connection-point-id": icp["id"],
                    "name": icp.get("id"),
                }

                vdur["internal-connection-point"].append(vdu_icp)

                for iface in icp.get("virtual-network-interface-requirement", ()):
                    # Name, mac-address and interface position is taken from VNFD
                    # and included into VNFR. By this way RO can process this information
                    # while creating the VDU.
                    iface_fields = ("name", "mac-address", "position", "ip-address")
                    vdu_iface = {
                        x: iface[x] for x in iface_fields if iface.get(x) is not None
                    }

                    vdu_iface["internal-connection-point-ref"] = vdu_icp["id"]
                    # int-cpd port security settings; may be overridden by ext-cpd below
                    if "port-security-enabled" in icp:
                        vdu_iface["port-security-enabled"] = icp[
                            "port-security-enabled"
                        ]

                    if "port-security-disable-strategy" in icp:
                        vdu_iface["port-security-disable-strategy"] = icp[
                            "port-security-disable-strategy"
                        ]

                    # find the ext-cpd (if any) exposing this vdu int-cpd
                    for ext_cp in vnfd.get("ext-cpd", ()):
                        if not ext_cp.get("int-cpd"):
                            continue
                        if ext_cp["int-cpd"].get("vdu-id") != vdu["id"]:
                            continue
                        if icp["id"] == ext_cp["int-cpd"].get("cpd"):
                            vdu_iface["external-connection-point-ref"] = ext_cp.get(
                                "id"
                            )

                            if "port-security-enabled" in ext_cp:
                                vdu_iface["port-security-enabled"] = ext_cp[
                                    "port-security-enabled"
                                ]

                            if "port-security-disable-strategy" in ext_cp:
                                vdu_iface["port-security-disable-strategy"] = ext_cp[
                                    "port-security-disable-strategy"
                                ]

                            break

                    # interface attached to the VNFD mgmt-cp is the VNF mgmt interface
                    if (
                        vnfd_mgmt_cp
                        and vdu_iface.get("external-connection-point-ref")
                        == vnfd_mgmt_cp
                    ):
                        vdu_iface["mgmt-vnf"] = True
                        vdu_iface["mgmt-interface"] = True

                    for ecp in vdu_mgmt_cp:
                        if vdu_iface.get("external-connection-point-ref") == ecp:
                            vdu_iface["mgmt-interface"] = True

                    if iface.get("virtual-interface"):
                        vdu_iface.update(deepcopy(iface["virtual-interface"]))

                    # look for network where this interface is connected
                    iface_ext_cp = vdu_iface.get("external-connection-point-ref")
                    if iface_ext_cp:
                        # TODO: Change for multiple df support
                        for df in get_iterable(nsd.get("df")):
                            for vnf_profile in get_iterable(df.get("vnf-profile")):
                                for vlc_index, vlc in enumerate(
                                    get_iterable(
                                        vnf_profile.get("virtual-link-connectivity")
                                    )
                                ):
                                    for cpd in get_iterable(
                                        vlc.get("constituent-cpd-id")
                                    ):
                                        if (
                                            cpd.get("constituent-cpd-id")
                                            == iface_ext_cp
                                        ):
                                            vdu_iface["ns-vld-id"] = vlc.get(
                                                "virtual-link-profile-id"
                                            )
                                            # if iface type is SRIOV or PASSTHROUGH, set pci-interfaces flag to True
                                            if vdu_iface.get("type") in (
                                                "SR-IOV",
                                                "PCI-PASSTHROUGH",
                                            ):
                                                nsr_descriptor["vld"][vlc_index][
                                                    "pci-interfaces"
                                                ] = True
                                            break
                    elif vdu_iface.get("internal-connection-point-ref"):
                        vdu_iface["vnf-vld-id"] = icp.get("int-virtual-link-desc")
                        # TODO: store fixed IP address in the record (if it exists in the ICP)
                        # if iface type is SRIOV or PASSTHROUGH, set pci-interfaces flag to True
                        if vdu_iface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
                            ivld_index = utils.find_index_in_list(
                                vnfd.get("int-virtual-link-desc", ()),
                                lambda ivld: ivld["id"]
                                == icp.get("int-virtual-link-desc"),
                            )
                            vnfr_descriptor["vld"][ivld_index]["pci-interfaces"] = True

                    vdur["interfaces"].append(vdu_iface)

            # link the vdu to the image entry already registered in the nsr
            if vdu.get("sw-image-desc"):
                sw_image = utils.find_in_list(
                    vnfd.get("sw-image-desc", ()),
                    lambda image: image["id"] == vdu.get("sw-image-desc"),
                )
                nsr_sw_image_data = utils.find_in_list(
                    nsr_descriptor["image"],
                    lambda nsr_image: (nsr_image.get("image") == sw_image.get("image")),
                )
                vdur["ns-image-id"] = nsr_sw_image_data["id"]

            # same for alternative images, collected as a list of nsr image ids
            if vdu.get("alternative-sw-image-desc"):
                alt_image_ids = []
                for alt_image_id in vdu.get("alternative-sw-image-desc", ()):
                    sw_image = utils.find_in_list(
                        vnfd.get("sw-image-desc", ()),
                        lambda image: image["id"] == alt_image_id,
                    )
                    nsr_sw_image_data = utils.find_in_list(
                        nsr_descriptor["image"],
                        lambda nsr_image: (
                            nsr_image.get("image") == sw_image.get("image")
                        ),
                    )
                    alt_image_ids.append(nsr_sw_image_data["id"])
                vdur["alt-image-ids"] = alt_image_ids

            # flavor names encode vdu id, member index and vnfd revision
            revision = revision if revision is not None else 1
            flavor_data_name = vdu["id"][:56] + "-" + vnf_index + "-" + str(revision) + "-flv"
            nsr_flavor_desc = utils.find_in_list(
                nsr_descriptor["flavor"],
                lambda flavor: flavor["name"] == flavor_data_name,
            )

            if nsr_flavor_desc:
                vdur["ns-flavor-id"] = nsr_flavor_desc["id"]

            # Adding Affinity groups information to vdur
            try:
                vdu_profile_affinity_group = utils.find_in_list(
                    vnfd.get("df")[0]["vdu-profile"],
                    lambda a_vdu: a_vdu["id"] == vdu["id"],
                )
            except Exception:
                vdu_profile_affinity_group = None

            if vdu_profile_affinity_group:
                affinity_group_ids = []
                for affinity_group in vdu_profile_affinity_group.get(
                    "affinity-or-anti-affinity-group", ()
                ):
                    vdu_affinity_group = utils.find_in_list(
                        vdu_profile_affinity_group.get(
                            "affinity-or-anti-affinity-group", ()
                        ),
                        lambda ag_fp: ag_fp["id"] == affinity_group["id"],
                    )
                    # match by ag-id AND member-vnf-index of this vnfr
                    nsr_affinity_group = utils.find_in_list(
                        nsr_descriptor["affinity-or-anti-affinity-group"],
                        lambda nsr_ag: (
                            nsr_ag.get("ag-id") == vdu_affinity_group.get("id")
                            and nsr_ag.get("member-vnf-index")
                            == vnfr_descriptor.get("member-vnf-index-ref")
                        ),
                    )
                    # Update Affinity Group VIM name if VDU instantiation parameter is present
                    if vnf_params and vnf_params.get("affinity-or-anti-affinity-group"):
                        vnf_params_affinity_group = utils.find_in_list(
                            vnf_params["affinity-or-anti-affinity-group"],
                            lambda vnfp_ag: (
                                vnfp_ag.get("id") == vdu_affinity_group.get("id")
                            ),
                        )
                        if vnf_params_affinity_group.get("vim-affinity-group-id"):
                            nsr_affinity_group[
                                "vim-affinity-group-id"
                            ] = vnf_params_affinity_group["vim-affinity-group-id"]
                    affinity_group_ids.append(nsr_affinity_group["id"])
                vdur["affinity-or-anti-affinity-group-id"] = affinity_group_ids

            if vdu_instantiation_level:
                count = vdu_instantiation_level.get("number-of-instances")
            else:
                count = 1

            # replicate the vdur "count" times; each copy gets a fresh _id and,
            # for replicas beyond the first, incremented ip/mac addresses
            for index in range(0, count):
                vdur = deepcopy(vdur)
                for iface in vdur["interfaces"]:
                    if iface.get("ip-address") and index != 0:
                        iface["ip-address"] = increment_ip_mac(iface["ip-address"])
                    if iface.get("mac-address") and index != 0:
                        iface["mac-address"] = increment_ip_mac(iface["mac-address"])

                vdur["_id"] = str(uuid4())
                vdur["id"] = vdur["_id"]
                vdur["count-index"] = index
                vnfr_descriptor["vdur"].append(vdur)

        return vnfr_descriptor
1118 |
|
|
1119 |
1 |
def vca_status_refresh(self, session, ns_instance_content, filter_q): |
1120 |
|
""" |
1121 |
|
vcaStatus in ns_instance_content maybe stale, check if it is stale and create lcm op |
1122 |
|
to refresh vca status by sending message to LCM when it is stale. Ignore otherwise. |
1123 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
1124 |
|
:param ns_instance_content: ns instance content |
1125 |
|
:param filter_q: dict: query parameter containing vcaStatus-refresh as true or false |
1126 |
|
:return: None |
1127 |
|
""" |
1128 |
1 |
time_now, time_delta = time(), time() - ns_instance_content["_admin"]["modified"] |
1129 |
1 |
force_refresh = isinstance(filter_q, dict) and filter_q.get('vcaStatusRefresh') == 'true' |
1130 |
1 |
threshold_reached = time_delta > 120 |
1131 |
1 |
if force_refresh or threshold_reached: |
1132 |
1 |
operation, _id = "vca_status_refresh", ns_instance_content["_id"] |
1133 |
1 |
ns_instance_content["_admin"]["modified"] = time_now |
1134 |
1 |
self.db.set_one(self.topic, {"_id": _id}, ns_instance_content) |
1135 |
1 |
nslcmop_desc = NsLcmOpTopic._create_nslcmop(_id, operation, None) |
1136 |
1 |
self.format_on_new(nslcmop_desc, session["project_id"], make_public=session["public"]) |
1137 |
1 |
nslcmop_desc["_admin"].pop("nsState") |
1138 |
1 |
self.msg.write("ns", operation, nslcmop_desc) |
1139 |
1 |
return |
1140 |
|
|
1141 |
1 |
def show(self, session, _id, filter_q=None, api_req=False): |
1142 |
|
""" |
1143 |
|
Get complete information on an ns instance. |
1144 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
1145 |
|
:param _id: string, ns instance id |
1146 |
|
:param filter_q: dict: query parameter containing vcaStatusRefresh as true or false |
1147 |
|
:param api_req: True if this call is serving an external API request. False if serving internal request. |
1148 |
|
:return: dictionary, raise exception if not found. |
1149 |
|
""" |
1150 |
1 |
ns_instance_content = super().show(session, _id, api_req) |
1151 |
1 |
self.vca_status_refresh(session, ns_instance_content, filter_q) |
1152 |
1 |
return ns_instance_content |
1153 |
|
|
1154 |
1 |
def edit(self, session, _id, indata=None, kwargs=None, content=None): |
1155 |
0 |
raise EngineException( |
1156 |
|
"Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR |
1157 |
|
) |
1158 |
|
|
1159 |
|
|
1160 |
1 |
class VnfrTopic(BaseTopic):
    """Topic for VNF records.

    vnfrs are created and deleted by NsrTopic as part of the NS lifecycle, so
    every direct mutating entry point is rejected with an internal error.
    """

    topic = "vnfrs"
    topic_msg = None  # vnfr changes are not published on the message bus

    def __init__(self, db, fs, msg, auth):
        super().__init__(db, fs, msg, auth)

    def delete(self, session, _id, dry_run=False, not_send_msg=None):
        # lifecycle is driven by NsrTopic; refuse direct deletion
        raise EngineException(
            "Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )

    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        # refuse direct modification for the same reason as delete()
        raise EngineException(
            "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )

    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        # Not used because vnfrs are created and deleted by NsrTopic class directly
        raise EngineException(
            "Method new called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )
1182 |
|
|
1183 |
|
|
1184 |
1 |
class NsLcmOpTopic(BaseTopic): |
1185 |
1 |
topic = "nslcmops" |
1186 |
1 |
topic_msg = "ns" |
1187 |
1 |
operation_schema = { # mapping between operation and jsonschema to validate |
1188 |
|
"instantiate": ns_instantiate, |
1189 |
|
"action": ns_action, |
1190 |
|
"update": ns_update, |
1191 |
|
"scale": ns_scale, |
1192 |
|
"heal": ns_heal, |
1193 |
|
"terminate": ns_terminate, |
1194 |
|
"migrate": ns_migrate, |
1195 |
|
"verticalscale": ns_verticalscale, |
1196 |
|
} |
1197 |
|
|
1198 |
1 |
def __init__(self, db, fs, msg, auth): |
1199 |
1 |
BaseTopic.__init__(self, db, fs, msg, auth) |
1200 |
1 |
self.nsrtopic = NsrTopic(db, fs, msg, auth) |
1201 |
|
|
1202 |
1 |
def _check_ns_operation(self, session, nsr, operation, indata): |
1203 |
|
""" |
1204 |
|
Check that user has enter right parameters for the operation |
1205 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
1206 |
|
:param operation: it can be: instantiate, terminate, action, update, heal |
1207 |
|
:param indata: descriptor with the parameters of the operation |
1208 |
|
:return: None |
1209 |
|
""" |
1210 |
1 |
if operation == "action": |
1211 |
1 |
self._check_action_ns_operation(indata, nsr) |
1212 |
1 |
elif operation == "scale": |
1213 |
0 |
self._check_scale_ns_operation(indata, nsr) |
1214 |
1 |
elif operation == "update": |
1215 |
1 |
self._check_update_ns_operation(indata, nsr) |
1216 |
1 |
elif operation == "heal": |
1217 |
0 |
self._check_heal_ns_operation(indata, nsr) |
1218 |
1 |
elif operation == "instantiate": |
1219 |
1 |
self._check_instantiate_ns_operation(indata, nsr, session) |
1220 |
|
|
1221 |
1 |
    def _check_action_ns_operation(self, indata, nsr):
        """Validate a ns-action request against the vnfd/nsd config primitives.

        Locates the config-primitive list that applies (vdu, kdu, vnf or ns
        level) and then checks that the requested primitive exists and that
        its parameters are complete and not in excess.

        :param indata: action request: "primitive", "primitive_params" and
            optionally "member_vnf_index" (or legacy "vnf_member_index"),
            "vdu_id", "kdu_name"
        :param nsr: ns record; provides the nsd and the ns _id
        :raises EngineException: if the primitive or its parameters are invalid
        """
        nsd = nsr["nsd"]
        # check vnf_member_index
        if indata.get("vnf_member_index"):
            indata["member_vnf_index"] = indata.pop(
                "vnf_member_index"
            )  # for backward compatibility
        if indata.get("member_vnf_index"):
            vnfd = self._get_vnfd_from_vnf_member_index(
                indata["member_vnf_index"], nsr["_id"]
            )
            # day1-2 configuration of the first df; any missing key means "none"
            try:
                configs = vnfd.get("df")[0]["lcm-operations-configuration"][
                    "operate-vnf-op-config"
                ]["day1-2"]
            except Exception:
                configs = []

            # select the day1-2 entry at vdu, kdu or vnf level
            if indata.get("vdu_id"):
                self._check_valid_vdu(vnfd, indata["vdu_id"])
                descriptor_configuration = utils.find_in_list(
                    configs, lambda config: config["id"] == indata["vdu_id"]
                )
            elif indata.get("kdu_name"):
                self._check_valid_kdu(vnfd, indata["kdu_name"])
                descriptor_configuration = utils.find_in_list(
                    configs, lambda config: config["id"] == indata.get("kdu_name")
                )
            else:
                descriptor_configuration = utils.find_in_list(
                    configs, lambda config: config["id"] == vnfd["id"]
                )
            if descriptor_configuration is not None:
                descriptor_configuration = descriptor_configuration.get(
                    "config-primitive"
                )
        else:  # use a NSD
            descriptor_configuration = nsd.get("ns-configuration", {}).get(
                "config-primitive"
            )

        # For k8s allows default primitives without validating the parameters
        if indata.get("kdu_name") and indata["primitive"] in (
            "upgrade",
            "rollback",
            "status",
            "inspect",
            "readme",
        ):
            # TODO should be checked that rollback only can contain revision_number????
            if not indata.get("member_vnf_index"):
                raise EngineException(
                    "Missing action parameter 'member_vnf_index' for default KDU primitive '{}'".format(
                        indata["primitive"]
                    )
                )
            return
        # if not, check primitive
        for config_primitive in get_iterable(descriptor_configuration):
            if indata["primitive"] == config_primitive["name"]:
                # check needed primitive_params are provided
                if indata.get("primitive_params"):
                    in_primitive_params_copy = copy(indata["primitive_params"])
                else:
                    in_primitive_params_copy = {}
                # remove each declared parameter; those left without a
                # default-value are missing from the request
                for paramd in get_iterable(config_primitive.get("parameter")):
                    if paramd["name"] in in_primitive_params_copy:
                        del in_primitive_params_copy[paramd["name"]]
                    elif not paramd.get("default-value"):
                        raise EngineException(
                            "Needed parameter {} not provided for primitive '{}'".format(
                                paramd["name"], indata["primitive"]
                            )
                        )
                # check no extra primitive params are provided
                if in_primitive_params_copy:
                    raise EngineException(
                        "parameter/s '{}' not present at vnfd /nsd for primitive '{}'".format(
                            list(in_primitive_params_copy.keys()), indata["primitive"]
                        )
                    )
                break
        else:
            # loop finished without finding the requested primitive
            raise EngineException(
                "Invalid primitive '{}' is not present at vnfd/nsd".format(
                    indata["primitive"]
                )
            )
1309 |
|
|
1310 |
1 |
    def _check_update_ns_operation(self, indata, nsr) -> None:
        """Validates the ns-update request according to updateType

        If updateType is CHANGE_VNFPKG:
            - it checks the vnfInstanceId, whether it's available under ns instance
            - it checks the vnfdId whether it matches with the vnfd-id in the vnf-record of specified VNF.
              Otherwise exception will be raised.
        If updateType is REMOVE_VNF:
            - it checks if the vnfInstanceId is available in the ns instance
            - Otherwise exception will be raised.

        Args:
            indata: includes updateType such as CHANGE_VNFPKG,
            nsr: network service record

        Raises:
            EngineException:
                a meaningful error if given update parameters are not proper such as
                "Error in validating ns-update request: <ID> does not match
                with the vnfd-id of vnfinstance
                http_code=HTTPStatus.UNPROCESSABLE_ENTITY"

        """
        try:
            if indata["updateType"] == "CHANGE_VNFPKG":
                # vnfInstanceId, nsInstanceId, vnfdId are mandatory
                vnf_instance_id = indata["changeVnfPackageData"]["vnfInstanceId"]
                ns_instance_id = indata["nsInstanceId"]
                vnfd_id_2update = indata["changeVnfPackageData"]["vnfdId"]

                # the target VNF must be a constituent of this NS
                if vnf_instance_id not in nsr["constituent-vnfr-ref"]:

                    raise EngineException(
                        f"Error in validating ns-update request: vnf {vnf_instance_id} does not "
                        f"belong to NS {ns_instance_id}",
                        http_code=HTTPStatus.UNPROCESSABLE_ENTITY,
                    )

                # Getting vnfrs through the ns_instance_id
                vnfrs = self.db.get_list("vnfrs", {"nsr-id-ref": ns_instance_id})
                constituent_vnfd_id = next(
                    (
                        vnfr["vnfd-id"]
                        for vnfr in vnfrs
                        if vnfr["id"] == vnf_instance_id
                    ),
                    None,
                )

                # Check the given vnfd-id belongs to given vnf instance
                if constituent_vnfd_id and (vnfd_id_2update != constituent_vnfd_id):

                    raise EngineException(
                        f"Error in validating ns-update request: vnfd-id {vnfd_id_2update} does not "
                        f"match with the vnfd-id: {constituent_vnfd_id} of VNF instance: {vnf_instance_id}",
                        http_code=HTTPStatus.UNPROCESSABLE_ENTITY,
                    )

                # Validating the ns update timeout (minimum 300 seconds)
                if (
                    indata.get("timeout_ns_update")
                    and indata["timeout_ns_update"] < 300
                ):
                    raise EngineException(
                        "Error in validating ns-update request: {} second is not enough "
                        "to upgrade the VNF instance: {}".format(
                            indata["timeout_ns_update"], vnf_instance_id
                        ),
                        http_code=HTTPStatus.UNPROCESSABLE_ENTITY,
                    )
            elif indata["updateType"] == "REMOVE_VNF":
                vnf_instance_id = indata["removeVnfInstanceId"]
                ns_instance_id = indata["nsInstanceId"]
                if vnf_instance_id not in nsr["constituent-vnfr-ref"]:
                    raise EngineException(
                        "Invalid VNF Instance Id. '{}' is not "
                        "present in the NS '{}'".format(vnf_instance_id, ns_instance_id)
                    )

        # only data-access/shape errors are wrapped here; the EngineExceptions
        # raised above are not in this list and propagate unchanged
        except (
            DbException,
            AttributeError,
            IndexError,
            KeyError,
            ValueError,
        ) as e:
            raise type(e)(
                "Ns update request could not be processed with error: {}.".format(e)
            )
1399 |
|
|
1400 |
1 |
def _check_scale_ns_operation(self, indata, nsr): |
1401 |
0 |
vnfd = self._get_vnfd_from_vnf_member_index( |
1402 |
|
indata["scaleVnfData"]["scaleByStepData"]["member-vnf-index"], nsr["_id"] |
1403 |
|
) |
1404 |
0 |
for scaling_aspect in get_iterable(vnfd.get("df", ())[0]["scaling-aspect"]): |
1405 |
0 |
if ( |
1406 |
|
indata["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"] |
1407 |
|
== scaling_aspect["id"] |
1408 |
|
): |
1409 |
0 |
break |
1410 |
|
else: |
1411 |
0 |
raise EngineException( |
1412 |
|
"Invalid scaleVnfData:scaleByStepData:scaling-group-descriptor '{}' is not " |
1413 |
|
"present at vnfd:scaling-aspect".format( |
1414 |
|
indata["scaleVnfData"]["scaleByStepData"][ |
1415 |
|
"scaling-group-descriptor" |
1416 |
|
] |
1417 |
|
) |
1418 |
|
) |
1419 |
|
|
1420 |
1 |
def _check_heal_ns_operation(self, indata, nsr): |
1421 |
0 |
return |
1422 |
|
|
1423 |
1 |
def _check_instantiate_ns_operation(self, indata, nsr, session): |
1424 |
1 |
vnf_member_index_to_vnfd = {} # map between vnf_member_index to vnf descriptor. |
1425 |
1 |
vim_accounts = [] |
1426 |
1 |
wim_accounts = [] |
1427 |
1 |
nsd = nsr["nsd"] |
1428 |
1 |
self._check_valid_vim_account(indata["vimAccountId"], vim_accounts, session) |
1429 |
1 |
self._check_valid_wim_account(indata.get("wimAccountId"), wim_accounts, session) |
1430 |
1 |
for in_vnf in get_iterable(indata.get("vnf")): |
1431 |
1 |
member_vnf_index = in_vnf["member-vnf-index"] |
1432 |
1 |
if vnf_member_index_to_vnfd.get(member_vnf_index): |
1433 |
0 |
vnfd = vnf_member_index_to_vnfd[member_vnf_index] |
1434 |
|
else: |
1435 |
1 |
vnfd = self._get_vnfd_from_vnf_member_index( |
1436 |
|
member_vnf_index, nsr["_id"] |
1437 |
|
) |
1438 |
1 |
vnf_member_index_to_vnfd[ |
1439 |
|
member_vnf_index |
1440 |
|
] = vnfd # add to cache, avoiding a later look for |
1441 |
1 |
self._check_vnf_instantiation_params(in_vnf, vnfd) |
1442 |
1 |
if in_vnf.get("vimAccountId"): |
1443 |
0 |
self._check_valid_vim_account( |
1444 |
|
in_vnf["vimAccountId"], vim_accounts, session |
1445 |
|
) |
1446 |
|
|
1447 |
1 |
for in_vld in get_iterable(indata.get("vld")): |
1448 |
0 |
self._check_valid_wim_account( |
1449 |
|
in_vld.get("wimAccountId"), wim_accounts, session |
1450 |
|
) |
1451 |
0 |
for vldd in get_iterable(nsd.get("virtual-link-desc")): |
1452 |
0 |
if in_vld["name"] == vldd["id"]: |
1453 |
0 |
break |
1454 |
|
else: |
1455 |
0 |
raise EngineException( |
1456 |
|
"Invalid parameter vld:name='{}' is not present at nsd:vld".format( |
1457 |
|
in_vld["name"] |
1458 |
|
) |
1459 |
|
) |
1460 |
|
|
1461 |
1 |
def _get_vnfd_from_vnf_member_index(self, member_vnf_index, nsr_id): |
1462 |
|
# Obtain vnf descriptor. The vnfr is used to get the vnfd._id used for this member_vnf_index |
1463 |
1 |
vnfr = self.db.get_one( |
1464 |
|
"vnfrs", |
1465 |
|
{"nsr-id-ref": nsr_id, "member-vnf-index-ref": member_vnf_index}, |
1466 |
|
fail_on_empty=False, |
1467 |
|
) |
1468 |
1 |
if not vnfr: |
1469 |
1 |
raise EngineException( |
1470 |
|
"Invalid parameter member_vnf_index='{}' is not one of the " |
1471 |
|
"nsd:constituent-vnfd".format(member_vnf_index) |
1472 |
|
) |
1473 |
|
|
1474 |
|
## Backwards compatibility: if there is no revision, get it from the one and only VNFD entry |
1475 |
1 |
if "revision" in vnfr: |
1476 |
1 |
vnfd_revision = vnfr["vnfd-id"] + ":" + str(vnfr["revision"]) |
1477 |
1 |
vnfd = self.db.get_one("vnfds_revisions", {"_id": vnfd_revision}, fail_on_empty=False) |
1478 |
|
else: |
1479 |
1 |
vnfd = self.db.get_one("vnfds", {"_id": vnfr["vnfd-id"]}, fail_on_empty=False) |
1480 |
|
|
1481 |
1 |
if not vnfd: |
1482 |
0 |
raise EngineException( |
1483 |
|
"vnfd id={} has been deleted!. Operation cannot be performed".format( |
1484 |
|
vnfr["vnfd-id"] |
1485 |
|
) |
1486 |
|
) |
1487 |
1 |
return vnfd |
1488 |
|
|
1489 |
1 |
def _check_valid_vdu(self, vnfd, vdu_id): |
1490 |
1 |
for vdud in get_iterable(vnfd.get("vdu")): |
1491 |
1 |
if vdud["id"] == vdu_id: |
1492 |
0 |
return vdud |
1493 |
|
else: |
1494 |
1 |
raise EngineException( |
1495 |
|
"Invalid parameter vdu_id='{}' not present at vnfd:vdu:id".format( |
1496 |
|
vdu_id |
1497 |
|
) |
1498 |
|
) |
1499 |
|
|
1500 |
1 |
def _check_valid_kdu(self, vnfd, kdu_name): |
1501 |
0 |
for kdud in get_iterable(vnfd.get("kdu")): |
1502 |
0 |
if kdud["name"] == kdu_name: |
1503 |
0 |
return kdud |
1504 |
|
else: |
1505 |
0 |
raise EngineException( |
1506 |
|
"Invalid parameter kdu_name='{}' not present at vnfd:kdu:name".format( |
1507 |
|
kdu_name |
1508 |
|
) |
1509 |
|
) |
1510 |
|
|
1511 |
1 |
def _check_vnf_instantiation_params(self, in_vnf, vnfd): |
1512 |
1 |
for in_vdu in get_iterable(in_vnf.get("vdu")): |
1513 |
1 |
for vdu in get_iterable(vnfd.get("vdu")): |
1514 |
1 |
if in_vdu["id"] == vdu["id"]: |
1515 |
1 |
for volume in get_iterable(in_vdu.get("volume")): |
1516 |
0 |
for volumed in get_iterable(vdu.get("virtual-storage-desc")): |
1517 |
0 |
if volumed == volume["name"]: |
1518 |
0 |
break |
1519 |
|
else: |
1520 |
0 |
raise EngineException( |
1521 |
|
"Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}']:" |
1522 |
|
"volume:name='{}' is not present at " |
1523 |
|
"vnfd:vdu:virtual-storage-desc list".format( |
1524 |
|
in_vnf["member-vnf-index"], |
1525 |
|
in_vdu["id"], |
1526 |
|
volume["id"], |
1527 |
|
) |
1528 |
|
) |
1529 |
|
|
1530 |
1 |
vdu_if_names = set() |
1531 |
1 |
for cpd in get_iterable(vdu.get("int-cpd")): |
1532 |
1 |
for iface in get_iterable( |
1533 |
|
cpd.get("virtual-network-interface-requirement") |
1534 |
|
): |
1535 |
1 |
vdu_if_names.add(iface.get("name")) |
1536 |
|
|
1537 |
1 |
for in_iface in get_iterable(in_vdu.get("interface")): |
1538 |
1 |
if in_iface["name"] in vdu_if_names: |
1539 |
1 |
break |
1540 |
|
else: |
1541 |
0 |
raise EngineException( |
1542 |
|
"Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}']:" |
1543 |
|
"int-cpd[id='{}'] is not present at vnfd:vdu:int-cpd".format( |
1544 |
|
in_vnf["member-vnf-index"], |
1545 |
|
in_vdu["id"], |
1546 |
|
in_iface["name"], |
1547 |
|
) |
1548 |
|
) |
1549 |
1 |
break |
1550 |
|
|
1551 |
|
else: |
1552 |
0 |
raise EngineException( |
1553 |
|
"Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}'] is not present " |
1554 |
|
"at vnfd:vdu".format(in_vnf["member-vnf-index"], in_vdu["id"]) |
1555 |
|
) |
1556 |
|
|
1557 |
1 |
vnfd_ivlds_cpds = { |
1558 |
|
ivld.get("id"): set() |
1559 |
|
for ivld in get_iterable(vnfd.get("int-virtual-link-desc")) |
1560 |
|
} |
1561 |
1 |
for vdu in vnfd.get("vdu", {}): |
1562 |
1 |
for cpd in vdu.get("int-cpd", {}): |
1563 |
1 |
if cpd.get("int-virtual-link-desc"): |
1564 |
1 |
vnfd_ivlds_cpds[cpd.get("int-virtual-link-desc")] = cpd.get("id") |
1565 |
|
|
1566 |
1 |
for in_ivld in get_iterable(in_vnf.get("internal-vld")): |
1567 |
1 |
if in_ivld.get("name") in vnfd_ivlds_cpds: |
1568 |
1 |
for in_icp in get_iterable(in_ivld.get("internal-connection-point")): |
1569 |
0 |
if in_icp["id-ref"] in vnfd_ivlds_cpds[in_ivld.get("name")]: |
1570 |
0 |
break |
1571 |
|
else: |
1572 |
0 |
raise EngineException( |
1573 |
|
"Invalid parameter vnf[member-vnf-index='{}']:internal-vld[name" |
1574 |
|
"='{}']:internal-connection-point[id-ref:'{}'] is not present at " |
1575 |
|
"vnfd:internal-vld:name/id:internal-connection-point".format( |
1576 |
|
in_vnf["member-vnf-index"], |
1577 |
|
in_ivld["name"], |
1578 |
|
in_icp["id-ref"], |
1579 |
|
) |
1580 |
|
) |
1581 |
|
else: |
1582 |
0 |
raise EngineException( |
1583 |
|
"Invalid parameter vnf[member-vnf-index='{}']:internal-vld:name='{}'" |
1584 |
|
" is not present at vnfd '{}'".format( |
1585 |
|
in_vnf["member-vnf-index"], in_ivld["name"], vnfd["id"] |
1586 |
|
) |
1587 |
|
) |
1588 |
|
|
1589 |
1 |
def _check_valid_vim_account(self, vim_account, vim_accounts, session): |
1590 |
1 |
if vim_account in vim_accounts: |
1591 |
0 |
return |
1592 |
1 |
try: |
1593 |
1 |
db_filter = self._get_project_filter(session) |
1594 |
1 |
db_filter["_id"] = vim_account |
1595 |
1 |
self.db.get_one("vim_accounts", db_filter) |
1596 |
0 |
except Exception: |
1597 |
0 |
raise EngineException( |
1598 |
|
"Invalid vimAccountId='{}' not present for the project".format( |
1599 |
|
vim_account |
1600 |
|
) |
1601 |
|
) |
1602 |
1 |
vim_accounts.append(vim_account) |
1603 |
|
|
1604 |
1 |
def _get_vim_account(self, vim_id: str, session): |
1605 |
1 |
try: |
1606 |
1 |
db_filter = self._get_project_filter(session) |
1607 |
1 |
db_filter["_id"] = vim_id |
1608 |
1 |
return self.db.get_one("vim_accounts", db_filter) |
1609 |
0 |
except Exception: |
1610 |
0 |
raise EngineException( |
1611 |
|
"Invalid vimAccountId='{}' not present for the project".format( |
1612 |
|
vim_id |
1613 |
|
) |
1614 |
|
) |
1615 |
|
|
1616 |
1 |
def _check_valid_wim_account(self, wim_account, wim_accounts, session): |
1617 |
1 |
if not isinstance(wim_account, str): |
1618 |
1 |
return |
1619 |
0 |
if wim_account in wim_accounts: |
1620 |
0 |
return |
1621 |
0 |
try: |
1622 |
0 |
db_filter = self._get_project_filter(session) |
1623 |
0 |
db_filter["_id"] = wim_account |
1624 |
0 |
self.db.get_one("wim_accounts", db_filter) |
1625 |
0 |
except Exception: |
1626 |
0 |
raise EngineException( |
1627 |
|
"Invalid wimAccountId='{}' not present for the project".format( |
1628 |
|
wim_account |
1629 |
|
) |
1630 |
|
) |
1631 |
0 |
wim_accounts.append(wim_account) |
1632 |
|
|
1633 |
1 |
    def _look_for_pdu(
        self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
    ):
        """
        Look for a free PDU in the catalog matching vdur type and interfaces. Fills vnfr.vdur with the interface
        (ip_address, ...) information.
        Modifies PDU _admin.usageState to 'IN_USE'
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param rollback: list with the database modifications to rollback if needed
        :param vnfr: vnfr to be updated. It is modified with pdu interface info if pdu is found
        :param vim_account: vim_account where this vnfr should be deployed
        :param vnfr_update: dictionary filled by this method with changes to be done at database vnfr
        :param vnfr_update_rollback: dictionary filled by this method with original content of vnfr in case a rollback
            of the changed vnfr is needed

        :return: List of PDU interfaces that are connected to an existing VIM network. Each item contains:
            "vim-network-name": used at VIM
             "name": interface name
             "vnf-vld-id": internal VNFD vld where this interface is connected, or
             "ns-vld-id": NSD vld where this interface is connected.
            NOTE: One, and only one between 'vnf-vld-id' and 'ns-vld-id' contains a value. The other will be None
        """

        ifaces_forcing_vim_network = []
        for vdur_index, vdur in enumerate(get_iterable(vnfr.get("vdur"))):
            # Only vdurs that declare a pdu-type are backed by a physical device
            if not vdur.get("pdu-type"):
                continue
            pdu_type = vdur.get("pdu-type")
            # Candidate PDUs: same project, right VIM, right type, enabled and free
            pdu_filter = self._get_project_filter(session)
            pdu_filter["vim_accounts"] = vim_account
            pdu_filter["type"] = pdu_type
            pdu_filter["_admin.operationalState"] = "ENABLED"
            pdu_filter["_admin.usageState"] = "NOT_IN_USE"
            # TODO feature 1417: "shared": True,

            available_pdus = self.db.get_list("pdus", pdu_filter)
            for pdu in available_pdus:
                # step 1 check if this pdu contains needed interfaces:
                match_interfaces = True
                for vdur_interface in vdur["interfaces"]:
                    for pdu_interface in pdu["interfaces"]:
                        if pdu_interface["name"] == vdur_interface["name"]:
                            # TODO feature 1417: match per mgmt type
                            break
                    else:  # no interface found for name
                        match_interfaces = False
                        break
                if match_interfaces:
                    # First PDU satisfying every vdur interface name wins
                    break
            else:
                # Loop exhausted without a break: no candidate matched
                raise EngineException(
                    "No PDU of type={} at vim_account={} found for member_vnf_index={}, vdu={} matching interface "
                    "names".format(
                        pdu_type,
                        vim_account,
                        vnfr["member-vnf-index-ref"],
                        vdur["vdu-id-ref"],
                    )
                )

            # step 2. Update pdu
            # Snapshot the fields we are about to overwrite so the caller can undo
            rollback_pdu = {
                "_admin.usageState": pdu["_admin"]["usageState"],
                "_admin.usage.vnfr_id": None,
                "_admin.usage.nsr_id": None,
                "_admin.usage.vdur": None,
            }
            # Mark the PDU as taken by this vnfr/vdur (dot-separated field paths)
            self.db.set_one(
                "pdus",
                {"_id": pdu["_id"]},
                {
                    "_admin.usageState": "IN_USE",
                    "_admin.usage": {
                        "vnfr_id": vnfr["_id"],
                        "nsr_id": vnfr["nsr-id-ref"],
                        "vdur": vdur["vdu-id-ref"],
                    },
                },
            )
            rollback.append(
                {
                    "topic": "pdus",
                    "_id": pdu["_id"],
                    "operation": "set",
                    "content": rollback_pdu,
                }
            )

            # step 3. Fill vnfr info by filling vdur
            vdu_text = "vdur.{}".format(vdur_index)
            vnfr_update_rollback[vdu_text + ".pdu-id"] = None
            vnfr_update[vdu_text + ".pdu-id"] = pdu["_id"]
            for iface_index, vdur_interface in enumerate(vdur["interfaces"]):
                for pdu_interface in pdu["interfaces"]:
                    if pdu_interface["name"] == vdur_interface["name"]:
                        iface_text = vdu_text + ".interfaces.{}".format(iface_index)
                        # Copy addressing info from the PDU interface onto the vdur
                        for k, v in pdu_interface.items():
                            if k in (
                                "ip-address",
                                "mac-address",
                            ):  # TODO: switch-xxxxx must be inserted
                                vnfr_update[iface_text + ".{}".format(k)] = v
                                # NOTE(review): looks like this should be
                                # vdur_interface.get(k) — .get(v) indexes by the
                                # *value*, so the rollback snapshot is likely always
                                # None; confirm before changing
                                vnfr_update_rollback[
                                    iface_text + ".{}".format(k)
                                ] = vdur_interface.get(v)
                        if pdu_interface.get("ip-address"):
                            # Management interfaces also set the vdur-level ip-address
                            if vdur_interface.get(
                                "mgmt-interface"
                            ) or vdur_interface.get("mgmt-vnf"):
                                vnfr_update_rollback[
                                    vdu_text + ".ip-address"
                                ] = vdur.get("ip-address")
                                vnfr_update[vdu_text + ".ip-address"] = pdu_interface[
                                    "ip-address"
                                ]
                            # The VNF-wide management address follows the mgmt-vnf iface
                            if vdur_interface.get("mgmt-vnf"):
                                vnfr_update_rollback["ip-address"] = vnfr.get(
                                    "ip-address"
                                )
                                vnfr_update["ip-address"] = pdu_interface["ip-address"]
                                vnfr_update[vdu_text + ".ip-address"] = pdu_interface[
                                    "ip-address"
                                ]
                        if pdu_interface.get("vim-network-name") or pdu_interface.get(
                            "vim-network-id"
                        ):
                            # This PDU interface is pre-wired to a VIM network: report
                            # it so the caller can force that network on the vld
                            ifaces_forcing_vim_network.append(
                                {
                                    "name": vdur_interface.get("vnf-vld-id")
                                    or vdur_interface.get("ns-vld-id"),
                                    "vnf-vld-id": vdur_interface.get("vnf-vld-id"),
                                    "ns-vld-id": vdur_interface.get("ns-vld-id"),
                                }
                            )
                            if pdu_interface.get("vim-network-id"):
                                ifaces_forcing_vim_network[-1][
                                    "vim-network-id"
                                ] = pdu_interface["vim-network-id"]
                            if pdu_interface.get("vim-network-name"):
                                ifaces_forcing_vim_network[-1][
                                    "vim-network-name"
                                ] = pdu_interface["vim-network-name"]
                        break

        return ifaces_forcing_vim_network
1778 |
|
|
1779 |
1 |
    def _look_for_k8scluster(
        self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
    ):
        """
        Look for an available k8scluster for all the kuds in the vnfd matching version and cni requirements.
        Fills vnfr.kdur with the selected k8scluster

        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param rollback: list with the database modifications to rollback if needed
        :param vnfr: vnfr to be updated. It is modified with pdu interface info if pdu is found
        :param vim_account: vim_account where this vnfr should be deployed
        :param vnfr_update: dictionary filled by this method with changes to be done at database vnfr
        :param vnfr_update_rollback: dictionary filled by this method with original content of vnfr in case a rollback
            of the changed vnfr is needed

        :return: List of KDU interfaces that are connected to an existing VIM network. Each item contains:
            "vim-network-name": used at VIM
             "name": interface name
             "vnf-vld-id": internal VNFD vld where this interface is connected, or
             "ns-vld-id": NSD vld where this interface is connected.
            NOTE: One, and only one between 'vnf-vld-id' and 'ns-vld-id' contains a value. The other will be None
        """

        ifaces_forcing_vim_network = []
        # Nothing to do for VNFs without KDUs
        if not vnfr.get("kdur"):
            return ifaces_forcing_vim_network

        # Candidate clusters: same project and same VIM account
        kdu_filter = self._get_project_filter(session)
        kdu_filter["vim_account"] = vim_account
        # TODO kdu_filter["_admin.operationalState"] = "ENABLED"
        available_k8sclusters = self.db.get_list("k8sclusters", kdu_filter)

        k8s_requirements = {}  # just for logging
        # Pick the first cluster satisfying every declared requirement; the loop
        # variable k8scluster keeps the chosen cluster after the break
        for k8scluster in available_k8sclusters:
            if not vnfr.get("k8s-cluster"):
                # No requirements: first candidate is good enough
                break
            # restrict by cni
            if vnfr["k8s-cluster"].get("cni"):
                k8s_requirements["cni"] = vnfr["k8s-cluster"]["cni"]
                if not set(vnfr["k8s-cluster"]["cni"]).intersection(
                    k8scluster.get("cni", ())
                ):
                    continue
            # restrict by version
            if vnfr["k8s-cluster"].get("version"):
                k8s_requirements["version"] = vnfr["k8s-cluster"]["version"]
                if k8scluster.get("k8s_version") not in vnfr["k8s-cluster"]["version"]:
                    continue
            # restrict by number of networks
            if vnfr["k8s-cluster"].get("nets"):
                k8s_requirements["networks"] = len(vnfr["k8s-cluster"]["nets"])
                if not k8scluster.get("nets") or len(k8scluster["nets"]) < len(
                    vnfr["k8s-cluster"]["nets"]
                ):
                    continue
            break
        else:
            # Loop exhausted (also when available_k8sclusters is empty): no match
            raise EngineException(
                "No k8scluster with requirements='{}' at vim_account={} found for member_vnf_index={}".format(
                    k8s_requirements, vim_account, vnfr["member-vnf-index-ref"]
                )
            )

        for kdur_index, kdur in enumerate(get_iterable(vnfr.get("kdur"))):
            # step 3. Fill vnfr info by filling kdur
            kdu_text = "kdur.{}.".format(kdur_index)
            vnfr_update_rollback[kdu_text + "k8s-cluster.id"] = None
            # Every kdur of this vnfr is pinned to the single selected cluster
            vnfr_update[kdu_text + "k8s-cluster.id"] = k8scluster["_id"]

        # step 4. Check VIM networks that forces the selected k8s_cluster
        if vnfr.get("k8s-cluster") and vnfr["k8s-cluster"].get("nets"):
            k8scluster_net_list = list(k8scluster.get("nets").keys())
            for net_index, kdur_net in enumerate(vnfr["k8s-cluster"]["nets"]):
                # get a network from k8s_cluster nets. If name matches use this, if not use other
                if kdur_net["id"] in k8scluster_net_list:  # name matches
                    vim_net = k8scluster["nets"][kdur_net["id"]]
                    k8scluster_net_list.remove(kdur_net["id"])
                else:
                    # Consume remaining cluster networks in order
                    vim_net = k8scluster["nets"][k8scluster_net_list[0]]
                    k8scluster_net_list.pop(0)
                vnfr_update_rollback[
                    "k8s-cluster.nets.{}.vim_net".format(net_index)
                ] = None
                vnfr_update["k8s-cluster.nets.{}.vim_net".format(net_index)] = vim_net
                if vim_net and (
                    kdur_net.get("vnf-vld-id") or kdur_net.get("ns-vld-id")
                ):
                    # Report networks that must be forced on the connected vld
                    ifaces_forcing_vim_network.append(
                        {
                            "name": kdur_net.get("vnf-vld-id")
                            or kdur_net.get("ns-vld-id"),
                            "vnf-vld-id": kdur_net.get("vnf-vld-id"),
                            "ns-vld-id": kdur_net.get("ns-vld-id"),
                            "vim-network-name": vim_net,  # TODO can it be vim-network-id ???
                        }
                    )
        # TODO check that this forcing is not incompatible with other forcing
        return ifaces_forcing_vim_network
1877 |
|
|
1878 |
1 |
    def _update_vnfrs_from_nsd(self, nsr):
        """
        Apply fixed ip-address assignments declared in the NSD's vnf-profiles to the
        vnfr interfaces of the NS instance.

        :param nsr: NS instance record; its "nsd" entry is inspected
        :raises ValidationError/EngineException/DbException/MsgException/FsException:
            re-raised with the current step name appended for context
        """
        try:
            nsr_id = nsr["_id"]
            nsd = nsr["nsd"]

            step = "Getting vnf_profiles from nsd"
            vnf_profiles = nsd.get("df", [{}])[0].get("vnf-profile", ())
            vld_fixed_ip_connection_point_data = {}

            step = "Getting ip-address info from vnf_profile if it exists"
            for vnfp in vnf_profiles:
                # Checking ip-address info from nsd.vnf_profile and storing
                for vlc in vnfp.get("virtual-link-connectivity", ()):
                    for cpd in vlc.get("constituent-cpd-id", ()):
                        if cpd.get("ip-address"):
                            step = "Storing ip-address info"
                            # Key is "<vld-id>.<member-vnf-index>" so both parts can be
                            # recovered later with item.split('.')
                            vld_fixed_ip_connection_point_data.update({vlc.get("virtual-link-profile-id") + '.' + cpd.get("constituent-base-element-id"): {
                                "vnfd-connection-point-ref": cpd.get(
                                    "constituent-cpd-id"),
                                "ip-address": cpd.get(
                                    "ip-address")}})

            # Inserting ip address to vnfr
            if len(vld_fixed_ip_connection_point_data) > 0:
                step = "Getting vnfrs"
                vnfrs = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
                for item in vld_fixed_ip_connection_point_data.keys():
                    step = "Filtering vnfrs"
                    # item.split('.')[1] is the member-vnf-index encoded in the key
                    vnfr = next(filter(lambda vnfr: vnfr["member-vnf-index-ref"] == item.split('.')[1], vnfrs), None)
                    if vnfr:
                        vnfr_update = {}
                        for vdur_index, vdur in enumerate(vnfr["vdur"]):
                            for iface_index, iface in enumerate(vdur["interfaces"]):
                                step = "Looking for matched interface"
                                # Match both the connection point and the NS vld
                                # (item.split('.')[0] is the vld id part of the key)
                                if (
                                    iface.get("external-connection-point-ref")
                                    == vld_fixed_ip_connection_point_data[item].get("vnfd-connection-point-ref") and
                                    iface.get("ns-vld-id") == item.split('.')[0]

                                ):
                                    vnfr_update_text = "vdur.{}.interfaces.{}".format(
                                        vdur_index, iface_index
                                    )
                                    step = "Storing info in order to update vnfr"
                                    # Offset the fixed address by the vdur replica index
                                    vnfr_update[
                                        vnfr_update_text + ".ip-address"
                                    ] = increment_ip_mac(
                                        vld_fixed_ip_connection_point_data[item].get("ip-address"),
                                        vdur.get("count-index", 0), )
                                    vnfr_update[vnfr_update_text + ".fixed-ip"] = True

                        step = "updating vnfr at database"
                        self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, vnfr_update)
        except (
            ValidationError,
            EngineException,
            DbException,
            MsgException,
            FsException,
        ) as e:
            # NOTE(review): assumes every listed exception type accepts an http_code
            # keyword in its constructor — confirm for ValidationError
            raise type(e)("{} while '{}'".format(e, step), http_code=e.http_code)
1939 |
|
|
1940 |
1 |
    def _update_vnfrs(self, session, rollback, nsr, indata):
        """
        Propagate instantiation parameters into every vnfr of the NS instance.

        Resolves the VIM account per VNF, applies ip/mac overrides from the
        instantiation parameters onto vnfr.vdur interfaces, assigns PDUs and
        k8s clusters, persists each vnfr and records rollback entries. May also
        append entries to indata["vld"] / indata["vnf"] when a PDU or k8s cluster
        forces a concrete VIM network.
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param rollback: list with the database modifications to rollback if needed
        :param nsr: NS instance record being instantiated
        :param indata: instantiation parameters; may be extended by this method
        """
        # get vnfr
        nsr_id = nsr["_id"]
        vnfrs = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

        for vnfr in vnfrs:
            vnfr_update = {}
            vnfr_update_rollback = {}
            member_vnf_index = vnfr["member-vnf-index-ref"]
            # update vim-account-id

            # NS-level default VIM; may be overridden per VNF below
            vim_account = indata["vimAccountId"]
            vca_id = self._get_vim_account(vim_account, session).get("vca")
            # check instantiate parameters
            for vnf_inst_params in get_iterable(indata.get("vnf")):
                if vnf_inst_params["member-vnf-index"] != member_vnf_index:
                    continue
                if vnf_inst_params.get("vimAccountId"):
                    # Per-VNF VIM override; refresh the associated VCA too
                    vim_account = vnf_inst_params.get("vimAccountId")
                    vca_id = self._get_vim_account(vim_account, session).get("vca")

                # get vnf.vdu.interface instantiation params to update vnfr.vdur.interfaces ip, mac
                for vdu_inst_param in get_iterable(vnf_inst_params.get("vdu")):
                    for vdur_index, vdur in enumerate(vnfr["vdur"]):
                        if vdu_inst_param["id"] != vdur["vdu-id-ref"]:
                            continue
                        for iface_inst_param in get_iterable(
                            vdu_inst_param.get("interface")
                        ):
                            # NOTE(review): next() without a default raises
                            # StopIteration when the interface name does not exist in
                            # this vdur — presumably validated earlier; confirm
                            iface_index, _ = next(
                                i
                                for i in enumerate(vdur["interfaces"])
                                if i[1]["name"] == iface_inst_param["name"]
                            )
                            vnfr_update_text = "vdur.{}.interfaces.{}".format(
                                vdur_index, iface_index
                            )
                            if iface_inst_param.get("ip-address"):
                                # Offset the requested address by the replica index
                                vnfr_update[
                                    vnfr_update_text + ".ip-address"
                                ] = increment_ip_mac(
                                    iface_inst_param.get("ip-address"),
                                    vdur.get("count-index", 0),
                                )
                                vnfr_update[vnfr_update_text + ".fixed-ip"] = True
                            if iface_inst_param.get("mac-address"):
                                vnfr_update[
                                    vnfr_update_text + ".mac-address"
                                ] = increment_ip_mac(
                                    iface_inst_param.get("mac-address"),
                                    vdur.get("count-index", 0),
                                )
                                vnfr_update[vnfr_update_text + ".fixed-mac"] = True
                            if iface_inst_param.get("floating-ip-required"):
                                vnfr_update[
                                    vnfr_update_text + ".floating-ip-required"
                                ] = True
                # get vnf.internal-vld.internal-conection-point instantiation params to update vnfr.vdur.interfaces
                # TODO update vld with the ip-profile
                for ivld_inst_param in get_iterable(
                    vnf_inst_params.get("internal-vld")
                ):
                    for icp_inst_param in get_iterable(
                        ivld_inst_param.get("internal-connection-point")
                    ):
                        # look for iface
                        for vdur_index, vdur in enumerate(vnfr["vdur"]):
                            for iface_index, iface in enumerate(vdur["interfaces"]):
                                if (
                                    iface.get("internal-connection-point-ref")
                                    == icp_inst_param["id-ref"]
                                ):
                                    vnfr_update_text = "vdur.{}.interfaces.{}".format(
                                        vdur_index, iface_index
                                    )
                                    if icp_inst_param.get("ip-address"):
                                        vnfr_update[
                                            vnfr_update_text + ".ip-address"
                                        ] = increment_ip_mac(
                                            icp_inst_param.get("ip-address"),
                                            vdur.get("count-index", 0),
                                        )
                                        vnfr_update[
                                            vnfr_update_text + ".fixed-ip"
                                        ] = True
                                    if icp_inst_param.get("mac-address"):
                                        vnfr_update[
                                            vnfr_update_text + ".mac-address"
                                        ] = increment_ip_mac(
                                            icp_inst_param.get("mac-address"),
                                            vdur.get("count-index", 0),
                                        )
                                        vnfr_update[
                                            vnfr_update_text + ".fixed-mac"
                                        ] = True
                                    break
            # get ip address from instantiation parameters.vld.vnfd-connection-point-ref
            for vld_inst_param in get_iterable(indata.get("vld")):
                for vnfcp_inst_param in get_iterable(
                    vld_inst_param.get("vnfd-connection-point-ref")
                ):
                    if vnfcp_inst_param["member-vnf-index-ref"] != member_vnf_index:
                        continue
                    # look for iface
                    for vdur_index, vdur in enumerate(vnfr["vdur"]):
                        for iface_index, iface in enumerate(vdur["interfaces"]):
                            if (
                                iface.get("external-connection-point-ref")
                                == vnfcp_inst_param["vnfd-connection-point-ref"]
                            ):
                                vnfr_update_text = "vdur.{}.interfaces.{}".format(
                                    vdur_index, iface_index
                                )
                                if vnfcp_inst_param.get("ip-address"):
                                    vnfr_update[
                                        vnfr_update_text + ".ip-address"
                                    ] = increment_ip_mac(
                                        vnfcp_inst_param.get("ip-address"),
                                        vdur.get("count-index", 0),
                                    )
                                    vnfr_update[vnfr_update_text + ".fixed-ip"] = True
                                if vnfcp_inst_param.get("mac-address"):
                                    vnfr_update[
                                        vnfr_update_text + ".mac-address"
                                    ] = increment_ip_mac(
                                        vnfcp_inst_param.get("mac-address"),
                                        vdur.get("count-index", 0),
                                    )
                                    vnfr_update[vnfr_update_text + ".fixed-mac"] = True
                                break

            vnfr_update["vim-account-id"] = vim_account
            vnfr_update_rollback["vim-account-id"] = vnfr.get("vim-account-id")

            if vca_id:
                vnfr_update["vca-id"] = vca_id
                vnfr_update_rollback["vca-id"] = vnfr.get("vca-id")

            # get pdu
            ifaces_forcing_vim_network = self._look_for_pdu(
                session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
            )

            # get kdus
            ifaces_forcing_vim_network += self._look_for_k8scluster(
                session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
            )
            # update database vnfr
            self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, vnfr_update)
            rollback.append(
                {
                    "topic": "vnfrs",
                    "_id": vnfr["_id"],
                    "operation": "set",
                    "content": vnfr_update_rollback,
                }
            )

            # Update indada in case pdu forces to use a concrete vim-network-name
            # TODO check if user has already insert a vim-network-name and raises an error
            if not ifaces_forcing_vim_network:
                continue
            for iface_info in ifaces_forcing_vim_network:
                if iface_info.get("ns-vld-id"):
                    if "vld" not in indata:
                        indata["vld"] = []
                    # Inject a vld override so the forced VIM network is used
                    indata["vld"].append(
                        {
                            key: iface_info[key]
                            for key in ("name", "vim-network-name", "vim-network-id")
                            if iface_info.get(key)
                        }
                    )

                elif iface_info.get("vnf-vld-id"):
                    if "vnf" not in indata:
                        indata["vnf"] = []
                    # Same idea for internal (per-VNF) vlds
                    indata["vnf"].append(
                        {
                            "member-vnf-index": member_vnf_index,
                            "internal-vld": [
                                {
                                    key: iface_info[key]
                                    for key in (
                                        "name",
                                        "vim-network-name",
                                        "vim-network-id",
                                    )
                                    if iface_info.get(key)
                                }
                            ],
                        }
                    )
2133 |
|
|
2134 |
1 |
@staticmethod |
2135 |
1 |
def _create_nslcmop(nsr_id, operation, params): |
2136 |
|
""" |
2137 |
|
Creates a ns-lcm-opp content to be stored at database. |
2138 |
|
:param nsr_id: internal id of the instance |
2139 |
|
:param operation: instantiate, terminate, scale, action, update ... |
2140 |
|
:param params: user parameters for the operation |
2141 |
|
:return: dictionary following SOL005 format |
2142 |
|
""" |
2143 |
1 |
now = time() |
2144 |
1 |
_id = str(uuid4()) |
2145 |
1 |
nslcmop = { |
2146 |
|
"id": _id, |
2147 |
|
"_id": _id, |
2148 |
|
"operationState": "PROCESSING", # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK |
2149 |
|
"queuePosition": None, |
2150 |
|
"stage": None, |
2151 |
|
"errorMessage": None, |
2152 |
|
"detailedStatus": None, |
2153 |
|
"statusEnteredTime": now, |
2154 |
|
"nsInstanceId": nsr_id, |
2155 |
|
"lcmOperationType": operation, |
2156 |
|
"startTime": now, |
2157 |
|
"isAutomaticInvocation": False, |
2158 |
|
"operationParams": params, |
2159 |
|
"isCancelPending": False, |
2160 |
|
"links": { |
2161 |
|
"self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id, |
2162 |
|
"nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id, |
2163 |
|
}, |
2164 |
|
} |
2165 |
1 |
return nslcmop |
2166 |
|
|
2167 |
1 |
def _get_enabled_vims(self, session): |
2168 |
|
""" |
2169 |
|
Retrieve and return VIM accounts that are accessible by current user and has state ENABLE |
2170 |
|
:param session: current session with user information |
2171 |
|
""" |
2172 |
0 |
db_filter = self._get_project_filter(session) |
2173 |
0 |
db_filter["_admin.operationalState"] = "ENABLED" |
2174 |
0 |
vims = self.db.get_list("vim_accounts", db_filter) |
2175 |
0 |
vimAccounts = [] |
2176 |
0 |
for vim in vims: |
2177 |
0 |
vimAccounts.append(vim["_id"]) |
2178 |
0 |
return vimAccounts |
2179 |
|
|
2180 |
1 |
    def new(
        self,
        rollback,
        session,
        indata=None,
        kwargs=None,
        headers=None,
        slice_object=False,
    ):
        """
        Performs a new LCM operation over a ns: creates an nslcmop entry and
        (unless invoked on behalf of a slice) announces it on the kafka bus.

        :param rollback: list to append created items at database in case a rollback must to be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: descriptor with the parameters of the operation. It must contains among others
            nsInstanceId: _id of the nsr to perform the operation
            operation: it can be: instantiate, terminate, action, update TODO: heal
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :param slice_object: True when called from NsiLcmOpTopic; suppresses the
            slice-membership check on terminate and the kafka message write
        :return: tuple (id of the nslcmops, None); (None, None) means the NSR was
            never instantiated and can simply be removed
        """

        def check_if_nsr_is_not_slice_member(session, nsr_id):
            # Reject termination of an NS that is still referenced by any slice
            # instance visible to this project.
            nsis = None
            db_filter = self._get_project_filter(session)
            db_filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_id
            nsis = self.db.get_one(
                "nsis", db_filter, fail_on_empty=False, fail_on_more=False
            )
            if nsis:
                raise EngineException(
                    "The NS instance {} cannot be terminated because is used by the slice {}".format(
                        nsr_id, nsis["_id"]
                    ),
                    http_code=HTTPStatus.CONFLICT,
                )

        try:
            # Override descriptor with query string kwargs
            self._update_input_with_kwargs(indata, kwargs, yaml_format=True)
            operation = indata["lcmOperationType"]
            nsInstanceId = indata["nsInstanceId"]

            validate_input(indata, self.operation_schema[operation])
            # get ns from nsr_id
            _filter = BaseTopic._get_project_filter(session)
            _filter["_id"] = nsInstanceId
            nsr = self.db.get_one("nsrs", _filter)

            # initial checking: state machine guards before creating the operation
            if operation == "terminate" and slice_object is False:
                check_if_nsr_is_not_slice_member(session, nsr["_id"])
            if (
                not nsr["_admin"].get("nsState")
                or nsr["_admin"]["nsState"] == "NOT_INSTANTIATED"
            ):
                if operation == "terminate" and indata.get("autoremove"):
                    # NSR must be deleted
                    return (
                        None,
                        None,
                    )  # a none in this case is used to indicate not instantiated. It can be removed
                if operation != "instantiate":
                    raise EngineException(
                        "ns_instance '{}' cannot be '{}' because it is not instantiated".format(
                            nsInstanceId, operation
                        ),
                        HTTPStatus.CONFLICT,
                    )
            else:
                if operation == "instantiate" and not session["force"]:
                    raise EngineException(
                        "ns_instance '{}' cannot be '{}' because it is already instantiated".format(
                            nsInstanceId, operation
                        ),
                        HTTPStatus.CONFLICT,
                    )
            self._check_ns_operation(session, nsr, operation, indata)
            # Serialize complex param structures to JSON strings.
            # NOTE(review): the elif means that when both keys are present only
            # primitive_params is serialized -- presumably they are mutually
            # exclusive per operation type; confirm against the schemas.
            if (indata.get("primitive_params")):
                indata["primitive_params"] = json.dumps(indata["primitive_params"])
            elif (indata.get("additionalParamsForVnf")):
                indata["additionalParamsForVnf"] = json.dumps(indata["additionalParamsForVnf"])

            if operation == "instantiate":
                self._update_vnfrs_from_nsd(nsr)
                self._update_vnfrs(session, rollback, nsr, indata)
            # CHANGE_VNFPKG update: when the target VNFD revision changed and its
            # software-version differs, refresh the nsr flavor/image lists and
            # precompute the new vdur list for the LCM.
            if (operation == "update") and (indata["updateType"] == "CHANGE_VNFPKG"):
                nsr_update = {}
                vnfd_id = indata["changeVnfPackageData"]["vnfdId"]
                vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                nsd = self.db.get_one("nsds", {"_id": nsr["nsd-id"]})
                ns_request = nsr["instantiate_params"]
                vnfr = self.db.get_one("vnfrs", {"_id": indata["changeVnfPackageData"]["vnfInstanceId"]})
                latest_vnfd_revision = vnfd["_admin"].get("revision", 1)
                vnfr_vnfd_revision = vnfr.get("revision", 1)
                if latest_vnfd_revision != vnfr_vnfd_revision:
                    # vnfds_revisions keys are "<vnfd_id>:<revision>"
                    old_vnfd_id = vnfd_id + ":" + str(vnfr_vnfd_revision)
                    old_db_vnfd = self.db.get_one("vnfds_revisions", {"_id": old_vnfd_id})
                    old_sw_version = old_db_vnfd.get("software-version", "1.0")
                    new_sw_version = vnfd.get("software-version", "1.0")
                    if new_sw_version != old_sw_version:
                        vnf_index = vnfr["member-vnf-index-ref"]
                        self.logger.info("nsr {}".format(nsr))
                        # NOTE(review): self.nsrtopic is assumed to be set in
                        # __init__ (not visible in this chunk) -- confirm.
                        for vdu in vnfd["vdu"]:
                            self.nsrtopic._add_flavor_to_nsr(vdu, vnfd, nsr, vnf_index, latest_vnfd_revision)
                            sw_image_id = vdu.get("sw-image-desc")
                            if sw_image_id:
                                image_data = self.nsrtopic._get_image_data_from_vnfd(vnfd, sw_image_id)
                                self.nsrtopic._add_image_to_nsr(nsr, image_data)
                            for alt_image in vdu.get("alternative-sw-image-desc", ()):
                                image_data = self.nsrtopic._get_image_data_from_vnfd(vnfd, alt_image)
                                self.nsrtopic._add_image_to_nsr(nsr, image_data)
                        # persist the refreshed image/flavor lists collected above
                        nsr_update["image"] = nsr["image"]
                        nsr_update["flavor"] = nsr["flavor"]
                        self.db.set_one("nsrs", {"_id": nsr["_id"]}, nsr_update)
                        ns_k8s_namespace = self.nsrtopic._get_ns_k8s_namespace(nsd, ns_request, session)
                        vnfr_descriptor = self.nsrtopic._create_vnfr_descriptor_from_vnfd(
                            nsd,
                            vnfd,
                            vnfd_id,
                            vnf_index,
                            nsr,
                            ns_request,
                            ns_k8s_namespace,
                            latest_vnfd_revision,
                        )
                        indata["newVdur"] = vnfr_descriptor["vdur"]
            # create and store the nslcmop descriptor
            nslcmop_desc = self._create_nslcmop(nsInstanceId, operation, indata)
            _id = nslcmop_desc["_id"]
            self.format_on_new(
                nslcmop_desc, session["project_id"], make_public=session["public"]
            )
            if indata.get("placement-engine"):
                # Save valid vim accounts in lcm operation descriptor
                nslcmop_desc["operationParams"][
                    "validVimAccounts"
                ] = self._get_enabled_vims(session)
            self.db.create("nslcmops", nslcmop_desc)
            rollback.append({"topic": "nslcmops", "_id": _id})
            if not slice_object:
                # slice-driven operations are announced by NsiLcmOpTopic instead
                self.msg.write("ns", operation, nslcmop_desc)
            return _id, None
        except ValidationError as e:  # TODO remove try Except, it is captured at nbi.py
            raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
        # except DbException as e:
        #     raise EngineException("Cannot get ns_instance '{}': {}".format(e), HTTPStatus.NOT_FOUND)
2325 |
|
|
2326 |
1 |
def delete(self, session, _id, dry_run=False, not_send_msg=None): |
2327 |
0 |
raise EngineException( |
2328 |
|
"Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR |
2329 |
|
) |
2330 |
|
|
2331 |
1 |
def edit(self, session, _id, indata=None, kwargs=None, content=None): |
2332 |
0 |
raise EngineException( |
2333 |
|
"Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR |
2334 |
|
) |
2335 |
|
|
2336 |
|
|
2337 |
1 |
class NsiTopic(BaseTopic):
    """Topic handling network slice instance records (nsis collection)."""

    topic = "nsis"  # database collection managed by this topic
    topic_msg = "nsi"  # kafka topic for announcements
    quota_name = "slice_instances"

    def __init__(self, db, fs, msg, auth):
        BaseTopic.__init__(self, db, fs, msg, auth)
        # helper topic used to create/delete the member NS records
        self.nsrTopic = NsrTopic(db, fs, msg, auth)

    @staticmethod
    def _format_ns_request(ns_request):
        """Return a shallow copy of the NS request (placeholder for future filtering)."""
        formated_request = copy(ns_request)
        # TODO: Add request params
        return formated_request

    @staticmethod
    def _format_addional_params(slice_request):
        """
        Get and format user additional params for NS or VNF
        :param slice_request: User instantiation additional parameters
        :return: a formatted copy of additional params or None if not supplied
        """
        additional_params = copy(slice_request.get("additionalParamsForNsi"))
        if additional_params:
            for k, v in additional_params.items():
                # keys become mongo document keys, hence the restrictions below
                if not isinstance(k, str):
                    raise EngineException(
                        "Invalid param at additionalParamsForNsi:{}. Only string keys are allowed".format(
                            k
                        )
                    )
                if "." in k or "$" in k:
                    raise EngineException(
                        "Invalid param at additionalParamsForNsi:{}. Keys must not contain dots or $".format(
                            k
                        )
                    )
                if isinstance(v, (dict, tuple, list)):
                    # encode structured values as yaml so they survive storage as strings
                    additional_params[k] = "!!yaml " + safe_dump(v)
        return additional_params

    def _check_descriptor_dependencies(self, session, descriptor):
        """
        Check that the dependent descriptors exist on a new descriptor or edition
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param descriptor: descriptor to be inserted or edit
        :return: None or raises exception
        """
        if not descriptor.get("nst-ref"):
            return
        nstd_id = descriptor["nst-ref"]
        if not self.get_item_list(session, "nsts", {"id": nstd_id}):
            raise EngineException(
                "Descriptor error at nst-ref='{}' references a non exist nstd".format(
                    nstd_id
                ),
                http_code=HTTPStatus.CONFLICT,
            )

    def check_conflict_on_del(self, session, _id, db_content):
        """
        Check that NSI is not instantiated
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param _id: nsi internal id
        :param db_content: The database content of the _id
        :return: None or raises EngineException with the conflict
        """
        if session["force"]:
            return
        nsi = db_content
        if nsi["_admin"].get("nsiState") == "INSTANTIATED":
            raise EngineException(
                "nsi '{}' cannot be deleted because it is in 'INSTANTIATED' state. "
                "Launch 'terminate' operation first; or force deletion".format(_id),
                http_code=HTTPStatus.CONFLICT,
            )

    def delete_extra(self, session, _id, db_content, not_send_msg=None):
        """
        Deletes associated nsilcmops from database. Deletes associated filesystem.
        Set usageState of nst
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param _id: server internal id
        :param db_content: The database content of the descriptor
        :param not_send_msg: To not send message (False) or store content (list) instead
        :return: None if ok or raises EngineException with the problem
        """

        # Deleting the nsrs belonging to nsir
        nsir = db_content
        for nsrs_detailed_item in nsir["_admin"]["nsrs-detailed-list"]:
            nsr_id = nsrs_detailed_item["nsrId"]
            if nsrs_detailed_item.get("shared"):
                _filter = {
                    "_admin.nsrs-detailed-list.ANYINDEX.shared": True,
                    "_admin.nsrs-detailed-list.ANYINDEX.nsrId": nsr_id,
                    "_id.ne": nsir["_id"],
                }
                nsi = self.db.get_one(
                    "nsis", _filter, fail_on_empty=False, fail_on_more=False
                )
                if nsi:  # another NSI still uses this shared NSR: keep it
                    continue
            try:
                self.nsrTopic.delete(
                    session, nsr_id, dry_run=False, not_send_msg=not_send_msg
                )
            except (DbException, EngineException) as e:
                if e.http_code == HTTPStatus.NOT_FOUND:
                    # NSR already gone; nothing to clean up for it
                    pass
                else:
                    raise

        # delete related nsilcmops database entries
        self.db.del_list("nsilcmops", {"netsliceInstanceId": _id})

        # Check and set used NST usage state
        nsir_admin = nsir.get("_admin")
        if nsir_admin and nsir_admin.get("nst-id"):
            # check if used by another NSI
            nsis_list = self.db.get_one(
                "nsis",
                {"nst-id": nsir_admin["nst-id"]},
                fail_on_empty=False,
                fail_on_more=False,
            )
            if not nsis_list:
                self.db.set_one(
                    "nsts",
                    {"_id": nsir_admin["nst-id"]},
                    {"_admin.usageState": "NOT_IN_USE"},
                )

    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        """
        Creates a new netslice instance record into database. It also creates needed nsrs and vnfrs
        :param rollback: list to append the created items at database in case a rollback must be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: params to be used for the nsir
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :return: the _id of nsi descriptor created at database
        """

        try:
            step = "checking quotas"
            self.check_quota(session)

            step = ""
            slice_request = self._remove_envelop(indata)
            # Override descriptor with query string kwargs
            self._update_input_with_kwargs(slice_request, kwargs)
            slice_request = self._validate_input_new(slice_request, session["force"])

            # look for nstd
            step = "getting nstd id='{}' from database".format(
                slice_request.get("nstId")
            )
            _filter = self._get_project_filter(session)
            _filter["_id"] = slice_request["nstId"]
            nstd = self.db.get_one("nsts", _filter)
            # check NST is not disabled
            step = "checking NST operationalState"
            if nstd["_admin"]["operationalState"] == "DISABLED":
                raise EngineException(
                    "nst with id '{}' is DISABLED, and thus cannot be used to create a netslice "
                    "instance".format(slice_request["nstId"]),
                    http_code=HTTPStatus.CONFLICT,
                )
            del _filter["_id"]

            # check NSD is not disabled
            step = "checking operationalState"
            if nstd["_admin"]["operationalState"] == "DISABLED":
                raise EngineException(
                    "nst with id '{}' is DISABLED, and thus cannot be used to create "
                    "a network slice".format(slice_request["nstId"]),
                    http_code=HTTPStatus.CONFLICT,
                )

            nstd.pop("_admin", None)
            nstd_id = nstd.pop("_id", None)
            nsi_id = str(uuid4())
            step = "filling nsi_descriptor with input data"

            # Creating the NSIR
            nsi_descriptor = {
                "id": nsi_id,
                "name": slice_request["nsiName"],
                "description": slice_request.get("nsiDescription", ""),
                "datacenter": slice_request["vimAccountId"],
                "nst-ref": nstd["id"],
                "instantiation_parameters": slice_request,
                "network-slice-template": nstd,
                "nsr-ref-list": [],
                "vlr-list": [],
                "_id": nsi_id,
                "additionalParamsForNsi": self._format_addional_params(slice_request),
            }

            step = "creating nsi at database"
            self.format_on_new(
                nsi_descriptor, session["project_id"], make_public=session["public"]
            )
            nsi_descriptor["_admin"]["nsiState"] = "NOT_INSTANTIATED"
            nsi_descriptor["_admin"]["netslice-subnet"] = None
            nsi_descriptor["_admin"]["deployed"] = {}
            nsi_descriptor["_admin"]["deployed"]["RO"] = []
            nsi_descriptor["_admin"]["nst-id"] = nstd_id

            # Creating netslice-vld for the RO.
            step = "creating netslice-vld at database"

            # Building the vlds list to be deployed
            # From netslice descriptors, creating the initial list
            nsi_vlds = []

            for netslice_vlds in get_iterable(nstd.get("netslice-vld")):
                # Getting template Instantiation parameters from NST
                nsi_vld = deepcopy(netslice_vlds)
                nsi_vld["shared-nsrs-list"] = []
                nsi_vld["vimAccountId"] = slice_request["vimAccountId"]
                nsi_vlds.append(nsi_vld)

            nsi_descriptor["_admin"]["netslice-vld"] = nsi_vlds
            # Creating netslice-subnet_record.
            needed_nsds = {}
            services = []

            # Updating the nstd with the nsd["_id"] associated to the nss -> services list
            for member_ns in nstd["netslice-subnet"]:
                nsd_id = member_ns["nsd-ref"]
                step = "getting nstd id='{}' constituent-nsd='{}' from database".format(
                    member_ns["nsd-ref"], member_ns["id"]
                )
                if nsd_id not in needed_nsds:
                    # Obtain nsd
                    _filter["id"] = nsd_id
                    nsd = self.db.get_one(
                        "nsds", _filter, fail_on_empty=True, fail_on_more=True
                    )
                    del _filter["id"]
                    nsd.pop("_admin")
                    needed_nsds[nsd_id] = nsd
                else:
                    nsd = needed_nsds[nsd_id]
                member_ns["_id"] = needed_nsds[nsd_id].get("_id")
                services.append(member_ns)

                step = "filling nsir nsd-id='{}' constituent-nsd='{}' from database".format(
                    member_ns["nsd-ref"], member_ns["id"]
                )

            # creates Network Services records (NSRs)
            step = "creating nsrs at database using NsrTopic.new()"
            ns_params = slice_request.get("netslice-subnet")
            nsrs_list = []
            nsi_netslice_subnet = []
            for service in services:
                # Check if the netslice-subnet is shared and if it is share if the nss exists
                _id_nsr = None
                indata_ns = {}
                # Is the nss shared and instantiated?
                _filter["_admin.nsrs-detailed-list.ANYINDEX.shared"] = True
                _filter["_admin.nsrs-detailed-list.ANYINDEX.nsd-id"] = service[
                    "nsd-ref"
                ]
                _filter["_admin.nsrs-detailed-list.ANYINDEX.nss-id"] = service["id"]
                nsi = self.db.get_one(
                    "nsis", _filter, fail_on_empty=False, fail_on_more=False
                )
                if nsi and service.get("is-shared-nss"):
                    # reuse the NSR already created by the other slice instance
                    nsrs_detailed_list = nsi["_admin"]["nsrs-detailed-list"]
                    for nsrs_detailed_item in nsrs_detailed_list:
                        if nsrs_detailed_item["nsd-id"] == service["nsd-ref"]:
                            if nsrs_detailed_item["nss-id"] == service["id"]:
                                _id_nsr = nsrs_detailed_item["nsrId"]
                                break
                    for netslice_subnet in nsi["_admin"]["netslice-subnet"]:
                        if netslice_subnet["nss-id"] == service["id"]:
                            indata_ns = netslice_subnet
                            break
                else:
                    indata_ns = {}
                    if service.get("instantiation-parameters"):
                        indata_ns = deepcopy(service["instantiation-parameters"])
                        # del service["instantiation-parameters"]

                    indata_ns["nsdId"] = service["_id"]
                    indata_ns["nsName"] = (
                        slice_request.get("nsiName") + "." + service["id"]
                    )
                    indata_ns["vimAccountId"] = slice_request.get("vimAccountId")
                    indata_ns["nsDescription"] = service["description"]
                    if slice_request.get("ssh_keys"):
                        indata_ns["ssh_keys"] = slice_request.get("ssh_keys")

                    if ns_params:
                        # per-subnet user overrides win over the template defaults
                        for ns_param in ns_params:
                            if ns_param.get("id") == service["id"]:
                                copy_ns_param = deepcopy(ns_param)
                                del copy_ns_param["id"]
                                indata_ns.update(copy_ns_param)
                                break

                    # Creates Nsr objects
                    _id_nsr, _ = self.nsrTopic.new(
                        rollback, session, indata_ns, kwargs, headers
                    )
                nsrs_item = {
                    "nsrId": _id_nsr,
                    "shared": service.get("is-shared-nss"),
                    "nsd-id": service["nsd-ref"],
                    "nss-id": service["id"],
                    "nslcmop_instantiate": None,
                }
                indata_ns["nss-id"] = service["id"]
                nsrs_list.append(nsrs_item)
                nsi_netslice_subnet.append(indata_ns)
                nsr_ref = {"nsr-ref": _id_nsr}
                nsi_descriptor["nsr-ref-list"].append(nsr_ref)

            # Adding the nsrs list to the nsi
            nsi_descriptor["_admin"]["nsrs-detailed-list"] = nsrs_list
            nsi_descriptor["_admin"]["netslice-subnet"] = nsi_netslice_subnet
            self.db.set_one(
                "nsts", {"_id": slice_request["nstId"]}, {"_admin.usageState": "IN_USE"}
            )

            # Creating the entry in the database
            self.db.create("nsis", nsi_descriptor)
            rollback.append({"topic": "nsis", "_id": nsi_id})
            return nsi_id, None
        # ValidationError must be handled BEFORE the generic Exception handler:
        # it is a subclass of Exception, so with the previous ordering this
        # handler was unreachable and validation failures lost their
        # UNPROCESSABLE_ENTITY http code.
        except ValidationError as e:
            raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
        except Exception as e:  # TODO remove try Except, it is captured at nbi.py
            self.logger.exception(
                "Exception {} at NsiTopic.new()".format(e), exc_info=True
            )
            raise EngineException("Error {}: {}".format(step, e))

    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        """Reject direct edition: slice instances are modified only via LCM operations."""
        raise EngineException(
            "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )
2682 |
|
|
2683 |
|
|
2684 |
1 |
class NsiLcmOpTopic(BaseTopic):
    """Topic handling LCM operations over network slice instances (nsilcmops collection)."""

    topic = "nsilcmops"  # database collection managed by this topic
    topic_msg = "nsi"  # kafka topic where operations are announced
    operation_schema = {  # mapping between operation and jsonschema to validate
        "instantiate": nsi_instantiate,
        # NOTE(review): None presumably means "no schema / skip validation" for
        # terminate -- confirm that validate_input accepts a None schema.
        "terminate": None,
    }
2691 |
|
|
2692 |
1 |
def __init__(self, db, fs, msg, auth): |
2693 |
0 |
BaseTopic.__init__(self, db, fs, msg, auth) |
2694 |
0 |
self.nsi_NsLcmOpTopic = NsLcmOpTopic(self.db, self.fs, self.msg, self.auth) |
2695 |
|
|
2696 |
1 |
    def _check_nsi_operation(self, session, nsir, operation, indata):
        """
        Check that user has enter right parameters for the operation
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param nsir: network slice instance record; its "network-slice-template"
            holds the netslice-subnet definitions checked against indata
        :param operation: it can be: instantiate, terminate, action, TODO: update, heal
        :param indata: descriptor with the parameters of the operation
        :return: None
        """
        # cache of nsd descriptors already fetched, keyed by nsd-ref
        nsds = {}
        nstd = nsir["network-slice-template"]

        def check_valid_netslice_subnet_id(nstId):
            # Return the nsd of the template subnet whose id matches nstId,
            # fetching it from the database at most once (nsds cache).
            # TODO change to vnfR (??)
            for netslice_subnet in nstd["netslice-subnet"]:
                if nstId == netslice_subnet["id"]:
                    nsd_id = netslice_subnet["nsd-ref"]
                    if nsd_id not in nsds:
                        _filter = self._get_project_filter(session)
                        _filter["id"] = nsd_id
                        nsds[nsd_id] = self.db.get_one("nsds", _filter)
                    return nsds[nsd_id]
            # for/else: runs only when the loop completes without returning,
            # i.e. no template subnet matched the requested id
            else:
                raise EngineException(
                    "Invalid parameter nstId='{}' is not one of the "
                    "nst:netslice-subnet".format(nstId)
                )

        if operation == "instantiate":
            # check the existance of netslice-subnet items
            for in_nst in get_iterable(indata.get("netslice-subnet")):
                check_valid_netslice_subnet_id(in_nst["id"])
2727 |
|
|
2728 |
1 |
def _create_nsilcmop(self, session, netsliceInstanceId, operation, params): |
2729 |
0 |
now = time() |
2730 |
0 |
_id = str(uuid4()) |
2731 |
0 |
nsilcmop = { |
2732 |
|
"id": _id, |
2733 |
|
"_id": _id, |
2734 |
|
"operationState": "PROCESSING", # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK |
2735 |
|
"statusEnteredTime": now, |
2736 |
|
"netsliceInstanceId": netsliceInstanceId, |
2737 |
|
"lcmOperationType": operation, |
2738 |
|
"startTime": now, |
2739 |
|
"isAutomaticInvocation": False, |
2740 |
|
"operationParams": params, |
2741 |
|
"isCancelPending": False, |
2742 |
|
"links": { |
2743 |
|
"self": "/osm/nsilcm/v1/nsi_lcm_op_occs/" + _id, |
2744 |
|
"netsliceInstanceId": "/osm/nsilcm/v1/netslice_instances/" |
2745 |
|
+ netsliceInstanceId, |
2746 |
|
}, |
2747 |
|
} |
2748 |
0 |
return nsilcmop |
2749 |
|
|
2750 |
1 |
def add_shared_nsr_2vld(self, nsir, nsr_item): |
2751 |
0 |
for nst_sb_item in nsir["network-slice-template"].get("netslice-subnet"): |
2752 |
0 |
if nst_sb_item.get("is-shared-nss"): |
2753 |
0 |
for admin_subnet_item in nsir["_admin"].get("netslice-subnet"): |
2754 |
0 |
if admin_subnet_item["nss-id"] == nst_sb_item["id"]: |
2755 |
0 |
for admin_vld_item in nsir["_admin"].get("netslice-vld"): |
2756 |
0 |
for admin_vld_nss_cp_ref_item in admin_vld_item[ |
2757 |
|
"nss-connection-point-ref" |
2758 |
|
]: |
2759 |
0 |
if ( |
2760 |
|
admin_subnet_item["nss-id"] |
2761 |
|
== admin_vld_nss_cp_ref_item["nss-ref"] |
2762 |
|
): |
2763 |
0 |
if ( |
2764 |
|
not nsr_item["nsrId"] |
2765 |
|
in admin_vld_item["shared-nsrs-list"] |
2766 |
|
): |
2767 |
0 |
admin_vld_item["shared-nsrs-list"].append( |
2768 |
|
nsr_item["nsrId"] |
2769 |
|
) |
2770 |
0 |
break |
2771 |
|
# self.db.set_one("nsis", {"_id": nsir["_id"]}, nsir) |
2772 |
0 |
self.db.set_one( |
2773 |
|
"nsis", |
2774 |
|
{"_id": nsir["_id"]}, |
2775 |
|
{"_admin.netslice-vld": nsir["_admin"].get("netslice-vld")}, |
2776 |
|
) |
2777 |
|
|
2778 |
1 |
    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        """
        Performs a new operation over a ns: creates one nslcmop per member NS
        (reusing shared NSR operations where possible) and then the nsilcmop.

        :param rollback: list to append created items at database in case a rollback must to be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: descriptor with the parameters of the operation. It must contains among others
            netsliceInstanceId: _id of the nsir to perform the operation
            operation: it can be: instantiate, terminate, action, TODO: update, heal
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :return: id of the nslcmops
        """
        try:
            # Override descriptor with query string kwargs
            self._update_input_with_kwargs(indata, kwargs)
            operation = indata["lcmOperationType"]
            netsliceInstanceId = indata["netsliceInstanceId"]
            validate_input(indata, self.operation_schema[operation])

            # get nsi from netsliceInstanceId
            _filter = self._get_project_filter(session)
            _filter["_id"] = netsliceInstanceId
            nsir = self.db.get_one("nsis", _filter)
            logging_prefix = "nsi={} {} ".format(netsliceInstanceId, operation)
            # _filter is reused below as a shared-NSR query; drop the _id key
            del _filter["_id"]

            # initial checking: state machine guards
            if (
                not nsir["_admin"].get("nsiState")
                or nsir["_admin"]["nsiState"] == "NOT_INSTANTIATED"
            ):
                if operation == "terminate" and indata.get("autoremove"):
                    # NSIR must be deleted
                    return (
                        None,
                        None,
                    )  # a none in this case is used to indicate not instantiated. It can be removed
                if operation != "instantiate":
                    raise EngineException(
                        "netslice_instance '{}' cannot be '{}' because it is not instantiated".format(
                            netsliceInstanceId, operation
                        ),
                        HTTPStatus.CONFLICT,
                    )
            else:
                if operation == "instantiate" and not session["force"]:
                    raise EngineException(
                        "netslice_instance '{}' cannot be '{}' because it is already instantiated".format(
                            netsliceInstanceId, operation
                        ),
                        HTTPStatus.CONFLICT,
                    )

            # Creating all the NS_operation (nslcmop)
            # Get service list from db
            nsrs_list = nsir["_admin"]["nsrs-detailed-list"]
            nslcmops = []
            # nslcmops_item = None
            for index, nsr_item in enumerate(nsrs_list):
                nsr_id = nsr_item["nsrId"]
                if nsr_item.get("shared"):
                    # look for another instantiated NSI that shares this NSR
                    _filter["_admin.nsrs-detailed-list.ANYINDEX.shared"] = True
                    _filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_id
                    _filter[
                        "_admin.nsrs-detailed-list.ANYINDEX.nslcmop_instantiate.ne"
                    ] = None
                    _filter["_id.ne"] = netsliceInstanceId
                    nsi = self.db.get_one(
                        "nsis", _filter, fail_on_empty=False, fail_on_more=False
                    )
                    if operation == "terminate":
                        # clear our own instantiate-op bookkeeping for this NSR
                        _update = {
                            "_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(
                                index
                            ): None
                        }
                        self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
                        if (
                            nsi
                        ):  # other nsi is using this nsr and it needs this nsr instantiated
                            continue  # do not create nsilcmop
                    else:  # instantiate
                        # looks the first nsi fulfilling the conditions but not being the current NSIR
                        if nsi:
                            # reuse the instantiate operation of the other NSI
                            nsi_nsr_item = next(
                                n
                                for n in nsi["_admin"]["nsrs-detailed-list"]
                                if n["nsrId"] == nsr_id
                                and n["shared"]
                                and n["nslcmop_instantiate"]
                            )
                            self.add_shared_nsr_2vld(nsir, nsr_item)
                            nslcmops.append(nsi_nsr_item["nslcmop_instantiate"])
                            _update = {
                                "_admin.nsrs-detailed-list.{}".format(
                                    index
                                ): nsi_nsr_item
                            }
                            self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
                            # continue to not create nslcmop since nsrs is shared and nsrs was created
                            continue
                        else:
                            self.add_shared_nsr_2vld(nsir, nsr_item)

                # create operation
                try:
                    indata_ns = {
                        "lcmOperationType": operation,
                        "nsInstanceId": nsr_id,
                        # Including netslice_id in the ns instantiate Operation
                        "netsliceInstanceId": netsliceInstanceId,
                    }
                    if operation == "instantiate":
                        service = self.db.get_one("nsrs", {"_id": nsr_id})
                        indata_ns.update(service["instantiate_params"])

                    # Creating NS_LCM_OP with the flag slice_object=True to not trigger the service instantiation
                    # message via kafka bus
                    nslcmop, _ = self.nsi_NsLcmOpTopic.new(
                        rollback, session, indata_ns, None, headers, slice_object=True
                    )
                    nslcmops.append(nslcmop)
                    if operation == "instantiate":
                        # remember which nslcmop instantiated this NSR so other
                        # NSIs sharing it can reuse the operation
                        _update = {
                            "_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(
                                index
                            ): nslcmop
                        }
                        self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
                except (DbException, EngineException) as e:
                    if e.http_code == HTTPStatus.NOT_FOUND:
                        # best effort: missing member NS is skipped, not fatal
                        self.logger.info(
                            logging_prefix
                            + "skipping NS={} because not found".format(nsr_id)
                        )
                        pass
                    else:
                        raise

            # Creates nsilcmop
            indata["nslcmops_ids"] = nslcmops
            self._check_nsi_operation(session, nsir, operation, indata)

            nsilcmop_desc = self._create_nsilcmop(
                session, netsliceInstanceId, operation, indata
            )
            self.format_on_new(
                nsilcmop_desc, session["project_id"], make_public=session["public"]
            )
            _id = self.db.create("nsilcmops", nsilcmop_desc)
            rollback.append({"topic": "nsilcmops", "_id": _id})
            self.msg.write("nsi", operation, nsilcmop_desc)
            return _id, None
        except ValidationError as e:
            raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
2933 |
|
|
2934 |
1 |
def delete(self, session, _id, dry_run=False, not_send_msg=None): |
2935 |
0 |
raise EngineException( |
2936 |
|
"Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR |
2937 |
|
) |
2938 |
|
|
2939 |
1 |
    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        # nsilcmops are immutable operation records: direct edition through the
        # generic topic API is an internal error, never a user-facing path.
        raise EngineException(
            "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )