1 |
|
# -*- coding: utf-8 -*- |
2 |
|
|
3 |
|
# Licensed under the Apache License, Version 2.0 (the "License"); |
4 |
|
# you may not use this file except in compliance with the License. |
5 |
|
# You may obtain a copy of the License at |
6 |
|
# |
7 |
|
# http://www.apache.org/licenses/LICENSE-2.0 |
8 |
|
# |
9 |
|
# Unless required by applicable law or agreed to in writing, software |
10 |
|
# distributed under the License is distributed on an "AS IS" BASIS, |
11 |
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
12 |
|
# implied. |
13 |
|
# See the License for the specific language governing permissions and |
14 |
|
# limitations under the License. |
15 |
|
|
16 |
|
# import logging |
17 |
1 |
import json |
18 |
1 |
from uuid import uuid4 |
19 |
1 |
from http import HTTPStatus |
20 |
1 |
from time import time |
21 |
1 |
from copy import copy, deepcopy |
22 |
1 |
from osm_nbi.validation import ( |
23 |
|
validate_input, |
24 |
|
ValidationError, |
25 |
|
ns_instantiate, |
26 |
|
ns_terminate, |
27 |
|
ns_action, |
28 |
|
ns_scale, |
29 |
|
ns_update, |
30 |
|
ns_heal, |
31 |
|
nsi_instantiate, |
32 |
|
ns_migrate, |
33 |
|
ns_verticalscale, |
34 |
|
nslcmop_cancel, |
35 |
|
) |
36 |
1 |
from osm_nbi.base_topic import ( |
37 |
|
BaseTopic, |
38 |
|
EngineException, |
39 |
|
get_iterable, |
40 |
|
deep_get, |
41 |
|
increment_ip_mac, |
42 |
|
update_descriptor_usage_state, |
43 |
|
) |
44 |
1 |
from yaml import safe_dump |
45 |
1 |
from osm_common.dbbase import DbException |
46 |
1 |
from osm_common.msgbase import MsgException |
47 |
1 |
from osm_common.fsbase import FsException |
48 |
1 |
from osm_nbi import utils |
49 |
1 |
from re import ( |
50 |
|
match, |
51 |
|
) # For checking that additional parameter names are valid Jinja2 identifiers |
52 |
|
|
53 |
1 |
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>" |
54 |
|
|
55 |
|
|
56 |
1 |
class NsrTopic(BaseTopic):
    """NBI topic handling NS run-time records (the "nsrs" database collection)."""

    # Database collection backing this topic and kafka topic for notifications
    topic = "nsrs"
    topic_msg = "ns"
    # Quota counter checked by BaseTopic.check_quota on creation
    quota_name = "ns_instances"
    # Validation schema applied to creation (instantiation) requests
    schema_new = ns_instantiate
61 |
|
|
62 |
1 |
def __init__(self, db, fs, msg, auth): |
63 |
1 |
BaseTopic.__init__(self, db, fs, msg, auth) |
64 |
|
|
65 |
1 |
@staticmethod |
66 |
1 |
def format_on_new(content, project_id=None, make_public=False): |
67 |
1 |
BaseTopic.format_on_new(content, project_id=project_id, make_public=make_public) |
68 |
1 |
content["_admin"]["nsState"] = "NOT_INSTANTIATED" |
69 |
1 |
return None |
70 |
|
|
71 |
1 |
def check_conflict_on_del(self, session, _id, db_content): |
72 |
|
""" |
73 |
|
Check that NSR is not instantiated |
74 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
75 |
|
:param _id: nsr internal id |
76 |
|
:param db_content: The database content of the nsr |
77 |
|
:return: None or raises EngineException with the conflict |
78 |
|
""" |
79 |
1 |
if session["force"]: |
80 |
1 |
return |
81 |
1 |
nsr = db_content |
82 |
1 |
if nsr["_admin"].get("nsState") == "INSTANTIATED": |
83 |
1 |
raise EngineException( |
84 |
|
"nsr '{}' cannot be deleted because it is in 'INSTANTIATED' state. " |
85 |
|
"Launch 'terminate' operation first; or force deletion".format(_id), |
86 |
|
http_code=HTTPStatus.CONFLICT, |
87 |
|
) |
88 |
|
|
89 |
1 |
def delete_extra(self, session, _id, db_content, not_send_msg=None): |
90 |
|
""" |
91 |
|
Deletes associated nslcmops and vnfrs from database. Deletes associated filesystem. |
92 |
|
Set usageState of pdu, vnfd, nsd |
93 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
94 |
|
:param _id: server internal id |
95 |
|
:param db_content: The database content of the descriptor |
96 |
|
:param not_send_msg: To not send message (False) or store content (list) instead |
97 |
|
:return: None if ok or raises EngineException with the problem |
98 |
|
""" |
99 |
1 |
self.fs.file_delete(_id, ignore_non_exist=True) |
100 |
1 |
self.db.del_list("nslcmops", {"nsInstanceId": _id}) |
101 |
1 |
self.db.del_list("vnfrs", {"nsr-id-ref": _id}) |
102 |
|
|
103 |
|
# set all used pdus as free |
104 |
1 |
self.db.set_list( |
105 |
|
"pdus", |
106 |
|
{"_admin.usage.nsr_id": _id}, |
107 |
|
{"_admin.usageState": "NOT_IN_USE", "_admin.usage": None}, |
108 |
|
) |
109 |
|
|
110 |
|
# Set NSD usageState |
111 |
1 |
nsr = db_content |
112 |
1 |
used_nsd_id = nsr.get("nsd-id") |
113 |
1 |
if used_nsd_id: |
114 |
|
# check if used by another NSR |
115 |
1 |
nsrs_list = self.db.get_one( |
116 |
|
"nsrs", {"nsd-id": used_nsd_id}, fail_on_empty=False, fail_on_more=False |
117 |
|
) |
118 |
1 |
if not nsrs_list: |
119 |
1 |
self.db.set_one( |
120 |
|
"nsds", {"_id": used_nsd_id}, {"_admin.usageState": "NOT_IN_USE"} |
121 |
|
) |
122 |
|
|
123 |
|
# Set VNFD usageState |
124 |
1 |
used_vnfd_id_list = nsr.get("vnfd-id") |
125 |
1 |
if used_vnfd_id_list: |
126 |
1 |
for used_vnfd_id in used_vnfd_id_list: |
127 |
|
# check if used by another NSR |
128 |
1 |
nsrs_list = self.db.get_one( |
129 |
|
"nsrs", |
130 |
|
{"vnfd-id": used_vnfd_id}, |
131 |
|
fail_on_empty=False, |
132 |
|
fail_on_more=False, |
133 |
|
) |
134 |
1 |
if not nsrs_list: |
135 |
1 |
self.db.set_one( |
136 |
|
"vnfds", |
137 |
|
{"_id": used_vnfd_id}, |
138 |
|
{"_admin.usageState": "NOT_IN_USE"}, |
139 |
|
) |
140 |
|
|
141 |
|
# delete extra ro_nsrs used for internal RO module |
142 |
1 |
self.db.del_one("ro_nsrs", q_filter={"_id": _id}, fail_on_empty=False) |
143 |
|
|
144 |
1 |
@staticmethod |
145 |
1 |
def _format_ns_request(ns_request): |
146 |
1 |
formated_request = copy(ns_request) |
147 |
1 |
formated_request.pop("additionalParamsForNs", None) |
148 |
1 |
formated_request.pop("additionalParamsForVnf", None) |
149 |
1 |
return formated_request |
150 |
|
|
151 |
1 |
    @staticmethod
    def _format_additional_params(
        ns_request, member_vnf_index=None, vdu_id=None, kdu_name=None, descriptor=None
    ):
        """
        Get and format user additional params for NS or VNF.
        The vdu_id and kdu_name params are mutually exclusive! If none of them are given, then the method will
        exclusively search for the VNF/NS LCM additional params.

        :param ns_request: User instantiation additional parameters
        :param member_vnf_index: None for extract NS params, or member_vnf_index to extract VNF params
        :vdu_id: VDU's ID against which we want to format the additional params
        :kdu_name: KDU's name against which we want to format the additional params
        :param descriptor: If not None it check that needed parameters of descriptor are supplied
        :return: tuple with a formatted copy of additional params or None if not supplied, plus other parameters
        """
        additional_params = None
        other_params = None
        # "where_" only feeds the error messages raised below
        if not member_vnf_index:
            additional_params = copy(ns_request.get("additionalParamsForNs"))
            where_ = "additionalParamsForNs"
        elif ns_request.get("additionalParamsForVnf"):
            where_ = "additionalParamsForVnf[member-vnf-index={}]".format(
                member_vnf_index
            )
            # entry of additionalParamsForVnf matching this member-vnf-index, if any
            item = next(
                (
                    x
                    for x in ns_request["additionalParamsForVnf"]
                    if x["member-vnf-index"] == member_vnf_index
                ),
                None,
            )
            if item:
                if not vdu_id and not kdu_name:
                    other_params = item
                additional_params = copy(item.get("additionalParams")) or {}
                if vdu_id and item.get("additionalParamsForVdu"):
                    item_vdu = next(
                        (
                            x
                            for x in item["additionalParamsForVdu"]
                            if x["vdu_id"] == vdu_id
                        ),
                        None,
                    )
                    other_params = item_vdu
                    if item_vdu and item_vdu.get("additionalParams"):
                        where_ += ".additionalParamsForVdu[vdu_id={}]".format(vdu_id)
                        additional_params = item_vdu["additionalParams"]
                if kdu_name:
                    additional_params = {}
                    if item.get("additionalParamsForKdu"):
                        item_kdu = next(
                            (
                                x
                                for x in item["additionalParamsForKdu"]
                                if x["kdu_name"] == kdu_name
                            ),
                            None,
                        )
                        other_params = item_kdu
                        if item_kdu and item_kdu.get("additionalParams"):
                            where_ += ".additionalParamsForKdu[kdu_name={}]".format(
                                kdu_name
                            )
                            additional_params = item_kdu["additionalParams"]

        if additional_params:
            for k, v in additional_params.items():
                # BEGIN Check that additional parameter names are valid Jinja2 identifiers if target is not Kdu
                if not kdu_name and not match("^[a-zA-Z_][a-zA-Z0-9_]*$", k):
                    raise EngineException(
                        "Invalid param name at {}:{}. Must contain only alphanumeric characters "
                        "and underscores, and cannot start with a digit".format(
                            where_, k
                        )
                    )
                # END Check that additional parameter names are valid Jinja2 identifiers
                if not isinstance(k, str):
                    raise EngineException(
                        "Invalid param at {}:{}. Only string keys are allowed".format(
                            where_, k
                        )
                    )
                if "$" in k:
                    raise EngineException(
                        "Invalid param at {}:{}. Keys must not contain $ symbol".format(
                            where_, k
                        )
                    )
                # Non-scalar values are serialized so they survive as scalar text
                if isinstance(v, (dict, tuple, list)):
                    additional_params[k] = "!!yaml " + safe_dump(v)
            if kdu_name:
                # KDU params are passed downstream as a single JSON string
                additional_params = json.dumps(additional_params)

        # Select the VDU ID, KDU name or NS/VNF ID, depending on the method's call intent
        # NOTE(review): assumes descriptor is not None when both vdu_id and kdu_name are None
        selector = vdu_id if vdu_id else kdu_name if kdu_name else descriptor.get("id")

        if descriptor:
            for df in descriptor.get("df", []):
                # check that enough parameters are supplied for the initial-config-primitive
                # TODO: check for cloud-init
                if member_vnf_index:
                    initial_primitives = []
                    if (
                        "lcm-operations-configuration" in df
                        and "operate-vnf-op-config"
                        in df["lcm-operations-configuration"]
                    ):
                        for config in df["lcm-operations-configuration"][
                            "operate-vnf-op-config"
                        ].get("day1-2", []):
                            # Verify the target object (VNF|NS|VDU|KDU) where we need to populate
                            # the params with the additional ones given by the user
                            if config.get("id") == selector:
                                for primitive in get_iterable(
                                    config.get("initial-config-primitive")
                                ):
                                    initial_primitives.append(primitive)
                else:
                    initial_primitives = deep_get(
                        descriptor, ("ns-configuration", "initial-config-primitive")
                    )

                for initial_primitive in get_iterable(initial_primitives):
                    for param in get_iterable(initial_primitive.get("parameter")):
                        if param["value"].startswith("<") and param["value"].endswith(
                            ">"
                        ):
                            # These placeholders are resolved later, not by the user
                            if param["value"] in (
                                "<rw_mgmt_ip>",
                                "<VDU_SCALE_INFO>",
                                "<ns_config_info>",
                                "<OSM>",
                            ):
                                continue
                            if (
                                not additional_params
                                or param["value"][1:-1] not in additional_params
                            ):
                                raise EngineException(
                                    "Parameter '{}' needed for vnfd[id={}]:day1-2 configuration:"
                                    "initial-config-primitive[name={}] not supplied".format(
                                        param["value"],
                                        descriptor["id"],
                                        initial_primitive["name"],
                                    )
                                )

        return additional_params or None, other_params or None
302 |
|
|
303 |
1 |
    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        """
        Creates a new nsr into database. It also creates needed vnfrs
        :param rollback: list to append the created items at database in case a rollback must be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: params to be used for the nsr
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :return: the _id of nsr descriptor created at database. Or an exception of type
            EngineException, ValidationError, DbException, FsException, MsgException.
            Note: Exceptions are not captured on purpose. They should be captured at called
        """
        # "step" is folded into any re-raised exception message below
        step = "checking quotas"  # first step must be defined outside try
        try:
            self.check_quota(session)

            step = "validating input parameters"
            ns_request = self._remove_envelop(indata)
            self._update_input_with_kwargs(ns_request, kwargs)
            ns_request = self._validate_input_new(ns_request, session["force"])

            step = "getting nsd id='{}' from database".format(ns_request.get("nsdId"))
            nsd = self._get_nsd_from_db(ns_request["nsdId"], session)
            ns_k8s_namespace = self._get_ns_k8s_namespace(nsd, ns_request, session)

            step = "checking nsdOperationalState"
            self._check_nsd_operational_state(nsd, ns_request)

            step = "filling nsr from input data"
            nsr_id = str(uuid4())
            nsr_descriptor = self._create_nsr_descriptor_from_nsd(
                nsd, ns_request, nsr_id, session
            )

            # Create VNFRs
            needed_vnfds = {}  # cache: vnfd-id -> vnfd, fetched at most once
            # TODO: Change for multiple df support
            vnf_profiles = nsd.get("df", [{}])[0].get("vnf-profile", ())
            for vnfp in vnf_profiles:
                vnfd_id = vnfp.get("vnfd-id")
                vnf_index = vnfp.get("id")
                step = (
                    "getting vnfd id='{}' constituent-vnfd='{}' from database".format(
                        vnfd_id, vnf_index
                    )
                )
                if vnfd_id not in needed_vnfds:
                    vnfd = self._get_vnfd_from_db(vnfd_id, session)
                    # keep descriptor revision before dropping the _admin section
                    if "revision" in vnfd["_admin"]:
                        vnfd["revision"] = vnfd["_admin"]["revision"]
                    vnfd.pop("_admin")
                    needed_vnfds[vnfd_id] = vnfd
                    nsr_descriptor["vnfd-id"].append(vnfd["_id"])
                else:
                    vnfd = needed_vnfds[vnfd_id]

                step = "filling vnfr vnfd-id='{}' constituent-vnfd='{}'".format(
                    vnfd_id, vnf_index
                )
                vnfr_descriptor = self._create_vnfr_descriptor_from_vnfd(
                    nsd,
                    vnfd,
                    vnfd_id,
                    vnf_index,
                    nsr_descriptor,
                    ns_request,
                    ns_k8s_namespace,
                )

                step = "creating vnfr vnfd-id='{}' constituent-vnfd='{}' at database".format(
                    vnfd_id, vnf_index
                )
                self._add_vnfr_to_db(vnfr_descriptor, rollback, session)
                nsr_descriptor["constituent-vnfr-ref"].append(vnfr_descriptor["id"])
                step = "Updating VNFD usageState"
                update_descriptor_usage_state(vnfd, "vnfds", self.db)

            step = "creating nsr at database"
            self._add_nsr_to_db(nsr_descriptor, rollback, session)
            step = "Updating NSD usageState"
            update_descriptor_usage_state(nsd, "nsds", self.db)

            step = "creating nsr temporal folder"
            self.fs.mkdir(nsr_id)

            return nsr_id, None
        except (
            ValidationError,
            EngineException,
            DbException,
            MsgException,
            FsException,
        ) as e:
            # re-raise same exception type, annotated with the step that failed
            raise type(e)("{} while '{}'".format(e, step), http_code=e.http_code)
397 |
|
|
398 |
1 |
def _get_nsd_from_db(self, nsd_id, session): |
399 |
1 |
_filter = self._get_project_filter(session) |
400 |
1 |
_filter["_id"] = nsd_id |
401 |
1 |
return self.db.get_one("nsds", _filter) |
402 |
|
|
403 |
1 |
def _get_vnfd_from_db(self, vnfd_id, session): |
404 |
1 |
_filter = self._get_project_filter(session) |
405 |
1 |
_filter["id"] = vnfd_id |
406 |
1 |
vnfd = self.db.get_one("vnfds", _filter, fail_on_empty=True, fail_on_more=True) |
407 |
1 |
return vnfd |
408 |
|
|
409 |
1 |
def _add_nsr_to_db(self, nsr_descriptor, rollback, session): |
410 |
1 |
self.format_on_new( |
411 |
|
nsr_descriptor, session["project_id"], make_public=session["public"] |
412 |
|
) |
413 |
1 |
self.db.create("nsrs", nsr_descriptor) |
414 |
1 |
rollback.append({"topic": "nsrs", "_id": nsr_descriptor["id"]}) |
415 |
|
|
416 |
1 |
def _add_vnfr_to_db(self, vnfr_descriptor, rollback, session): |
417 |
1 |
self.format_on_new( |
418 |
|
vnfr_descriptor, session["project_id"], make_public=session["public"] |
419 |
|
) |
420 |
1 |
self.db.create("vnfrs", vnfr_descriptor) |
421 |
1 |
rollback.append({"topic": "vnfrs", "_id": vnfr_descriptor["id"]}) |
422 |
|
|
423 |
1 |
def _check_nsd_operational_state(self, nsd, ns_request): |
424 |
1 |
if nsd["_admin"]["operationalState"] == "DISABLED": |
425 |
0 |
raise EngineException( |
426 |
|
"nsd with id '{}' is DISABLED, and thus cannot be used to create " |
427 |
|
"a network service".format(ns_request["nsdId"]), |
428 |
|
http_code=HTTPStatus.CONFLICT, |
429 |
|
) |
430 |
|
|
431 |
1 |
def _get_ns_k8s_namespace(self, nsd, ns_request, session): |
432 |
1 |
additional_params, _ = self._format_additional_params( |
433 |
|
ns_request, descriptor=nsd |
434 |
|
) |
435 |
|
# use for k8s-namespace from ns_request or additionalParamsForNs. By default, the project_id |
436 |
1 |
ns_k8s_namespace = session["project_id"][0] if session["project_id"] else None |
437 |
1 |
if ns_request and ns_request.get("k8s-namespace"): |
438 |
0 |
ns_k8s_namespace = ns_request["k8s-namespace"] |
439 |
1 |
if additional_params and additional_params.get("k8s-namespace"): |
440 |
0 |
ns_k8s_namespace = additional_params["k8s-namespace"] |
441 |
|
|
442 |
1 |
return ns_k8s_namespace |
443 |
|
|
444 |
1 |
def _add_shared_volumes_to_nsr( |
445 |
|
self, vdu, vnfd, nsr_descriptor, member_vnf_index, revision=None |
446 |
|
): |
447 |
1 |
svsd = [] |
448 |
1 |
for vsd in vnfd.get("virtual-storage-desc", ()): |
449 |
1 |
if vsd.get("vdu-storage-requirements"): |
450 |
0 |
if ( |
451 |
|
vsd.get("vdu-storage-requirements")[0].get("key") == "multiattach" |
452 |
|
and vsd.get("vdu-storage-requirements")[0].get("value") == "True" |
453 |
|
): |
454 |
|
# Avoid setting the volume name multiple times |
455 |
0 |
if not match(f"shared-.*-{vnfd['id']}", vsd["id"]): |
456 |
0 |
vsd["id"] = f"shared-{vsd['id']}-{vnfd['id']}" |
457 |
0 |
svsd.append(vsd) |
458 |
1 |
if svsd: |
459 |
0 |
nsr_descriptor["shared-volumes"] = svsd |
460 |
|
|
461 |
1 |
def _add_flavor_to_nsr( |
462 |
|
self, vdu, vnfd, nsr_descriptor, member_vnf_index, revision=None |
463 |
|
): |
464 |
1 |
flavor_data = {} |
465 |
1 |
guest_epa = {} |
466 |
|
# Find this vdu compute and storage descriptors |
467 |
1 |
vdu_virtual_compute = {} |
468 |
1 |
vdu_virtual_storage = {} |
469 |
1 |
for vcd in vnfd.get("virtual-compute-desc", ()): |
470 |
1 |
if vcd.get("id") == vdu.get("virtual-compute-desc"): |
471 |
1 |
vdu_virtual_compute = vcd |
472 |
1 |
for vsd in vnfd.get("virtual-storage-desc", ()): |
473 |
1 |
if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]: |
474 |
1 |
vdu_virtual_storage = vsd |
475 |
|
# Get this vdu vcpus, memory and storage info for flavor_data |
476 |
1 |
if vdu_virtual_compute.get("virtual-cpu", {}).get("num-virtual-cpu"): |
477 |
1 |
flavor_data["vcpu-count"] = vdu_virtual_compute["virtual-cpu"][ |
478 |
|
"num-virtual-cpu" |
479 |
|
] |
480 |
1 |
if vdu_virtual_compute.get("virtual-memory", {}).get("size"): |
481 |
1 |
flavor_data["memory-mb"] = ( |
482 |
|
float(vdu_virtual_compute["virtual-memory"]["size"]) * 1024.0 |
483 |
|
) |
484 |
1 |
if vdu_virtual_storage.get("size-of-storage"): |
485 |
1 |
flavor_data["storage-gb"] = vdu_virtual_storage["size-of-storage"] |
486 |
|
# Get this vdu EPA info for guest_epa |
487 |
1 |
if vdu_virtual_compute.get("virtual-cpu", {}).get("cpu-quota"): |
488 |
0 |
guest_epa["cpu-quota"] = vdu_virtual_compute["virtual-cpu"]["cpu-quota"] |
489 |
1 |
if vdu_virtual_compute.get("virtual-cpu", {}).get("pinning"): |
490 |
0 |
vcpu_pinning = vdu_virtual_compute["virtual-cpu"]["pinning"] |
491 |
0 |
if vcpu_pinning.get("thread-policy"): |
492 |
0 |
guest_epa["cpu-thread-pinning-policy"] = vcpu_pinning["thread-policy"] |
493 |
0 |
if vcpu_pinning.get("policy"): |
494 |
0 |
cpu_policy = ( |
495 |
|
"SHARED" if vcpu_pinning["policy"] == "dynamic" else "DEDICATED" |
496 |
|
) |
497 |
0 |
guest_epa["cpu-pinning-policy"] = cpu_policy |
498 |
1 |
if vdu_virtual_compute.get("virtual-memory", {}).get("mem-quota"): |
499 |
0 |
guest_epa["mem-quota"] = vdu_virtual_compute["virtual-memory"]["mem-quota"] |
500 |
1 |
if vdu_virtual_compute.get("virtual-memory", {}).get("mempage-size"): |
501 |
0 |
guest_epa["mempage-size"] = vdu_virtual_compute["virtual-memory"][ |
502 |
|
"mempage-size" |
503 |
|
] |
504 |
1 |
if vdu_virtual_compute.get("virtual-memory", {}).get("numa-node-policy"): |
505 |
0 |
guest_epa["numa-node-policy"] = vdu_virtual_compute["virtual-memory"][ |
506 |
|
"numa-node-policy" |
507 |
|
] |
508 |
1 |
if vdu_virtual_storage.get("disk-io-quota"): |
509 |
0 |
guest_epa["disk-io-quota"] = vdu_virtual_storage["disk-io-quota"] |
510 |
|
|
511 |
1 |
if guest_epa: |
512 |
0 |
flavor_data["guest-epa"] = guest_epa |
513 |
|
|
514 |
1 |
revision = revision if revision is not None else 1 |
515 |
1 |
flavor_data["name"] = ( |
516 |
|
vdu["id"][:56] + "-" + member_vnf_index + "-" + str(revision) + "-flv" |
517 |
|
) |
518 |
1 |
flavor_data["id"] = str(len(nsr_descriptor["flavor"])) |
519 |
1 |
nsr_descriptor["flavor"].append(flavor_data) |
520 |
|
|
521 |
1 |
    def _create_nsr_descriptor_from_nsd(self, nsd, ns_request, nsr_id, session):
        """
        Build the NSR database record from the NSD and the instantiation request.

        Fills the base NSR fields, then derives vld, flavor, image, shared-volumes,
        affinity groups and vnffgd entries from the NSD and its referenced VNFDs.
        NOTE: also mutates ns_request by storing nsr_id in it.
        :param nsd: NS descriptor (with its _admin section)
        :param ns_request: validated instantiation request
        :param nsr_id: uuid already chosen for the new NSR
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :return: the new NSR descriptor (not yet persisted)
        """
        now = time()
        additional_params, _ = self._format_additional_params(
            ns_request, descriptor=nsd
        )

        nsr_descriptor = {
            "name": ns_request["nsName"],
            "name-ref": ns_request["nsName"],
            "short-name": ns_request["nsName"],
            "admin-status": "ENABLED",
            "nsState": "NOT_INSTANTIATED",
            "currentOperation": "IDLE",
            "currentOperationID": None,
            "errorDescription": None,
            "errorDetail": None,
            "deploymentStatus": None,
            "configurationStatus": None,
            "vcaStatus": None,
            "nsd": {k: v for k, v in nsd.items()},
            "datacenter": ns_request["vimAccountId"],
            "resource-orchestrator": "osmopenmano",
            "description": ns_request.get("nsDescription", ""),
            "constituent-vnfr-ref": [],
            "operational-status": "init",  # typedef ns-operational-
            "config-status": "init",  # typedef config-states
            "detailed-status": "scheduled",
            "orchestration-progress": {},
            "create-time": now,
            "nsd-name-ref": nsd["name"],
            "operational-events": [],  # "id", "timestamp", "description", "event",
            "nsd-ref": nsd["id"],
            "nsd-id": nsd["_id"],
            "vnfd-id": [],
            "instantiate_params": self._format_ns_request(ns_request),
            "additionalParamsForNs": additional_params,
            "ns-instance-config-ref": nsr_id,
            "id": nsr_id,
            "_id": nsr_id,
            "ssh-authorized-key": ns_request.get("ssh_keys"),  # TODO remove
            "flavor": [],
            "image": [],
            "affinity-or-anti-affinity-group": [],
            "shared-volumes": [],
            "vnffgd": [],
        }
        if "revision" in nsd["_admin"]:
            nsr_descriptor["revision"] = nsd["_admin"]["revision"]

        ns_request["nsr_id"] = nsr_id
        if ns_request and ns_request.get("config-units"):
            nsr_descriptor["config-units"] = ns_request["config-units"]
        # Create vld
        if nsd.get("virtual-link-desc"):
            nsr_vld = deepcopy(nsd.get("virtual-link-desc", []))
            # Fill each vld with vnfd-connection-point-ref data
            # TODO: Change for multiple df support
            all_vld_connection_point_data = {vld.get("id"): [] for vld in nsr_vld}
            vnf_profiles = nsd.get("df", [[]])[0].get("vnf-profile", ())
            for vnf_profile in vnf_profiles:
                # group connection-point refs per virtual-link-profile-id
                for vlc in vnf_profile.get("virtual-link-connectivity", ()):
                    for cpd in vlc.get("constituent-cpd-id", ()):
                        all_vld_connection_point_data[
                            vlc.get("virtual-link-profile-id")
                        ].append(
                            {
                                "member-vnf-index-ref": cpd.get(
                                    "constituent-base-element-id"
                                ),
                                "vnfd-connection-point-ref": cpd.get(
                                    "constituent-cpd-id"
                                ),
                                "vnfd-id-ref": vnf_profile.get("vnfd-id"),
                            }
                        )

                vnfd = self._get_vnfd_from_db(vnf_profile.get("vnfd-id"), session)
                vnfd.pop("_admin")

                # flavor / shared-volume / image data derived from each VDU
                for vdu in vnfd.get("vdu", ()):
                    member_vnf_index = vnf_profile.get("id")
                    self._add_flavor_to_nsr(vdu, vnfd, nsr_descriptor, member_vnf_index)
                    self._add_shared_volumes_to_nsr(
                        vdu, vnfd, nsr_descriptor, member_vnf_index
                    )
                    sw_image_id = vdu.get("sw-image-desc")
                    if sw_image_id:
                        image_data = self._get_image_data_from_vnfd(vnfd, sw_image_id)
                        self._add_image_to_nsr(nsr_descriptor, image_data)

                    # also add alternative images to the list of images
                    for alt_image in vdu.get("alternative-sw-image-desc", ()):
                        image_data = self._get_image_data_from_vnfd(vnfd, alt_image)
                        self._add_image_to_nsr(nsr_descriptor, image_data)

                # Add Affinity or Anti-affinity group information to NSR
                vdu_profiles = vnfd.get("df", [[]])[0].get("vdu-profile", ())
                affinity_group_prefix_name = "{}-{}".format(
                    nsr_descriptor["name"][:16], vnf_profile.get("id")[:16]
                )

                for vdu_profile in vdu_profiles:
                    affinity_group_data = {}
                    for affinity_group in vdu_profile.get(
                        "affinity-or-anti-affinity-group", ()
                    ):
                        affinity_group_data = (
                            self._get_affinity_or_anti_affinity_group_data_from_vnfd(
                                vnfd, affinity_group["id"]
                            )
                        )
                        affinity_group_data["member-vnf-index"] = vnf_profile.get("id")
                        self._add_affinity_or_anti_affinity_group_to_nsr(
                            nsr_descriptor,
                            affinity_group_data,
                            affinity_group_prefix_name,
                        )

            for vld in nsr_vld:
                vld["vnfd-connection-point-ref"] = all_vld_connection_point_data.get(
                    vld.get("id"), []
                )
                vld["name"] = vld["id"]
            nsr_descriptor["vld"] = nsr_vld
        if nsd.get("vnffgd"):
            vnffgd = nsd.get("vnffgd")
            # keep only the "id" and "nfpd" parts of each forwarding graph
            for vnffg in vnffgd:
                info = {}
                for k, v in vnffg.items():
                    if k == "id":
                        info.update({k: v})
                    if k == "nfpd":
                        info.update({k: v})
                nsr_descriptor["vnffgd"].append(info)

        return nsr_descriptor
657 |
|
|
658 |
1 |
def _get_affinity_or_anti_affinity_group_data_from_vnfd( |
659 |
|
self, vnfd, affinity_group_id |
660 |
|
): |
661 |
|
""" |
662 |
|
Gets affinity-or-anti-affinity-group info from df and returns the desired affinity group |
663 |
|
""" |
664 |
0 |
affinity_group = utils.find_in_list( |
665 |
|
vnfd.get("df", [[]])[0].get("affinity-or-anti-affinity-group", ()), |
666 |
|
lambda ag: ag["id"] == affinity_group_id, |
667 |
|
) |
668 |
0 |
affinity_group_data = {} |
669 |
0 |
if affinity_group: |
670 |
0 |
if affinity_group.get("id"): |
671 |
0 |
affinity_group_data["ag-id"] = affinity_group["id"] |
672 |
0 |
if affinity_group.get("type"): |
673 |
0 |
affinity_group_data["type"] = affinity_group["type"] |
674 |
0 |
if affinity_group.get("scope"): |
675 |
0 |
affinity_group_data["scope"] = affinity_group["scope"] |
676 |
0 |
return affinity_group_data |
677 |
|
|
678 |
1 |
def _add_affinity_or_anti_affinity_group_to_nsr( |
679 |
|
self, nsr_descriptor, affinity_group_data, affinity_group_prefix_name |
680 |
|
): |
681 |
|
""" |
682 |
|
Adds affinity-or-anti-affinity-group to nsr checking first it is not already added |
683 |
|
""" |
684 |
0 |
affinity_group = next( |
685 |
|
( |
686 |
|
f |
687 |
|
for f in nsr_descriptor["affinity-or-anti-affinity-group"] |
688 |
|
if all(f.get(k) == affinity_group_data[k] for k in affinity_group_data) |
689 |
|
), |
690 |
|
None, |
691 |
|
) |
692 |
0 |
if not affinity_group: |
693 |
0 |
affinity_group_data["id"] = str( |
694 |
|
len(nsr_descriptor["affinity-or-anti-affinity-group"]) |
695 |
|
) |
696 |
0 |
affinity_group_data["name"] = "{}-{}".format( |
697 |
|
affinity_group_prefix_name, affinity_group_data["ag-id"][:32] |
698 |
|
) |
699 |
0 |
nsr_descriptor["affinity-or-anti-affinity-group"].append( |
700 |
|
affinity_group_data |
701 |
|
) |
702 |
|
|
703 |
1 |
def _get_image_data_from_vnfd(self, vnfd, sw_image_id): |
704 |
1 |
sw_image_desc = utils.find_in_list( |
705 |
|
vnfd.get("sw-image-desc", ()), lambda sw: sw["id"] == sw_image_id |
706 |
|
) |
707 |
1 |
image_data = {} |
708 |
1 |
if sw_image_desc.get("image"): |
709 |
1 |
image_data["image"] = sw_image_desc["image"] |
710 |
1 |
if sw_image_desc.get("checksum"): |
711 |
0 |
image_data["image_checksum"] = sw_image_desc["checksum"]["hash"] |
712 |
1 |
if sw_image_desc.get("vim-type"): |
713 |
1 |
image_data["vim-type"] = sw_image_desc["vim-type"] |
714 |
1 |
return image_data |
715 |
|
|
716 |
1 |
def _add_image_to_nsr(self, nsr_descriptor, image_data): |
717 |
|
""" |
718 |
|
Adds image to nsr checking first it is not already added |
719 |
|
""" |
720 |
1 |
img = next( |
721 |
|
( |
722 |
|
f |
723 |
|
for f in nsr_descriptor["image"] |
724 |
|
if all(f.get(k) == image_data[k] for k in image_data) |
725 |
|
), |
726 |
|
None, |
727 |
|
) |
728 |
1 |
if not img: |
729 |
1 |
image_data["id"] = str(len(nsr_descriptor["image"])) |
730 |
1 |
nsr_descriptor["image"].append(image_data) |
731 |
|
|
732 |
1 |
    def _create_vnfr_descriptor_from_vnfd(
        self,
        nsd,
        vnfd,
        vnfd_id,
        vnf_index,
        nsr_descriptor,
        ns_request,
        ns_k8s_namespace,
        revision=None,
    ):
        """Build the VNF record (vnfr) for one member-vnf-index of a new NS.

        :param nsd: NS descriptor; its df/vnf-profile data resolves which
            ns-vld each externally connected interface belongs to
        :param vnfd: VNF descriptor the record is created from
        :param vnfd_id: VNFD reference id as used by the NSD
        :param vnf_index: member-vnf-index of this VNF inside the NS
        :param nsr_descriptor: NS record under construction; its "image",
            "flavor", "vld" and "affinity-or-anti-affinity-group" lists are
            read (and pci-interfaces flags written) while building vdur entries
        :param ns_request: instantiation request; source of additionalParams
            and per-vnf/vdu/kdu overrides
        :param ns_k8s_namespace: default k8s namespace for kdu deployments
        :param revision: VNFD revision used to compose flavor names; treated
            as 1 when None
        :return: the new vnfr dictionary (not stored in the database here)
        """
        vnfr_id = str(uuid4())
        nsr_id = nsr_descriptor["id"]
        now = time()
        additional_params, vnf_params = self._format_additional_params(
            ns_request, vnf_index, descriptor=vnfd
        )

        vnfr_descriptor = {
            "id": vnfr_id,
            "_id": vnfr_id,
            "nsr-id-ref": nsr_id,
            "member-vnf-index-ref": vnf_index,
            "additionalParamsForVnf": additional_params,
            "created-time": now,
            # "vnfd": vnfd,  # at OSM model, but removed to avoid data duplication TODO: revise
            "vnfd-ref": vnfd_id,
            "vnfd-id": vnfd["_id"],  # not at OSM model, but useful
            "vim-account-id": None,
            "vca-id": None,
            "vdur": [],
            "connection-point": [],
            "ip-address": None,  # mgmt-interface filled by LCM
        }

        # Revision backwards compatility. Only specify the revision in the record if
        # the original VNFD has a revision.
        if "revision" in vnfd:
            vnfr_descriptor["revision"] = vnfd["revision"]

        # Per-vnf instantiation params may override the k8s namespace and
        # provide config-units for the charm execution environment.
        vnf_k8s_namespace = ns_k8s_namespace
        if vnf_params:
            if vnf_params.get("k8s-namespace"):
                vnf_k8s_namespace = vnf_params["k8s-namespace"]
            if vnf_params.get("config-units"):
                vnfr_descriptor["config-units"] = vnf_params["config-units"]

        # Create vld: one entry per internal virtual link, copied field by field
        if vnfd.get("int-virtual-link-desc"):
            vnfr_descriptor["vld"] = []
            for vnfd_vld in vnfd.get("int-virtual-link-desc"):
                vnfr_descriptor["vld"].append({key: vnfd_vld[key] for key in vnfd_vld})

        # One connection-point record per external connection point descriptor
        for cp in vnfd.get("ext-cpd", ()):
            vnf_cp = {
                "name": cp.get("id"),
                "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                "id": cp.get("id"),
                # "ip-address", "mac-address" # filled by LCM
                # vim-id # TODO it would be nice having a vim port id
            }
            vnfr_descriptor["connection-point"].append(vnf_cp)

        # Create k8s-cluster information
        # TODO: Validate if a k8s-cluster net can have more than one ext-cpd ?
        if vnfd.get("k8s-cluster"):
            vnfr_descriptor["k8s-cluster"] = vnfd["k8s-cluster"]
            # map k8s-cluster-net name -> ext-cpd id, to annotate each net below
            all_k8s_cluster_nets_cpds = {}
            for cpd in get_iterable(vnfd.get("ext-cpd")):
                if cpd.get("k8s-cluster-net"):
                    all_k8s_cluster_nets_cpds[cpd.get("k8s-cluster-net")] = cpd.get(
                        "id"
                    )
            for net in get_iterable(vnfr_descriptor["k8s-cluster"].get("nets")):
                if net.get("id") in all_k8s_cluster_nets_cpds:
                    net["external-connection-point-ref"] = all_k8s_cluster_nets_cpds[
                        net.get("id")
                    ]

        # update kdus
        for kdu in get_iterable(vnfd.get("kdu")):
            additional_params, kdu_params = self._format_additional_params(
                ns_request, vnf_index, kdu_name=kdu["name"], descriptor=vnfd
            )
            # kdu params may override the namespace, model and deployment name
            kdu_k8s_namespace = vnf_k8s_namespace
            kdu_model = kdu_params.get("kdu_model") if kdu_params else None
            if kdu_params and kdu_params.get("k8s-namespace"):
                kdu_k8s_namespace = kdu_params["k8s-namespace"]

            kdu_deployment_name = ""
            if kdu_params and kdu_params.get("kdu-deployment-name"):
                kdu_deployment_name = kdu_params.get("kdu-deployment-name")

            kdur = {
                "additionalParams": additional_params,
                "k8s-namespace": kdu_k8s_namespace,
                "kdu-deployment-name": kdu_deployment_name,
                "kdu-name": kdu["name"],
                # TODO "name": "" Name of the VDU in the VIM
                "ip-address": None,  # mgmt-interface filled by LCM
                "k8s-cluster": {},
            }
            if kdu_params and kdu_params.get("config-units"):
                kdur["config-units"] = kdu_params["config-units"]
            if kdu.get("helm-version"):
                kdur["helm-version"] = kdu["helm-version"]
            # a kdu_model instantiation param replaces the chart/bundle reference
            for k8s_type in ("helm-chart", "juju-bundle"):
                if kdu.get(k8s_type):
                    kdur[k8s_type] = kdu_model or kdu[k8s_type]
            if not vnfr_descriptor.get("kdur"):
                vnfr_descriptor["kdur"] = []
            vnfr_descriptor["kdur"].append(kdur)

        vnfd_mgmt_cp = vnfd.get("mgmt-cp")

        # Build one vdur template per VDU; it is cloned per instance at the end
        # of this loop body.
        for vdu in vnfd.get("vdu", ()):
            vdu_mgmt_cp = []
            # day1-2 config for this vdu, if the df declares one (any missing
            # key along the path simply means "no config")
            try:
                configs = vnfd.get("df")[0]["lcm-operations-configuration"][
                    "operate-vnf-op-config"
                ]["day1-2"]
                vdu_config = utils.find_in_list(
                    configs, lambda config: config["id"] == vdu["id"]
                )
            except Exception:
                vdu_config = None

            try:
                vdu_instantiation_level = utils.find_in_list(
                    vnfd.get("df")[0]["instantiation-level"][0]["vdu-level"],
                    lambda a_vdu_profile: a_vdu_profile["vdu-id"] == vdu["id"],
                )
            except Exception:
                vdu_instantiation_level = None

            # connection points used by execution environments count as mgmt
            if vdu_config:
                external_connection_ee = utils.filter_in_list(
                    vdu_config.get("execution-environment-list", []),
                    lambda ee: "external-connection-point-ref" in ee,
                )
                for ee in external_connection_ee:
                    vdu_mgmt_cp.append(ee["external-connection-point-ref"])

            additional_params, vdu_params = self._format_additional_params(
                ns_request, vnf_index, vdu_id=vdu["id"], descriptor=vnfd
            )

            try:
                vdu_virtual_storage_descriptors = utils.filter_in_list(
                    vnfd.get("virtual-storage-desc", []),
                    lambda stg_desc: stg_desc["id"] in vdu["virtual-storage-desc"],
                )
            except Exception:
                vdu_virtual_storage_descriptors = []
            vdur = {
                "vdu-id-ref": vdu["id"],
                # TODO "name": "" Name of the VDU in the VIM
                "ip-address": None,  # mgmt-interface filled by LCM
                # "vim-id", "flavor-id", "image-id", "management-ip" # filled by LCM
                "internal-connection-point": [],
                "interfaces": [],
                "additionalParams": additional_params,
                "vdu-name": vdu["name"],
                "virtual-storages": vdu_virtual_storage_descriptors,
            }
            if vdu_params and vdu_params.get("config-units"):
                vdur["config-units"] = vdu_params["config-units"]
            if deep_get(vdu, ("supplemental-boot-data", "boot-data-drive")):
                vdur["boot-data-drive"] = vdu["supplemental-boot-data"][
                    "boot-data-drive"
                ]
            if vdu.get("pdu-type"):
                vdur["pdu-type"] = vdu["pdu-type"]
                vdur["name"] = vdu["pdu-type"]
            # TODO volumes: name, volume-id
            for icp in vdu.get("int-cpd", ()):
                vdu_icp = {
                    "id": icp["id"],
                    "connection-point-id": icp["id"],
                    "name": icp.get("id"),
                }

                vdur["internal-connection-point"].append(vdu_icp)

                for iface in icp.get("virtual-network-interface-requirement", ()):
                    # Name, mac-address and interface position is taken from VNFD
                    # and included into VNFR. By this way RO can process this information
                    # while creating the VDU.
                    iface_fields = ("name", "mac-address", "position", "ip-address")
                    vdu_iface = {
                        x: iface[x] for x in iface_fields if iface.get(x) is not None
                    }

                    vdu_iface["internal-connection-point-ref"] = vdu_icp["id"]
                    if "port-security-enabled" in icp:
                        vdu_iface["port-security-enabled"] = icp[
                            "port-security-enabled"
                        ]

                    if "port-security-disable-strategy" in icp:
                        vdu_iface["port-security-disable-strategy"] = icp[
                            "port-security-disable-strategy"
                        ]

                    # find the ext-cpd exposing this internal cpd, if any;
                    # its port-security settings override the icp ones
                    for ext_cp in vnfd.get("ext-cpd", ()):
                        if not ext_cp.get("int-cpd"):
                            continue
                        if ext_cp["int-cpd"].get("vdu-id") != vdu["id"]:
                            continue
                        if icp["id"] == ext_cp["int-cpd"].get("cpd"):
                            vdu_iface["external-connection-point-ref"] = ext_cp.get(
                                "id"
                            )

                            if "port-security-enabled" in ext_cp:
                                vdu_iface["port-security-enabled"] = ext_cp[
                                    "port-security-enabled"
                                ]

                            if "port-security-disable-strategy" in ext_cp:
                                vdu_iface["port-security-disable-strategy"] = ext_cp[
                                    "port-security-disable-strategy"
                                ]

                            break

                    # interface on the VNFD mgmt connection point is the vnf
                    # management interface
                    if (
                        vnfd_mgmt_cp
                        and vdu_iface.get("external-connection-point-ref")
                        == vnfd_mgmt_cp
                    ):
                        vdu_iface["mgmt-vnf"] = True
                        vdu_iface["mgmt-interface"] = True

                    for ecp in vdu_mgmt_cp:
                        if vdu_iface.get("external-connection-point-ref") == ecp:
                            vdu_iface["mgmt-interface"] = True

                    if iface.get("virtual-interface"):
                        vdu_iface.update(deepcopy(iface["virtual-interface"]))

                    # look for network where this interface is connected
                    iface_ext_cp = vdu_iface.get("external-connection-point-ref")
                    if iface_ext_cp:
                        # TODO: Change for multiple df support
                        for df in get_iterable(nsd.get("df")):
                            for vnf_profile in get_iterable(df.get("vnf-profile")):
                                for vlc_index, vlc in enumerate(
                                    get_iterable(
                                        vnf_profile.get("virtual-link-connectivity")
                                    )
                                ):
                                    for cpd in get_iterable(
                                        vlc.get("constituent-cpd-id")
                                    ):
                                        if (
                                            cpd.get("constituent-cpd-id")
                                            == iface_ext_cp
                                        ) and vnf_profile.get("id") == vnf_index:
                                            vdu_iface["ns-vld-id"] = vlc.get(
                                                "virtual-link-profile-id"
                                            )
                                            # if iface type is SRIOV or PASSTHROUGH, set pci-interfaces flag to True
                                            if vdu_iface.get("type") in (
                                                "SR-IOV",
                                                "PCI-PASSTHROUGH",
                                            ):
                                                nsr_descriptor["vld"][vlc_index][
                                                    "pci-interfaces"
                                                ] = True
                                            break
                    elif vdu_iface.get("internal-connection-point-ref"):
                        vdu_iface["vnf-vld-id"] = icp.get("int-virtual-link-desc")
                        # TODO: store fixed IP address in the record (if it exists in the ICP)
                        # if iface type is SRIOV or PASSTHROUGH, set pci-interfaces flag to True
                        if vdu_iface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
                            ivld_index = utils.find_index_in_list(
                                vnfd.get("int-virtual-link-desc", ()),
                                lambda ivld: ivld["id"]
                                == icp.get("int-virtual-link-desc"),
                            )
                            vnfr_descriptor["vld"][ivld_index]["pci-interfaces"] = True

                    vdur["interfaces"].append(vdu_iface)

            # resolve the NSR image id for the main software image
            if vdu.get("sw-image-desc"):
                sw_image = utils.find_in_list(
                    vnfd.get("sw-image-desc", ()),
                    lambda image: image["id"] == vdu.get("sw-image-desc"),
                )
                nsr_sw_image_data = utils.find_in_list(
                    nsr_descriptor["image"],
                    lambda nsr_image: (nsr_image.get("image") == sw_image.get("image")),
                )
                vdur["ns-image-id"] = nsr_sw_image_data["id"]

            # resolve the NSR image ids for the alternative software images
            if vdu.get("alternative-sw-image-desc"):
                alt_image_ids = []
                for alt_image_id in vdu.get("alternative-sw-image-desc", ()):
                    sw_image = utils.find_in_list(
                        vnfd.get("sw-image-desc", ()),
                        lambda image: image["id"] == alt_image_id,
                    )
                    nsr_sw_image_data = utils.find_in_list(
                        nsr_descriptor["image"],
                        lambda nsr_image: (
                            nsr_image.get("image") == sw_image.get("image")
                        ),
                    )
                    alt_image_ids.append(nsr_sw_image_data["id"])
                vdur["alt-image-ids"] = alt_image_ids

            # flavor names embed the vdu id, member index and VNFD revision
            revision = revision if revision is not None else 1
            flavor_data_name = (
                vdu["id"][:56] + "-" + vnf_index + "-" + str(revision) + "-flv"
            )
            nsr_flavor_desc = utils.find_in_list(
                nsr_descriptor["flavor"],
                lambda flavor: flavor["name"] == flavor_data_name,
            )

            if nsr_flavor_desc:
                vdur["ns-flavor-id"] = nsr_flavor_desc["id"]

            # Adding Shared Volume information to vdur
            if vdur.get("virtual-storages"):
                nsr_sv = []
                for vsd in vdur["virtual-storages"]:
                    if vsd.get("vdu-storage-requirements"):
                        # only the first requirement entry is inspected here
                        if (
                            vsd["vdu-storage-requirements"][0].get("key")
                            == "multiattach"
                            and vsd["vdu-storage-requirements"][0].get("value")
                            == "True"
                        ):
                            nsr_sv.append(vsd["id"])
                if nsr_sv:
                    vdur["shared-volumes-id"] = nsr_sv

            # Adding Affinity groups information to vdur
            try:
                vdu_profile_affinity_group = utils.find_in_list(
                    vnfd.get("df")[0]["vdu-profile"],
                    lambda a_vdu: a_vdu["id"] == vdu["id"],
                )
            except Exception:
                vdu_profile_affinity_group = None

            if vdu_profile_affinity_group:
                affinity_group_ids = []
                for affinity_group in vdu_profile_affinity_group.get(
                    "affinity-or-anti-affinity-group", ()
                ):
                    vdu_affinity_group = utils.find_in_list(
                        vdu_profile_affinity_group.get(
                            "affinity-or-anti-affinity-group", ()
                        ),
                        lambda ag_fp: ag_fp["id"] == affinity_group["id"],
                    )
                    nsr_affinity_group = utils.find_in_list(
                        nsr_descriptor["affinity-or-anti-affinity-group"],
                        lambda nsr_ag: (
                            nsr_ag.get("ag-id") == vdu_affinity_group.get("id")
                            and nsr_ag.get("member-vnf-index")
                            == vnfr_descriptor.get("member-vnf-index-ref")
                        ),
                    )
                    # Update Affinity Group VIM name if VDU instantiation parameter is present
                    if vnf_params and vnf_params.get("affinity-or-anti-affinity-group"):
                        vnf_params_affinity_group = utils.find_in_list(
                            vnf_params["affinity-or-anti-affinity-group"],
                            lambda vnfp_ag: (
                                vnfp_ag.get("id") == vdu_affinity_group.get("id")
                            ),
                        )
                        if vnf_params_affinity_group.get("vim-affinity-group-id"):
                            nsr_affinity_group[
                                "vim-affinity-group-id"
                            ] = vnf_params_affinity_group["vim-affinity-group-id"]
                    affinity_group_ids.append(nsr_affinity_group["id"])
                vdur["affinity-or-anti-affinity-group-id"] = affinity_group_ids

            if vdu_instantiation_level:
                count = vdu_instantiation_level.get("number-of-instances")
            else:
                count = 1

            # Clone the template vdur once per instance. Each clone derives
            # from the previous one, so ip/mac addresses advance by one per
            # instance; the rest of the runtime fields are filled by LCM.
            for index in range(0, count):
                vdur = deepcopy(vdur)
                for iface in vdur["interfaces"]:
                    if iface.get("ip-address") and index != 0:
                        iface["ip-address"] = increment_ip_mac(iface["ip-address"])
                    if iface.get("mac-address") and index != 0:
                        iface["mac-address"] = increment_ip_mac(iface["mac-address"])

                vdur["_id"] = str(uuid4())
                vdur["id"] = vdur["_id"]
                vdur["count-index"] = index
                vnfr_descriptor["vdur"].append(vdur)
        return vnfr_descriptor
1133 |
|
|
1134 |
1 |
def vca_status_refresh(self, session, ns_instance_content, filter_q): |
1135 |
|
""" |
1136 |
|
vcaStatus in ns_instance_content maybe stale, check if it is stale and create lcm op |
1137 |
|
to refresh vca status by sending message to LCM when it is stale. Ignore otherwise. |
1138 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
1139 |
|
:param ns_instance_content: ns instance content |
1140 |
|
:param filter_q: dict: query parameter containing vcaStatus-refresh as true or false |
1141 |
|
:return: None |
1142 |
|
""" |
1143 |
1 |
time_now, time_delta = ( |
1144 |
|
time(), |
1145 |
|
time() - ns_instance_content["_admin"]["modified"], |
1146 |
|
) |
1147 |
1 |
force_refresh = ( |
1148 |
|
isinstance(filter_q, dict) and filter_q.get("vcaStatusRefresh") == "true" |
1149 |
|
) |
1150 |
1 |
threshold_reached = time_delta > 120 |
1151 |
1 |
if force_refresh or threshold_reached: |
1152 |
1 |
operation, _id = "vca_status_refresh", ns_instance_content["_id"] |
1153 |
1 |
ns_instance_content["_admin"]["modified"] = time_now |
1154 |
1 |
self.db.set_one(self.topic, {"_id": _id}, ns_instance_content) |
1155 |
1 |
nslcmop_desc = NsLcmOpTopic._create_nslcmop(_id, operation, None) |
1156 |
1 |
self.format_on_new( |
1157 |
|
nslcmop_desc, session["project_id"], make_public=session["public"] |
1158 |
|
) |
1159 |
1 |
nslcmop_desc["_admin"].pop("nsState") |
1160 |
1 |
self.msg.write("ns", operation, nslcmop_desc) |
1161 |
1 |
return |
1162 |
|
|
1163 |
1 |
def show(self, session, _id, filter_q=None, api_req=False): |
1164 |
|
""" |
1165 |
|
Get complete information on an ns instance. |
1166 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
1167 |
|
:param _id: string, ns instance id |
1168 |
|
:param filter_q: dict: query parameter containing vcaStatusRefresh as true or false |
1169 |
|
:param api_req: True if this call is serving an external API request. False if serving internal request. |
1170 |
|
:return: dictionary, raise exception if not found. |
1171 |
|
""" |
1172 |
1 |
ns_instance_content = super().show(session, _id, api_req) |
1173 |
1 |
self.vca_status_refresh(session, ns_instance_content, filter_q) |
1174 |
1 |
return ns_instance_content |
1175 |
|
|
1176 |
1 |
    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        # NS records are only modified through LCM operations; a direct edit
        # indicates a programming error in the caller.
        raise EngineException(
            "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )
1180 |
|
|
1181 |
|
|
1182 |
1 |
class VnfrTopic(BaseTopic):
    """Read-only topic for VNF records (vnfrs).

    VNF records are created and deleted by NsrTopic as part of the NS
    lifecycle, so every mutating entry point of this topic rejects direct
    use with an internal-server-error.
    """

    topic = "vnfrs"
    topic_msg = None  # no message-bus notifications for this topic

    def __init__(self, db, fs, msg, auth):
        BaseTopic.__init__(self, db, fs, msg, auth)

    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        # Not used because vnfrs are created and deleted by NsrTopic class directly
        raise EngineException(
            "Method new called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )

    def delete(self, session, _id, dry_run=False, not_send_msg=None):
        raise EngineException(
            "Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )

    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        raise EngineException(
            "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )
1204 |
|
|
1205 |
|
|
1206 |
1 |
class NsLcmOpTopic(BaseTopic):
    # database collection where NS LCM operation records are stored
    topic = "nslcmops"
    # topic name used when publishing operations on the message bus
    topic_msg = "ns"
    operation_schema = {  # mapping between operation and jsonschema to validate
        "instantiate": ns_instantiate,
        "action": ns_action,
        "update": ns_update,
        "scale": ns_scale,
        "heal": ns_heal,
        "terminate": ns_terminate,
        "migrate": ns_migrate,
        "verticalscale": ns_verticalscale,
        "cancel": nslcmop_cancel,
    }
1220 |
|
|
1221 |
1 |
    def __init__(self, db, fs, msg, auth):
        BaseTopic.__init__(self, db, fs, msg, auth)
        # NsrTopic helper used to look up/manipulate the NS records the
        # operations act upon.
        self.nsrtopic = NsrTopic(db, fs, msg, auth)
1224 |
|
|
1225 |
1 |
def _check_ns_operation(self, session, nsr, operation, indata): |
1226 |
|
""" |
1227 |
|
Check that user has enter right parameters for the operation |
1228 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
1229 |
|
:param operation: it can be: instantiate, terminate, action, update, heal |
1230 |
|
:param indata: descriptor with the parameters of the operation |
1231 |
|
:return: None |
1232 |
|
""" |
1233 |
1 |
if operation == "action": |
1234 |
1 |
self._check_action_ns_operation(indata, nsr) |
1235 |
1 |
elif operation == "scale": |
1236 |
0 |
self._check_scale_ns_operation(indata, nsr) |
1237 |
1 |
elif operation == "update": |
1238 |
1 |
self._check_update_ns_operation(indata, nsr) |
1239 |
1 |
elif operation == "heal": |
1240 |
0 |
self._check_heal_ns_operation(indata, nsr) |
1241 |
1 |
elif operation == "instantiate": |
1242 |
1 |
self._check_instantiate_ns_operation(indata, nsr, session) |
1243 |
|
|
1244 |
1 |
    def _check_action_ns_operation(self, indata, nsr):
        """Validate an "action" (primitive execution) request.

        Resolves which config-primitive list applies — vdu, kdu or vnf level
        when member_vnf_index is given, ns level otherwise — then checks the
        requested primitive exists and that the provided parameters are
        complete with no extras.

        :param indata: action request (member_vnf_index, optional
            vdu_id/kdu_name, primitive, primitive_params)
        :param nsr: NS record; provides the nsd and the _id to locate VNFDs
        :raises EngineException: unknown primitive, or missing/extra
            primitive parameters
        """
        nsd = nsr["nsd"]
        # check vnf_member_index
        if indata.get("vnf_member_index"):
            indata["member_vnf_index"] = indata.pop(
                "vnf_member_index"
            )  # for backward compatibility
        if indata.get("member_vnf_index"):
            vnfd = self._get_vnfd_from_vnf_member_index(
                indata["member_vnf_index"], nsr["_id"]
            )
            # day1-2 configuration list of the first df; missing path means
            # no configuration
            try:
                configs = vnfd.get("df")[0]["lcm-operations-configuration"][
                    "operate-vnf-op-config"
                ]["day1-2"]
            except Exception:
                configs = []

            if indata.get("vdu_id"):
                self._check_valid_vdu(vnfd, indata["vdu_id"])
                descriptor_configuration = utils.find_in_list(
                    configs, lambda config: config["id"] == indata["vdu_id"]
                )
            elif indata.get("kdu_name"):
                self._check_valid_kdu(vnfd, indata["kdu_name"])
                descriptor_configuration = utils.find_in_list(
                    configs, lambda config: config["id"] == indata.get("kdu_name")
                )
            else:
                descriptor_configuration = utils.find_in_list(
                    configs, lambda config: config["id"] == vnfd["id"]
                )
            if descriptor_configuration is not None:
                descriptor_configuration = descriptor_configuration.get(
                    "config-primitive"
                )
        else:  # use a NSD
            descriptor_configuration = nsd.get("ns-configuration", {}).get(
                "config-primitive"
            )

        # For k8s allows default primitives without validating the parameters
        if indata.get("kdu_name") and indata["primitive"] in (
            "upgrade",
            "rollback",
            "status",
            "inspect",
            "readme",
        ):
            # TODO should be checked that rollback only can contains revsision_numbe????
            if not indata.get("member_vnf_index"):
                raise EngineException(
                    "Missing action parameter 'member_vnf_index' for default KDU primitive '{}'".format(
                        indata["primitive"]
                    )
                )
            return
        # if not, check primitive
        for config_primitive in get_iterable(descriptor_configuration):
            if indata["primitive"] == config_primitive["name"]:
                # check needed primitive_params are provided
                if indata.get("primitive_params"):
                    in_primitive_params_copy = copy(indata["primitive_params"])
                else:
                    in_primitive_params_copy = {}
                # every declared parameter must be supplied or have a default
                for paramd in get_iterable(config_primitive.get("parameter")):
                    if paramd["name"] in in_primitive_params_copy:
                        del in_primitive_params_copy[paramd["name"]]
                    elif not paramd.get("default-value"):
                        raise EngineException(
                            "Needed parameter {} not provided for primitive '{}'".format(
                                paramd["name"], indata["primitive"]
                            )
                        )
                # check no extra primitive params are provided
                if in_primitive_params_copy:
                    raise EngineException(
                        "parameter/s '{}' not present at vnfd /nsd for primitive '{}'".format(
                            list(in_primitive_params_copy.keys()), indata["primitive"]
                        )
                    )
                break
        else:
            raise EngineException(
                "Invalid primitive '{}' is not present at vnfd/nsd".format(
                    indata["primitive"]
                )
            )
1332 |
|
|
1333 |
1 |
    def _check_update_ns_operation(self, indata, nsr) -> None:
        """Validates the ns-update request according to updateType

        If updateType is CHANGE_VNFPKG:
        - it checks the vnfInstanceId, whether it's available under ns instance
        - it checks the vnfdId whether it matches with the vnfd-id in the vnf-record of specified VNF.
        Otherwise exception will be raised.
        If updateType is REMOVE_VNF:
        - it checks if the vnfInstanceId is available in the ns instance
        - Otherwise exception will be raised.

        Args:
            indata: includes updateType such as CHANGE_VNFPKG,
            nsr: network service record

        Raises:
            EngineException:
                a meaningful error if given update parameters are not proper such as
                "Error in validating ns-update request: <ID> does not match
                with the vnfd-id of vnfinstance
                http_code=HTTPStatus.UNPROCESSABLE_ENTITY"

        """
        try:
            if indata["updateType"] == "CHANGE_VNFPKG":
                # vnfInstanceId, nsInstanceId, vnfdId are mandatory
                vnf_instance_id = indata["changeVnfPackageData"]["vnfInstanceId"]
                ns_instance_id = indata["nsInstanceId"]
                vnfd_id_2update = indata["changeVnfPackageData"]["vnfdId"]

                if vnf_instance_id not in nsr["constituent-vnfr-ref"]:
                    raise EngineException(
                        f"Error in validating ns-update request: vnf {vnf_instance_id} does not "
                        f"belong to NS {ns_instance_id}",
                        http_code=HTTPStatus.UNPROCESSABLE_ENTITY,
                    )

                # Getting vnfrs through the ns_instance_id
                vnfrs = self.db.get_list("vnfrs", {"nsr-id-ref": ns_instance_id})
                constituent_vnfd_id = next(
                    (
                        vnfr["vnfd-id"]
                        for vnfr in vnfrs
                        if vnfr["id"] == vnf_instance_id
                    ),
                    None,
                )

                # Check the given vnfd-id belongs to given vnf instance
                if constituent_vnfd_id and (vnfd_id_2update != constituent_vnfd_id):
                    raise EngineException(
                        f"Error in validating ns-update request: vnfd-id {vnfd_id_2update} does not "
                        f"match with the vnfd-id: {constituent_vnfd_id} of VNF instance: {vnf_instance_id}",
                        http_code=HTTPStatus.UNPROCESSABLE_ENTITY,
                    )

                # Validating the ns update timeout
                if (
                    indata.get("timeout_ns_update")
                    and indata["timeout_ns_update"] < 300
                ):
                    raise EngineException(
                        "Error in validating ns-update request: {} second is not enough "
                        "to upgrade the VNF instance: {}".format(
                            indata["timeout_ns_update"], vnf_instance_id
                        ),
                        http_code=HTTPStatus.UNPROCESSABLE_ENTITY,
                    )
            elif indata["updateType"] == "REMOVE_VNF":
                vnf_instance_id = indata["removeVnfInstanceId"]
                ns_instance_id = indata["nsInstanceId"]
                if vnf_instance_id not in nsr["constituent-vnfr-ref"]:
                    raise EngineException(
                        "Invalid VNF Instance Id. '{}' is not "
                        "present in the NS '{}'".format(vnf_instance_id, ns_instance_id)
                    )

        # Malformed requests (missing keys, wrong types) and db failures are
        # re-raised as the same exception type with a readable message.
        except (
            DbException,
            AttributeError,
            IndexError,
            KeyError,
            ValueError,
        ) as e:
            raise type(e)(
                "Ns update request could not be processed with error: {}.".format(e)
            )
1420 |
|
|
1421 |
1 |
def _check_scale_ns_operation(self, indata, nsr): |
1422 |
0 |
vnfd = self._get_vnfd_from_vnf_member_index( |
1423 |
|
indata["scaleVnfData"]["scaleByStepData"]["member-vnf-index"], nsr["_id"] |
1424 |
|
) |
1425 |
0 |
for scaling_aspect in get_iterable(vnfd.get("df", ())[0]["scaling-aspect"]): |
1426 |
0 |
if ( |
1427 |
|
indata["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"] |
1428 |
|
== scaling_aspect["id"] |
1429 |
|
): |
1430 |
0 |
break |
1431 |
|
else: |
1432 |
0 |
raise EngineException( |
1433 |
|
"Invalid scaleVnfData:scaleByStepData:scaling-group-descriptor '{}' is not " |
1434 |
|
"present at vnfd:scaling-aspect".format( |
1435 |
|
indata["scaleVnfData"]["scaleByStepData"][ |
1436 |
|
"scaling-group-descriptor" |
1437 |
|
] |
1438 |
|
) |
1439 |
|
) |
1440 |
|
|
1441 |
1 |
def _check_heal_ns_operation(self, indata, nsr): |
1442 |
0 |
return |
1443 |
|
|
1444 |
1 |
    def _check_instantiate_ns_operation(self, indata, nsr, session):
        """Validate instantiation parameters against the NSD/VNFDs and accounts.

        Checks the NS-level vim/wim accounts, then each per-vnf override
        (member index, vnf params, optional vim account) and each per-vld
        override (wim account, vld name present in the NSD).

        :param indata: instantiation request
        :param nsr: NS record, providing the nsd and the _id
        :param session: credentials/project info used to validate account access
        :raises EngineException: invalid account, unknown member-vnf-index or
            vld name
        """
        vnf_member_index_to_vnfd = {}  # map between vnf_member_index to vnf descriptor.
        vim_accounts = []
        wim_accounts = []
        nsd = nsr["nsd"]
        self._check_valid_vim_account(indata["vimAccountId"], vim_accounts, session)
        self._check_valid_wim_account(indata.get("wimAccountId"), wim_accounts, session)
        for in_vnf in get_iterable(indata.get("vnf")):
            member_vnf_index = in_vnf["member-vnf-index"]
            if vnf_member_index_to_vnfd.get(member_vnf_index):
                vnfd = vnf_member_index_to_vnfd[member_vnf_index]
            else:
                vnfd = self._get_vnfd_from_vnf_member_index(
                    member_vnf_index, nsr["_id"]
                )
                vnf_member_index_to_vnfd[
                    member_vnf_index
                ] = vnfd  # add to cache, avoiding a later look for
            self._check_vnf_instantiation_params(in_vnf, vnfd)
            if in_vnf.get("vimAccountId"):
                self._check_valid_vim_account(
                    in_vnf["vimAccountId"], vim_accounts, session
                )

        for in_vld in get_iterable(indata.get("vld")):
            self._check_valid_wim_account(
                in_vld.get("wimAccountId"), wim_accounts, session
            )
            # the vld name must reference an existing nsd virtual-link-desc
            for vldd in get_iterable(nsd.get("virtual-link-desc")):
                if in_vld["name"] == vldd["id"]:
                    break
            else:
                raise EngineException(
                    "Invalid parameter vld:name='{}' is not present at nsd:vld".format(
                        in_vld["name"]
                    )
                )
1481 |
|
|
1482 |
1 |
def _get_vnfd_from_vnf_member_index(self, member_vnf_index, nsr_id): |
1483 |
|
# Obtain vnf descriptor. The vnfr is used to get the vnfd._id used for this member_vnf_index |
1484 |
1 |
vnfr = self.db.get_one( |
1485 |
|
"vnfrs", |
1486 |
|
{"nsr-id-ref": nsr_id, "member-vnf-index-ref": member_vnf_index}, |
1487 |
|
fail_on_empty=False, |
1488 |
|
) |
1489 |
1 |
if not vnfr: |
1490 |
1 |
raise EngineException( |
1491 |
|
"Invalid parameter member_vnf_index='{}' is not one of the " |
1492 |
|
"nsd:constituent-vnfd".format(member_vnf_index) |
1493 |
|
) |
1494 |
|
|
1495 |
|
# Backwards compatibility: if there is no revision, get it from the one and only VNFD entry |
1496 |
1 |
if "revision" in vnfr: |
1497 |
1 |
vnfd_revision = vnfr["vnfd-id"] + ":" + str(vnfr["revision"]) |
1498 |
1 |
vnfd = self.db.get_one( |
1499 |
|
"vnfds_revisions", {"_id": vnfd_revision}, fail_on_empty=False |
1500 |
|
) |
1501 |
|
else: |
1502 |
1 |
vnfd = self.db.get_one( |
1503 |
|
"vnfds", {"_id": vnfr["vnfd-id"]}, fail_on_empty=False |
1504 |
|
) |
1505 |
|
|
1506 |
1 |
if not vnfd: |
1507 |
0 |
raise EngineException( |
1508 |
|
"vnfd id={} has been deleted!. Operation cannot be performed".format( |
1509 |
|
vnfr["vnfd-id"] |
1510 |
|
) |
1511 |
|
) |
1512 |
1 |
return vnfd |
1513 |
|
|
1514 |
1 |
def _check_valid_vdu(self, vnfd, vdu_id): |
1515 |
1 |
for vdud in get_iterable(vnfd.get("vdu")): |
1516 |
1 |
if vdud["id"] == vdu_id: |
1517 |
0 |
return vdud |
1518 |
|
else: |
1519 |
1 |
raise EngineException( |
1520 |
|
"Invalid parameter vdu_id='{}' not present at vnfd:vdu:id".format( |
1521 |
|
vdu_id |
1522 |
|
) |
1523 |
|
) |
1524 |
|
|
1525 |
1 |
def _check_valid_kdu(self, vnfd, kdu_name): |
1526 |
0 |
for kdud in get_iterable(vnfd.get("kdu")): |
1527 |
0 |
if kdud["name"] == kdu_name: |
1528 |
0 |
return kdud |
1529 |
|
else: |
1530 |
0 |
raise EngineException( |
1531 |
|
"Invalid parameter kdu_name='{}' not present at vnfd:kdu:name".format( |
1532 |
|
kdu_name |
1533 |
|
) |
1534 |
|
) |
1535 |
|
|
1536 |
1 |
def _check_vnf_instantiation_params(self, in_vnf, vnfd): |
1537 |
1 |
for in_vdu in get_iterable(in_vnf.get("vdu")): |
1538 |
1 |
for vdu in get_iterable(vnfd.get("vdu")): |
1539 |
1 |
if in_vdu["id"] == vdu["id"]: |
1540 |
1 |
for volume in get_iterable(in_vdu.get("volume")): |
1541 |
0 |
for volumed in get_iterable(vdu.get("virtual-storage-desc")): |
1542 |
0 |
if volumed == volume["name"]: |
1543 |
0 |
break |
1544 |
|
else: |
1545 |
0 |
raise EngineException( |
1546 |
|
"Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}']:" |
1547 |
|
"volume:name='{}' is not present at " |
1548 |
|
"vnfd:vdu:virtual-storage-desc list".format( |
1549 |
|
in_vnf["member-vnf-index"], |
1550 |
|
in_vdu["id"], |
1551 |
|
volume["id"], |
1552 |
|
) |
1553 |
|
) |
1554 |
|
|
1555 |
1 |
vdu_if_names = set() |
1556 |
1 |
for cpd in get_iterable(vdu.get("int-cpd")): |
1557 |
1 |
for iface in get_iterable( |
1558 |
|
cpd.get("virtual-network-interface-requirement") |
1559 |
|
): |
1560 |
1 |
vdu_if_names.add(iface.get("name")) |
1561 |
|
|
1562 |
1 |
for in_iface in get_iterable(in_vdu.get("interface")): |
1563 |
1 |
if in_iface["name"] in vdu_if_names: |
1564 |
1 |
break |
1565 |
|
else: |
1566 |
0 |
raise EngineException( |
1567 |
|
"Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}']:" |
1568 |
|
"int-cpd[id='{}'] is not present at vnfd:vdu:int-cpd".format( |
1569 |
|
in_vnf["member-vnf-index"], |
1570 |
|
in_vdu["id"], |
1571 |
|
in_iface["name"], |
1572 |
|
) |
1573 |
|
) |
1574 |
1 |
break |
1575 |
|
|
1576 |
|
else: |
1577 |
0 |
raise EngineException( |
1578 |
|
"Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}'] is not present " |
1579 |
|
"at vnfd:vdu".format(in_vnf["member-vnf-index"], in_vdu["id"]) |
1580 |
|
) |
1581 |
|
|
1582 |
1 |
vnfd_ivlds_cpds = { |
1583 |
|
ivld.get("id"): set() |
1584 |
|
for ivld in get_iterable(vnfd.get("int-virtual-link-desc")) |
1585 |
|
} |
1586 |
1 |
for vdu in vnfd.get("vdu", {}): |
1587 |
1 |
for cpd in vdu.get("int-cpd", {}): |
1588 |
1 |
if cpd.get("int-virtual-link-desc"): |
1589 |
1 |
vnfd_ivlds_cpds[cpd.get("int-virtual-link-desc")] = cpd.get("id") |
1590 |
|
|
1591 |
1 |
for in_ivld in get_iterable(in_vnf.get("internal-vld")): |
1592 |
1 |
if in_ivld.get("name") in vnfd_ivlds_cpds: |
1593 |
1 |
for in_icp in get_iterable(in_ivld.get("internal-connection-point")): |
1594 |
0 |
if in_icp["id-ref"] in vnfd_ivlds_cpds[in_ivld.get("name")]: |
1595 |
0 |
break |
1596 |
|
else: |
1597 |
0 |
raise EngineException( |
1598 |
|
"Invalid parameter vnf[member-vnf-index='{}']:internal-vld[name" |
1599 |
|
"='{}']:internal-connection-point[id-ref:'{}'] is not present at " |
1600 |
|
"vnfd:internal-vld:name/id:internal-connection-point".format( |
1601 |
|
in_vnf["member-vnf-index"], |
1602 |
|
in_ivld["name"], |
1603 |
|
in_icp["id-ref"], |
1604 |
|
) |
1605 |
|
) |
1606 |
|
else: |
1607 |
0 |
raise EngineException( |
1608 |
|
"Invalid parameter vnf[member-vnf-index='{}']:internal-vld:name='{}'" |
1609 |
|
" is not present at vnfd '{}'".format( |
1610 |
|
in_vnf["member-vnf-index"], in_ivld["name"], vnfd["id"] |
1611 |
|
) |
1612 |
|
) |
1613 |
|
|
1614 |
1 |
def _check_valid_vim_account(self, vim_account, vim_accounts, session): |
1615 |
1 |
if vim_account in vim_accounts: |
1616 |
0 |
return |
1617 |
1 |
try: |
1618 |
1 |
db_filter = self._get_project_filter(session) |
1619 |
1 |
db_filter["_id"] = vim_account |
1620 |
1 |
self.db.get_one("vim_accounts", db_filter) |
1621 |
0 |
except Exception: |
1622 |
0 |
raise EngineException( |
1623 |
|
"Invalid vimAccountId='{}' not present for the project".format( |
1624 |
|
vim_account |
1625 |
|
) |
1626 |
|
) |
1627 |
1 |
vim_accounts.append(vim_account) |
1628 |
|
|
1629 |
1 |
def _get_vim_account(self, vim_id: str, session): |
1630 |
1 |
try: |
1631 |
1 |
db_filter = self._get_project_filter(session) |
1632 |
1 |
db_filter["_id"] = vim_id |
1633 |
1 |
return self.db.get_one("vim_accounts", db_filter) |
1634 |
0 |
except Exception: |
1635 |
0 |
raise EngineException( |
1636 |
|
"Invalid vimAccountId='{}' not present for the project".format(vim_id) |
1637 |
|
) |
1638 |
|
|
1639 |
1 |
def _check_valid_wim_account(self, wim_account, wim_accounts, session): |
1640 |
1 |
if not isinstance(wim_account, str): |
1641 |
1 |
return |
1642 |
0 |
if wim_account in wim_accounts: |
1643 |
0 |
return |
1644 |
0 |
try: |
1645 |
0 |
db_filter = self._get_project_filter(session) |
1646 |
0 |
db_filter["_id"] = wim_account |
1647 |
0 |
self.db.get_one("wim_accounts", db_filter) |
1648 |
0 |
except Exception: |
1649 |
0 |
raise EngineException( |
1650 |
|
"Invalid wimAccountId='{}' not present for the project".format( |
1651 |
|
wim_account |
1652 |
|
) |
1653 |
|
) |
1654 |
0 |
wim_accounts.append(wim_account) |
1655 |
|
|
1656 |
1 |
def _look_for_pdu( |
1657 |
|
self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback |
1658 |
|
): |
1659 |
|
""" |
1660 |
|
Look for a free PDU in the catalog matching vdur type and interfaces. Fills vnfr.vdur with the interface |
1661 |
|
(ip_address, ...) information. |
1662 |
|
Modifies PDU _admin.usageState to 'IN_USE' |
1663 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
1664 |
|
:param rollback: list with the database modifications to rollback if needed |
1665 |
|
:param vnfr: vnfr to be updated. It is modified with pdu interface info if pdu is found |
1666 |
|
:param vim_account: vim_account where this vnfr should be deployed |
1667 |
|
:param vnfr_update: dictionary filled by this method with changes to be done at database vnfr |
1668 |
|
:param vnfr_update_rollback: dictionary filled by this method with original content of vnfr in case a rollback |
1669 |
|
of the changed vnfr is needed |
1670 |
|
|
1671 |
|
:return: List of PDU interfaces that are connected to an existing VIM network. Each item contains: |
1672 |
|
"vim-network-name": used at VIM |
1673 |
|
"name": interface name |
1674 |
|
"vnf-vld-id": internal VNFD vld where this interface is connected, or |
1675 |
|
"ns-vld-id": NSD vld where this interface is connected. |
1676 |
|
NOTE: One, and only one between 'vnf-vld-id' and 'ns-vld-id' contains a value. The other will be None |
1677 |
|
""" |
1678 |
|
|
1679 |
1 |
ifaces_forcing_vim_network = [] |
1680 |
1 |
for vdur_index, vdur in enumerate(get_iterable(vnfr.get("vdur"))): |
1681 |
1 |
if not vdur.get("pdu-type"): |
1682 |
1 |
continue |
1683 |
0 |
pdu_type = vdur.get("pdu-type") |
1684 |
0 |
pdu_filter = self._get_project_filter(session) |
1685 |
0 |
pdu_filter["vim_accounts"] = vim_account |
1686 |
0 |
pdu_filter["type"] = pdu_type |
1687 |
0 |
pdu_filter["_admin.operationalState"] = "ENABLED" |
1688 |
0 |
pdu_filter["_admin.usageState"] = "NOT_IN_USE" |
1689 |
|
# TODO feature 1417: "shared": True, |
1690 |
|
|
1691 |
0 |
available_pdus = self.db.get_list("pdus", pdu_filter) |
1692 |
0 |
for pdu in available_pdus: |
1693 |
|
# step 1 check if this pdu contains needed interfaces: |
1694 |
0 |
match_interfaces = True |
1695 |
0 |
for vdur_interface in vdur["interfaces"]: |
1696 |
0 |
for pdu_interface in pdu["interfaces"]: |
1697 |
0 |
if pdu_interface["name"] == vdur_interface["name"]: |
1698 |
|
# TODO feature 1417: match per mgmt type |
1699 |
0 |
break |
1700 |
|
else: # no interface found for name |
1701 |
0 |
match_interfaces = False |
1702 |
0 |
break |
1703 |
0 |
if match_interfaces: |
1704 |
0 |
break |
1705 |
|
else: |
1706 |
0 |
raise EngineException( |
1707 |
|
"No PDU of type={} at vim_account={} found for member_vnf_index={}, vdu={} matching interface " |
1708 |
|
"names".format( |
1709 |
|
pdu_type, |
1710 |
|
vim_account, |
1711 |
|
vnfr["member-vnf-index-ref"], |
1712 |
|
vdur["vdu-id-ref"], |
1713 |
|
) |
1714 |
|
) |
1715 |
|
|
1716 |
|
# step 2. Update pdu |
1717 |
0 |
rollback_pdu = { |
1718 |
|
"_admin.usageState": pdu["_admin"]["usageState"], |
1719 |
|
"_admin.usage.vnfr_id": None, |
1720 |
|
"_admin.usage.nsr_id": None, |
1721 |
|
"_admin.usage.vdur": None, |
1722 |
|
} |
1723 |
0 |
self.db.set_one( |
1724 |
|
"pdus", |
1725 |
|
{"_id": pdu["_id"]}, |
1726 |
|
{ |
1727 |
|
"_admin.usageState": "IN_USE", |
1728 |
|
"_admin.usage": { |
1729 |
|
"vnfr_id": vnfr["_id"], |
1730 |
|
"nsr_id": vnfr["nsr-id-ref"], |
1731 |
|
"vdur": vdur["vdu-id-ref"], |
1732 |
|
}, |
1733 |
|
}, |
1734 |
|
) |
1735 |
0 |
rollback.append( |
1736 |
|
{ |
1737 |
|
"topic": "pdus", |
1738 |
|
"_id": pdu["_id"], |
1739 |
|
"operation": "set", |
1740 |
|
"content": rollback_pdu, |
1741 |
|
} |
1742 |
|
) |
1743 |
|
|
1744 |
|
# step 3. Fill vnfr info by filling vdur |
1745 |
0 |
vdu_text = "vdur.{}".format(vdur_index) |
1746 |
0 |
vnfr_update_rollback[vdu_text + ".pdu-id"] = None |
1747 |
0 |
vnfr_update[vdu_text + ".pdu-id"] = pdu["_id"] |
1748 |
0 |
for iface_index, vdur_interface in enumerate(vdur["interfaces"]): |
1749 |
0 |
for pdu_interface in pdu["interfaces"]: |
1750 |
0 |
if pdu_interface["name"] == vdur_interface["name"]: |
1751 |
0 |
iface_text = vdu_text + ".interfaces.{}".format(iface_index) |
1752 |
0 |
for k, v in pdu_interface.items(): |
1753 |
0 |
if k in ( |
1754 |
|
"ip-address", |
1755 |
|
"mac-address", |
1756 |
|
): # TODO: switch-xxxxx must be inserted |
1757 |
0 |
vnfr_update[iface_text + ".{}".format(k)] = v |
1758 |
0 |
vnfr_update_rollback[ |
1759 |
|
iface_text + ".{}".format(k) |
1760 |
|
] = vdur_interface.get(v) |
1761 |
0 |
if pdu_interface.get("ip-address"): |
1762 |
0 |
if vdur_interface.get( |
1763 |
|
"mgmt-interface" |
1764 |
|
) or vdur_interface.get("mgmt-vnf"): |
1765 |
0 |
vnfr_update_rollback[ |
1766 |
|
vdu_text + ".ip-address" |
1767 |
|
] = vdur.get("ip-address") |
1768 |
0 |
vnfr_update[vdu_text + ".ip-address"] = pdu_interface[ |
1769 |
|
"ip-address" |
1770 |
|
] |
1771 |
0 |
if vdur_interface.get("mgmt-vnf"): |
1772 |
0 |
vnfr_update_rollback["ip-address"] = vnfr.get( |
1773 |
|
"ip-address" |
1774 |
|
) |
1775 |
0 |
vnfr_update["ip-address"] = pdu_interface["ip-address"] |
1776 |
0 |
vnfr_update[vdu_text + ".ip-address"] = pdu_interface[ |
1777 |
|
"ip-address" |
1778 |
|
] |
1779 |
0 |
if pdu_interface.get("vim-network-name") or pdu_interface.get( |
1780 |
|
"vim-network-id" |
1781 |
|
): |
1782 |
0 |
ifaces_forcing_vim_network.append( |
1783 |
|
{ |
1784 |
|
"name": vdur_interface.get("vnf-vld-id") |
1785 |
|
or vdur_interface.get("ns-vld-id"), |
1786 |
|
"vnf-vld-id": vdur_interface.get("vnf-vld-id"), |
1787 |
|
"ns-vld-id": vdur_interface.get("ns-vld-id"), |
1788 |
|
} |
1789 |
|
) |
1790 |
0 |
if pdu_interface.get("vim-network-id"): |
1791 |
0 |
ifaces_forcing_vim_network[-1][ |
1792 |
|
"vim-network-id" |
1793 |
|
] = pdu_interface["vim-network-id"] |
1794 |
0 |
if pdu_interface.get("vim-network-name"): |
1795 |
0 |
ifaces_forcing_vim_network[-1][ |
1796 |
|
"vim-network-name" |
1797 |
|
] = pdu_interface["vim-network-name"] |
1798 |
0 |
break |
1799 |
|
|
1800 |
1 |
return ifaces_forcing_vim_network |
1801 |
|
|
1802 |
1 |
    def _look_for_k8scluster(
        self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
    ):
        """
        Look for an available k8scluster for all the kuds in the vnfd matching version and cni requirements.
        Fills vnfr.kdur with the selected k8scluster

        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param rollback: list with the database modifications to rollback if needed
        :param vnfr: vnfr to be updated. It is modified with pdu interface info if pdu is found
        :param vim_account: vim_account where this vnfr should be deployed
        :param vnfr_update: dictionary filled by this method with changes to be done at database vnfr
        :param vnfr_update_rollback: dictionary filled by this method with original content of vnfr in case a rollback
                                     of the changed vnfr is needed

        :return: List of KDU interfaces that are connected to an existing VIM network. Each item contains:
                 "vim-network-name": used at VIM
                  "name": interface name
                  "vnf-vld-id": internal VNFD vld where this interface is connected, or
                  "ns-vld-id": NSD vld where this interface is connected.
                  NOTE: One, and only one between 'vnf-vld-id' and 'ns-vld-id' contains a value. The other will be None
        """

        ifaces_forcing_vim_network = []
        # nothing to do when the vnfr deploys no KDUs
        if not vnfr.get("kdur"):
            return ifaces_forcing_vim_network

        kdu_filter = self._get_project_filter(session)
        kdu_filter["vim_account"] = vim_account
        # TODO kdu_filter["_admin.operationalState"] = "ENABLED"
        available_k8sclusters = self.db.get_list("k8sclusters", kdu_filter)

        k8s_requirements = {}  # just for logging
        # Select the first cluster satisfying cni/version/network requirements.
        # NOTE(review): the selected cluster is the leaked loop variable
        # `k8scluster`, used after this for/else — intentional but fragile.
        for k8scluster in available_k8sclusters:
            if not vnfr.get("k8s-cluster"):
                # no requirements expressed: first available cluster wins
                break
            # restrict by cni
            if vnfr["k8s-cluster"].get("cni"):
                k8s_requirements["cni"] = vnfr["k8s-cluster"]["cni"]
                if not set(vnfr["k8s-cluster"]["cni"]).intersection(
                    k8scluster.get("cni", ())
                ):
                    continue
            # restrict by version
            if vnfr["k8s-cluster"].get("version"):
                k8s_requirements["version"] = vnfr["k8s-cluster"]["version"]
                if k8scluster.get("k8s_version") not in vnfr["k8s-cluster"]["version"]:
                    continue
            # restrict by number of networks
            if vnfr["k8s-cluster"].get("nets"):
                k8s_requirements["networks"] = len(vnfr["k8s-cluster"]["nets"])
                if not k8scluster.get("nets") or len(k8scluster["nets"]) < len(
                    vnfr["k8s-cluster"]["nets"]
                ):
                    continue
            break
        else:
            # loop exhausted (or no clusters at all): no suitable cluster
            raise EngineException(
                "No k8scluster with requirements='{}' at vim_account={} found for member_vnf_index={}".format(
                    k8s_requirements, vim_account, vnfr["member-vnf-index-ref"]
                )
            )

        for kdur_index, kdur in enumerate(get_iterable(vnfr.get("kdur"))):
            # step 3. Fill vnfr info by filling kdur
            kdu_text = "kdur.{}.".format(kdur_index)
            vnfr_update_rollback[kdu_text + "k8s-cluster.id"] = None
            vnfr_update[kdu_text + "k8s-cluster.id"] = k8scluster["_id"]

        # step 4. Check VIM networks that forces the selected k8s_cluster
        if vnfr.get("k8s-cluster") and vnfr["k8s-cluster"].get("nets"):
            k8scluster_net_list = list(k8scluster.get("nets").keys())
            for net_index, kdur_net in enumerate(vnfr["k8s-cluster"]["nets"]):
                # get a network from k8s_cluster nets. If name matches use this, if not use other
                if kdur_net["id"] in k8scluster_net_list:  # name matches
                    vim_net = k8scluster["nets"][kdur_net["id"]]
                    k8scluster_net_list.remove(kdur_net["id"])
                else:
                    # no name match: consume the next unassigned cluster network
                    vim_net = k8scluster["nets"][k8scluster_net_list[0]]
                    k8scluster_net_list.pop(0)
                vnfr_update_rollback[
                    "k8s-cluster.nets.{}.vim_net".format(net_index)
                ] = None
                vnfr_update["k8s-cluster.nets.{}.vim_net".format(net_index)] = vim_net
                if vim_net and (
                    kdur_net.get("vnf-vld-id") or kdur_net.get("ns-vld-id")
                ):
                    ifaces_forcing_vim_network.append(
                        {
                            "name": kdur_net.get("vnf-vld-id")
                            or kdur_net.get("ns-vld-id"),
                            "vnf-vld-id": kdur_net.get("vnf-vld-id"),
                            "ns-vld-id": kdur_net.get("ns-vld-id"),
                            "vim-network-name": vim_net,  # TODO can it be vim-network-id ???
                        }
                    )
        # TODO check that this forcing is not incompatible with other forcing
        return ifaces_forcing_vim_network
1901 |
1 |
    def _update_vnfrs_from_nsd(self, nsr):
        """
        Propagate fixed ip-addresses declared in the NSD vnf-profiles to the vnfrs.

        For each constituent-cpd-id that carries an "ip-address" in the NSD
        vnf-profile, the matching vnfr vdur interface is updated in the database
        with that address (incremented by the vdur count-index) and marked
        "fixed-ip".

        :param nsr: ns record containing "_id" and the embedded "nsd"
        :raises ValidationError/EngineException/DbException/MsgException/FsException:
            re-raised with the failing step appended to the message
        """
        step = "Getting vnf_profiles from nsd"  # first step must be defined outside try
        try:
            nsr_id = nsr["_id"]
            nsd = nsr["nsd"]

            # NOTE(review): only the first df entry is considered — confirm
            # multi-df descriptors are out of scope here
            vnf_profiles = nsd.get("df", [{}])[0].get("vnf-profile", ())
            # key: "<virtual-link-profile-id>.<constituent-base-element-id>"
            vld_fixed_ip_connection_point_data = {}

            step = "Getting ip-address info from vnf_profile if it exists"
            for vnfp in vnf_profiles:
                # Checking ip-address info from nsd.vnf_profile and storing
                for vlc in vnfp.get("virtual-link-connectivity", ()):
                    for cpd in vlc.get("constituent-cpd-id", ()):
                        if cpd.get("ip-address"):
                            step = "Storing ip-address info"
                            vld_fixed_ip_connection_point_data.update(
                                {
                                    vlc.get("virtual-link-profile-id")
                                    + "."
                                    + cpd.get("constituent-base-element-id"): {
                                        "vnfd-connection-point-ref": cpd.get(
                                            "constituent-cpd-id"
                                        ),
                                        "ip-address": cpd.get("ip-address"),
                                    }
                                }
                            )

            # Inserting ip address to vnfr
            if len(vld_fixed_ip_connection_point_data) > 0:
                step = "Getting vnfrs"
                vnfrs = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
                for item in vld_fixed_ip_connection_point_data.keys():
                    step = "Filtering vnfrs"
                    # the member-vnf-index is the part after the "." in the key
                    vnfr = next(
                        filter(
                            lambda vnfr: vnfr["member-vnf-index-ref"]
                            == item.split(".")[1],
                            vnfrs,
                        ),
                        None,
                    )
                    if vnfr:
                        vnfr_update = {}
                        for vdur_index, vdur in enumerate(vnfr["vdur"]):
                            for iface_index, iface in enumerate(vdur["interfaces"]):
                                step = "Looking for matched interface"
                                # match both the connection point and the ns vld
                                if (
                                    iface.get("external-connection-point-ref")
                                    == vld_fixed_ip_connection_point_data[item].get(
                                        "vnfd-connection-point-ref"
                                    )
                                    and iface.get("ns-vld-id") == item.split(".")[0]
                                ):
                                    vnfr_update_text = "vdur.{}.interfaces.{}".format(
                                        vdur_index, iface_index
                                    )
                                    step = "Storing info in order to update vnfr"
                                    # offset the fixed ip by the vdur count-index
                                    vnfr_update[
                                        vnfr_update_text + ".ip-address"
                                    ] = increment_ip_mac(
                                        vld_fixed_ip_connection_point_data[item].get(
                                            "ip-address"
                                        ),
                                        vdur.get("count-index", 0),
                                    )
                                    vnfr_update[vnfr_update_text + ".fixed-ip"] = True

                        step = "updating vnfr at database"
                        self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, vnfr_update)
        except (
            ValidationError,
            EngineException,
            DbException,
            MsgException,
            FsException,
        ) as e:
            # re-raise same exception type with the step where it failed appended
            raise type(e)("{} while '{}'".format(e, step), http_code=e.http_code)
1981 |
1 |
    def _update_vnfrs(self, session, rollback, nsr, indata):
        """
        Apply the user instantiation parameters to every vnfr of the NS.

        Updates each vnfr with vim-account-id (and vca-id when the VIM has one),
        fixed ip/mac addresses from the vdu/internal-vld/vld instantiation
        parameters, and PDU / k8s-cluster assignments. Every database change is
        mirrored into *rollback* so it can be undone. *indata* may be extended
        in-place with vld/vnf entries when a PDU or k8s-cluster forces a concrete
        VIM network.

        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param rollback: list with the database modifications to rollback if needed
        :param nsr: ns record whose vnfrs are updated
        :param indata: user instantiation parameters; may be modified in-place
        """
        # get vnfr
        nsr_id = nsr["_id"]
        vnfrs = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

        for vnfr in vnfrs:
            vnfr_update = {}
            vnfr_update_rollback = {}
            member_vnf_index = vnfr["member-vnf-index-ref"]
            # update vim-account-id

            # NS-level VIM is the default; a per-VNF vimAccountId overrides it below
            vim_account = indata["vimAccountId"]
            vca_id = self._get_vim_account(vim_account, session).get("vca")
            # check instantiate parameters
            for vnf_inst_params in get_iterable(indata.get("vnf")):
                if vnf_inst_params["member-vnf-index"] != member_vnf_index:
                    continue
                if vnf_inst_params.get("vimAccountId"):
                    vim_account = vnf_inst_params.get("vimAccountId")
                    vca_id = self._get_vim_account(vim_account, session).get("vca")

                # get vnf.vdu.interface instantiation params to update vnfr.vdur.interfaces ip, mac
                for vdu_inst_param in get_iterable(vnf_inst_params.get("vdu")):
                    for vdur_index, vdur in enumerate(vnfr["vdur"]):
                        if vdu_inst_param["id"] != vdur["vdu-id-ref"]:
                            continue
                        for iface_inst_param in get_iterable(
                            vdu_inst_param.get("interface")
                        ):
                            # NOTE(review): raises StopIteration if the interface
                            # name is not found — presumably validated earlier
                            iface_index, _ = next(
                                i
                                for i in enumerate(vdur["interfaces"])
                                if i[1]["name"] == iface_inst_param["name"]
                            )
                            vnfr_update_text = "vdur.{}.interfaces.{}".format(
                                vdur_index, iface_index
                            )
                            if iface_inst_param.get("ip-address"):
                                # fixed ip offset by the vdur count-index
                                vnfr_update[
                                    vnfr_update_text + ".ip-address"
                                ] = increment_ip_mac(
                                    iface_inst_param.get("ip-address"),
                                    vdur.get("count-index", 0),
                                )
                                vnfr_update[vnfr_update_text + ".fixed-ip"] = True
                            if iface_inst_param.get("mac-address"):
                                vnfr_update[
                                    vnfr_update_text + ".mac-address"
                                ] = increment_ip_mac(
                                    iface_inst_param.get("mac-address"),
                                    vdur.get("count-index", 0),
                                )
                                vnfr_update[vnfr_update_text + ".fixed-mac"] = True
                            if iface_inst_param.get("floating-ip-required"):
                                vnfr_update[
                                    vnfr_update_text + ".floating-ip-required"
                                ] = True
                # get vnf.internal-vld.internal-conection-point instantiation params to update vnfr.vdur.interfaces
                # TODO update vld with the ip-profile
                for ivld_inst_param in get_iterable(
                    vnf_inst_params.get("internal-vld")
                ):
                    for icp_inst_param in get_iterable(
                        ivld_inst_param.get("internal-connection-point")
                    ):
                        # look for iface
                        for vdur_index, vdur in enumerate(vnfr["vdur"]):
                            for iface_index, iface in enumerate(vdur["interfaces"]):
                                if (
                                    iface.get("internal-connection-point-ref")
                                    == icp_inst_param["id-ref"]
                                ):
                                    vnfr_update_text = "vdur.{}.interfaces.{}".format(
                                        vdur_index, iface_index
                                    )
                                    if icp_inst_param.get("ip-address"):
                                        vnfr_update[
                                            vnfr_update_text + ".ip-address"
                                        ] = increment_ip_mac(
                                            icp_inst_param.get("ip-address"),
                                            vdur.get("count-index", 0),
                                        )
                                        vnfr_update[
                                            vnfr_update_text + ".fixed-ip"
                                        ] = True
                                    if icp_inst_param.get("mac-address"):
                                        vnfr_update[
                                            vnfr_update_text + ".mac-address"
                                        ] = increment_ip_mac(
                                            icp_inst_param.get("mac-address"),
                                            vdur.get("count-index", 0),
                                        )
                                        vnfr_update[
                                            vnfr_update_text + ".fixed-mac"
                                        ] = True
                                    break
            # get ip address from instantiation parameters.vld.vnfd-connection-point-ref
            for vld_inst_param in get_iterable(indata.get("vld")):
                for vnfcp_inst_param in get_iterable(
                    vld_inst_param.get("vnfd-connection-point-ref")
                ):
                    if vnfcp_inst_param["member-vnf-index-ref"] != member_vnf_index:
                        continue
                    # look for iface
                    for vdur_index, vdur in enumerate(vnfr["vdur"]):
                        for iface_index, iface in enumerate(vdur["interfaces"]):
                            if (
                                iface.get("external-connection-point-ref")
                                == vnfcp_inst_param["vnfd-connection-point-ref"]
                            ):
                                vnfr_update_text = "vdur.{}.interfaces.{}".format(
                                    vdur_index, iface_index
                                )
                                if vnfcp_inst_param.get("ip-address"):
                                    vnfr_update[
                                        vnfr_update_text + ".ip-address"
                                    ] = increment_ip_mac(
                                        vnfcp_inst_param.get("ip-address"),
                                        vdur.get("count-index", 0),
                                    )
                                    vnfr_update[vnfr_update_text + ".fixed-ip"] = True
                                if vnfcp_inst_param.get("mac-address"):
                                    vnfr_update[
                                        vnfr_update_text + ".mac-address"
                                    ] = increment_ip_mac(
                                        vnfcp_inst_param.get("mac-address"),
                                        vdur.get("count-index", 0),
                                    )
                                    vnfr_update[vnfr_update_text + ".fixed-mac"] = True
                                break

            vnfr_update["vim-account-id"] = vim_account
            vnfr_update_rollback["vim-account-id"] = vnfr.get("vim-account-id")

            if vca_id:
                vnfr_update["vca-id"] = vca_id
                vnfr_update_rollback["vca-id"] = vnfr.get("vca-id")

            # get pdu
            ifaces_forcing_vim_network = self._look_for_pdu(
                session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
            )

            # get kdus
            ifaces_forcing_vim_network += self._look_for_k8scluster(
                session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
            )
            # update database vnfr
            self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, vnfr_update)
            rollback.append(
                {
                    "topic": "vnfrs",
                    "_id": vnfr["_id"],
                    "operation": "set",
                    "content": vnfr_update_rollback,
                }
            )

            # Update indata in case pdu forces to use a concrete vim-network-name
            # TODO check if user has already insert a vim-network-name and raises an error
            if not ifaces_forcing_vim_network:
                continue
            for iface_info in ifaces_forcing_vim_network:
                if iface_info.get("ns-vld-id"):
                    if "vld" not in indata:
                        indata["vld"] = []
                    indata["vld"].append(
                        {
                            key: iface_info[key]
                            for key in ("name", "vim-network-name", "vim-network-id")
                            if iface_info.get(key)
                        }
                    )

                elif iface_info.get("vnf-vld-id"):
                    if "vnf" not in indata:
                        indata["vnf"] = []
                    indata["vnf"].append(
                        {
                            "member-vnf-index": member_vnf_index,
                            "internal-vld": [
                                {
                                    key: iface_info[key]
                                    for key in (
                                        "name",
                                        "vim-network-name",
                                        "vim-network-id",
                                    )
                                    if iface_info.get(key)
                                }
                            ],
                        }
                    )
2175 |
1 |
@staticmethod |
2176 |
1 |
def _create_nslcmop(nsr_id, operation, params): |
2177 |
|
""" |
2178 |
|
Creates a ns-lcm-opp content to be stored at database. |
2179 |
|
:param nsr_id: internal id of the instance |
2180 |
|
:param operation: instantiate, terminate, scale, action, update ... |
2181 |
|
:param params: user parameters for the operation |
2182 |
|
:return: dictionary following SOL005 format |
2183 |
|
""" |
2184 |
1 |
now = time() |
2185 |
1 |
_id = str(uuid4()) |
2186 |
1 |
nslcmop = { |
2187 |
|
"id": _id, |
2188 |
|
"_id": _id, |
2189 |
|
"operationState": "PROCESSING", # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK |
2190 |
|
"queuePosition": None, |
2191 |
|
"stage": None, |
2192 |
|
"errorMessage": None, |
2193 |
|
"detailedStatus": None, |
2194 |
|
"statusEnteredTime": now, |
2195 |
|
"nsInstanceId": nsr_id, |
2196 |
|
"lcmOperationType": operation, |
2197 |
|
"startTime": now, |
2198 |
|
"isAutomaticInvocation": False, |
2199 |
|
"operationParams": params, |
2200 |
|
"isCancelPending": False, |
2201 |
|
"links": { |
2202 |
|
"self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id, |
2203 |
|
"nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id, |
2204 |
|
}, |
2205 |
|
} |
2206 |
1 |
return nslcmop |
2207 |
|
|
2208 |
1 |
def _get_enabled_vims(self, session): |
2209 |
|
""" |
2210 |
|
Retrieve and return VIM accounts that are accessible by current user and has state ENABLE |
2211 |
|
:param session: current session with user information |
2212 |
|
""" |
2213 |
0 |
db_filter = self._get_project_filter(session) |
2214 |
0 |
db_filter["_admin.operationalState"] = "ENABLED" |
2215 |
0 |
vims = self.db.get_list("vim_accounts", db_filter) |
2216 |
0 |
vimAccounts = [] |
2217 |
0 |
for vim in vims: |
2218 |
0 |
vimAccounts.append(vim["_id"]) |
2219 |
0 |
return vimAccounts |
2220 |
|
|
2221 |
1 |
def new(
    self,
    rollback,
    session,
    indata=None,
    kwargs=None,
    headers=None,
    slice_object=False,
):
    """
    Performs a new operation over a ns
    :param rollback: list to append created items at database in case a rollback must to be done
    :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
    :param indata: descriptor with the parameters of the operation. It must contains among others
        nsInstanceId: _id of the nsr to perform the operation
        operation: it can be: instantiate, terminate, action, update TODO: heal
    :param kwargs: used to override the indata descriptor
    :param headers: http request headers
    :param slice_object: True when invoked on behalf of a network slice; skips the
        slice-membership check on terminate and suppresses the kafka notification
    :return: id of the nslcmops (tuple (_id, None)); (None, None) signals a
        terminate+autoremove on a not-instantiated NS (NSR can just be removed)
    :raises EngineException: on validation failure or state conflicts
    """

    def check_if_nsr_is_not_slice_member(session, nsr_id):
        # Refuse termination while any slice (nsi) visible to this project
        # still references the NS in its nsrs-detailed-list
        nsis = None
        db_filter = self._get_project_filter(session)
        db_filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_id
        nsis = self.db.get_one(
            "nsis", db_filter, fail_on_empty=False, fail_on_more=False
        )
        if nsis:
            raise EngineException(
                "The NS instance {} cannot be terminated because is used by the slice {}".format(
                    nsr_id, nsis["_id"]
                ),
                http_code=HTTPStatus.CONFLICT,
            )

    try:
        # Override descriptor with query string kwargs
        self._update_input_with_kwargs(indata, kwargs, yaml_format=True)
        operation = indata["lcmOperationType"]
        nsInstanceId = indata["nsInstanceId"]

        validate_input(indata, self.operation_schema[operation])
        # get ns from nsr_id
        _filter = BaseTopic._get_project_filter(session)
        _filter["_id"] = nsInstanceId
        nsr = self.db.get_one("nsrs", _filter)

        # initial checking
        if operation == "terminate" and slice_object is False:
            check_if_nsr_is_not_slice_member(session, nsr["_id"])
        if (
            not nsr["_admin"].get("nsState")
            or nsr["_admin"]["nsState"] == "NOT_INSTANTIATED"
        ):
            if operation == "terminate" and indata.get("autoremove"):
                # NSR must be deleted
                return (
                    None,
                    None,
                )  # a none in this case is used to indicate not instantiated. It can be removed
            if operation != "instantiate":
                raise EngineException(
                    "ns_instance '{}' cannot be '{}' because it is not instantiated".format(
                        nsInstanceId, operation
                    ),
                    HTTPStatus.CONFLICT,
                )
        else:
            # already instantiated: re-instantiation only allowed when forced
            if operation == "instantiate" and not session["force"]:
                raise EngineException(
                    "ns_instance '{}' cannot be '{}' because it is already instantiated".format(
                        nsInstanceId, operation
                    ),
                    HTTPStatus.CONFLICT,
                )
        # operation-specific parameter validation (delegated)
        self._check_ns_operation(session, nsr, operation, indata)
        # serialize free-form parameter maps to JSON strings before storage
        if indata.get("primitive_params"):
            indata["primitive_params"] = json.dumps(indata["primitive_params"])
        elif indata.get("additionalParamsForVnf"):
            indata["additionalParamsForVnf"] = json.dumps(
                indata["additionalParamsForVnf"]
            )

        if operation == "instantiate":
            # refresh vnfrs from the nsd and apply the instantiation params
            self._update_vnfrs_from_nsd(nsr)
            self._update_vnfrs(session, rollback, nsr, indata)
        if (operation == "update") and (indata["updateType"] == "CHANGE_VNFPKG"):
            # NS update switching a VNF to another package revision: when the
            # software version changed, rebuild flavors/images/shared-volumes in
            # the nsr and precompute the new vdur list that LCM will apply
            nsr_update = {}
            vnfd_id = indata["changeVnfPackageData"]["vnfdId"]
            vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            nsd = self.db.get_one("nsds", {"_id": nsr["nsd-id"]})
            ns_request = nsr["instantiate_params"]
            vnfr = self.db.get_one(
                "vnfrs", {"_id": indata["changeVnfPackageData"]["vnfInstanceId"]}
            )
            # revisions default to 1 when the descriptor predates revisioning
            latest_vnfd_revision = vnfd["_admin"].get("revision", 1)
            vnfr_vnfd_revision = vnfr.get("revision", 1)
            if latest_vnfd_revision != vnfr_vnfd_revision:
                # historical descriptor is stored under "<vnfd_id>:<revision>"
                old_vnfd_id = vnfd_id + ":" + str(vnfr_vnfd_revision)
                old_db_vnfd = self.db.get_one(
                    "vnfds_revisions", {"_id": old_vnfd_id}
                )
                old_sw_version = old_db_vnfd.get("software-version", "1.0")
                new_sw_version = vnfd.get("software-version", "1.0")
                if new_sw_version != old_sw_version:
                    vnf_index = vnfr["member-vnf-index-ref"]
                    self.logger.info("nsr {}".format(nsr))
                    for vdu in vnfd["vdu"]:
                        # mutate nsr in place with the new package's resources
                        self.nsrtopic._add_shared_volumes_to_nsr(
                            vdu, vnfd, nsr, vnf_index, latest_vnfd_revision
                        )
                        self.nsrtopic._add_flavor_to_nsr(
                            vdu, vnfd, nsr, vnf_index, latest_vnfd_revision
                        )
                        sw_image_id = vdu.get("sw-image-desc")
                        if sw_image_id:
                            image_data = self.nsrtopic._get_image_data_from_vnfd(
                                vnfd, sw_image_id
                            )
                            self.nsrtopic._add_image_to_nsr(nsr, image_data)
                        for alt_image in vdu.get("alternative-sw-image-desc", ()):
                            image_data = self.nsrtopic._get_image_data_from_vnfd(
                                vnfd, alt_image
                            )
                            self.nsrtopic._add_image_to_nsr(nsr, image_data)
                    # persist only the regenerated resource lists
                    nsr_update["image"] = nsr["image"]
                    nsr_update["flavor"] = nsr["flavor"]
                    nsr_update["shared-volumes"] = nsr["shared-volumes"]
                    self.db.set_one("nsrs", {"_id": nsr["_id"]}, nsr_update)
                    ns_k8s_namespace = self.nsrtopic._get_ns_k8s_namespace(
                        nsd, ns_request, session
                    )
                    # build a fresh vnfr from the new vnfd to obtain the new vdur
                    vnfr_descriptor = (
                        self.nsrtopic._create_vnfr_descriptor_from_vnfd(
                            nsd,
                            vnfd,
                            vnfd_id,
                            vnf_index,
                            nsr,
                            ns_request,
                            ns_k8s_namespace,
                            latest_vnfd_revision,
                        )
                    )
                    indata["newVdur"] = vnfr_descriptor["vdur"]
        nslcmop_desc = self._create_nslcmop(nsInstanceId, operation, indata)
        _id = nslcmop_desc["_id"]
        self.format_on_new(
            nslcmop_desc, session["project_id"], make_public=session["public"]
        )
        if indata.get("placement-engine"):
            # Save valid vim accounts in lcm operation descriptor
            nslcmop_desc["operationParams"][
                "validVimAccounts"
            ] = self._get_enabled_vims(session)
        self.db.create("nslcmops", nslcmop_desc)
        rollback.append({"topic": "nslcmops", "_id": _id})
        if not slice_object:
            # notify LCM; slice orchestration sends its own aggregated message
            self.msg.write("ns", operation, nslcmop_desc)
        return _id, None
    except ValidationError as e:  # TODO remove try Except, it is captured at nbi.py
        raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
    # except DbException as e:
    #     raise EngineException("Cannot get ns_instance '{}': {}".format(e), HTTPStatus.NOT_FOUND)
2386 |
|
|
2387 |
1 |
def cancel(self, rollback, session, indata=None, kwargs=None, headers=None):
    """Request cancellation of an ongoing NS LCM operation.

    Flags the target nslcmop as cancel-pending in the database and notifies
    LCM through the message bus so it can abort the operation.
    :param rollback: list to append created items at database in case a rollback must be done
    :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
    :param indata: must contain "nsLcmOpOccId" (operation to cancel) and "cancelMode"
    :param kwargs: used to override the indata descriptor
    :param headers: http request headers
    :raises EngineException: if the operation is not currently in progress
    """
    validate_input(indata, self.operation_schema["cancel"])
    # query-string kwargs take precedence over body content
    self._update_input_with_kwargs(indata, kwargs, yaml_format=True)
    op_occ_id = indata["nsLcmOpOccId"]
    cancel_mode = indata["cancelMode"]
    # locate the operation occurrence, restricted to the caller's projects
    db_filter = BaseTopic._get_project_filter(session)
    db_filter["_id"] = op_occ_id
    op_record = self.db.get_one("nslcmops", db_filter)
    # only operations still in progress may be cancelled
    ongoing_states = ("STARTING", "PROCESSING", "ROLLING_BACK")
    if op_record.get("operationState") not in ongoing_states:
        raise EngineException(
            "Operation is not in STARTING, PROCESSING or ROLLING_BACK state",
            http_code=HTTPStatus.CONFLICT,
        )
    ns_instance_id = op_record["nsInstanceId"]
    # mark the operation as cancel-pending so LCM picks it up
    self.db.set_one(
        "nslcmops",
        q_filter=db_filter,
        update_dict={"isCancelPending": True, "cancelMode": cancel_mode},
        fail_on_empty=False,
    )
    # notify LCM via the message bus that a cancellation was requested
    self.msg.write(
        "nslcmops",
        "cancel",
        {
            "_id": op_occ_id,
            "nsInstanceId": ns_instance_id,
            "cancelMode": cancel_mode,
        },
    )
2421 |
|
|
2422 |
1 |
def delete(self, session, _id, dry_run=False, not_send_msg=None):
    """Direct deletion of an nslcmop record is not supported.

    Operation records are removed together with their NS instance, so any
    direct call here is a programming error and is reported as such.
    """
    raise EngineException(
        "Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR
    )
2426 |
|
|
2427 |
1 |
def edit(self, session, _id, indata=None, kwargs=None, content=None):
    """Editing an nslcmop record is not supported.

    LCM operation occurrences are immutable once created; a direct call
    here is a programming error and is reported as such.
    """
    raise EngineException(
        "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
    )
2431 |
|
|
2432 |
|
|
2433 |
1 |
class NsiTopic(BaseTopic):
    """Topic handling Network Slice Instance records (collection "nsis")."""

    topic = "nsis"  # database collection managed by this topic
    topic_msg = "nsi"  # kafka topic used for notifications
    quota_name = "slice_instances"  # project quota checked on creation

    def __init__(self, db, fs, msg, auth):
        """Store the common engines and create the nested NsrTopic used to
        create/delete the NS records that compose a slice."""
        BaseTopic.__init__(self, db, fs, msg, auth)
        self.nsrTopic = NsrTopic(db, fs, msg, auth)

    @staticmethod
    def _format_ns_request(ns_request):
        # shallow copy is enough: the request is returned unmodified for now
        formated_request = copy(ns_request)
        # TODO: Add request params
        return formated_request

    @staticmethod
    def _format_addional_params(slice_request):
        """
        Get and format user additional params for NS or VNF
        :param slice_request: User instantiation additional parameters
        :return: a formatted copy of additional params or None if not supplied
        :raises EngineException: if a key is not a string or contains "." or "$"
            (both are reserved characters in mongo document paths)
        """
        additional_params = copy(slice_request.get("additionalParamsForNsi"))
        if additional_params:
            for k, v in additional_params.items():
                if not isinstance(k, str):
                    raise EngineException(
                        "Invalid param at additionalParamsForNsi:{}. Only string keys are allowed".format(
                            k
                        )
                    )
                if "." in k or "$" in k:
                    raise EngineException(
                        "Invalid param at additionalParamsForNsi:{}. Keys must not contain dots or $".format(
                            k
                        )
                    )
                if isinstance(v, (dict, tuple, list)):
                    # complex values are stored as yaml text with a marker prefix
                    additional_params[k] = "!!yaml " + safe_dump(v)
        return additional_params

    def check_conflict_on_del(self, session, _id, db_content):
        """
        Check that NSI is not instantiated
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param _id: nsi internal id
        :param db_content: The database content of the _id
        :return: None or raises EngineException with the conflict
        """
        if session["force"]:
            # forced deletion bypasses the state check
            return
        nsi = db_content
        if nsi["_admin"].get("nsiState") == "INSTANTIATED":
            raise EngineException(
                "nsi '{}' cannot be deleted because it is in 'INSTANTIATED' state. "
                "Launch 'terminate' operation first; or force deletion".format(_id),
                http_code=HTTPStatus.CONFLICT,
            )

    def delete_extra(self, session, _id, db_content, not_send_msg=None):
        """
        Deletes associated nsilcmops from database. Deletes associated filesystem.
        Set usageState of nst
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param _id: server internal id
        :param db_content: The database content of the descriptor
        :param not_send_msg: To not send message (False) or store content (list) instead
        :return: None if ok or raises EngineException with the problem
        """

        # Deleting the nsrs belonging to nsir
        nsir = db_content
        for nsrs_detailed_item in nsir["_admin"]["nsrs-detailed-list"]:
            nsr_id = nsrs_detailed_item["nsrId"]
            if nsrs_detailed_item.get("shared"):
                # skip deletion if another slice still shares this NS record
                _filter = {
                    "_admin.nsrs-detailed-list.ANYINDEX.shared": True,
                    "_admin.nsrs-detailed-list.ANYINDEX.nsrId": nsr_id,
                    "_id.ne": nsir["_id"],
                }
                nsi = self.db.get_one(
                    "nsis", _filter, fail_on_empty=False, fail_on_more=False
                )
                if nsi:  # last one using nsr
                    continue
            try:
                self.nsrTopic.delete(
                    session, nsr_id, dry_run=False, not_send_msg=not_send_msg
                )
            except (DbException, EngineException) as e:
                # a missing nsr is tolerated; anything else propagates
                if e.http_code == HTTPStatus.NOT_FOUND:
                    pass
                else:
                    raise

        # delete related nsilcmops database entries
        self.db.del_list("nsilcmops", {"netsliceInstanceId": _id})

        # Check and set used NST usage state
        nsir_admin = nsir.get("_admin")
        if nsir_admin and nsir_admin.get("nst-id"):
            # check if used by another NSI
            nsis_list = self.db.get_one(
                "nsis",
                {"nst-id": nsir_admin["nst-id"]},
                fail_on_empty=False,
                fail_on_more=False,
            )
            if not nsis_list:
                # no slice uses the template any more: release it
                self.db.set_one(
                    "nsts",
                    {"_id": nsir_admin["nst-id"]},
                    {"_admin.usageState": "NOT_IN_USE"},
                )

    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        """
        Creates a new netslice instance record into database. It also creates needed nsrs and vnfrs
        :param rollback: list to append the created items at database in case a rollback must be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: params to be used for the nsir
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :return: the _id of nsi descriptor created at database
        :raises EngineException: on validation failure, DISABLED template, or any
            error during creation (the failing "step" is included in the message)
        """

        step = "checking quotas"  # first step must be defined outside try
        try:
            self.check_quota(session)

            step = ""
            slice_request = self._remove_envelop(indata)
            # Override descriptor with query string kwargs
            self._update_input_with_kwargs(slice_request, kwargs)
            slice_request = self._validate_input_new(slice_request, session["force"])

            # look for nstd
            step = "getting nstd id='{}' from database".format(
                slice_request.get("nstId")
            )
            _filter = self._get_project_filter(session)
            _filter["_id"] = slice_request["nstId"]
            nstd = self.db.get_one("nsts", _filter)
            # check NST is not disabled
            step = "checking NST operationalState"
            if nstd["_admin"]["operationalState"] == "DISABLED":
                raise EngineException(
                    "nst with id '{}' is DISABLED, and thus cannot be used to create a netslice "
                    "instance".format(slice_request["nstId"]),
                    http_code=HTTPStatus.CONFLICT,
                )
            del _filter["_id"]

            # check NSD is not disabled
            # NOTE(review): this re-checks the same nstd operationalState as
            # above; presumably intended to check the NSDs instead — confirm
            step = "checking operationalState"
            if nstd["_admin"]["operationalState"] == "DISABLED":
                raise EngineException(
                    "nst with id '{}' is DISABLED, and thus cannot be used to create "
                    "a network slice".format(slice_request["nstId"]),
                    http_code=HTTPStatus.CONFLICT,
                )

            nstd.pop("_admin", None)
            nstd_id = nstd.pop("_id", None)
            nsi_id = str(uuid4())
            step = "filling nsi_descriptor with input data"

            # Creating the NSIR
            nsi_descriptor = {
                "id": nsi_id,
                "name": slice_request["nsiName"],
                "description": slice_request.get("nsiDescription", ""),
                "datacenter": slice_request["vimAccountId"],
                "nst-ref": nstd["id"],
                "instantiation_parameters": slice_request,
                "network-slice-template": nstd,
                "nsr-ref-list": [],
                "vlr-list": [],
                "_id": nsi_id,
                "additionalParamsForNsi": self._format_addional_params(slice_request),
            }

            step = "creating nsi at database"
            self.format_on_new(
                nsi_descriptor, session["project_id"], make_public=session["public"]
            )
            nsi_descriptor["_admin"]["nsiState"] = "NOT_INSTANTIATED"
            nsi_descriptor["_admin"]["netslice-subnet"] = None
            nsi_descriptor["_admin"]["deployed"] = {}
            nsi_descriptor["_admin"]["deployed"]["RO"] = []
            nsi_descriptor["_admin"]["nst-id"] = nstd_id

            # Creating netslice-vld for the RO.
            step = "creating netslice-vld at database"

            # Building the vlds list to be deployed
            # From netslice descriptors, creating the initial list
            nsi_vlds = []

            for netslice_vlds in get_iterable(nstd.get("netslice-vld")):
                # Getting template Instantiation parameters from NST
                nsi_vld = deepcopy(netslice_vlds)
                nsi_vld["shared-nsrs-list"] = []
                nsi_vld["vimAccountId"] = slice_request["vimAccountId"]
                nsi_vlds.append(nsi_vld)

            nsi_descriptor["_admin"]["netslice-vld"] = nsi_vlds
            # Creating netslice-subnet_record.
            needed_nsds = {}  # cache of nsd documents keyed by nsd-ref
            services = []

            # Updating the nstd with the nsd["_id"] associated to the nss -> services list
            for member_ns in nstd["netslice-subnet"]:
                nsd_id = member_ns["nsd-ref"]
                step = "getting nstd id='{}' constituent-nsd='{}' from database".format(
                    member_ns["nsd-ref"], member_ns["id"]
                )
                if nsd_id not in needed_nsds:
                    # Obtain nsd
                    _filter["id"] = nsd_id
                    nsd = self.db.get_one(
                        "nsds", _filter, fail_on_empty=True, fail_on_more=True
                    )
                    del _filter["id"]
                    nsd.pop("_admin")
                    needed_nsds[nsd_id] = nsd
                else:
                    nsd = needed_nsds[nsd_id]
                member_ns["_id"] = needed_nsds[nsd_id].get("_id")
                services.append(member_ns)

                step = "filling nsir nsd-id='{}' constituent-nsd='{}' from database".format(
                    member_ns["nsd-ref"], member_ns["id"]
                )

            # creates Network Services records (NSRs)
            step = "creating nsrs at database using NsrTopic.new()"
            ns_params = slice_request.get("netslice-subnet")
            nsrs_list = []
            nsi_netslice_subnet = []
            for service in services:
                # Check if the netslice-subnet is shared and if it is share if the nss exists
                _id_nsr = None
                indata_ns = {}
                # Is the nss shared and instantiated?
                _filter["_admin.nsrs-detailed-list.ANYINDEX.shared"] = True
                _filter["_admin.nsrs-detailed-list.ANYINDEX.nsd-id"] = service[
                    "nsd-ref"
                ]
                _filter["_admin.nsrs-detailed-list.ANYINDEX.nss-id"] = service["id"]
                nsi = self.db.get_one(
                    "nsis", _filter, fail_on_empty=False, fail_on_more=False
                )
                if nsi and service.get("is-shared-nss"):
                    # reuse the nsr already created by the other slice
                    nsrs_detailed_list = nsi["_admin"]["nsrs-detailed-list"]
                    for nsrs_detailed_item in nsrs_detailed_list:
                        if nsrs_detailed_item["nsd-id"] == service["nsd-ref"]:
                            if nsrs_detailed_item["nss-id"] == service["id"]:
                                _id_nsr = nsrs_detailed_item["nsrId"]
                                break
                    for netslice_subnet in nsi["_admin"]["netslice-subnet"]:
                        if netslice_subnet["nss-id"] == service["id"]:
                            indata_ns = netslice_subnet
                            break
                else:
                    # build instantiation params for a brand-new nsr
                    indata_ns = {}
                    if service.get("instantiation-parameters"):
                        indata_ns = deepcopy(service["instantiation-parameters"])
                        # del service["instantiation-parameters"]

                    indata_ns["nsdId"] = service["_id"]
                    indata_ns["nsName"] = (
                        slice_request.get("nsiName") + "." + service["id"]
                    )
                    indata_ns["vimAccountId"] = slice_request.get("vimAccountId")
                    indata_ns["nsDescription"] = service["description"]
                    if slice_request.get("ssh_keys"):
                        indata_ns["ssh_keys"] = slice_request.get("ssh_keys")

                    if ns_params:
                        # per-subnet overrides from the slice request, matched by id
                        for ns_param in ns_params:
                            if ns_param.get("id") == service["id"]:
                                copy_ns_param = deepcopy(ns_param)
                                del copy_ns_param["id"]
                                indata_ns.update(copy_ns_param)
                                break

                    # Creates Nsr objects
                    _id_nsr, _ = self.nsrTopic.new(
                        rollback, session, indata_ns, kwargs, headers
                    )
                nsrs_item = {
                    "nsrId": _id_nsr,
                    "shared": service.get("is-shared-nss"),
                    "nsd-id": service["nsd-ref"],
                    "nss-id": service["id"],
                    "nslcmop_instantiate": None,
                }
                indata_ns["nss-id"] = service["id"]
                nsrs_list.append(nsrs_item)
                nsi_netslice_subnet.append(indata_ns)
                nsr_ref = {"nsr-ref": _id_nsr}
                nsi_descriptor["nsr-ref-list"].append(nsr_ref)

            # Adding the nsrs list to the nsi
            nsi_descriptor["_admin"]["nsrs-detailed-list"] = nsrs_list
            nsi_descriptor["_admin"]["netslice-subnet"] = nsi_netslice_subnet
            # mark the template as in use now that a slice references it
            self.db.set_one(
                "nsts", {"_id": slice_request["nstId"]}, {"_admin.usageState": "IN_USE"}
            )

            # Creating the entry in the database
            self.db.create("nsis", nsi_descriptor)
            rollback.append({"topic": "nsis", "_id": nsi_id})
            return nsi_id, None
        except ValidationError as e:
            raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
        except Exception as e:  # TODO remove try Except, it is captured at nbi.py
            self.logger.exception(
                "Exception {} at NsiTopic.new()".format(e), exc_info=True
            )
            raise EngineException("Error {}: {}".format(step, e))

    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        # slice instance records are never edited through this API
        raise EngineException(
            "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )
2760 |
|
|
2761 |
|
|
2762 |
1 |
class NsiLcmOpTopic(BaseTopic):
    """Topic handling LCM operations over network slice instances ("nsilcmops")."""

    topic = "nsilcmops"  # database collection managed by this topic
    topic_msg = "nsi"  # kafka topic used for notifications
    operation_schema = {  # mapping between operation and jsonschema to validate
        "instantiate": nsi_instantiate,
        "terminate": None,
    }

    def __init__(self, db, fs, msg, auth):
        """Store the common engines and create the nested NsLcmOpTopic used to
        create the per-NS operations that compose a slice operation."""
        BaseTopic.__init__(self, db, fs, msg, auth)
        self.nsi_NsLcmOpTopic = NsLcmOpTopic(self.db, self.fs, self.msg, self.auth)

    def _check_nsi_operation(self, session, nsir, operation, indata):
        """
        Check that user has enter right parameters for the operation
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param operation: it can be: instantiate, terminate, action, TODO: update, heal
        :param indata: descriptor with the parameters of the operation
        :return: None
        :raises EngineException: if a referenced netslice-subnet id is unknown
        """
        nsds = {}  # cache of nsd documents keyed by nsd-ref
        nstd = nsir["network-slice-template"]

        def check_valid_netslice_subnet_id(nstId):
            # TODO change to vnfR (??)
            for netslice_subnet in nstd["netslice-subnet"]:
                if nstId == netslice_subnet["id"]:
                    nsd_id = netslice_subnet["nsd-ref"]
                    if nsd_id not in nsds:
                        _filter = self._get_project_filter(session)
                        _filter["id"] = nsd_id
                        nsds[nsd_id] = self.db.get_one("nsds", _filter)
                    return nsds[nsd_id]
            else:
                # for/else: reached only when no subnet matched (returns exit early)
                raise EngineException(
                    "Invalid parameter nstId='{}' is not one of the "
                    "nst:netslice-subnet".format(nstId)
                )

        if operation == "instantiate":
            # check the existance of netslice-subnet items
            for in_nst in get_iterable(indata.get("netslice-subnet")):
                check_valid_netslice_subnet_id(in_nst["id"])

    def _create_nsilcmop(self, session, netsliceInstanceId, operation, params):
        """Build (without storing) a new nsilcmop descriptor for the given
        slice instance and operation; "id" and "_id" share a fresh uuid."""
        now = time()
        _id = str(uuid4())
        nsilcmop = {
            "id": _id,
            "_id": _id,
            "operationState": "PROCESSING",  # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
            "statusEnteredTime": now,
            "netsliceInstanceId": netsliceInstanceId,
            "lcmOperationType": operation,
            "startTime": now,
            "isAutomaticInvocation": False,
            "operationParams": params,
            "isCancelPending": False,
            "links": {
                "self": "/osm/nsilcm/v1/nsi_lcm_op_occs/" + _id,
                "netsliceInstanceId": "/osm/nsilcm/v1/netslice_instances/"
                + netsliceInstanceId,
            },
        }
        return nsilcmop

    def add_shared_nsr_2vld(self, nsir, nsr_item):
        """Register a shared NSR in the matching netslice-vld "shared-nsrs-list"
        of the nsir (avoiding duplicates) and persist the updated vld list."""
        for nst_sb_item in nsir["network-slice-template"].get("netslice-subnet"):
            if nst_sb_item.get("is-shared-nss"):
                for admin_subnet_item in nsir["_admin"].get("netslice-subnet"):
                    if admin_subnet_item["nss-id"] == nst_sb_item["id"]:
                        for admin_vld_item in nsir["_admin"].get("netslice-vld"):
                            for admin_vld_nss_cp_ref_item in admin_vld_item[
                                "nss-connection-point-ref"
                            ]:
                                if (
                                    admin_subnet_item["nss-id"]
                                    == admin_vld_nss_cp_ref_item["nss-ref"]
                                ):
                                    if (
                                        not nsr_item["nsrId"]
                                        in admin_vld_item["shared-nsrs-list"]
                                    ):
                                        admin_vld_item["shared-nsrs-list"].append(
                                            nsr_item["nsrId"]
                                        )
                                    break
        # self.db.set_one("nsis", {"_id": nsir["_id"]}, nsir)
        self.db.set_one(
            "nsis",
            {"_id": nsir["_id"]},
            {"_admin.netslice-vld": nsir["_admin"].get("netslice-vld")},
        )

    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        """
        Performs a new operation over a ns
        :param rollback: list to append created items at database in case a rollback must to be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: descriptor with the parameters of the operation. It must contains among others
            netsliceInstanceId: _id of the nsir to perform the operation
            operation: it can be: instantiate, terminate, action, TODO: update, heal
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :return: id of the nslcmops
        """
        try:
            # Override descriptor with query string kwargs
            self._update_input_with_kwargs(indata, kwargs)
            operation = indata["lcmOperationType"]
            netsliceInstanceId = indata["netsliceInstanceId"]
            validate_input(indata, self.operation_schema[operation])

            # get nsi from netsliceInstanceId
            _filter = self._get_project_filter(session)
            _filter["_id"] = netsliceInstanceId
            nsir = self.db.get_one("nsis", _filter)
            logging_prefix = "nsi={} {} ".format(netsliceInstanceId, operation)
            del _filter["_id"]

            # initial checking
            if (
                not nsir["_admin"].get("nsiState")
                or nsir["_admin"]["nsiState"] == "NOT_INSTANTIATED"
            ):
                if operation == "terminate" and indata.get("autoremove"):
                    # NSIR must be deleted
                    return (
                        None,
                        None,
                    )  # a none in this case is used to indicate not instantiated. It can be removed
                if operation != "instantiate":
                    raise EngineException(
                        "netslice_instance '{}' cannot be '{}' because it is not instantiated".format(
                            netsliceInstanceId, operation
                        ),
                        HTTPStatus.CONFLICT,
                    )
            else:
                # already instantiated: re-instantiation only allowed when forced
                if operation == "instantiate" and not session["force"]:
                    raise EngineException(
                        "netslice_instance '{}' cannot be '{}' because it is already instantiated".format(
                            netsliceInstanceId, operation
                        ),
                        HTTPStatus.CONFLICT,
                    )

            # Creating all the NS_operation (nslcmop)
            # Get service list from db
            nsrs_list = nsir["_admin"]["nsrs-detailed-list"]
            nslcmops = []
            # nslcmops_item = None
            for index, nsr_item in enumerate(nsrs_list):
                nsr_id = nsr_item["nsrId"]
                if nsr_item.get("shared"):
                    # look for another slice that shares this nsr and already
                    # holds a recorded instantiate operation for it
                    _filter["_admin.nsrs-detailed-list.ANYINDEX.shared"] = True
                    _filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_id
                    _filter[
                        "_admin.nsrs-detailed-list.ANYINDEX.nslcmop_instantiate.ne"
                    ] = None
                    _filter["_id.ne"] = netsliceInstanceId
                    nsi = self.db.get_one(
                        "nsis", _filter, fail_on_empty=False, fail_on_more=False
                    )
                    if operation == "terminate":
                        # clear the recorded instantiate op for this shared nsr
                        _update = {
                            "_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(
                                index
                            ): None
                        }
                        self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
                        if (
                            nsi
                        ):  # other nsi is using this nsr and it needs this nsr instantiated
                            continue  # do not create nsilcmop
                    else:  # instantiate
                        # looks the first nsi fulfilling the conditions but not being the current NSIR
                        if nsi:
                            nsi_nsr_item = next(
                                n
                                for n in nsi["_admin"]["nsrs-detailed-list"]
                                if n["nsrId"] == nsr_id
                                and n["shared"]
                                and n["nslcmop_instantiate"]
                            )
                            self.add_shared_nsr_2vld(nsir, nsr_item)
                            nslcmops.append(nsi_nsr_item["nslcmop_instantiate"])
                            _update = {
                                "_admin.nsrs-detailed-list.{}".format(
                                    index
                                ): nsi_nsr_item
                            }
                            self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
                            # continue to not create nslcmop since nsrs is shared and nsrs was created
                            continue
                        else:
                            self.add_shared_nsr_2vld(nsir, nsr_item)

                # create operation
                try:
                    indata_ns = {
                        "lcmOperationType": operation,
                        "nsInstanceId": nsr_id,
                        # Including netslice_id in the ns instantiate Operation
                        "netsliceInstanceId": netsliceInstanceId,
                    }
                    if operation == "instantiate":
                        service = self.db.get_one("nsrs", {"_id": nsr_id})
                        indata_ns.update(service["instantiate_params"])

                    # Creating NS_LCM_OP with the flag slice_object=True to not trigger the service instantiation
                    # message via kafka bus
                    nslcmop, _ = self.nsi_NsLcmOpTopic.new(
                        rollback, session, indata_ns, None, headers, slice_object=True
                    )
                    nslcmops.append(nslcmop)
                    if operation == "instantiate":
                        # remember which nslcmop instantiated this nsr
                        _update = {
                            "_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(
                                index
                            ): nslcmop
                        }
                        self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
                except (DbException, EngineException) as e:
                    # a missing nsr is skipped; anything else propagates
                    if e.http_code == HTTPStatus.NOT_FOUND:
                        self.logger.info(
                            logging_prefix
                            + "skipping NS={} because not found".format(nsr_id)
                        )
                        pass
                    else:
                        raise

            # Creates nsilcmop
            indata["nslcmops_ids"] = nslcmops
            self._check_nsi_operation(session, nsir, operation, indata)

            nsilcmop_desc = self._create_nsilcmop(
                session, netsliceInstanceId, operation, indata
            )
            self.format_on_new(
                nsilcmop_desc, session["project_id"], make_public=session["public"]
            )
            _id = self.db.create("nsilcmops", nsilcmop_desc)
            rollback.append({"topic": "nsilcmops", "_id": _id})
            self.msg.write("nsi", operation, nsilcmop_desc)
            return _id, None
        except ValidationError as e:
            raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)

    def delete(self, session, _id, dry_run=False, not_send_msg=None):
        # nsilcmop records are removed with their slice; direct calls are errors
        raise EngineException(
            "Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )

    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        # nsilcmop records are immutable; direct calls are errors
        raise EngineException(
            "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
        )