1 |
|
# -*- coding: utf-8 -*- |
2 |
|
|
3 |
|
# Licensed under the Apache License, Version 2.0 (the "License"); |
4 |
|
# you may not use this file except in compliance with the License. |
5 |
|
# You may obtain a copy of the License at |
6 |
|
# |
7 |
|
# http://www.apache.org/licenses/LICENSE-2.0 |
8 |
|
# |
9 |
|
# Unless required by applicable law or agreed to in writing, software |
10 |
|
# distributed under the License is distributed on an "AS IS" BASIS, |
11 |
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
12 |
|
# implied. |
13 |
|
# See the License for the specific language governing permissions and |
14 |
|
# limitations under the License. |
15 |
|
|
16 |
|
# import logging |
17 |
1 |
from uuid import uuid4 |
18 |
1 |
from http import HTTPStatus |
19 |
1 |
from time import time |
20 |
1 |
from copy import copy, deepcopy |
21 |
1 |
from osm_nbi.validation import validate_input, ValidationError, ns_instantiate, ns_terminate, ns_action, ns_scale,\ |
22 |
|
nsi_instantiate |
23 |
1 |
from osm_nbi.base_topic import BaseTopic, EngineException, get_iterable, deep_get, increment_ip_mac |
24 |
1 |
from yaml import safe_dump |
25 |
1 |
from osm_common.dbbase import DbException |
26 |
1 |
from osm_common.msgbase import MsgException |
27 |
1 |
from osm_common.fsbase import FsException |
28 |
1 |
from osm_nbi import utils |
29 |
1 |
from re import match # For checking that additional parameter names are valid Jinja2 identifiers |
30 |
|
|
31 |
1 |
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>" |
32 |
|
|
33 |
|
|
34 |
1 |
class NsrTopic(BaseTopic):
    """
    Topic handling Network Service Records (the "nsrs" database collection).

    Creation of an nsr also creates the associated vnfrs; deletion cleans up
    nslcmops, vnfrs, pdu usage and descriptor usageState (see delete_extra).
    """
    topic = "nsrs"  # database collection managed by this topic
    topic_msg = "ns"  # kafka topic used to notify changes
    quota_name = "ns_instances"  # project quota checked on creation
    schema_new = ns_instantiate  # jsonschema used to validate creation input

    def __init__(self, db, fs, msg, auth):
        BaseTopic.__init__(self, db, fs, msg, auth)
42 |
|
|
43 |
1 |
def _check_descriptor_dependencies(self, session, descriptor): |
44 |
|
""" |
45 |
|
Check that the dependent descriptors exist on a new descriptor or edition |
46 |
|
:param session: client session information |
47 |
|
:param descriptor: descriptor to be inserted or edit |
48 |
|
:return: None or raises exception |
49 |
|
""" |
50 |
0 |
if not descriptor.get("nsdId"): |
51 |
0 |
return |
52 |
0 |
nsd_id = descriptor["nsdId"] |
53 |
0 |
if not self.get_item_list(session, "nsds", {"id": nsd_id}): |
54 |
0 |
raise EngineException("Descriptor error at nsdId='{}' references a non exist nsd".format(nsd_id), |
55 |
|
http_code=HTTPStatus.CONFLICT) |
56 |
|
|
57 |
1 |
@staticmethod |
58 |
1 |
def format_on_new(content, project_id=None, make_public=False): |
59 |
1 |
BaseTopic.format_on_new(content, project_id=project_id, make_public=make_public) |
60 |
1 |
content["_admin"]["nsState"] = "NOT_INSTANTIATED" |
61 |
1 |
return None |
62 |
|
|
63 |
1 |
def check_conflict_on_del(self, session, _id, db_content): |
64 |
|
""" |
65 |
|
Check that NSR is not instantiated |
66 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
67 |
|
:param _id: nsr internal id |
68 |
|
:param db_content: The database content of the nsr |
69 |
|
:return: None or raises EngineException with the conflict |
70 |
|
""" |
71 |
1 |
if session["force"]: |
72 |
1 |
return |
73 |
1 |
nsr = db_content |
74 |
1 |
if nsr["_admin"].get("nsState") == "INSTANTIATED": |
75 |
1 |
raise EngineException("nsr '{}' cannot be deleted because it is in 'INSTANTIATED' state. " |
76 |
|
"Launch 'terminate' operation first; or force deletion".format(_id), |
77 |
|
http_code=HTTPStatus.CONFLICT) |
78 |
|
|
79 |
1 |
    def delete_extra(self, session, _id, db_content, not_send_msg=None):
        """
        Deletes associated nslcmops and vnfrs from database. Deletes associated filesystem.
        Set usageState of pdu, vnfd, nsd
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param _id: server internal id
        :param db_content: The database content of the descriptor
        :param not_send_msg: To not send message (False) or store content (list) instead
        :return: None if ok or raises EngineException with the problem
        """
        # remove the nsr temporal folder and every record hanging from this ns instance
        self.fs.file_delete(_id, ignore_non_exist=True)
        self.db.del_list("nslcmops", {"nsInstanceId": _id})
        self.db.del_list("vnfrs", {"nsr-id-ref": _id})

        # set all used pdus as free
        self.db.set_list("pdus", {"_admin.usage.nsr_id": _id},
                         {"_admin.usageState": "NOT_IN_USE", "_admin.usage": None})

        # Set NSD usageState back to NOT_IN_USE when no remaining nsr references it
        nsr = db_content
        used_nsd_id = nsr.get("nsd-id")
        if used_nsd_id:
            # check if used by another NSR
            nsrs_list = self.db.get_one("nsrs", {"nsd-id": used_nsd_id},
                                        fail_on_empty=False, fail_on_more=False)
            if not nsrs_list:
                self.db.set_one("nsds", {"_id": used_nsd_id}, {"_admin.usageState": "NOT_IN_USE"})

        # Set VNFD usageState back to NOT_IN_USE, one check per referenced vnfd
        used_vnfd_id_list = nsr.get("vnfd-id")
        if used_vnfd_id_list:
            for used_vnfd_id in used_vnfd_id_list:
                # check if used by another NSR
                nsrs_list = self.db.get_one("nsrs", {"vnfd-id": used_vnfd_id},
                                            fail_on_empty=False, fail_on_more=False)
                if not nsrs_list:
                    self.db.set_one("vnfds", {"_id": used_vnfd_id}, {"_admin.usageState": "NOT_IN_USE"})

        # delete extra ro_nsrs used for internal RO module
        self.db.del_one("ro_nsrs", q_filter={"_id": _id}, fail_on_empty=False)
119 |
|
|
120 |
1 |
@staticmethod |
121 |
|
def _format_ns_request(ns_request): |
122 |
1 |
formated_request = copy(ns_request) |
123 |
1 |
formated_request.pop("additionalParamsForNs", None) |
124 |
1 |
formated_request.pop("additionalParamsForVnf", None) |
125 |
1 |
return formated_request |
126 |
|
|
127 |
1 |
@staticmethod |
128 |
1 |
def _format_additional_params(ns_request, member_vnf_index=None, vdu_id=None, kdu_name=None, descriptor=None): |
129 |
|
""" |
130 |
|
Get and format user additional params for NS or VNF |
131 |
|
:param ns_request: User instantiation additional parameters |
132 |
|
:param member_vnf_index: None for extract NS params, or member_vnf_index to extract VNF params |
133 |
|
:param descriptor: If not None it check that needed parameters of descriptor are supplied |
134 |
|
:return: tuple with a formatted copy of additional params or None if not supplied, plus other parameters |
135 |
|
""" |
136 |
1 |
additional_params = None |
137 |
1 |
other_params = None |
138 |
1 |
if not member_vnf_index: |
139 |
1 |
additional_params = copy(ns_request.get("additionalParamsForNs")) |
140 |
1 |
where_ = "additionalParamsForNs" |
141 |
1 |
elif ns_request.get("additionalParamsForVnf"): |
142 |
1 |
where_ = "additionalParamsForVnf[member-vnf-index={}]".format(member_vnf_index) |
143 |
1 |
item = next((x for x in ns_request["additionalParamsForVnf"] if x["member-vnf-index"] == member_vnf_index), |
144 |
|
None) |
145 |
1 |
if item: |
146 |
1 |
if not vdu_id and not kdu_name: |
147 |
1 |
other_params = item |
148 |
1 |
additional_params = copy(item.get("additionalParams")) or {} |
149 |
1 |
if vdu_id and item.get("additionalParamsForVdu"): |
150 |
0 |
item_vdu = next((x for x in item["additionalParamsForVdu"] if x["vdu_id"] == vdu_id), None) |
151 |
0 |
other_params = item_vdu |
152 |
0 |
if item_vdu and item_vdu.get("additionalParams"): |
153 |
0 |
where_ += ".additionalParamsForVdu[vdu_id={}]".format(vdu_id) |
154 |
0 |
additional_params = item_vdu["additionalParams"] |
155 |
1 |
if kdu_name: |
156 |
0 |
additional_params = {} |
157 |
0 |
if item.get("additionalParamsForKdu"): |
158 |
0 |
item_kdu = next((x for x in item["additionalParamsForKdu"] if x["kdu_name"] == kdu_name), None) |
159 |
0 |
other_params = item_kdu |
160 |
0 |
if item_kdu and item_kdu.get("additionalParams"): |
161 |
0 |
where_ += ".additionalParamsForKdu[kdu_name={}]".format(kdu_name) |
162 |
0 |
additional_params = item_kdu["additionalParams"] |
163 |
|
|
164 |
1 |
if additional_params: |
165 |
1 |
for k, v in additional_params.items(): |
166 |
|
# BEGIN Check that additional parameter names are valid Jinja2 identifiers if target is not Kdu |
167 |
1 |
if not kdu_name and not match('^[a-zA-Z_][a-zA-Z0-9_]*$', k): |
168 |
0 |
raise EngineException("Invalid param name at {}:{}. Must contain only alphanumeric characters " |
169 |
|
"and underscores, and cannot start with a digit" |
170 |
|
.format(where_, k)) |
171 |
|
# END Check that additional parameter names are valid Jinja2 identifiers |
172 |
1 |
if not isinstance(k, str): |
173 |
0 |
raise EngineException("Invalid param at {}:{}. Only string keys are allowed".format(where_, k)) |
174 |
1 |
if "." in k or "$" in k: |
175 |
0 |
raise EngineException("Invalid param at {}:{}. Keys must not contain dots or $".format(where_, k)) |
176 |
1 |
if isinstance(v, (dict, tuple, list)): |
177 |
0 |
additional_params[k] = "!!yaml " + safe_dump(v) |
178 |
|
|
179 |
1 |
if descriptor: |
180 |
|
# check that enough parameters are supplied for the initial-config-primitive |
181 |
|
# TODO: check for cloud-init |
182 |
1 |
if member_vnf_index: |
183 |
1 |
if kdu_name: |
184 |
0 |
initial_primitives = None |
185 |
1 |
elif vdu_id: |
186 |
1 |
vdud = next(x for x in descriptor["vdu"] if x["id"] == vdu_id) |
187 |
1 |
initial_primitives = deep_get(vdud, ("vdu-configuration", "initial-config-primitive")) |
188 |
|
else: |
189 |
1 |
vnf_configurations = get_iterable(descriptor.get("vnf-configuration")) |
190 |
1 |
initial_primitives = [] |
191 |
1 |
for vnfc in vnf_configurations: |
192 |
1 |
for primitive in get_iterable(vnfc.get("initial-config-primitive")): |
193 |
1 |
initial_primitives.append(primitive) |
194 |
|
else: |
195 |
1 |
initial_primitives = deep_get(descriptor, ("ns-configuration", "initial-config-primitive")) |
196 |
|
|
197 |
1 |
for initial_primitive in get_iterable(initial_primitives): |
198 |
1 |
for param in get_iterable(initial_primitive.get("parameter")): |
199 |
1 |
if param["value"].startswith("<") and param["value"].endswith(">"): |
200 |
1 |
if param["value"] in ("<rw_mgmt_ip>", "<VDU_SCALE_INFO>", "<ns_config_info>"): |
201 |
1 |
continue |
202 |
1 |
if not additional_params or param["value"][1:-1] not in additional_params: |
203 |
1 |
raise EngineException("Parameter '{}' needed for vnfd[id={}]:vnf-configuration:" |
204 |
|
"initial-config-primitive[name={}] not supplied". |
205 |
|
format(param["value"], descriptor["id"], |
206 |
|
initial_primitive["name"])) |
207 |
|
|
208 |
1 |
return additional_params or None, other_params or None |
209 |
|
|
210 |
1 |
    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        """
        Creates a new nsr into database. It also creates needed vnfrs
        :param rollback: list to append the created items at database in case a rollback must be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: params to be used for the nsr
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :return: the _id of nsr descriptor created at database. Or an exception of type
            EngineException, ValidationError, DbException, FsException, MsgException.
            Note: Exceptions are not captured on purpose. They should be captured at called
        """
        try:
            # "step" tracks the current stage so the except clause below can report
            # exactly where the operation failed
            step = "checking quotas"
            self.check_quota(session)

            step = "validating input parameters"
            ns_request = self._remove_envelop(indata)
            self._update_input_with_kwargs(ns_request, kwargs)
            self._validate_input_new(ns_request, session["force"])

            step = "getting nsd id='{}' from database".format(ns_request.get("nsdId"))
            nsd = self._get_nsd_from_db(ns_request["nsdId"], session)
            ns_k8s_namespace = self._get_ns_k8s_namespace(nsd, ns_request, session)

            step = "checking nsdOperationalState"
            self._check_nsd_operational_state(nsd, ns_request)

            step = "filling nsr from input data"
            nsr_id = str(uuid4())
            nsr_descriptor = self._create_nsr_descriptor_from_nsd(nsd, ns_request, nsr_id)

            # Create VNFRs
            # cache each vnfd so it is read from the database only once
            needed_vnfds = {}
            # TODO: Change for multiple df support
            vnf_profiles = nsd.get("df", [[]])[0].get("vnf-profile", ())
            for vnfp in vnf_profiles:
                vnfd_id = vnfp.get("vnfd-id")
                vnf_index = vnfp.get("id")
                step = "getting vnfd id='{}' constituent-vnfd='{}' from database".format(vnfd_id, vnf_index)
                if vnfd_id not in needed_vnfds:
                    vnfd = self._get_vnfd_from_db(vnfd_id, session)
                    needed_vnfds[vnfd_id] = vnfd
                    nsr_descriptor["vnfd-id"].append(vnfd["_id"])
                else:
                    vnfd = needed_vnfds[vnfd_id]

                step = "filling vnfr vnfd-id='{}' constituent-vnfd='{}'".format(vnfd_id, vnf_index)
                vnfr_descriptor = self._create_vnfr_descriptor_from_vnfd(nsd, vnfd, vnfd_id, vnf_index, nsr_descriptor,
                                                                         ns_request, ns_k8s_namespace)

                step = "creating vnfr vnfd-id='{}' constituent-vnfd='{}' at database".format(vnfd_id, vnf_index)
                self._add_vnfr_to_db(vnfr_descriptor, rollback, session)
                nsr_descriptor["constituent-vnfr-ref"].append(vnfr_descriptor["id"])

            step = "creating nsr at database"
            self._add_nsr_to_db(nsr_descriptor, rollback, session)

            step = "creating nsr temporal folder"
            self.fs.mkdir(nsr_id)

            # NOTE(review): the second tuple element looks like an operation-id
            # placeholder (no nslcmop is created here) — confirm against callers
            return nsr_id, None
        except (ValidationError, EngineException, DbException, MsgException, FsException) as e:
            # re-raise the same exception type with the failing step appended to the message
            raise type(e)("{} while '{}'".format(e, step), http_code=e.http_code)
274 |
|
|
275 |
1 |
def _get_nsd_from_db(self, nsd_id, session): |
276 |
1 |
_filter = self._get_project_filter(session) |
277 |
1 |
_filter["_id"] = nsd_id |
278 |
1 |
return self.db.get_one("nsds", _filter) |
279 |
|
|
280 |
1 |
def _get_vnfd_from_db(self, vnfd_id, session): |
281 |
1 |
_filter = self._get_project_filter(session) |
282 |
1 |
_filter["id"] = vnfd_id |
283 |
1 |
vnfd = self.db.get_one("vnfds", _filter, fail_on_empty=True, fail_on_more=True) |
284 |
1 |
vnfd.pop("_admin") |
285 |
1 |
return vnfd |
286 |
|
|
287 |
1 |
def _add_nsr_to_db(self, nsr_descriptor, rollback, session): |
288 |
1 |
self.format_on_new(nsr_descriptor, session["project_id"], make_public=session["public"]) |
289 |
1 |
self.db.create("nsrs", nsr_descriptor) |
290 |
1 |
rollback.append({"topic": "nsrs", "_id": nsr_descriptor["id"]}) |
291 |
|
|
292 |
1 |
def _add_vnfr_to_db(self, vnfr_descriptor, rollback, session): |
293 |
1 |
self.format_on_new(vnfr_descriptor, session["project_id"], make_public=session["public"]) |
294 |
1 |
self.db.create("vnfrs", vnfr_descriptor) |
295 |
1 |
rollback.append({"topic": "vnfrs", "_id": vnfr_descriptor["id"]}) |
296 |
|
|
297 |
1 |
def _check_nsd_operational_state(self, nsd, ns_request): |
298 |
1 |
if nsd["_admin"]["operationalState"] == "DISABLED": |
299 |
0 |
raise EngineException("nsd with id '{}' is DISABLED, and thus cannot be used to create " |
300 |
|
"a network service".format(ns_request["nsdId"]), http_code=HTTPStatus.CONFLICT) |
301 |
|
|
302 |
1 |
def _get_ns_k8s_namespace(self, nsd, ns_request, session): |
303 |
1 |
additional_params, _ = self._format_additional_params(ns_request, descriptor=nsd) |
304 |
|
# use for k8s-namespace from ns_request or additionalParamsForNs. By default, the project_id |
305 |
1 |
ns_k8s_namespace = session["project_id"][0] if session["project_id"] else None |
306 |
1 |
if ns_request and ns_request.get("k8s-namespace"): |
307 |
0 |
ns_k8s_namespace = ns_request["k8s-namespace"] |
308 |
1 |
if additional_params and additional_params.get("k8s-namespace"): |
309 |
0 |
ns_k8s_namespace = additional_params["k8s-namespace"] |
310 |
|
|
311 |
1 |
return ns_k8s_namespace |
312 |
|
|
313 |
1 |
    def _create_nsr_descriptor_from_nsd(self, nsd, ns_request, nsr_id):
        """
        Build the initial nsr record from the nsd and the instantiation request.

        Besides copying nsd/request data it derives the "vld" entries (with their
        vnfd connection-point refs) and the "flavor"/"image" lists from every
        referenced vnfd's vdu descriptors.
        :param nsd: network service descriptor as stored in the "nsds" collection
        :param ns_request: validated user instantiation request (mutated: "nsr_id" is added)
        :param nsr_id: uuid already chosen for the new nsr
        :return: the nsr descriptor, not yet written to the database
        """
        now = time()
        additional_params, _ = self._format_additional_params(ns_request, descriptor=nsd)

        nsr_descriptor = {
            "name": ns_request["nsName"],
            "name-ref": ns_request["nsName"],
            "short-name": ns_request["nsName"],
            "admin-status": "ENABLED",
            "nsState": "NOT_INSTANTIATED",
            "currentOperation": "IDLE",
            "currentOperationID": None,
            "errorDescription": None,
            "errorDetail": None,
            "deploymentStatus": None,
            "configurationStatus": None,
            "vcaStatus": None,
            "nsd": {k: v for k, v in nsd.items()},  # shallow copy of the whole nsd
            "datacenter": ns_request["vimAccountId"],
            "resource-orchestrator": "osmopenmano",
            "description": ns_request.get("nsDescription", ""),
            "constituent-vnfr-ref": [],
            "operational-status": "init",  # typedef ns-operational-
            "config-status": "init",  # typedef config-states
            "detailed-status": "scheduled",
            "orchestration-progress": {},
            "create-time": now,
            "nsd-name-ref": nsd["name"],
            "operational-events": [],  # "id", "timestamp", "description", "event",
            "nsd-ref": nsd["id"],
            "nsd-id": nsd["_id"],
            "vnfd-id": [],
            "instantiate_params": self._format_ns_request(ns_request),
            "additionalParamsForNs": additional_params,
            "ns-instance-config-ref": nsr_id,
            "id": nsr_id,
            "_id": nsr_id,
            "ssh-authorized-key": ns_request.get("ssh_keys"),  # TODO remove
            "flavor": [],
            "image": [],
        }
        ns_request["nsr_id"] = nsr_id
        if ns_request and ns_request.get("config-units"):
            nsr_descriptor["config-units"] = ns_request["config-units"]

        # Create vld
        if nsd.get("virtual-link-desc"):
            nsr_vld = deepcopy(nsd.get("virtual-link-desc", []))
            # Fill each vld with vnfd-connection-point-ref data
            # TODO: Change for multiple df support
            all_vld_connection_point_data = {vld.get("id"): [] for vld in nsr_vld}
            vnf_profiles = nsd.get("df", [[]])[0].get("vnf-profile", ())
            for vnf_profile in vnf_profiles:
                for vlc in vnf_profile.get("virtual-link-connectivity", ()):
                    for cpd in vlc.get("constituent-cpd-id", ()):
                        all_vld_connection_point_data[vlc.get("virtual-link-profile-id")].append({
                            "member-vnf-index-ref": cpd.get("constituent-base-element-id"),
                            "vnfd-connection-point-ref": cpd.get("constituent-cpd-id"),
                            "vnfd-id-ref": vnf_profile.get("vnfd-id")
                        })

                vnfd = self.db.get_one("vnfds",
                                       {"id": vnf_profile.get("vnfd-id")},
                                       fail_on_empty=True,
                                       fail_on_more=True)

                # derive one flavor (and possibly one image) entry per vdu
                for vdu in vnfd.get("vdu", ()):
                    flavor_data = {}
                    guest_epa = {}
                    # Find this vdu compute and storage descriptors
                    vdu_virtual_compute = {}
                    vdu_virtual_storage = {}
                    for vcd in vnfd.get("virtual-compute-desc", ()):
                        if vcd.get("id") == vdu.get("virtual-compute-desc"):
                            vdu_virtual_compute = vcd
                    # only the first storage descriptor of the vdu is considered
                    for vsd in vnfd.get("virtual-storage-desc", ()):
                        if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
                            vdu_virtual_storage = vsd
                    # Get this vdu vcpus, memory and storage info for flavor_data
                    if vdu_virtual_compute.get("virtual-cpu", {}).get("num-virtual-cpu"):
                        flavor_data["vcpu-count"] = vdu_virtual_compute["virtual-cpu"]["num-virtual-cpu"]
                    if vdu_virtual_compute.get("virtual-memory", {}).get("size"):
                        # size is expressed in GB in the descriptor; flavor wants MB
                        flavor_data["memory-mb"] = float(vdu_virtual_compute["virtual-memory"]["size"]) * 1024.0
                    if vdu_virtual_storage.get("size-of-storage"):
                        flavor_data["storage-gb"] = vdu_virtual_storage["size-of-storage"]
                    # Get this vdu EPA info for guest_epa
                    if vdu_virtual_compute.get("virtual-cpu", {}).get("cpu-quota"):
                        guest_epa["cpu-quota"] = vdu_virtual_compute["virtual-cpu"]["cpu-quota"]
                    if vdu_virtual_compute.get("virtual-cpu", {}).get("pinning"):
                        vcpu_pinning = vdu_virtual_compute["virtual-cpu"]["pinning"]
                        if vcpu_pinning.get("thread-policy"):
                            guest_epa["cpu-thread-pinning-policy"] = vcpu_pinning["thread-policy"]
                        if vcpu_pinning.get("policy"):
                            cpu_policy = "SHARED" if vcpu_pinning["policy"] == "dynamic" else "DEDICATED"
                            guest_epa["cpu-pinning-policy"] = cpu_policy
                    if vdu_virtual_compute.get("virtual-memory", {}).get("mem-quota"):
                        guest_epa["mem-quota"] = vdu_virtual_compute["virtual-memory"]["mem-quota"]
                    if vdu_virtual_compute.get("virtual-memory", {}).get("mempage-size"):
                        guest_epa["mempage-size"] = vdu_virtual_compute["virtual-memory"]["mempage-size"]
                    if vdu_virtual_compute.get("virtual-memory", {}).get("numa-node-policy"):
                        guest_epa["numa-node-policy"] = vdu_virtual_compute["virtual-memory"]["numa-node-policy"]
                    if vdu_virtual_storage.get("disk-io-quota"):
                        guest_epa["disk-io-quota"] = vdu_virtual_storage["disk-io-quota"]

                    if guest_epa:
                        flavor_data["guest-epa"] = guest_epa

                    # flavor/image ids are just their position in the list, as strings
                    flavor_data["name"] = vdu["id"][:56] + "-flv"
                    flavor_data["id"] = str(len(nsr_descriptor["flavor"]))
                    nsr_descriptor["flavor"].append(flavor_data)

                    sw_image_id = vdu.get("sw-image-desc")
                    if sw_image_id:
                        sw_image_desc = utils.find_in_list(vnfd.get("sw-image-desc", ()),
                                                           lambda sw: sw["id"] == sw_image_id)
                        image_data = {}
                        if sw_image_desc.get("image"):
                            image_data["image"] = sw_image_desc["image"]
                        if sw_image_desc.get("checksum"):
                            image_data["image_checksum"] = sw_image_desc["checksum"]["hash"]
                        # reuse an existing image entry when every provided field matches
                        img = next((f for f in nsr_descriptor["image"] if
                                    all(f.get(k) == image_data[k] for k in image_data)), None)
                        if not img:
                            image_data["id"] = str(len(nsr_descriptor["image"]))
                            nsr_descriptor["image"].append(image_data)

            for vld in nsr_vld:
                vld["vnfd-connection-point-ref"] = all_vld_connection_point_data.get(vld.get("id"), [])
                vld["name"] = vld["id"]
            nsr_descriptor["vld"] = nsr_vld

        return nsr_descriptor
445 |
|
|
446 |
1 |
def _create_vnfr_descriptor_from_vnfd(self, nsd, vnfd, vnfd_id, vnf_index, nsr_descriptor, |
447 |
|
ns_request, ns_k8s_namespace): |
448 |
1 |
vnfr_id = str(uuid4()) |
449 |
1 |
nsr_id = nsr_descriptor["id"] |
450 |
1 |
now = time() |
451 |
1 |
additional_params, vnf_params = self._format_additional_params(ns_request, vnf_index, descriptor=vnfd) |
452 |
|
|
453 |
1 |
vnfr_descriptor = { |
454 |
|
"id": vnfr_id, |
455 |
|
"_id": vnfr_id, |
456 |
|
"nsr-id-ref": nsr_id, |
457 |
|
"member-vnf-index-ref": vnf_index, |
458 |
|
"additionalParamsForVnf": additional_params, |
459 |
|
"created-time": now, |
460 |
|
# "vnfd": vnfd, # at OSM model.but removed to avoid data duplication TODO: revise |
461 |
|
"vnfd-ref": vnfd_id, |
462 |
|
"vnfd-id": vnfd["_id"], # not at OSM model, but useful |
463 |
|
"vim-account-id": None, |
464 |
|
"vdur": [], |
465 |
|
"connection-point": [], |
466 |
|
"ip-address": None, # mgmt-interface filled by LCM |
467 |
|
} |
468 |
1 |
vnf_k8s_namespace = ns_k8s_namespace |
469 |
1 |
if vnf_params: |
470 |
1 |
if vnf_params.get("k8s-namespace"): |
471 |
0 |
vnf_k8s_namespace = vnf_params["k8s-namespace"] |
472 |
1 |
if vnf_params.get("config-units"): |
473 |
0 |
vnfr_descriptor["config-units"] = vnf_params["config-units"] |
474 |
|
|
475 |
|
# Create vld |
476 |
1 |
if vnfd.get("int-virtual-link-desc"): |
477 |
1 |
vnfr_descriptor["vld"] = [] |
478 |
1 |
for vnfd_vld in vnfd.get("int-virtual-link-desc"): |
479 |
1 |
vnfr_descriptor["vld"].append({key: vnfd_vld[key] for key in vnfd_vld}) |
480 |
|
|
481 |
1 |
for cp in vnfd.get("ext-cpd", ()): |
482 |
1 |
vnf_cp = { |
483 |
|
"name": cp.get("id"), |
484 |
|
"connection-point-id": cp.get("int-cpd").get("cpd"), |
485 |
|
"connection-point-vdu-id": cp.get("int-cpd").get("vdu-id"), |
486 |
|
"id": cp.get("id"), |
487 |
|
# "ip-address", "mac-address" # filled by LCM |
488 |
|
# vim-id # TODO it would be nice having a vim port id |
489 |
|
} |
490 |
1 |
vnfr_descriptor["connection-point"].append(vnf_cp) |
491 |
|
|
492 |
|
# Create k8s-cluster information |
493 |
|
# TODO: Validate if a k8s-cluster net can have more than one ext-cpd ? |
494 |
1 |
if vnfd.get("k8s-cluster"): |
495 |
0 |
vnfr_descriptor["k8s-cluster"] = vnfd["k8s-cluster"] |
496 |
0 |
all_k8s_cluster_nets_cpds = {} |
497 |
0 |
for cpd in get_iterable(vnfd.get("ext-cpd")): |
498 |
0 |
if cpd.get("k8s-cluster-net"): |
499 |
0 |
all_k8s_cluster_nets_cpds[cpd.get("k8s-cluster-net")] = cpd.get("id") |
500 |
0 |
for net in get_iterable(vnfr_descriptor["k8s-cluster"].get("nets")): |
501 |
0 |
if net.get("id") in all_k8s_cluster_nets_cpds: |
502 |
0 |
net["external-connection-point-ref"] = all_k8s_cluster_nets_cpds[net.get("id")] |
503 |
|
|
504 |
|
# update kdus |
505 |
|
# TODO: Change for multiple df support |
506 |
1 |
all_kdu_profiles = vnfd.get("df", [[]])[0].get("kdu-profile", ()) |
507 |
1 |
all_kdu_profiles_models = {profile.get("name"): profile.get("kdu-model-id") for profile in all_kdu_profiles} |
508 |
1 |
all_kdu_models = vnfd.get("kdu-model", ()) |
509 |
1 |
all_kdu_models = {model.get("id"): model for model in all_kdu_models} |
510 |
1 |
for kdu in get_iterable(vnfd.get("kdu")): |
511 |
0 |
additional_params, kdu_params = self._format_additional_params(ns_request, |
512 |
|
vnf_index, |
513 |
|
kdu_name=kdu["name"], |
514 |
|
descriptor=vnfd) |
515 |
0 |
kdu_k8s_namespace = vnf_k8s_namespace |
516 |
0 |
kdu_model = kdu_params.get("kdu_model") if kdu_params else None |
517 |
0 |
if kdu_params and kdu_params.get("k8s-namespace"): |
518 |
0 |
kdu_k8s_namespace = kdu_params["k8s-namespace"] |
519 |
|
|
520 |
0 |
kdur = { |
521 |
|
"additionalParams": additional_params, |
522 |
|
"k8s-namespace": kdu_k8s_namespace, |
523 |
|
"kdu-name": kdu.get("name"), |
524 |
|
# TODO "name": "" Name of the VDU in the VIM |
525 |
|
"ip-address": None, # mgmt-interface filled by LCM |
526 |
|
"k8s-cluster": {}, |
527 |
|
} |
528 |
0 |
if kdu_params and kdu_params.get("config-units"): |
529 |
0 |
kdur["config-units"] = kdu_params["config-units"] |
530 |
|
|
531 |
0 |
kdu_model_data = all_kdu_models[all_kdu_profiles_models[kdur["name"]]] |
532 |
0 |
kdur[kdu_model_data.get("kdu-model-type")] = kdu_model or kdu_model_data |
533 |
0 |
if not vnfr_descriptor.get("kdur"): |
534 |
0 |
vnfr_descriptor["kdur"] = [] |
535 |
0 |
vnfr_descriptor["kdur"].append(kdur) |
536 |
|
|
537 |
1 |
vnfd_mgmt_cp = vnfd.get("mgmt-cp") |
538 |
1 |
for vdu in vnfd.get("vdu", ()): |
539 |
1 |
additional_params, vdu_params = self._format_additional_params( |
540 |
|
ns_request, vnf_index, vdu_id=vdu["id"], descriptor=vnfd) |
541 |
1 |
vdur = { |
542 |
|
"vdu-id-ref": vdu["id"], |
543 |
|
# TODO "name": "" Name of the VDU in the VIM |
544 |
|
"ip-address": None, # mgmt-interface filled by LCM |
545 |
|
# "vim-id", "flavor-id", "image-id", "management-ip" # filled by LCM |
546 |
|
"internal-connection-point": [], |
547 |
|
"interfaces": [], |
548 |
|
"additionalParams": additional_params, |
549 |
|
"vdu-name": vdu["name"] |
550 |
|
} |
551 |
1 |
if vdu_params and vdu_params.get("config-units"): |
552 |
0 |
vdur["config-units"] = vdu_params["config-units"] |
553 |
1 |
if deep_get(vdu, ("supplemental-boot-data", "boot-data-drive")): |
554 |
0 |
vdur["boot-data-drive"] = vdu["supplemental-boot-data"]["boot-data-drive"] |
555 |
1 |
if vdu.get("pdu-type"): |
556 |
0 |
vdur["pdu-type"] = vdu["pdu-type"] |
557 |
0 |
vdur["name"] = vdu["pdu-type"] |
558 |
|
# TODO volumes: name, volume-id |
559 |
1 |
for icp in vdu.get("int-cpd", ()): |
560 |
1 |
vdu_icp = { |
561 |
|
"id": icp["id"], |
562 |
|
"connection-point-id": icp["id"], |
563 |
|
"name": icp.get("id"), |
564 |
|
} |
565 |
1 |
vdur["internal-connection-point"].append(vdu_icp) |
566 |
|
|
567 |
1 |
for iface in icp.get("virtual-network-interface-requirement", ()): |
568 |
1 |
iface_fields = ("name", "mac-address") |
569 |
1 |
vdu_iface = {x: iface[x] for x in iface_fields if iface.get(x) is not None} |
570 |
|
|
571 |
1 |
vdu_iface["internal-connection-point-ref"] = vdu_icp["id"] |
572 |
1 |
for ext_cp in vnfd.get("ext-cpd", ()): |
573 |
1 |
if not ext_cp.get("int-cpd"): |
574 |
0 |
continue |
575 |
1 |
if ext_cp["int-cpd"].get("vdu-id") != vdu["id"]: |
576 |
1 |
continue |
577 |
1 |
if icp["id"] == ext_cp["int-cpd"].get("cpd"): |
578 |
1 |
vdu_iface["external-connection-point-ref"] = ext_cp.get("id") |
579 |
1 |
break |
580 |
|
|
581 |
1 |
if vnfd_mgmt_cp and vdu_iface.get("external-connection-point-ref") == vnfd_mgmt_cp: |
582 |
1 |
vdu_iface["mgmt-vnf"] = True |
583 |
1 |
vdu_iface["mgmt-interface"] = True # TODO change to mgmt-vdu |
584 |
|
|
585 |
1 |
if iface.get("virtual-interface"): |
586 |
1 |
vdu_iface.update(deepcopy(iface["virtual-interface"])) |
587 |
|
|
588 |
|
# look for network where this interface is connected |
589 |
1 |
iface_ext_cp = vdu_iface.get("external-connection-point-ref") |
590 |
1 |
if iface_ext_cp: |
591 |
|
# TODO: Change for multiple df support |
592 |
1 |
for df in get_iterable(nsd.get("df")): |
593 |
1 |
for vnf_profile in get_iterable(df.get("vnf-profile")): |
594 |
1 |
for vlc in get_iterable(vnf_profile.get("virtual-link-connectivity")): |
595 |
1 |
for cpd in get_iterable(vlc.get("constituent-cpd-id")): |
596 |
1 |
if cpd.get("constituent-cpd-id") == iface_ext_cp: |
597 |
1 |
vdu_iface["ns-vld-id"] = vlc.get("virtual-link-profile-id") |
598 |
1 |
break |
599 |
1 |
elif vdu_iface.get("internal-connection-point-ref"): |
600 |
1 |
vdu_iface["vnf-vld-id"] = icp.get("int-virtual-link-desc") |
601 |
|
|
602 |
1 |
vdur["interfaces"].append(vdu_iface) |
603 |
|
|
604 |
1 |
if vdu.get("sw-image-desc"): |
605 |
1 |
sw_image = utils.find_in_list( |
606 |
|
vnfd.get("sw-image-desc", ()), |
607 |
|
lambda image: image["id"] == vdu.get("sw-image-desc")) |
608 |
1 |
nsr_sw_image_data = utils.find_in_list( |
609 |
|
nsr_descriptor["image"], |
610 |
|
lambda nsr_image: (nsr_image.get("image") == sw_image.get("image")) |
611 |
|
) |
612 |
1 |
vdur["ns-image-id"] = nsr_sw_image_data["id"] |
613 |
|
|
614 |
1 |
flavor_data_name = vdu["id"][:56] + "-flv" |
615 |
1 |
nsr_flavor_desc = utils.find_in_list( |
616 |
|
nsr_descriptor["flavor"], |
617 |
|
lambda flavor: flavor["name"] == flavor_data_name) |
618 |
|
|
619 |
1 |
if nsr_flavor_desc: |
620 |
1 |
vdur["ns-flavor-id"] = nsr_flavor_desc["id"] |
621 |
|
|
622 |
1 |
count = int(vdu.get("count", 1)) |
623 |
1 |
for index in range(0, count): |
624 |
1 |
vdur = deepcopy(vdur) |
625 |
1 |
for iface in vdur["interfaces"]: |
626 |
1 |
if iface.get("ip-address"): |
627 |
0 |
iface["ip-address"] = increment_ip_mac(iface["ip-address"]) |
628 |
1 |
if iface.get("mac-address"): |
629 |
0 |
iface["mac-address"] = increment_ip_mac(iface["mac-address"]) |
630 |
|
|
631 |
1 |
vdur["_id"] = str(uuid4()) |
632 |
1 |
vdur["id"] = vdur["_id"] |
633 |
1 |
vdur["count-index"] = index |
634 |
1 |
vnfr_descriptor["vdur"].append(vdur) |
635 |
|
|
636 |
1 |
return vnfr_descriptor |
637 |
|
|
638 |
1 |
    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        """Not allowed for nsrs; ns records are modified internally. Calling this directly is an internal error."""
        raise EngineException("Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
640 |
|
|
641 |
|
|
642 |
1 |
class VnfrTopic(BaseTopic):
    # VNF records (vnfrs) are created and deleted by NsrTopic together with their nsr,
    # so the public write operations of this topic are disabled and always raise.
    topic = "vnfrs"     # database collection used by this topic
    topic_msg = None    # no message topic associated: nothing is notified for vnfrs

    def __init__(self, db, fs, msg, auth):
        """Initialize the topic with the shared database, filesystem, message-bus and auth handlers."""
        BaseTopic.__init__(self, db, fs, msg, auth)

    def delete(self, session, _id, dry_run=False, not_send_msg=None):
        """Not allowed; vnfrs are deleted by NsrTopic. Calling this directly is an internal error."""
        raise EngineException("Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR)

    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        """Not allowed; vnfrs are read-only through this topic. Calling this directly is an internal error."""
        raise EngineException("Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR)

    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        """Not allowed; vnfrs are created by NsrTopic. Calling this directly is an internal error."""
        # Not used because vnfrs are created and deleted by NsrTopic class directly
        raise EngineException("Method new called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
658 |
|
|
659 |
|
|
660 |
1 |
class NsLcmOpTopic(BaseTopic):
    """Topic handling NS lifecycle-management operation requests (nslcmops)."""
    topic = "nslcmops"  # database collection where the lcm operations are stored
    topic_msg = "ns"    # message topic where new operations are notified
    operation_schema = {  # mapping between operation and jsonschema to validate
        "instantiate": ns_instantiate,
        "action": ns_action,
        "scale": ns_scale,
        "terminate": ns_terminate,
    }
669 |
|
|
670 |
1 |
    def __init__(self, db, fs, msg, auth):
        """Initialize the topic with the shared database, filesystem, message-bus and auth handlers."""
        BaseTopic.__init__(self, db, fs, msg, auth)
672 |
|
|
673 |
1 |
def _check_ns_operation(self, session, nsr, operation, indata): |
674 |
|
""" |
675 |
|
Check that user has enter right parameters for the operation |
676 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
677 |
|
:param operation: it can be: instantiate, terminate, action, TODO: update, heal |
678 |
|
:param indata: descriptor with the parameters of the operation |
679 |
|
:return: None |
680 |
|
""" |
681 |
1 |
if operation == "action": |
682 |
1 |
self._check_action_ns_operation(indata, nsr) |
683 |
1 |
elif operation == "scale": |
684 |
0 |
self._check_scale_ns_operation(indata, nsr) |
685 |
1 |
elif operation == "instantiate": |
686 |
1 |
self._check_instantiate_ns_operation(indata, nsr, session) |
687 |
|
|
688 |
1 |
    def _check_action_ns_operation(self, indata, nsr):
        """
        Validate an 'action' operation: the target primitive must exist in the
        vnfd/nsd configuration and all non-defaulted parameters must be provided.
        :param indata: action parameters (member_vnf_index, vdu_id/kdu_name, primitive, primitive_params)
        :param nsr: ns record, used to reach the stored nsd
        :return: None. Raises EngineException when the primitive or its parameters are invalid.
        """
        nsd = nsr["nsd"]
        # check vnf_member_index
        if indata.get("vnf_member_index"):
            indata["member_vnf_index"] = indata.pop("vnf_member_index")  # for backward compatibility
        if indata.get("member_vnf_index"):
            # action targets a vnf: pick the config-primitives of the vdu, kdu or vnf level
            vnfd = self._get_vnfd_from_vnf_member_index(indata["member_vnf_index"], nsr["_id"])
            if indata.get("vdu_id"):
                self._check_valid_vdu(vnfd, indata["vdu_id"])
                # TODO: Change the [0] as vdu-configuration is now a list
                descriptor_configuration = vnfd.get("vdu-configuration", [{}])[0].get("config-primitive")
            elif indata.get("kdu_name"):
                self._check_valid_kdu(vnfd, indata["kdu_name"])
                # TODO: Change the [0] as kdu-configuration is now a list
                descriptor_configuration = vnfd.get("kdu-configuration", [{}])[0].get("config-primitive")
            else:
                # TODO: Change the [0] as vnf-configuration is now a list
                descriptor_configuration = vnfd.get("vnf-configuration", [{}])[0].get("config-primitive")
        else:  # use a NSD
            descriptor_configuration = nsd.get("ns-configuration", {}).get("config-primitive")

        # For k8s allows default primitives without validating the parameters
        if indata.get("kdu_name") and indata["primitive"] in ("upgrade", "rollback", "status", "inspect", "readme"):
            # TODO should be checked that rollback only can contains revsision_numbe????
            if not indata.get("member_vnf_index"):
                raise EngineException("Missing action parameter 'member_vnf_index' for default KDU primitive '{}'"
                                      .format(indata["primitive"]))
            return
        # if not, check primitive
        for config_primitive in get_iterable(descriptor_configuration):
            if indata["primitive"] == config_primitive["name"]:
                # check needed primitive_params are provided
                if indata.get("primitive_params"):
                    # shallow copy: entries are removed as they are matched against the descriptor
                    in_primitive_params_copy = copy(indata["primitive_params"])
                else:
                    in_primitive_params_copy = {}
                for paramd in get_iterable(config_primitive.get("parameter")):
                    if paramd["name"] in in_primitive_params_copy:
                        del in_primitive_params_copy[paramd["name"]]
                    elif not paramd.get("default-value"):
                        raise EngineException("Needed parameter {} not provided for primitive '{}'".format(
                            paramd["name"], indata["primitive"]))
                # check no extra primitive params are provided
                if in_primitive_params_copy:
                    raise EngineException("parameter/s '{}' not present at vnfd /nsd for primitive '{}'".format(
                        list(in_primitive_params_copy.keys()), indata["primitive"]))
                break
        else:
            # for/else: no config-primitive matched the requested one
            raise EngineException("Invalid primitive '{}' is not present at vnfd/nsd".format(indata["primitive"]))
737 |
|
|
738 |
1 |
def _check_scale_ns_operation(self, indata, nsr): |
739 |
0 |
vnfd = self._get_vnfd_from_vnf_member_index(indata["scaleVnfData"]["scaleByStepData"]["member-vnf-index"], |
740 |
|
nsr["_id"]) |
741 |
0 |
for scaling_group in get_iterable(vnfd.get("scaling-group-descriptor")): |
742 |
0 |
if indata["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"] == scaling_group["name"]: |
743 |
0 |
break |
744 |
|
else: |
745 |
0 |
raise EngineException("Invalid scaleVnfData:scaleByStepData:scaling-group-descriptor '{}' is not " |
746 |
|
"present at vnfd:scaling-group-descriptor" |
747 |
|
.format(indata["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"])) |
748 |
|
|
749 |
1 |
def _check_instantiate_ns_operation(self, indata, nsr, session): |
750 |
1 |
vnf_member_index_to_vnfd = {} # map between vnf_member_index to vnf descriptor. |
751 |
1 |
vim_accounts = [] |
752 |
1 |
wim_accounts = [] |
753 |
1 |
nsd = nsr["nsd"] |
754 |
1 |
self._check_valid_vim_account(indata["vimAccountId"], vim_accounts, session) |
755 |
1 |
self._check_valid_wim_account(indata.get("wimAccountId"), wim_accounts, session) |
756 |
1 |
for in_vnf in get_iterable(indata.get("vnf")): |
757 |
1 |
member_vnf_index = in_vnf["member-vnf-index"] |
758 |
1 |
if vnf_member_index_to_vnfd.get(member_vnf_index): |
759 |
0 |
vnfd = vnf_member_index_to_vnfd[member_vnf_index] |
760 |
|
else: |
761 |
1 |
vnfd = self._get_vnfd_from_vnf_member_index(member_vnf_index, nsr["_id"]) |
762 |
1 |
vnf_member_index_to_vnfd[member_vnf_index] = vnfd # add to cache, avoiding a later look for |
763 |
1 |
self._check_vnf_instantiation_params(in_vnf, vnfd) |
764 |
1 |
if in_vnf.get("vimAccountId"): |
765 |
0 |
self._check_valid_vim_account(in_vnf["vimAccountId"], vim_accounts, session) |
766 |
|
|
767 |
1 |
for in_vld in get_iterable(indata.get("vld")): |
768 |
0 |
self._check_valid_wim_account(in_vld.get("wimAccountId"), wim_accounts, session) |
769 |
0 |
for vldd in get_iterable(nsd.get("virtual-link-desc")): |
770 |
0 |
if in_vld["name"] == vldd["id"]: |
771 |
0 |
break |
772 |
|
else: |
773 |
0 |
raise EngineException("Invalid parameter vld:name='{}' is not present at nsd:vld".format( |
774 |
|
in_vld["name"])) |
775 |
|
|
776 |
1 |
def _get_vnfd_from_vnf_member_index(self, member_vnf_index, nsr_id): |
777 |
|
# Obtain vnf descriptor. The vnfr is used to get the vnfd._id used for this member_vnf_index |
778 |
1 |
vnfr = self.db.get_one("vnfrs", |
779 |
|
{"nsr-id-ref": nsr_id, "member-vnf-index-ref": member_vnf_index}, |
780 |
|
fail_on_empty=False) |
781 |
1 |
if not vnfr: |
782 |
1 |
raise EngineException("Invalid parameter member_vnf_index='{}' is not one of the " |
783 |
|
"nsd:constituent-vnfd".format(member_vnf_index)) |
784 |
1 |
vnfd = self.db.get_one("vnfds", {"_id": vnfr["vnfd-id"]}, fail_on_empty=False) |
785 |
1 |
if not vnfd: |
786 |
0 |
raise EngineException("vnfd id={} has been deleted!. Operation cannot be performed". |
787 |
|
format(vnfr["vnfd-id"])) |
788 |
1 |
return vnfd |
789 |
|
|
790 |
1 |
def _check_valid_vdu(self, vnfd, vdu_id): |
791 |
1 |
for vdud in get_iterable(vnfd.get("vdu")): |
792 |
1 |
if vdud["id"] == vdu_id: |
793 |
0 |
return vdud |
794 |
|
else: |
795 |
1 |
raise EngineException("Invalid parameter vdu_id='{}' not present at vnfd:vdu:id".format(vdu_id)) |
796 |
|
|
797 |
1 |
def _check_valid_kdu(self, vnfd, kdu_name): |
798 |
0 |
for kdud in get_iterable(vnfd.get("kdu")): |
799 |
0 |
if kdud["name"] == kdu_name: |
800 |
0 |
return kdud |
801 |
|
else: |
802 |
0 |
raise EngineException("Invalid parameter kdu_name='{}' not present at vnfd:kdu:name".format(kdu_name)) |
803 |
|
|
804 |
1 |
def _check_vnf_instantiation_params(self, in_vnf, vnfd): |
805 |
1 |
for in_vdu in get_iterable(in_vnf.get("vdu")): |
806 |
1 |
for vdu in get_iterable(vnfd.get("vdu")): |
807 |
1 |
if in_vdu["id"] == vdu["id"]: |
808 |
1 |
for volume in get_iterable(in_vdu.get("volume")): |
809 |
0 |
for volumed in get_iterable(vdu.get("virtual-storage-desc")): |
810 |
0 |
if volumed["id"] == volume["name"]: |
811 |
0 |
break |
812 |
|
else: |
813 |
0 |
raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}']:" |
814 |
|
"volume:name='{}' is not present at " |
815 |
|
"vnfd:vdu:virtual-storage-desc list". |
816 |
|
format(in_vnf["member-vnf-index"], in_vdu["id"], |
817 |
|
volume["id"])) |
818 |
|
|
819 |
1 |
vdu_if_names = set() |
820 |
1 |
for cpd in get_iterable(vdu.get("int-cpd")): |
821 |
1 |
for iface in get_iterable(cpd.get("virtual-network-interface-requirement")): |
822 |
1 |
vdu_if_names.add(iface.get("name")) |
823 |
|
|
824 |
1 |
for in_iface in get_iterable(in_vdu["interface"]): |
825 |
1 |
if in_iface["name"] in vdu_if_names: |
826 |
1 |
break |
827 |
|
else: |
828 |
0 |
raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}']:" |
829 |
|
"int-cpd[id='{}'] is not present at vnfd:vdu:int-cpd" |
830 |
|
.format(in_vnf["member-vnf-index"], in_vdu["id"], |
831 |
|
in_iface["name"])) |
832 |
1 |
break |
833 |
|
|
834 |
|
else: |
835 |
0 |
raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}'] is not present " |
836 |
|
"at vnfd:vdu".format(in_vnf["member-vnf-index"], in_vdu["id"])) |
837 |
|
|
838 |
1 |
vnfd_ivlds_cpds = {ivld.get("id"): set() for ivld in get_iterable(vnfd.get("int-virtual-link-desc"))} |
839 |
1 |
for vdu in get_iterable(vnfd.get("vdu")): |
840 |
1 |
for cpd in get_iterable(vnfd.get("int-cpd")): |
841 |
0 |
if cpd.get("int-virtual-link-desc"): |
842 |
0 |
vnfd_ivlds_cpds[cpd.get("int-virtual-link-desc")] = cpd.get("id") |
843 |
|
|
844 |
1 |
for in_ivld in get_iterable(in_vnf.get("internal-vld")): |
845 |
1 |
if in_ivld.get("name") in vnfd_ivlds_cpds: |
846 |
1 |
for in_icp in get_iterable(in_ivld.get("internal-connection-point")): |
847 |
0 |
if in_icp["id-ref"] in vnfd_ivlds_cpds[in_ivld.get("name")]: |
848 |
0 |
break |
849 |
|
else: |
850 |
0 |
raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:internal-vld[name" |
851 |
|
"='{}']:internal-connection-point[id-ref:'{}'] is not present at " |
852 |
|
"vnfd:internal-vld:name/id:internal-connection-point" |
853 |
|
.format(in_vnf["member-vnf-index"], in_ivld["name"], |
854 |
|
in_icp["id-ref"])) |
855 |
|
else: |
856 |
0 |
raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:internal-vld:name='{}'" |
857 |
|
" is not present at vnfd '{}'".format(in_vnf["member-vnf-index"], |
858 |
|
in_ivld["name"], vnfd["id"])) |
859 |
|
|
860 |
1 |
def _check_valid_vim_account(self, vim_account, vim_accounts, session): |
861 |
1 |
if vim_account in vim_accounts: |
862 |
0 |
return |
863 |
1 |
try: |
864 |
1 |
db_filter = self._get_project_filter(session) |
865 |
1 |
db_filter["_id"] = vim_account |
866 |
1 |
self.db.get_one("vim_accounts", db_filter) |
867 |
0 |
except Exception: |
868 |
0 |
raise EngineException("Invalid vimAccountId='{}' not present for the project".format(vim_account)) |
869 |
1 |
vim_accounts.append(vim_account) |
870 |
|
|
871 |
1 |
def _check_valid_wim_account(self, wim_account, wim_accounts, session): |
872 |
1 |
if not isinstance(wim_account, str): |
873 |
1 |
return |
874 |
0 |
if wim_account in wim_accounts: |
875 |
0 |
return |
876 |
0 |
try: |
877 |
0 |
db_filter = self._get_project_filter(session, write=False, show_all=True) |
878 |
0 |
db_filter["_id"] = wim_account |
879 |
0 |
self.db.get_one("wim_accounts", db_filter) |
880 |
0 |
except Exception: |
881 |
0 |
raise EngineException("Invalid wimAccountId='{}' not present for the project".format(wim_account)) |
882 |
0 |
wim_accounts.append(wim_account) |
883 |
|
|
884 |
1 |
def _look_for_pdu(self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback): |
885 |
|
""" |
886 |
|
Look for a free PDU in the catalog matching vdur type and interfaces. Fills vnfr.vdur with the interface |
887 |
|
(ip_address, ...) information. |
888 |
|
Modifies PDU _admin.usageState to 'IN_USE' |
889 |
|
:param session: contains "username", "admin", "force", "public", "project_id", "set_project" |
890 |
|
:param rollback: list with the database modifications to rollback if needed |
891 |
|
:param vnfr: vnfr to be updated. It is modified with pdu interface info if pdu is found |
892 |
|
:param vim_account: vim_account where this vnfr should be deployed |
893 |
|
:param vnfr_update: dictionary filled by this method with changes to be done at database vnfr |
894 |
|
:param vnfr_update_rollback: dictionary filled by this method with original content of vnfr in case a rollback |
895 |
|
of the changed vnfr is needed |
896 |
|
|
897 |
|
:return: List of PDU interfaces that are connected to an existing VIM network. Each item contains: |
898 |
|
"vim-network-name": used at VIM |
899 |
|
"name": interface name |
900 |
|
"vnf-vld-id": internal VNFD vld where this interface is connected, or |
901 |
|
"ns-vld-id": NSD vld where this interface is connected. |
902 |
|
NOTE: One, and only one between 'vnf-vld-id' and 'ns-vld-id' contains a value. The other will be None |
903 |
|
""" |
904 |
|
|
905 |
1 |
ifaces_forcing_vim_network = [] |
906 |
1 |
for vdur_index, vdur in enumerate(get_iterable(vnfr.get("vdur"))): |
907 |
1 |
if not vdur.get("pdu-type"): |
908 |
1 |
continue |
909 |
0 |
pdu_type = vdur.get("pdu-type") |
910 |
0 |
pdu_filter = self._get_project_filter(session) |
911 |
0 |
pdu_filter["vim_accounts"] = vim_account |
912 |
0 |
pdu_filter["type"] = pdu_type |
913 |
0 |
pdu_filter["_admin.operationalState"] = "ENABLED" |
914 |
0 |
pdu_filter["_admin.usageState"] = "NOT_IN_USE" |
915 |
|
# TODO feature 1417: "shared": True, |
916 |
|
|
917 |
0 |
available_pdus = self.db.get_list("pdus", pdu_filter) |
918 |
0 |
for pdu in available_pdus: |
919 |
|
# step 1 check if this pdu contains needed interfaces: |
920 |
0 |
match_interfaces = True |
921 |
0 |
for vdur_interface in vdur["interfaces"]: |
922 |
0 |
for pdu_interface in pdu["interfaces"]: |
923 |
0 |
if pdu_interface["name"] == vdur_interface["name"]: |
924 |
|
# TODO feature 1417: match per mgmt type |
925 |
0 |
break |
926 |
|
else: # no interface found for name |
927 |
0 |
match_interfaces = False |
928 |
0 |
break |
929 |
0 |
if match_interfaces: |
930 |
0 |
break |
931 |
|
else: |
932 |
0 |
raise EngineException( |
933 |
|
"No PDU of type={} at vim_account={} found for member_vnf_index={}, vdu={} matching interface " |
934 |
|
"names".format(pdu_type, vim_account, vnfr["member-vnf-index-ref"], vdur["vdu-id-ref"])) |
935 |
|
|
936 |
|
# step 2. Update pdu |
937 |
0 |
rollback_pdu = { |
938 |
|
"_admin.usageState": pdu["_admin"]["usageState"], |
939 |
|
"_admin.usage.vnfr_id": None, |
940 |
|
"_admin.usage.nsr_id": None, |
941 |
|
"_admin.usage.vdur": None, |
942 |
|
} |
943 |
0 |
self.db.set_one("pdus", {"_id": pdu["_id"]}, |
944 |
|
{"_admin.usageState": "IN_USE", |
945 |
|
"_admin.usage": {"vnfr_id": vnfr["_id"], |
946 |
|
"nsr_id": vnfr["nsr-id-ref"], |
947 |
|
"vdur": vdur["vdu-id-ref"]} |
948 |
|
}) |
949 |
0 |
rollback.append({"topic": "pdus", "_id": pdu["_id"], "operation": "set", "content": rollback_pdu}) |
950 |
|
|
951 |
|
# step 3. Fill vnfr info by filling vdur |
952 |
0 |
vdu_text = "vdur.{}".format(vdur_index) |
953 |
0 |
vnfr_update_rollback[vdu_text + ".pdu-id"] = None |
954 |
0 |
vnfr_update[vdu_text + ".pdu-id"] = pdu["_id"] |
955 |
0 |
for iface_index, vdur_interface in enumerate(vdur["interfaces"]): |
956 |
0 |
for pdu_interface in pdu["interfaces"]: |
957 |
0 |
if pdu_interface["name"] == vdur_interface["name"]: |
958 |
0 |
iface_text = vdu_text + ".interfaces.{}".format(iface_index) |
959 |
0 |
for k, v in pdu_interface.items(): |
960 |
0 |
if k in ("ip-address", "mac-address"): # TODO: switch-xxxxx must be inserted |
961 |
0 |
vnfr_update[iface_text + ".{}".format(k)] = v |
962 |
0 |
vnfr_update_rollback[iface_text + ".{}".format(k)] = vdur_interface.get(v) |
963 |
0 |
if pdu_interface.get("ip-address"): |
964 |
0 |
if vdur_interface.get("mgmt-interface") or vdur_interface.get("mgmt-vnf"): |
965 |
0 |
vnfr_update_rollback[vdu_text + ".ip-address"] = vdur.get("ip-address") |
966 |
0 |
vnfr_update[vdu_text + ".ip-address"] = pdu_interface["ip-address"] |
967 |
0 |
if vdur_interface.get("mgmt-vnf"): |
968 |
0 |
vnfr_update_rollback["ip-address"] = vnfr.get("ip-address") |
969 |
0 |
vnfr_update["ip-address"] = pdu_interface["ip-address"] |
970 |
0 |
vnfr_update[vdu_text + ".ip-address"] = pdu_interface["ip-address"] |
971 |
0 |
if pdu_interface.get("vim-network-name") or pdu_interface.get("vim-network-id"): |
972 |
0 |
ifaces_forcing_vim_network.append({ |
973 |
|
"name": vdur_interface.get("vnf-vld-id") or vdur_interface.get("ns-vld-id"), |
974 |
|
"vnf-vld-id": vdur_interface.get("vnf-vld-id"), |
975 |
|
"ns-vld-id": vdur_interface.get("ns-vld-id")}) |
976 |
0 |
if pdu_interface.get("vim-network-id"): |
977 |
0 |
ifaces_forcing_vim_network[-1]["vim-network-id"] = pdu_interface["vim-network-id"] |
978 |
0 |
if pdu_interface.get("vim-network-name"): |
979 |
0 |
ifaces_forcing_vim_network[-1]["vim-network-name"] = pdu_interface["vim-network-name"] |
980 |
0 |
break |
981 |
|
|
982 |
1 |
return ifaces_forcing_vim_network |
983 |
|
|
984 |
1 |
    def _look_for_k8scluster(self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback):
        """
        Look for an available k8scluster for all the kuds in the vnfd matching version and cni requirements.
        Fills vnfr.kdur with the selected k8scluster

        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param rollback: list with the database modifications to rollback if needed
        :param vnfr: vnfr to be updated. It is modified with pdu interface info if pdu is found
        :param vim_account: vim_account where this vnfr should be deployed
        :param vnfr_update: dictionary filled by this method with changes to be done at database vnfr
        :param vnfr_update_rollback: dictionary filled by this method with original content of vnfr in case a rollback
                                     of the changed vnfr is needed

        :return: List of KDU interfaces that are connected to an existing VIM network. Each item contains:
                 "vim-network-name": used at VIM
                  "name": interface name
                  "vnf-vld-id": internal VNFD vld where this interface is connected, or
                  "ns-vld-id": NSD vld where this interface is connected.
                  NOTE: One, and only one between 'vnf-vld-id' and 'ns-vld-id' contains a value. The other will be None
        """

        ifaces_forcing_vim_network = []
        if not vnfr.get("kdur"):
            # nothing to do when the vnfr has no kdu records
            return ifaces_forcing_vim_network

        kdu_filter = self._get_project_filter(session)
        kdu_filter["vim_account"] = vim_account
        # TODO kdu_filter["_admin.operationalState"] = "ENABLED"
        available_k8sclusters = self.db.get_list("k8sclusters", kdu_filter)

        k8s_requirements = {}  # just for logging
        for k8scluster in available_k8sclusters:
            if not vnfr.get("k8s-cluster"):
                # no requirements expressed: the first available cluster is taken
                break
            # restrict by cni
            if vnfr["k8s-cluster"].get("cni"):
                k8s_requirements["cni"] = vnfr["k8s-cluster"]["cni"]
                if not set(vnfr["k8s-cluster"]["cni"]).intersection(k8scluster.get("cni", ())):
                    continue
            # restrict by version
            if vnfr["k8s-cluster"].get("version"):
                k8s_requirements["version"] = vnfr["k8s-cluster"]["version"]
                if k8scluster.get("k8s_version") not in vnfr["k8s-cluster"]["version"]:
                    continue
            # restrict by number of networks
            if vnfr["k8s-cluster"].get("nets"):
                k8s_requirements["networks"] = len(vnfr["k8s-cluster"]["nets"])
                if not k8scluster.get("nets") or len(k8scluster["nets"]) < len(vnfr["k8s-cluster"]["nets"]):
                    continue
            break
        else:
            # for/else: no candidate cluster satisfied all restrictions (or the list was empty)
            raise EngineException("No k8scluster with requirements='{}' at vim_account={} found for member_vnf_index={}"
                                  .format(k8s_requirements, vim_account, vnfr["member-vnf-index-ref"]))

        # k8scluster keeps the cluster selected by the loop above
        for kdur_index, kdur in enumerate(get_iterable(vnfr.get("kdur"))):
            # step 3. Fill vnfr info by filling kdur
            kdu_text = "kdur.{}.".format(kdur_index)
            vnfr_update_rollback[kdu_text + "k8s-cluster.id"] = None
            vnfr_update[kdu_text + "k8s-cluster.id"] = k8scluster["_id"]

        # step 4. Check VIM networks that forces the selected k8s_cluster
        if vnfr.get("k8s-cluster") and vnfr["k8s-cluster"].get("nets"):
            k8scluster_net_list = list(k8scluster.get("nets").keys())
            for net_index, kdur_net in enumerate(vnfr["k8s-cluster"]["nets"]):
                # get a network from k8s_cluster nets. If name matches use this, if not use other
                if kdur_net["id"] in k8scluster_net_list:  # name matches
                    vim_net = k8scluster["nets"][kdur_net["id"]]
                    k8scluster_net_list.remove(kdur_net["id"])
                else:
                    # NOTE(review): assumes the cluster has at least as many nets as requested
                    # (checked during selection above when "nets" requirement was present)
                    vim_net = k8scluster["nets"][k8scluster_net_list[0]]
                    k8scluster_net_list.pop(0)
                vnfr_update_rollback["k8s-cluster.nets.{}.vim_net".format(net_index)] = None
                vnfr_update["k8s-cluster.nets.{}.vim_net".format(net_index)] = vim_net
                if vim_net and (kdur_net.get("vnf-vld-id") or kdur_net.get("ns-vld-id")):
                    ifaces_forcing_vim_network.append({
                        "name": kdur_net.get("vnf-vld-id") or kdur_net.get("ns-vld-id"),
                        "vnf-vld-id": kdur_net.get("vnf-vld-id"),
                        "ns-vld-id": kdur_net.get("ns-vld-id"),
                        "vim-network-name": vim_net,  # TODO can it be vim-network-id ???
                    })
        # TODO check that this forcing is not incompatible with other forcing
        return ifaces_forcing_vim_network
1066 |
|
|
1067 |
1 |
    def _update_vnfrs(self, session, rollback, nsr, indata):
        """
        Apply instantiation parameters to every vnfr of the ns: vim account,
        fixed ip/mac addresses, PDU allocation and k8s cluster selection.
        Database changes are recorded in rollback so they can be undone.
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param rollback: list with the database modifications to rollback if needed
        :param nsr: ns record being instantiated
        :param indata: instantiation parameters; may be extended in place with vld/vnf
                       entries when a PDU or k8scluster forces a concrete vim network
        """
        # get vnfr
        nsr_id = nsr["_id"]
        vnfrs = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

        for vnfr in vnfrs:
            vnfr_update = {}
            vnfr_update_rollback = {}
            member_vnf_index = vnfr["member-vnf-index-ref"]
            # update vim-account-id

            vim_account = indata["vimAccountId"]
            # check instantiate parameters
            for vnf_inst_params in get_iterable(indata.get("vnf")):
                if vnf_inst_params["member-vnf-index"] != member_vnf_index:
                    continue
                if vnf_inst_params.get("vimAccountId"):
                    # per-vnf vim account overrides the ns-wide one
                    vim_account = vnf_inst_params.get("vimAccountId")

                # get vnf.vdu.interface instantiation params to update vnfr.vdur.interfaces ip, mac
                for vdu_inst_param in get_iterable(vnf_inst_params.get("vdu")):
                    for vdur_index, vdur in enumerate(vnfr["vdur"]):
                        if vdu_inst_param["id"] != vdur["vdu-id-ref"]:
                            continue
                        for iface_inst_param in get_iterable(vdu_inst_param.get("interface")):
                            # NOTE(review): next() without default raises StopIteration if the
                            # interface name is unknown — presumably already validated earlier
                            iface_index, _ = next(i for i in enumerate(vdur["interfaces"])
                                                  if i[1]["name"] == iface_inst_param["name"])
                            vnfr_update_text = "vdur.{}.interfaces.{}".format(vdur_index, iface_index)
                            if iface_inst_param.get("ip-address"):
                                # ip/mac are shifted by count-index so each replica gets its own
                                vnfr_update[vnfr_update_text + ".ip-address"] = increment_ip_mac(
                                    iface_inst_param.get("ip-address"), vdur.get("count-index", 0))
                                vnfr_update[vnfr_update_text + ".fixed-ip"] = True
                            if iface_inst_param.get("mac-address"):
                                vnfr_update[vnfr_update_text + ".mac-address"] = increment_ip_mac(
                                    iface_inst_param.get("mac-address"), vdur.get("count-index", 0))
                                vnfr_update[vnfr_update_text + ".fixed-mac"] = True
                # get vnf.internal-vld.internal-conection-point instantiation params to update vnfr.vdur.interfaces
                # TODO update vld with the ip-profile
                for ivld_inst_param in get_iterable(vnf_inst_params.get("internal-vld")):
                    for icp_inst_param in get_iterable(ivld_inst_param.get("internal-connection-point")):
                        # look for iface
                        for vdur_index, vdur in enumerate(vnfr["vdur"]):
                            for iface_index, iface in enumerate(vdur["interfaces"]):
                                if iface.get("internal-connection-point-ref") == icp_inst_param["id-ref"]:
                                    vnfr_update_text = "vdur.{}.interfaces.{}".format(vdur_index, iface_index)
                                    if icp_inst_param.get("ip-address"):
                                        vnfr_update[vnfr_update_text + ".ip-address"] = increment_ip_mac(
                                            icp_inst_param.get("ip-address"), vdur.get("count-index", 0))
                                        vnfr_update[vnfr_update_text + ".fixed-ip"] = True
                                    if icp_inst_param.get("mac-address"):
                                        vnfr_update[vnfr_update_text + ".mac-address"] = increment_ip_mac(
                                            icp_inst_param.get("mac-address"), vdur.get("count-index", 0))
                                        vnfr_update[vnfr_update_text + ".fixed-mac"] = True
                                    break
            # get ip address from instantiation parameters.vld.vnfd-connection-point-ref
            for vld_inst_param in get_iterable(indata.get("vld")):
                for vnfcp_inst_param in get_iterable(vld_inst_param.get("vnfd-connection-point-ref")):
                    if vnfcp_inst_param["member-vnf-index-ref"] != member_vnf_index:
                        continue
                    # look for iface
                    for vdur_index, vdur in enumerate(vnfr["vdur"]):
                        for iface_index, iface in enumerate(vdur["interfaces"]):
                            if iface.get("external-connection-point-ref") == \
                                    vnfcp_inst_param["vnfd-connection-point-ref"]:
                                vnfr_update_text = "vdur.{}.interfaces.{}".format(vdur_index, iface_index)
                                if vnfcp_inst_param.get("ip-address"):
                                    vnfr_update[vnfr_update_text + ".ip-address"] = increment_ip_mac(
                                        vnfcp_inst_param.get("ip-address"), vdur.get("count-index", 0))
                                    vnfr_update[vnfr_update_text + ".fixed-ip"] = True
                                if vnfcp_inst_param.get("mac-address"):
                                    vnfr_update[vnfr_update_text + ".mac-address"] = increment_ip_mac(
                                        vnfcp_inst_param.get("mac-address"), vdur.get("count-index", 0))
                                    vnfr_update[vnfr_update_text + ".fixed-mac"] = True
                                break

            vnfr_update["vim-account-id"] = vim_account
            vnfr_update_rollback["vim-account-id"] = vnfr.get("vim-account-id")

            # get pdu
            ifaces_forcing_vim_network = self._look_for_pdu(session, rollback, vnfr, vim_account, vnfr_update,
                                                            vnfr_update_rollback)

            # get kdus
            ifaces_forcing_vim_network += self._look_for_k8scluster(session, rollback, vnfr, vim_account, vnfr_update,
                                                                    vnfr_update_rollback)
            # update database vnfr
            self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, vnfr_update)
            rollback.append({"topic": "vnfrs", "_id": vnfr["_id"], "operation": "set", "content": vnfr_update_rollback})

            # Update indata in case pdu forces to use a concrete vim-network-name
            # TODO check if user has already insert a vim-network-name and raises an error
            if not ifaces_forcing_vim_network:
                continue
            for iface_info in ifaces_forcing_vim_network:
                if iface_info.get("ns-vld-id"):
                    if "vld" not in indata:
                        indata["vld"] = []
                    indata["vld"].append({key: iface_info[key] for key in
                                          ("name", "vim-network-name", "vim-network-id") if iface_info.get(key)})

                elif iface_info.get("vnf-vld-id"):
                    if "vnf" not in indata:
                        indata["vnf"] = []
                    indata["vnf"].append({
                        "member-vnf-index": member_vnf_index,
                        "internal-vld": [{key: iface_info[key] for key in
                                          ("name", "vim-network-name", "vim-network-id") if iface_info.get(key)}]
                    })
1175 |
|
|
1176 |
1 |
@staticmethod |
1177 |
|
def _create_nslcmop(nsr_id, operation, params): |
1178 |
|
""" |
1179 |
|
Creates a ns-lcm-opp content to be stored at database. |
1180 |
|
:param nsr_id: internal id of the instance |
1181 |
|
:param operation: instantiate, terminate, scale, action, ... |
1182 |
|
:param params: user parameters for the operation |
1183 |
|
:return: dictionary following SOL005 format |
1184 |
|
""" |
1185 |
1 |
now = time() |
1186 |
1 |
_id = str(uuid4()) |
1187 |
1 |
nslcmop = { |
1188 |
|
"id": _id, |
1189 |
|
"_id": _id, |
1190 |
|
"operationState": "PROCESSING", # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK |
1191 |
|
"queuePosition": None, |
1192 |
|
"stage": None, |
1193 |
|
"errorMessage": None, |
1194 |
|
"detailedStatus": None, |
1195 |
|
"statusEnteredTime": now, |
1196 |
|
"nsInstanceId": nsr_id, |
1197 |
|
"lcmOperationType": operation, |
1198 |
|
"startTime": now, |
1199 |
|
"isAutomaticInvocation": False, |
1200 |
|
"operationParams": params, |
1201 |
|
"isCancelPending": False, |
1202 |
|
"links": { |
1203 |
|
"self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id, |
1204 |
|
"nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id, |
1205 |
|
} |
1206 |
|
} |
1207 |
1 |
return nslcmop |
1208 |
|
|
1209 |
1 |
def _get_enabled_vims(self, session): |
1210 |
|
""" |
1211 |
|
Retrieve and return VIM accounts that are accessible by current user and has state ENABLE |
1212 |
|
:param session: current session with user information |
1213 |
|
""" |
1214 |
0 |
db_filter = self._get_project_filter(session) |
1215 |
0 |
db_filter["_admin.operationalState"] = "ENABLED" |
1216 |
0 |
vims = self.db.get_list("vim_accounts", db_filter) |
1217 |
0 |
vimAccounts = [] |
1218 |
0 |
for vim in vims: |
1219 |
0 |
vimAccounts.append(vim['_id']) |
1220 |
0 |
return vimAccounts |
1221 |
|
|
1222 |
1 |
    def new(self, rollback, session, indata=None, kwargs=None, headers=None, slice_object=False):
        """
        Performs a new operation over a ns
        :param rollback: list to append created items at database in case a rollback must to be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: descriptor with the parameters of the operation. It must contains among others
            nsInstanceId: _id of the nsr to perform the operation
            operation: it can be: instantiate, terminate, action, TODO: update, heal
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :param slice_object: True when invoked from the slice topic; suppresses the kafka message
            and the slice-membership check on terminate
        :return: id of the nslcmops; (None, None) when terminating a never-instantiated ns
        """
        def check_if_nsr_is_not_slice_member(session, nsr_id):
            # refuse to terminate a NS that some slice instance still lists among its members
            nsis = None
            db_filter = self._get_project_filter(session)
            db_filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_id
            nsis = self.db.get_one("nsis", db_filter, fail_on_empty=False, fail_on_more=False)
            if nsis:
                raise EngineException("The NS instance {} cannot be terminated because is used by the slice {}".format(
                    nsr_id, nsis["_id"]), http_code=HTTPStatus.CONFLICT)

        try:
            # Override descriptor with query string kwargs
            self._update_input_with_kwargs(indata, kwargs, yaml_format=True)
            operation = indata["lcmOperationType"]
            nsInstanceId = indata["nsInstanceId"]

            validate_input(indata, self.operation_schema[operation])
            # get ns from nsr_id
            _filter = BaseTopic._get_project_filter(session)
            _filter["_id"] = nsInstanceId
            nsr = self.db.get_one("nsrs", _filter)

            # initial checking
            if operation == "terminate" and slice_object is False:
                check_if_nsr_is_not_slice_member(session, nsr["_id"])
            if not nsr["_admin"].get("nsState") or nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                if operation == "terminate" and indata.get("autoremove"):
                    # NSR must be deleted
                    return None, None  # a none in this case is used to indicate not instantiated. It can be removed
                if operation != "instantiate":
                    raise EngineException("ns_instance '{}' cannot be '{}' because it is not instantiated".format(
                        nsInstanceId, operation), HTTPStatus.CONFLICT)
            else:
                if operation == "instantiate" and not session["force"]:
                    raise EngineException("ns_instance '{}' cannot be '{}' because it is already instantiated".format(
                        nsInstanceId, operation), HTTPStatus.CONFLICT)
            # operation-specific parameter validation against the stored nsr (defined elsewhere in this topic)
            self._check_ns_operation(session, nsr, operation, indata)

            if operation == "instantiate":
                # presumably prepares the vnfrs with the instantiation parameters -- TODO confirm in _update_vnfrs
                self._update_vnfrs(session, rollback, nsr, indata)

            nslcmop_desc = self._create_nslcmop(nsInstanceId, operation, indata)
            _id = nslcmop_desc["_id"]
            self.format_on_new(nslcmop_desc, session["project_id"], make_public=session["public"])
            if indata.get("placement-engine"):
                # Save valid vim accounts in lcm operation descriptor
                nslcmop_desc['operationParams']['validVimAccounts'] = self._get_enabled_vims(session)
            self.db.create("nslcmops", nslcmop_desc)
            rollback.append({"topic": "nslcmops", "_id": _id})
            if not slice_object:
                # notify LCM via kafka; slice-driven operations are notified by the slice topic instead
                self.msg.write("ns", operation, nslcmop_desc)
            return _id, None
        except ValidationError as e:  # TODO remove try Except, it is captured at nbi.py
            raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
        # except DbException as e:
        #     raise EngineException("Cannot get ns_instance '{}': {}".format(e), HTTPStatus.NOT_FOUND)
1289 |
|
|
1290 |
1 |
def delete(self, session, _id, dry_run=False, not_send_msg=None): |
1291 |
0 |
raise EngineException("Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR) |
1292 |
|
|
1293 |
1 |
def edit(self, session, _id, indata=None, kwargs=None, content=None): |
1294 |
0 |
raise EngineException("Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR) |
1295 |
|
|
1296 |
|
|
1297 |
1 |
class NsiTopic(BaseTopic):
    """Topic handling Network Slice Instance records ("nsis" collection)."""
    topic = "nsis"
    topic_msg = "nsi"
    quota_name = "slice_instances"

    def __init__(self, db, fs, msg, auth):
        BaseTopic.__init__(self, db, fs, msg, auth)
        # nested topic used to create/delete the NS records that compose the slice
        self.nsrTopic = NsrTopic(db, fs, msg, auth)

    @staticmethod
    def _format_ns_request(ns_request):
        """Return a shallow copy of the ns request. Placeholder: request params still TODO."""
        formatted_request = copy(ns_request)
        # TODO: Add request params
        return formatted_request

    @staticmethod
    def _format_addional_params(slice_request):
        """
        Get and format user additional params for NS or VNF
        NOTE: the method name keeps its historic typo ("addional"); renaming could break callers.
        :param slice_request: User instantiation additional parameters
        :return: a formatted copy of additional params or None if not supplied
        :raises EngineException: if a key is not a string or contains '.' or '$'
        """
        additional_params = copy(slice_request.get("additionalParamsForNsi"))
        if additional_params:
            for k, v in additional_params.items():
                if not isinstance(k, str):
                    raise EngineException("Invalid param at additionalParamsForNsi:{}. Only string keys are allowed".
                                          format(k))
                if "." in k or "$" in k:
                    raise EngineException("Invalid param at additionalParamsForNsi:{}. Keys must not contain dots or $".
                                          format(k))
                if isinstance(v, (dict, tuple, list)):
                    # complex values are stored serialized, prefixed so consumers know to yaml-load them
                    additional_params[k] = "!!yaml " + safe_dump(v)
        return additional_params

    def _check_descriptor_dependencies(self, session, descriptor):
        """
        Check that the dependent descriptors exist on a new descriptor or edition
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param descriptor: descriptor to be inserted or edit
        :return: None or raises exception
        """
        if not descriptor.get("nst-ref"):
            return
        nstd_id = descriptor["nst-ref"]
        if not self.get_item_list(session, "nsts", {"id": nstd_id}):
            raise EngineException("Descriptor error at nst-ref='{}' references a non exist nstd".format(nstd_id),
                                  http_code=HTTPStatus.CONFLICT)

    def check_conflict_on_del(self, session, _id, db_content):
        """
        Check that NSI is not instantiated
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param _id: nsi internal id
        :param db_content: The database content of the _id
        :return: None or raises EngineException with the conflict
        """
        if session["force"]:
            return
        nsi = db_content
        if nsi["_admin"].get("nsiState") == "INSTANTIATED":
            raise EngineException("nsi '{}' cannot be deleted because it is in 'INSTANTIATED' state. "
                                  "Launch 'terminate' operation first; or force deletion".format(_id),
                                  http_code=HTTPStatus.CONFLICT)

    def delete_extra(self, session, _id, db_content, not_send_msg=None):
        """
        Deletes associated nsilcmops from database. Deletes associated filesystem.
        Set usageState of nst
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param _id: server internal id
        :param db_content: The database content of the descriptor
        :param not_send_msg: To not send message (False) or store content (list) instead
        :return: None if ok or raises EngineException with the problem
        """
        # Deleting the nsrs belonging to nsir
        nsir = db_content
        for nsrs_detailed_item in nsir["_admin"]["nsrs-detailed-list"]:
            nsr_id = nsrs_detailed_item["nsrId"]
            if nsrs_detailed_item.get("shared"):
                # a shared nsr is kept alive while any OTHER nsi still references it
                _filter = {"_admin.nsrs-detailed-list.ANYINDEX.shared": True,
                           "_admin.nsrs-detailed-list.ANYINDEX.nsrId": nsr_id,
                           "_id.ne": nsir["_id"]}
                nsi = self.db.get_one("nsis", _filter, fail_on_empty=False, fail_on_more=False)
                if nsi:  # another nsi is still using this nsr; do not delete it
                    continue
            try:
                self.nsrTopic.delete(session, nsr_id, dry_run=False, not_send_msg=not_send_msg)
            except (DbException, EngineException) as e:
                # an already-removed nsr is fine; anything else propagates
                if e.http_code == HTTPStatus.NOT_FOUND:
                    pass
                else:
                    raise

        # delete related nsilcmops database entries
        self.db.del_list("nsilcmops", {"netsliceInstanceId": _id})

        # Check and set used NST usage state
        nsir_admin = nsir.get("_admin")
        if nsir_admin and nsir_admin.get("nst-id"):
            # check if used by another NSI
            nsis_list = self.db.get_one("nsis", {"nst-id": nsir_admin["nst-id"]},
                                        fail_on_empty=False, fail_on_more=False)
            if not nsis_list:
                self.db.set_one("nsts", {"_id": nsir_admin["nst-id"]}, {"_admin.usageState": "NOT_IN_USE"})

    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        """
        Creates a new netslice instance record into database. It also creates needed nsrs and vnfrs
        :param rollback: list to append the created items at database in case a rollback must be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: params to be used for the nsir
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :return: the _id of nsi descriptor created at database
        :raises EngineException: on validation errors (422) or any other failure (wrapped with the
            current "step" for context)
        """
        try:
            step = "checking quotas"
            self.check_quota(session)

            step = ""
            slice_request = self._remove_envelop(indata)
            # Override descriptor with query string kwargs
            self._update_input_with_kwargs(slice_request, kwargs)
            self._validate_input_new(slice_request, session["force"])

            # look for nstd
            step = "getting nstd id='{}' from database".format(slice_request.get("nstId"))
            _filter = self._get_project_filter(session)
            _filter["_id"] = slice_request["nstId"]
            nstd = self.db.get_one("nsts", _filter)
            # check NST is not disabled
            step = "checking NST operationalState"
            if nstd["_admin"]["operationalState"] == "DISABLED":
                raise EngineException("nst with id '{}' is DISABLED, and thus cannot be used to create a netslice "
                                      "instance".format(slice_request["nstId"]), http_code=HTTPStatus.CONFLICT)
            del _filter["_id"]
            # NOTE: a second, identical operationalState check that could never trigger (the first one
            # already raises) was removed here as dead code.

            nstd.pop("_admin", None)
            nstd_id = nstd.pop("_id", None)
            nsi_id = str(uuid4())
            step = "filling nsi_descriptor with input data"

            # Creating the NSIR
            nsi_descriptor = {
                "id": nsi_id,
                "name": slice_request["nsiName"],
                "description": slice_request.get("nsiDescription", ""),
                "datacenter": slice_request["vimAccountId"],
                "nst-ref": nstd["id"],
                "instantiation_parameters": slice_request,
                "network-slice-template": nstd,
                "nsr-ref-list": [],
                "vlr-list": [],
                "_id": nsi_id,
                "additionalParamsForNsi": self._format_addional_params(slice_request)
            }

            step = "creating nsi at database"
            self.format_on_new(nsi_descriptor, session["project_id"], make_public=session["public"])
            nsi_descriptor["_admin"]["nsiState"] = "NOT_INSTANTIATED"
            nsi_descriptor["_admin"]["netslice-subnet"] = None
            nsi_descriptor["_admin"]["deployed"] = {}
            nsi_descriptor["_admin"]["deployed"]["RO"] = []
            nsi_descriptor["_admin"]["nst-id"] = nstd_id

            # Creating netslice-vld for the RO.
            step = "creating netslice-vld at database"

            # Building the vlds list to be deployed
            # From netslice descriptors, creating the initial list
            nsi_vlds = []
            for netslice_vlds in get_iterable(nstd.get("netslice-vld")):
                # Getting template Instantiation parameters from NST
                nsi_vld = deepcopy(netslice_vlds)
                nsi_vld["shared-nsrs-list"] = []
                nsi_vld["vimAccountId"] = slice_request["vimAccountId"]
                nsi_vlds.append(nsi_vld)
            nsi_descriptor["_admin"]["netslice-vld"] = nsi_vlds

            # Creating netslice-subnet_record.
            needed_nsds = {}  # cache of already-fetched nsds, keyed by nsd id
            services = []

            # Updating the nstd with the nsd["_id"] associated to the nss -> services list
            for member_ns in nstd["netslice-subnet"]:
                nsd_id = member_ns["nsd-ref"]
                step = "getting nstd id='{}' constituent-nsd='{}' from database".format(
                    member_ns["nsd-ref"], member_ns["id"])
                if nsd_id not in needed_nsds:
                    # Obtain nsd
                    _filter["id"] = nsd_id
                    nsd = self.db.get_one("nsds", _filter, fail_on_empty=True, fail_on_more=True)
                    del _filter["id"]
                    nsd.pop("_admin")
                    needed_nsds[nsd_id] = nsd
                else:
                    nsd = needed_nsds[nsd_id]
                member_ns["_id"] = needed_nsds[nsd_id].get("_id")
                services.append(member_ns)
                step = "filling nsir nsd-id='{}' constituent-nsd='{}' from database".format(
                    member_ns["nsd-ref"], member_ns["id"])

            # creates Network Services records (NSRs)
            step = "creating nsrs at database using NsrTopic.new()"
            ns_params = slice_request.get("netslice-subnet")
            nsrs_list = []
            nsi_netslice_subnet = []
            for service in services:
                # Check if the netslice-subnet is shared and if it is share if the nss exists
                _id_nsr = None
                indata_ns = {}
                # Is the nss shared and instantiated?
                _filter["_admin.nsrs-detailed-list.ANYINDEX.shared"] = True
                _filter["_admin.nsrs-detailed-list.ANYINDEX.nsd-id"] = service["nsd-ref"]
                _filter["_admin.nsrs-detailed-list.ANYINDEX.nss-id"] = service["id"]
                nsi = self.db.get_one("nsis", _filter, fail_on_empty=False, fail_on_more=False)
                if nsi and service.get("is-shared-nss"):
                    # reuse the nsr already created by another slice instance
                    nsrs_detailed_list = nsi["_admin"]["nsrs-detailed-list"]
                    for nsrs_detailed_item in nsrs_detailed_list:
                        if nsrs_detailed_item["nsd-id"] == service["nsd-ref"]:
                            if nsrs_detailed_item["nss-id"] == service["id"]:
                                _id_nsr = nsrs_detailed_item["nsrId"]
                                break
                    for netslice_subnet in nsi["_admin"]["netslice-subnet"]:
                        if netslice_subnet["nss-id"] == service["id"]:
                            indata_ns = netslice_subnet
                            break
                else:
                    indata_ns = {}
                    if service.get("instantiation-parameters"):
                        indata_ns = deepcopy(service["instantiation-parameters"])
                        # del service["instantiation-parameters"]

                    indata_ns["nsdId"] = service["_id"]
                    indata_ns["nsName"] = slice_request.get("nsiName") + "." + service["id"]
                    indata_ns["vimAccountId"] = slice_request.get("vimAccountId")
                    indata_ns["nsDescription"] = service["description"]
                    if slice_request.get("ssh_keys"):
                        indata_ns["ssh_keys"] = slice_request.get("ssh_keys")

                    if ns_params:
                        # user-supplied per-subnet parameters override the defaults built above
                        for ns_param in ns_params:
                            if ns_param.get("id") == service["id"]:
                                copy_ns_param = deepcopy(ns_param)
                                del copy_ns_param["id"]
                                indata_ns.update(copy_ns_param)
                                break

                    # Creates Nsr objects
                    _id_nsr, _ = self.nsrTopic.new(rollback, session, indata_ns, kwargs, headers)
                nsrs_item = {"nsrId": _id_nsr, "shared": service.get("is-shared-nss"), "nsd-id": service["nsd-ref"],
                             "nss-id": service["id"], "nslcmop_instantiate": None}
                indata_ns["nss-id"] = service["id"]
                nsrs_list.append(nsrs_item)
                nsi_netslice_subnet.append(indata_ns)
                nsr_ref = {"nsr-ref": _id_nsr}
                nsi_descriptor["nsr-ref-list"].append(nsr_ref)

            # Adding the nsrs list to the nsi
            nsi_descriptor["_admin"]["nsrs-detailed-list"] = nsrs_list
            nsi_descriptor["_admin"]["netslice-subnet"] = nsi_netslice_subnet
            self.db.set_one("nsts", {"_id": slice_request["nstId"]}, {"_admin.usageState": "IN_USE"})

            # Creating the entry in the database
            self.db.create("nsis", nsi_descriptor)
            rollback.append({"topic": "nsis", "_id": nsi_id})
            return nsi_id, None
        except ValidationError as e:
            # BUGFIX: this clause must precede the generic Exception handler below; it was placed
            # after it and therefore unreachable, so validation errors lost their 422 status code.
            raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
        except Exception as e:  # TODO remove try Except, it is captured at nbi.py
            self.logger.exception("Exception {} at NsiTopic.new()".format(e), exc_info=True)
            raise EngineException("Error {}: {}".format(step, e))

    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        """Direct edition of nsi records is not supported; always raises."""
        raise EngineException("Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
1583 |
|
|
1584 |
|
|
1585 |
1 |
class NsiLcmOpTopic(BaseTopic):
    """Topic handling LCM operations over network slice instances ("nsilcmops" collection)."""
    topic = "nsilcmops"
    topic_msg = "nsi"
    operation_schema = {  # mapping between operation and jsonschema to validate
        "instantiate": nsi_instantiate,
        "terminate": None  # terminate carries no schema-validated input parameters
    }
1592 |
|
|
1593 |
1 |
    def __init__(self, db, fs, msg, auth):
        BaseTopic.__init__(self, db, fs, msg, auth)
        # nested NS-operation topic: each slice operation fans out into one operation per member NS
        self.nsi_NsLcmOpTopic = NsLcmOpTopic(self.db, self.fs, self.msg, self.auth)
1596 |
|
|
1597 |
1 |
    def _check_nsi_operation(self, session, nsir, operation, indata):
        """
        Check that user has enter right parameters for the operation
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param nsir: network slice instance record; its embedded "network-slice-template" is consulted
        :param operation: it can be: instantiate, terminate, action, TODO: update, heal
        :param indata: descriptor with the parameters of the operation
        :return: None
        :raises EngineException: if a requested netslice-subnet id is not part of the template
        """
        nsds = {}  # cache of nsds already fetched from the database, keyed by nsd id
        nstd = nsir["network-slice-template"]

        def check_valid_netslice_subnet_id(nstId):
            # TODO change to vnfR (??)
            for netslice_subnet in nstd["netslice-subnet"]:
                if nstId == netslice_subnet["id"]:
                    nsd_id = netslice_subnet["nsd-ref"]
                    if nsd_id not in nsds:
                        _filter = self._get_project_filter(session)
                        _filter["id"] = nsd_id
                        nsds[nsd_id] = self.db.get_one("nsds", _filter)
                    return nsds[nsd_id]
            else:  # for-else: executed only when no template subnet matched the requested id
                raise EngineException("Invalid parameter nstId='{}' is not one of the "
                                      "nst:netslice-subnet".format(nstId))
        if operation == "instantiate":
            # check the existence of netslice-subnet items
            for in_nst in get_iterable(indata.get("netslice-subnet")):
                check_valid_netslice_subnet_id(in_nst["id"])
1625 |
|
|
1626 |
1 |
def _create_nsilcmop(self, session, netsliceInstanceId, operation, params): |
1627 |
0 |
now = time() |
1628 |
0 |
_id = str(uuid4()) |
1629 |
0 |
nsilcmop = { |
1630 |
|
"id": _id, |
1631 |
|
"_id": _id, |
1632 |
|
"operationState": "PROCESSING", # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK |
1633 |
|
"statusEnteredTime": now, |
1634 |
|
"netsliceInstanceId": netsliceInstanceId, |
1635 |
|
"lcmOperationType": operation, |
1636 |
|
"startTime": now, |
1637 |
|
"isAutomaticInvocation": False, |
1638 |
|
"operationParams": params, |
1639 |
|
"isCancelPending": False, |
1640 |
|
"links": { |
1641 |
|
"self": "/osm/nsilcm/v1/nsi_lcm_op_occs/" + _id, |
1642 |
|
"netsliceInstanceId": "/osm/nsilcm/v1/netslice_instances/" + netsliceInstanceId, |
1643 |
|
} |
1644 |
|
} |
1645 |
0 |
return nsilcmop |
1646 |
|
|
1647 |
1 |
    def add_shared_nsr_2vld(self, nsir, nsr_item):
        """
        Register a shared NSR in the netslice-vlds of the nsir that reference its subnet, then persist.
        :param nsir: network slice instance record (its _admin.netslice-vld entries are modified in place)
        :param nsr_item: entry of _admin.nsrs-detailed-list; its "nsrId" is appended to the
            "shared-nsrs-list" of every matching netslice-vld
        :return: None
        """
        for nst_sb_item in nsir["network-slice-template"].get("netslice-subnet"):
            if nst_sb_item.get("is-shared-nss"):
                # locate the admin subnet record for this shared template subnet
                for admin_subnet_item in nsir["_admin"].get("netslice-subnet"):
                    if admin_subnet_item["nss-id"] == nst_sb_item["id"]:
                        # find every vld whose connection points reference this subnet
                        for admin_vld_item in nsir["_admin"].get("netslice-vld"):
                            for admin_vld_nss_cp_ref_item in admin_vld_item["nss-connection-point-ref"]:
                                if admin_subnet_item["nss-id"] == admin_vld_nss_cp_ref_item["nss-ref"]:
                                    # avoid duplicated entries for the same nsr
                                    if not nsr_item["nsrId"] in admin_vld_item["shared-nsrs-list"]:
                                        admin_vld_item["shared-nsrs-list"].append(nsr_item["nsrId"])
                                    break
        # self.db.set_one("nsis", {"_id": nsir["_id"]}, nsir)
        self.db.set_one("nsis", {"_id": nsir["_id"]}, {"_admin.netslice-vld": nsir["_admin"].get("netslice-vld")})
1660 |
|
|
1661 |
1 |
    def new(self, rollback, session, indata=None, kwargs=None, headers=None):
        """
        Performs a new operation over a ns
        :param rollback: list to append created items at database in case a rollback must to be done
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param indata: descriptor with the parameters of the operation. It must contains among others
            netsliceInstanceId: _id of the nsir to perform the operation
            operation: it can be: instantiate, terminate, action, TODO: update, heal
        :param kwargs: used to override the indata descriptor
        :param headers: http request headers
        :return: id of the nslcmops; (None, None) when terminating a never-instantiated slice
        """
        try:
            # Override descriptor with query string kwargs
            self._update_input_with_kwargs(indata, kwargs)
            operation = indata["lcmOperationType"]
            netsliceInstanceId = indata["netsliceInstanceId"]
            validate_input(indata, self.operation_schema[operation])

            # get nsi from netsliceInstanceId
            _filter = self._get_project_filter(session)
            _filter["_id"] = netsliceInstanceId
            nsir = self.db.get_one("nsis", _filter)
            logging_prefix = "nsi={} {} ".format(netsliceInstanceId, operation)
            del _filter["_id"]

            # initial checking
            if not nsir["_admin"].get("nsiState") or nsir["_admin"]["nsiState"] == "NOT_INSTANTIATED":
                if operation == "terminate" and indata.get("autoremove"):
                    # NSIR must be deleted
                    return None, None  # a none in this case is used to indicate not instantiated. It can be removed
                if operation != "instantiate":
                    raise EngineException("netslice_instance '{}' cannot be '{}' because it is not instantiated".format(
                        netsliceInstanceId, operation), HTTPStatus.CONFLICT)
            else:
                if operation == "instantiate" and not session["force"]:
                    raise EngineException("netslice_instance '{}' cannot be '{}' because it is already instantiated".
                                          format(netsliceInstanceId, operation), HTTPStatus.CONFLICT)

            # Creating all the NS_operation (nslcmop)
            # Get service list from db
            nsrs_list = nsir["_admin"]["nsrs-detailed-list"]
            nslcmops = []
            # nslcmops_item = None
            for index, nsr_item in enumerate(nsrs_list):
                nsr_id = nsr_item["nsrId"]
                if nsr_item.get("shared"):
                    # look for another nsi that shares this nsr and already holds an instantiate op
                    _filter["_admin.nsrs-detailed-list.ANYINDEX.shared"] = True
                    _filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_id
                    _filter["_admin.nsrs-detailed-list.ANYINDEX.nslcmop_instantiate.ne"] = None
                    _filter["_id.ne"] = netsliceInstanceId
                    nsi = self.db.get_one("nsis", _filter, fail_on_empty=False, fail_on_more=False)
                    if operation == "terminate":
                        # clear our stored instantiate op for this shared nsr
                        _update = {"_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(index): None}
                        self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
                        if nsi:  # other nsi is using this nsr and it needs this nsr instantiated
                            continue  # do not create nsilcmop
                    else:  # instantiate
                        # looks the first nsi fulfilling the conditions but not being the current NSIR
                        if nsi:
                            nsi_nsr_item = next(n for n in nsi["_admin"]["nsrs-detailed-list"] if
                                                n["nsrId"] == nsr_id and n["shared"] and
                                                n["nslcmop_instantiate"])
                            self.add_shared_nsr_2vld(nsir, nsr_item)
                            nslcmops.append(nsi_nsr_item["nslcmop_instantiate"])
                            _update = {"_admin.nsrs-detailed-list.{}".format(index): nsi_nsr_item}
                            self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
                            # continue to not create nslcmop since nsrs is shared and nsrs was created
                            continue
                        else:
                            self.add_shared_nsr_2vld(nsir, nsr_item)

                # create operation
                try:
                    indata_ns = {
                        "lcmOperationType": operation,
                        "nsInstanceId": nsr_id,
                        # Including netslice_id in the ns instantiate Operation
                        "netsliceInstanceId": netsliceInstanceId,
                    }
                    if operation == "instantiate":
                        # reuse the instantiation params stored when the nsr was created
                        service = self.db.get_one("nsrs", {"_id": nsr_id})
                        indata_ns.update(service["instantiate_params"])

                    # Creating NS_LCM_OP with the flag slice_object=True to not trigger the service instantiation
                    # message via kafka bus
                    nslcmop, _ = self.nsi_NsLcmOpTopic.new(rollback, session, indata_ns, None, headers,
                                                           slice_object=True)
                    nslcmops.append(nslcmop)
                    if operation == "instantiate":
                        _update = {"_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(index): nslcmop}
                        self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
                except (DbException, EngineException) as e:
                    # a missing nsr is skipped (logged); any other error aborts the slice operation
                    if e.http_code == HTTPStatus.NOT_FOUND:
                        self.logger.info(logging_prefix + "skipping NS={} because not found".format(nsr_id))
                        pass
                    else:
                        raise

            # Creates nsilcmop
            indata["nslcmops_ids"] = nslcmops
            self._check_nsi_operation(session, nsir, operation, indata)

            nsilcmop_desc = self._create_nsilcmop(session, netsliceInstanceId, operation, indata)
            self.format_on_new(nsilcmop_desc, session["project_id"], make_public=session["public"])
            _id = self.db.create("nsilcmops", nsilcmop_desc)
            rollback.append({"topic": "nsilcmops", "_id": _id})
            self.msg.write("nsi", operation, nsilcmop_desc)
            return _id, None
        except ValidationError as e:
            raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
1772 |
|
|
1773 |
1 |
def delete(self, session, _id, dry_run=False, not_send_msg=None): |
1774 |
0 |
raise EngineException("Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR) |
1775 |
|
|
1776 |
1 |
def edit(self, session, _id, indata=None, kwargs=None, content=None): |
1777 |
0 |
raise EngineException("Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR) |