# -*- coding: utf-8 -*-

##
# Copyright 2020 Telefonica Investigacion y Desarrollo, S.A.U.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##

from http import HTTPStatus
import logging
from random import choice as random_choice
from threading import Lock
from time import time
from traceback import format_exc as traceback_format_exc
from uuid import uuid4

from cryptography.hazmat.backends import default_backend as crypto_default_backend
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from jinja2 import (
    Environment,
    select_autoescape,
    StrictUndefined,
    TemplateError,
    TemplateNotFound,
    UndefinedError,
)
from osm_common import (
    dbmemory,
    dbmongo,
    fslocal,
    fsmongo,
    msgkafka,
    msglocal,
    version as common_version,
)
from osm_common.dbbase import DbException
from osm_common.fsbase import FsException
from osm_common.msgbase import MsgException
from osm_ng_ro.ns_thread import deep_get, NsWorker, NsWorkerException
from osm_ng_ro.validation import deploy_schema, validate_input

__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
min_common_version = "0.1.16"


class NsException(Exception):
    def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST):
        self.http_code = http_code
        super(Exception, self).__init__(message)

def get_process_id():
    """
    Obtain a unique ID for this process. If running from inside docker, it will get the docker ID. If not, it
    will provide a random one.
    :return: Obtained ID
    """
    # Try getting the docker id. If it fails, fall back to a random id
    try:
        with open("/proc/self/cgroup", "r") as f:
            text_id_ = f.readline()
            _, _, text_id = text_id_.rpartition("/")
            text_id = text_id.replace("\n", "")[:12]

        if text_id:
            return text_id
    except Exception as error:
        logging.exception(f"{error} occurred while getting process id")

    # Return a random id
    return "".join(random_choice("0123456789abcdef") for _ in range(12))
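
# Illustrative note for get_process_id() (assumed cgroup v1 layout): inside a container,
# a /proc/self/cgroup line such as "12:devices:/docker/<container-id>" yields the first 12
# characters of the container id; otherwise a random 12-hex-character string is returned.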


def versiontuple(v):
    """Utility to compare dot-separated versions. Fills with zeros for proper number comparison."""
    filled = []

    for point in v.split("."):
        filled.append(point.zfill(8))

    return tuple(filled)
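
# Example: versiontuple("0.1.16") -> ("00000000", "00000001", "00000016"), so the zero-padded
# tuples compare correctly, e.g. versiontuple("0.1.16") < versiontuple("0.2.0").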


class Ns(object):
    def __init__(self):
        self.db = None
        self.fs = None
        self.msg = None
        self.config = None
        # self.operations = None
        self.logger = None
        # ^ Getting logger inside method self.start because parent logger (ro) is not available yet.
        # If done now, it would not be linked to the parent and would not get its handler and level.
        self.map_topic = {}
        self.write_lock = None
        self.vims_assigned = {}
        self.next_worker = 0
        self.plugins = {}
        self.workers = []

    def init_db(self, target_version):
        pass

    def start(self, config):
        """
        Connect to database, filesystem storage, and messaging
        :param config: two-level dictionary with the configuration. Top level should contain 'database',
            'storage' and 'message' sections, each one with its driver configuration
        :return: None
        """
        self.config = config
        self.config["process_id"] = get_process_id()  # used for HA identity
        self.logger = logging.getLogger("ro.ns")

        # check right version of common
        if versiontuple(common_version) < versiontuple(min_common_version):
            raise NsException(
                "Not compatible osm/common version '{}'. Needed '{}' or higher".format(
                    common_version, min_common_version
                )
            )

        try:
            if not self.db:
                if config["database"]["driver"] == "mongo":
                    self.db = dbmongo.DbMongo()
                    self.db.db_connect(config["database"])
                elif config["database"]["driver"] == "memory":
                    self.db = dbmemory.DbMemory()
                    self.db.db_connect(config["database"])
                else:
                    raise NsException(
                        "Invalid configuration param '{}' at '[database]':'driver'".format(
                            config["database"]["driver"]
                        )
                    )

            if not self.fs:
                if config["storage"]["driver"] == "local":
                    self.fs = fslocal.FsLocal()
                    self.fs.fs_connect(config["storage"])
                elif config["storage"]["driver"] == "mongo":
                    self.fs = fsmongo.FsMongo()
                    self.fs.fs_connect(config["storage"])
                elif config["storage"]["driver"] is None:
                    pass
                else:
                    raise NsException(
                        "Invalid configuration param '{}' at '[storage]':'driver'".format(
                            config["storage"]["driver"]
                        )
                    )

            if not self.msg:
                if config["message"]["driver"] == "local":
                    self.msg = msglocal.MsgLocal()
                    self.msg.connect(config["message"])
                elif config["message"]["driver"] == "kafka":
                    self.msg = msgkafka.MsgKafka()
                    self.msg.connect(config["message"])
                else:
                    raise NsException(
                        "Invalid configuration param '{}' at '[message]':'driver'".format(
                            config["message"]["driver"]
                        )
                    )

            # TODO load workers to deal with existing database tasks

            self.write_lock = Lock()
        except (DbException, FsException, MsgException) as e:
            raise NsException(str(e), http_code=e.http_code)
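
    # Illustrative configuration shape expected by start() (a minimal sketch; only the keys this
    # class actually reads are shown; any other driver value raises NsException):
    # {
    #     "database": {"driver": "mongo" | "memory", ...},
    #     "storage": {"driver": "local" | "mongo" | None, ...},
    #     "message": {"driver": "local" | "kafka", ...},
    #     "global": {"server.ns_threads": <max number of worker threads>},
    # }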

    def get_assigned_vims(self):
        return list(self.vims_assigned.keys())

    def stop(self):
        try:
            if self.db:
                self.db.db_disconnect()

            if self.fs:
                self.fs.fs_disconnect()

            if self.msg:
                self.msg.disconnect()

            self.write_lock = None
        except (DbException, FsException, MsgException) as e:
            raise NsException(str(e), http_code=e.http_code)

        for worker in self.workers:
            worker.insert_task(("terminate",))

    def _create_worker(self):
        """
        Look for a worker thread in idle status. If not found, it creates one unless the number of threads reaches
        the limit set by the 'server.ns_threads' configuration. If the limit is reached, it just assigns one of the
        existing threads.
        Return the index of the assigned worker thread. Worker threads are stored at self.workers
        """
        # Look for a thread in idle status
        worker_id = next(
            (
                i
                for i in range(len(self.workers))
                if self.workers[i] and self.workers[i].idle
            ),
            None,
        )

        if worker_id is not None:
            # unset idle status to avoid race conditions
            self.workers[worker_id].idle = False
        else:
            worker_id = len(self.workers)

            if worker_id < self.config["global"]["server.ns_threads"]:
                # create a new worker
                self.workers.append(
                    NsWorker(worker_id, self.config, self.plugins, self.db)
                )
                self.workers[worker_id].start()
            else:
                # reached maximum number of threads, assign VIM to an existing one
                worker_id = self.next_worker
                self.next_worker = (self.next_worker + 1) % self.config["global"][
                    "server.ns_threads"
                ]

        return worker_id
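
        # Illustrative example: with config["global"]["server.ns_threads"] = 2 and both workers busy,
        # further assignments are handed out round-robin (worker 0, then worker 1, then 0 again).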

    def assign_vim(self, target_id):
        with self.write_lock:
            return self._assign_vim(target_id)

    def _assign_vim(self, target_id):
        if target_id not in self.vims_assigned:
            worker_id = self.vims_assigned[target_id] = self._create_worker()
            self.workers[worker_id].insert_task(("load_vim", target_id))

    def reload_vim(self, target_id):
        # send reload_vim to the thread working with this VIM and inform all that a VIM has been changed,
        # this is because database VIM information is cached for threads working with SDN
        with self.write_lock:
            for worker in self.workers:
                if worker and not worker.idle:
                    worker.insert_task(("reload_vim", target_id))

    def unload_vim(self, target_id):
        with self.write_lock:
            return self._unload_vim(target_id)

    def _unload_vim(self, target_id):
        if target_id in self.vims_assigned:
            worker_id = self.vims_assigned[target_id]
            self.workers[worker_id].insert_task(("unload_vim", target_id))
            del self.vims_assigned[target_id]

    def check_vim(self, target_id):
        with self.write_lock:
            if target_id in self.vims_assigned:
                worker_id = self.vims_assigned[target_id]
            else:
                worker_id = self._create_worker()

            worker = self.workers[worker_id]
            worker.insert_task(("check_vim", target_id))

    def unload_unused_vims(self):
        with self.write_lock:
            vims_to_unload = []

            for target_id in self.vims_assigned:
                if not self.db.get_one(
                    "ro_tasks",
                    q_filter={
                        "target_id": target_id,
                        "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
                    },
                    fail_on_empty=False,
                ):
                    vims_to_unload.append(target_id)

            for target_id in vims_to_unload:
                self._unload_vim(target_id)

    def _get_cloud_init(self, where):
        """
        Not used as cloud init content is provided in the http body. This method reads cloud init from a file
        :param where: can be 'vnfd_id:file:file_name' or 'vnfd_id:vdu:vdu_index'
        :return: the cloud init content
        """
        vnfd_id, _, other = where.partition(":")
        _type, _, name = other.partition(":")
        vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

        if _type == "file":
            base_folder = vnfd["_admin"]["storage"]
            cloud_init_file = "{}/{}/cloud_init/{}".format(
                base_folder["folder"], base_folder["pkg-dir"], name
            )

            if not self.fs:
                raise NsException(
                    "Cannot read file '{}'. Filesystem not loaded, change configuration at storage.driver".format(
                        cloud_init_file
                    )
                )

            with self.fs.file_open(cloud_init_file, "r") as ci_file:
                cloud_init_content = ci_file.read()
        elif _type == "vdu":
            cloud_init_content = vnfd["vdu"][int(name)]["cloud-init"]
        else:
            raise NsException("Mismatch descriptor for cloud init: {}".format(where))

        return cloud_init_content
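
    # Illustrative 'where' values (placeholders, not real ids): "<vnfd-id>:file:cloud-config.txt"
    # reads <folder>/<pkg-dir>/cloud_init/cloud-config.txt through self.fs, while "<vnfd-id>:vdu:0"
    # returns the "cloud-init" field of the first vdu of that VNFD.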

    def _parse_jinja2(self, cloud_init_content, params, context):
        try:
            env = Environment(
                undefined=StrictUndefined,
                autoescape=select_autoescape(default_for_string=True, default=True),
            )
            template = env.from_string(cloud_init_content)

            return template.render(params or {})
        except UndefinedError as e:
            raise NsException(
                "Variable '{}' defined at vnfd='{}' must be provided in the instantiation parameters "
                "inside the 'additionalParamsForVnf' block".format(e, context)
            )
        except (TemplateError, TemplateNotFound) as e:
            raise NsException(
                "Error parsing Jinja2 to cloud-init content at vnfd='{}': {}".format(
                    context, e
                )
            )
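
    # Illustrative example for _parse_jinja2() (hypothetical template and params):
    #   _parse_jinja2("#cloud-config\nhostname: {{ hostname }}", {"hostname": "vm1"}, "myvnfd")
    #   returns "#cloud-config\nhostname: vm1"; leaving 'hostname' out of the params would raise
    #   NsException through the UndefinedError branch above.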

    def _create_db_ro_nsrs(self, nsr_id, now):
        try:
            key = rsa.generate_private_key(
                backend=crypto_default_backend(), public_exponent=65537, key_size=2048
            )
            private_key = key.private_bytes(
                crypto_serialization.Encoding.PEM,
                crypto_serialization.PrivateFormat.PKCS8,
                crypto_serialization.NoEncryption(),
            )
            public_key = key.public_key().public_bytes(
                crypto_serialization.Encoding.OpenSSH,
                crypto_serialization.PublicFormat.OpenSSH,
            )
            private_key = private_key.decode("utf8")
            # Change first line because Paramiko needs an explicit start with 'BEGIN RSA PRIVATE KEY'
            i = private_key.find("\n")
            private_key = "-----BEGIN RSA PRIVATE KEY-----" + private_key[i:]
            public_key = public_key.decode("utf8")
        except Exception as e:
            raise NsException("Cannot create ssh-keys: {}".format(e))

        schema_version = "1.1"
        private_key_encrypted = self.db.encrypt(
            private_key, schema_version=schema_version, salt=nsr_id
        )
        db_content = {
            "_id": nsr_id,
            "_admin": {
                "created": now,
                "modified": now,
                "schema_version": schema_version,
            },
            "public_key": public_key,
            "private_key": private_key_encrypted,
            "actions": [],
        }
        self.db.create("ro_nsrs", db_content)

        return db_content

    def deploy(self, session, indata, version, nsr_id, *args, **kwargs):
        self.logger.debug("ns.deploy nsr_id={} indata={}".format(nsr_id, indata))
        validate_input(indata, deploy_schema)
        action_id = indata.get("action_id", str(uuid4()))
        task_index = 0
        # get current deployment
        db_nsr_update = {}  # update operation on nsrs
        db_vnfrs_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        nb_ro_tasks = 0  # for logging
        vdu2cloud_init = indata.get("cloud_init_content") or {}
        step = ""
        logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
        self.logger.debug(logging_text + "Enter")

        try:
            step = "Getting ns and vnfr record from db"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            db_new_tasks = []
            tasks_by_target_record_id = {}
            # read from db: vnf's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            if not db_vnfrs_list:
                raise NsException("Cannot obtain associated VNF for ns")

            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
                db_vnfrs_update[vnfr["_id"]] = {}

            now = time()
            db_ro_nsr = self.db.get_one("ro_nsrs", {"_id": nsr_id}, fail_on_empty=False)

            if not db_ro_nsr:
                db_ro_nsr = self._create_db_ro_nsrs(nsr_id, now)

            ro_nsr_public_key = db_ro_nsr["public_key"]

            # check that action_id is not in the list of actions. Suffixed with :index
            if action_id in db_ro_nsr["actions"]:
                index = 1

                while True:
                    new_action_id = "{}:{}".format(action_id, index)

                    if new_action_id not in db_ro_nsr["actions"]:
                        action_id = new_action_id
                        self.logger.debug(
                            logging_text
                            + "Changing action_id in use to {}".format(action_id)
                        )
                        break

                    index += 1
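
            # Illustrative example: if "abc123" is already present in db_ro_nsr["actions"], the
            # action id in use becomes "abc123:1" (then "abc123:2", ...) until a free suffix is found.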

            def _create_task(
                target_id,
                item,
                action,
                target_record,
                target_record_id,
                extra_dict=None,
            ):
                nonlocal task_index
                nonlocal action_id
                nonlocal nsr_id

                task = {
                    "target_id": target_id,  # it will be removed before pushing at database
                    "action_id": action_id,
                    "nsr_id": nsr_id,
                    "task_id": "{}:{}".format(action_id, task_index),
                    "status": "SCHEDULED",
                    "action": action,
                    "item": item,
                    "target_record": target_record,
                    "target_record_id": target_record_id,
                }

                if extra_dict:
                    task.update(extra_dict)  # params, find_params, depends_on

                task_index += 1

                return task

            def _create_ro_task(target_id, task):
                nonlocal action_id
                nonlocal task_index
                nonlocal now

                _id = task["task_id"]
                db_ro_task = {
                    "_id": _id,
                    "locked_by": None,
                    "locked_at": 0.0,
                    "target_id": target_id,
                    "vim_info": {
                        "created": False,
                        "created_items": None,
                        "vim_id": None,
                        "vim_name": None,
                        "vim_status": None,
                        "vim_details": None,
                        "refresh_at": None,
                    },
                    "modified_at": now,
                    "created_at": now,
                    "to_check_at": now,
                    "tasks": [task],
                }

                return db_ro_task

            def _process_image_params(target_image, vim_info, target_record_id):
                find_params = {}

                if target_image.get("image"):
                    find_params["filter_dict"] = {"name": target_image.get("image")}

                if target_image.get("vim_image_id"):
                    find_params["filter_dict"] = {
                        "id": target_image.get("vim_image_id")
                    }

                if target_image.get("image_checksum"):
                    find_params["filter_dict"] = {
                        "checksum": target_image.get("image_checksum")
                    }

                return {"find_params": find_params}
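
            # Note: each check above overwrites find_params["filter_dict"], so if an image provides
            # several of "image", "vim_image_id" and "image_checksum", only the last one checked
            # (the checksum) ends up in the filter.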

            def _process_flavor_params(target_flavor, vim_info, target_record_id):
                def _get_resource_allocation_params(quota_descriptor):
                    """
                    read the quota_descriptor from vnfd and fetch the resource allocation properties from the
                    descriptor object
                    :param quota_descriptor: cpu/mem/vif/disk-io quota descriptor
                    :return: quota params for limit, reserve, shares from the descriptor object
                    """
                    quota = {}

                    if quota_descriptor.get("limit"):
                        quota["limit"] = int(quota_descriptor["limit"])

                    if quota_descriptor.get("reserve"):
                        quota["reserve"] = int(quota_descriptor["reserve"])

                    if quota_descriptor.get("shares"):
                        quota["shares"] = int(quota_descriptor["shares"])

                    return quota
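
                # Illustrative example: a quota descriptor {"limit": "10000", "reserve": "5000",
                # "shares": "2000"} maps to {"limit": 10000, "reserve": 5000, "shares": 2000}.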

                nonlocal indata

                flavor_data = {
                    "disk": int(target_flavor["storage-gb"]),
                    "ram": int(target_flavor["memory-mb"]),
                    "vcpus": int(target_flavor["vcpu-count"]),
                }
                numa = {}
                extended = {}

                target_vdur = None
                for vnf in indata.get("vnf", []):
                    for vdur in vnf.get("vdur", []):
                        if vdur.get("ns-flavor-id") == target_flavor["id"]:
                            target_vdur = vdur

                for storage in target_vdur.get("virtual-storages", []):
                    if (
                        storage.get("type-of-storage")
                        == "etsi-nfv-descriptors:ephemeral-storage"
                    ):
                        flavor_data["ephemeral"] = int(
                            storage.get("size-of-storage", 0)
                        )
                    elif (
                        storage.get("type-of-storage")
                        == "etsi-nfv-descriptors:swap-storage"
                    ):
                        flavor_data["swap"] = int(storage.get("size-of-storage", 0))

                if target_flavor.get("guest-epa"):
                    extended = {}
                    epa_vcpu_set = False

                    if target_flavor["guest-epa"].get("numa-node-policy"):
                        numa_node_policy = target_flavor["guest-epa"].get(
                            "numa-node-policy"
                        )

                        if numa_node_policy.get("node"):
                            numa_node = numa_node_policy["node"][0]

                            if numa_node.get("num-cores"):
                                numa["cores"] = numa_node["num-cores"]
                                epa_vcpu_set = True

                            if numa_node.get("paired-threads"):
                                if numa_node["paired-threads"].get(
                                    "num-paired-threads"
                                ):
                                    numa["paired-threads"] = int(
                                        numa_node["paired-threads"][
                                            "num-paired-threads"
                                        ]
                                    )
                                    epa_vcpu_set = True

                                if len(
                                    numa_node["paired-threads"].get("paired-thread-ids")
                                ):
                                    numa["paired-threads-id"] = []

                                    for pair in numa_node["paired-threads"][
                                        "paired-thread-ids"
                                    ]:
                                        numa["paired-threads-id"].append(
                                            (
                                                str(pair["thread-a"]),
                                                str(pair["thread-b"]),
                                            )
                                        )

                            if numa_node.get("num-threads"):
                                numa["threads"] = int(numa_node["num-threads"])
                                epa_vcpu_set = True

                            if numa_node.get("memory-mb"):
                                numa["memory"] = max(
                                    int(numa_node["memory-mb"] / 1024), 1
                                )

                    if target_flavor["guest-epa"].get("mempage-size"):
                        extended["mempage-size"] = target_flavor["guest-epa"].get(
                            "mempage-size"
                        )

                    if (
                        target_flavor["guest-epa"].get("cpu-pinning-policy")
                        and not epa_vcpu_set
                    ):
                        if (
                            target_flavor["guest-epa"]["cpu-pinning-policy"]
                            == "DEDICATED"
                        ):
                            if (
                                target_flavor["guest-epa"].get(
                                    "cpu-thread-pinning-policy"
                                )
                                and target_flavor["guest-epa"][
                                    "cpu-thread-pinning-policy"
                                ]
                                != "PREFER"
                            ):
                                numa["cores"] = max(flavor_data["vcpus"], 1)
                            else:
                                numa["threads"] = max(flavor_data["vcpus"], 1)

                            epa_vcpu_set = True

                    if target_flavor["guest-epa"].get("cpu-quota") and not epa_vcpu_set:
                        cpuquota = _get_resource_allocation_params(
                            target_flavor["guest-epa"].get("cpu-quota")
                        )

                        if cpuquota:
                            extended["cpu-quota"] = cpuquota

                    if target_flavor["guest-epa"].get("mem-quota"):
                        vduquota = _get_resource_allocation_params(
                            target_flavor["guest-epa"].get("mem-quota")
                        )

                        if vduquota:
                            extended["mem-quota"] = vduquota

                    if target_flavor["guest-epa"].get("disk-io-quota"):
                        diskioquota = _get_resource_allocation_params(
                            target_flavor["guest-epa"].get("disk-io-quota")
                        )

                        if diskioquota:
                            extended["disk-io-quota"] = diskioquota

                    if target_flavor["guest-epa"].get("vif-quota"):
                        vifquota = _get_resource_allocation_params(
                            target_flavor["guest-epa"].get("vif-quota")
                        )

                        if vifquota:
                            extended["vif-quota"] = vifquota

                if numa:
                    extended["numas"] = [numa]

                if extended:
                    flavor_data["extended"] = extended

                extra_dict = {"find_params": {"flavor_data": flavor_data}}
                flavor_data_name = flavor_data.copy()
                flavor_data_name["name"] = target_flavor["name"]
                extra_dict["params"] = {"flavor_data": flavor_data_name}

                return extra_dict
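
            # Illustrative example (assuming one vdur references this flavor and defines no
            # ephemeral or swap storage): a target flavor {"id": "f1", "name": "small",
            # "storage-gb": "10", "memory-mb": "1024", "vcpu-count": "2"} without guest-epa yields
            # find_params {"flavor_data": {"disk": 10, "ram": 1024, "vcpus": 2}} and
            # params {"flavor_data": {"disk": 10, "ram": 1024, "vcpus": 2, "name": "small"}}.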

            def _process_affinity_group_params(
                target_affinity_group, vim_info, target_record_id
            ):
                extra_dict = {}

                affinity_group_data = {
                    "name": target_affinity_group["name"],
                    "type": target_affinity_group["type"],
                    "scope": target_affinity_group["scope"],
                }

                if target_affinity_group.get("vim-affinity-group-id"):
                    affinity_group_data[
                        "vim-affinity-group-id"
                    ] = target_affinity_group["vim-affinity-group-id"]

                extra_dict["params"] = {
                    "affinity_group_data": affinity_group_data,
                }

                return extra_dict

            def _ip_profile_2_ro(ip_profile):
                if not ip_profile:
                    return None

                ro_ip_profile = {
                    "ip_version": "IPv4"
                    if "v4" in ip_profile.get("ip-version", "ipv4")
                    else "IPv6",
                    "subnet_address": ip_profile.get("subnet-address"),
                    "gateway_address": ip_profile.get("gateway-address"),
                    "dhcp_enabled": ip_profile.get("dhcp-params", {}).get(
                        "enabled", False
                    ),
                    "dhcp_start_address": ip_profile.get("dhcp-params", {}).get(
                        "start-address", None
                    ),
                    "dhcp_count": ip_profile.get("dhcp-params", {}).get("count", None),
                }

                if ip_profile.get("dns-server"):
                    ro_ip_profile["dns_address"] = ";".join(
                        [v["address"] for v in ip_profile["dns-server"]]
                    )

                if ip_profile.get("security-group"):
                    ro_ip_profile["security_group"] = ip_profile["security-group"]

                return ro_ip_profile
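
            # Illustrative example: an OSM ip-profile {"ip-version": "ipv4",
            # "subnet-address": "192.168.10.0/24", "gateway-address": "192.168.10.1",
            # "dns-server": [{"address": "8.8.8.8"}]} becomes {"ip_version": "IPv4",
            # "subnet_address": "192.168.10.0/24", "gateway_address": "192.168.10.1",
            # "dhcp_enabled": False, "dhcp_start_address": None, "dhcp_count": None,
            # "dns_address": "8.8.8.8"}.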

            def _process_net_params(target_vld, vim_info, target_record_id):
                nonlocal indata
                extra_dict = {}

                if vim_info.get("sdn"):
                    # vnf_preffix = "vnfrs:{}".format(vnfr_id)
                    # ns_preffix = "nsrs:{}".format(nsr_id)
                    # remove the ending ".sdn"
                    vld_target_record_id, _, _ = target_record_id.rpartition(".")
                    extra_dict["params"] = {
                        k: vim_info[k]
                        for k in ("sdn-ports", "target_vim", "vlds", "type")
                        if vim_info.get(k)
                    }

                    # TODO needed to add target_id in the dependency.
                    if vim_info.get("target_vim"):
                        extra_dict["depends_on"] = [
                            vim_info.get("target_vim") + " " + vld_target_record_id
                        ]

                    return extra_dict

                if vim_info.get("vim_network_name"):
                    extra_dict["find_params"] = {
                        "filter_dict": {"name": vim_info.get("vim_network_name")}
                    }
                elif vim_info.get("vim_network_id"):
                    extra_dict["find_params"] = {
                        "filter_dict": {"id": vim_info.get("vim_network_id")}
                    }
                elif target_vld.get("mgmt-network"):
                    extra_dict["find_params"] = {"mgmt": True, "name": target_vld["id"]}
                else:
                    # create
                    extra_dict["params"] = {
                        "net_name": "{}-{}".format(
                            indata["name"][:16],
                            target_vld.get("name", target_vld["id"])[:16],
                        ),
                        "ip_profile": _ip_profile_2_ro(vim_info.get("ip_profile")),
                        "provider_network_profile": vim_info.get("provider_network"),
                    }

                    if not target_vld.get("underlay"):
                        extra_dict["params"]["net_type"] = "bridge"
                    else:
                        extra_dict["params"]["net_type"] = (
                            "ptp" if target_vld.get("type") == "ELINE" else "data"
                        )

                return extra_dict
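
            # Illustrative examples: if vim_info carries vim_network_name "mgmt", the task only gets
            # find_params {"filter_dict": {"name": "mgmt"}} (reuse an existing VIM network); a plain
            # vld named "datanet" in an NS named "myns" instead gets creation params with
            # net_name "myns-datanet" and net_type "bridge" ("ptp"/"data" when the vld is an underlay).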

            def _process_vdu_params(target_vdu, vim_info, target_record_id):
                nonlocal vnfr_id
                nonlocal nsr_id
                nonlocal indata
                nonlocal vnfr
                nonlocal vdu2cloud_init
                nonlocal tasks_by_target_record_id

                vnf_preffix = "vnfrs:{}".format(vnfr_id)
                ns_preffix = "nsrs:{}".format(nsr_id)
                image_text = ns_preffix + ":image." + target_vdu["ns-image-id"]
                flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
                extra_dict = {"depends_on": [image_text, flavor_text]}
                net_list = []

                # If the position info is provided for all the interfaces, they will be sorted
                # by position number in ascending order.
                if all(
                    i.get("position") + 1
                    for i in target_vdu["interfaces"]
                    if i.get("position") is not None
                ):
                    sorted_interfaces = sorted(
                        target_vdu["interfaces"],
                        key=lambda x: (x.get("position") is None, x.get("position")),
                    )
                    target_vdu["interfaces"] = sorted_interfaces

                # If the position info is provided for some interfaces but not all of them, the
                # interfaces with a specific position number are placed at that position and the
                # positions of the remaining ones are not taken into account.
                else:
                    if any(
                        i.get("position") + 1
                        for i in target_vdu["interfaces"]
                        if i.get("position") is not None
                    ):
                        n = len(target_vdu["interfaces"])
                        sorted_interfaces = [-1] * n
                        k, m = 0, 0
                        while k < n:
                            if target_vdu["interfaces"][k].get("position"):
                                idx = target_vdu["interfaces"][k]["position"]
                                sorted_interfaces[idx - 1] = target_vdu["interfaces"][k]
                            k += 1
                        while m < n:
                            if not target_vdu["interfaces"][m].get("position"):
                                idy = sorted_interfaces.index(-1)
                                sorted_interfaces[idy] = target_vdu["interfaces"][m]
                            m += 1

                        target_vdu["interfaces"] = sorted_interfaces
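
                # Illustrative example: interfaces with "position" values [2, None, 1] end up ordered
                # as position 1, position 2, and then the interface without a position.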

                # If the position info is not provided for the interfaces, interfaces will be attached
                # according to the order in the VNFD.
                for iface_index, interface in enumerate(target_vdu["interfaces"]):
                    if interface.get("ns-vld-id"):
                        net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
                    elif interface.get("vnf-vld-id"):
                        net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"]
                    else:
                        self.logger.error(
                            "Interface {} from vdu {} not connected to any vld".format(
                                iface_index, target_vdu["vdu-name"]
                            )
                        )

                        continue  # interface not connected to any vld

                    extra_dict["depends_on"].append(net_text)

                    if "port-security-enabled" in interface:
                        interface["port_security"] = interface.pop(
                            "port-security-enabled"
                        )

                    if "port-security-disable-strategy" in interface:
                        interface["port_security_disable_strategy"] = interface.pop(
                            "port-security-disable-strategy"
                        )

                    net_item = {
                        x: v
                        for x, v in interface.items()
                        if x
                        in (
                            "name",
                            "vpci",
                            "port_security",
                            "port_security_disable_strategy",
                            "floating_ip",
                        )
                    }
                    net_item["net_id"] = "TASK-" + net_text
                    net_item["type"] = "virtual"

                    # TODO mac_address: used for SR-IOV ifaces #TODO for other types
                    # TODO floating_ip: True/False (or it can be None)
                    if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
                        # mark the net create task as type data
                        if deep_get(
                            tasks_by_target_record_id, net_text, "params", "net_type"
                        ):
                            tasks_by_target_record_id[net_text]["params"][
                                "net_type"
                            ] = "data"

                        net_item["use"] = "data"
                        net_item["model"] = interface["type"]
                        net_item["type"] = interface["type"]
                    elif (
                        interface.get("type") == "OM-MGMT"
                        or interface.get("mgmt-interface")
                        or interface.get("mgmt-vnf")
                    ):
                        net_item["use"] = "mgmt"
                    else:
                        # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
                        net_item["use"] = "bridge"
                        net_item["model"] = interface.get("type")

                    if interface.get("ip-address"):
                        net_item["ip_address"] = interface["ip-address"]

                    if interface.get("mac-address"):
                        net_item["mac_address"] = interface["mac-address"]

                    net_list.append(net_item)

                    if interface.get("mgmt-vnf"):
                        extra_dict["mgmt_vnf_interface"] = iface_index
                    elif interface.get("mgmt-interface"):
                        extra_dict["mgmt_vdu_interface"] = iface_index

                # cloud config
                cloud_config = {}

                if target_vdu.get("cloud-init"):
                    if target_vdu["cloud-init"] not in vdu2cloud_init:
                        vdu2cloud_init[target_vdu["cloud-init"]] = self._get_cloud_init(
                            target_vdu["cloud-init"]
                        )

                    cloud_content_ = vdu2cloud_init[target_vdu["cloud-init"]]
                    cloud_config["user-data"] = self._parse_jinja2(
                        cloud_content_,
                        target_vdu.get("additionalParams"),
                        target_vdu["cloud-init"],
                    )

                if target_vdu.get("boot-data-drive"):
                    cloud_config["boot-data-drive"] = target_vdu.get("boot-data-drive")

                ssh_keys = []

                if target_vdu.get("ssh-keys"):
                    ssh_keys += target_vdu.get("ssh-keys")

                if target_vdu.get("ssh-access-required"):
                    ssh_keys.append(ro_nsr_public_key)

                if ssh_keys:
                    cloud_config["key-pairs"] = ssh_keys

                persistent_root_disk = {}
                disk_list = []
                vnfd_id = vnfr["vnfd-id"]
                vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                for vdu in vnfd.get("vdu", ()):
                    if vdu["name"] == target_vdu["vdu-name"]:
                        for vsd in vnfd.get("virtual-storage-desc", ()):
                            if (
                                vsd.get("id")
                                == vdu.get("virtual-storage-desc", [[]])[0]
                            ):
                                root_disk = vsd
                                if root_disk.get(
                                    "type-of-storage"
                                ) == "persistent-storage:persistent-storage" and root_disk.get(
                                    "size-of-storage"
                                ):
                                    persistent_root_disk[vsd["id"]] = {
                                        "image_id": vdu.get("sw-image-desc"),
                                        "size": root_disk["size-of-storage"],
                                    }
                                    disk_list.append(persistent_root_disk[vsd["id"]])

                if target_vdu.get("virtual-storages"):
                    for disk in target_vdu["virtual-storages"]:
                        if (
                            disk.get("type-of-storage")
                            == "persistent-storage:persistent-storage"
                            and disk["id"] not in persistent_root_disk.keys()
                        ):
                            disk_list.append({"size": disk["size-of-storage"]})

                affinity_group_list = []

                if target_vdu.get("affinity-or-anti-affinity-group-id"):
                    affinity_group = {}
                    for affinity_group_id in target_vdu[
                        "affinity-or-anti-affinity-group-id"
                    ]:
                        affinity_group_text = (
                            ns_preffix
                            + ":affinity-or-anti-affinity-group."
                            + affinity_group_id
                        )

                        extra_dict["depends_on"].append(affinity_group_text)
                        affinity_group["affinity_group_id"] = (
                            "TASK-" + affinity_group_text
                        )
                        affinity_group_list.append(affinity_group)

                extra_dict["params"] = {
                    "name": "{}-{}-{}-{}".format(
                        indata["name"][:16],
                        vnfr["member-vnf-index-ref"][:16],
                        target_vdu["vdu-name"][:32],
                        target_vdu.get("count-index") or 0,
                    ),
                    "description": target_vdu["vdu-name"],
                    "start": True,
                    "image_id": "TASK-" + image_text,
                    "flavor_id": "TASK-" + flavor_text,
                    "affinity_group_list": affinity_group_list,
                    "net_list": net_list,
                    "cloud_config": cloud_config or None,
                    "disk_list": disk_list,
                    "availability_zone_index": None,  # TODO
                    "availability_zone_list": None,  # TODO
                }

                return extra_dict

            def _process_items(
                target_list,
                existing_list,
                db_record,
                db_update,
                db_path,
                item,
                process_params,
            ):
                nonlocal db_new_tasks
                nonlocal tasks_by_target_record_id
                nonlocal task_index

                # ensure all the target_list elements have an "id"; if not, assign the index as id
                for target_index, tl in enumerate(target_list):
                    if tl and not tl.get("id"):
                        tl["id"] = str(target_index)

                # step 1: items (networks, vdus, ...) to be deleted/updated
                for item_index, existing_item in enumerate(existing_list):
                    target_item = next(
                        (t for t in target_list if t["id"] == existing_item["id"]), None
                    )

                    for target_vim, existing_viminfo in existing_item.get(
                        "vim_info", {}
                    ).items():
                        if existing_viminfo is None:
                            continue

                        if target_item:
                            target_viminfo = target_item.get("vim_info", {}).get(
                                target_vim
                            )
                        else:
                            target_viminfo = None

                        if target_viminfo is None:
                            # must be deleted
                            self._assign_vim(target_vim)
                            target_record_id = "{}.{}".format(
                                db_record, existing_item["id"]
                            )
                            item_ = item

                            if target_vim.startswith("sdn") or target_vim.startswith(
                                "wim"
                            ):
                                # item must be sdn-net instead of net if target_vim is a sdn
                                item_ = "sdn_net"
                                target_record_id += ".sdn"

                            task = _create_task(
                                target_vim,
                                item_,
                                "DELETE",
                                target_record="{}.{}.vim_info.{}".format(
                                    db_record, item_index, target_vim
                                ),
                                target_record_id=target_record_id,
                            )
                            tasks_by_target_record_id[target_record_id] = task
                            db_new_tasks.append(task)
                            # TODO delete
                    # TODO check one by one the vims to be created/deleted

                # step 2: items (networks, vdus, ...) to be created
                for target_item in target_list:
                    item_index = -1

                    for item_index, existing_item in enumerate(existing_list):
                        if existing_item["id"] == target_item["id"]:
                            break
                    else:
                        item_index += 1
                        db_update[db_path + ".{}".format(item_index)] = target_item
                        existing_list.append(target_item)
                        existing_item = None

                    for target_vim, target_viminfo in target_item.get(
                        "vim_info", {}
                    ).items():
                        existing_viminfo = None

                        if existing_item:
                            existing_viminfo = existing_item.get("vim_info", {}).get(
                                target_vim
                            )

                        # TODO check if different. Delete and create???
                        # TODO delete if not exist
                        if existing_viminfo is not None:
                            continue

                        target_record_id = "{}.{}".format(db_record, target_item["id"])
                        item_ = item

                        if target_vim.startswith("sdn") or target_vim.startswith("wim"):
                            # item must be sdn-net instead of net if target_vim is a sdn
                            item_ = "sdn_net"
                            target_record_id += ".sdn"

                        extra_dict = process_params(
                            target_item, target_viminfo, target_record_id
                        )
                        self._assign_vim(target_vim)
                        task = _create_task(
                            target_vim,
                            item_,
                            "CREATE",
                            target_record="{}.{}.vim_info.{}".format(
                                db_record, item_index, target_vim
                            ),
                            target_record_id=target_record_id,
                            extra_dict=extra_dict,
                        )
                        tasks_by_target_record_id[target_record_id] = task
                        db_new_tasks.append(task)

                        if target_item.get("common_id"):
                            task["common_id"] = target_item["common_id"]

                    db_update[db_path + ".{}".format(item_index)] = target_item

            def _process_action(indata):
                nonlocal db_new_tasks
                nonlocal task_index
                nonlocal db_vnfrs
                nonlocal db_ro_nsr

                if indata["action"]["action"] == "inject_ssh_key":
                    key = indata["action"].get("key")
                    user = indata["action"].get("user")
                    password = indata["action"].get("password")

                    for vnf in indata.get("vnf", ()):
                        if vnf["_id"] not in db_vnfrs:
                            raise NsException("Invalid vnf={}".format(vnf["_id"]))

                        db_vnfr = db_vnfrs[vnf["_id"]]

                        for target_vdu in vnf.get("vdur", ()):
                            vdu_index, vdur = next(
                                (
                                    i_v
                                    for i_v in enumerate(db_vnfr["vdur"])
                                    if i_v[1]["id"] == target_vdu["id"]
                                ),
                                (None, None),
                            )

                            if not vdur:
                                raise NsException(
                                    "Invalid vdu vnf={}.{}".format(
                                        vnf["_id"], target_vdu["id"]
                                    )
                                )

                            target_vim, vim_info = next(
                                k_v for k_v in vdur["vim_info"].items()
                            )
                            self._assign_vim(target_vim)
                            target_record = "vnfrs:{}:vdur.{}.ssh_keys".format(
                                vnf["_id"], vdu_index
                            )
                            extra_dict = {
                                "depends_on": [
                                    "vnfrs:{}:vdur.{}".format(vnf["_id"], vdur["id"])
                                ],
                                "params": {
                                    "ip_address": vdur.get("ip-address"),
                                    "user": user,
                                    "key": key,
                                    "password": password,
                                    "private_key": db_ro_nsr["private_key"],
                                    "salt": db_ro_nsr["_id"],
                                    "schema_version": db_ro_nsr["_admin"][
                                        "schema_version"
                                    ],
                                },
                            }
                            task = _create_task(
                                target_vim,
                                "vdu",
                                "EXEC",
                                target_record=target_record,
                                target_record_id=None,
                                extra_dict=extra_dict,
                            )
                            db_new_tasks.append(task)

            with self.write_lock:
                if indata.get("action"):
                    _process_action(indata)
                else:
                    # compute network differences
                    # NS.vld
                    step = "process NS VLDs"
                    _process_items(
                        target_list=indata["ns"]["vld"] or [],
                        existing_list=db_nsr.get("vld") or [],
                        db_record="nsrs:{}:vld".format(nsr_id),
                        db_update=db_nsr_update,
                        db_path="vld",
                        item="net",
                        process_params=_process_net_params,
                    )

                    step = "process NS images"
                    _process_items(
                        target_list=indata.get("image") or [],
                        existing_list=db_nsr.get("image") or [],
                        db_record="nsrs:{}:image".format(nsr_id),
                        db_update=db_nsr_update,
                        db_path="image",
                        item="image",
                        process_params=_process_image_params,
                    )

                    step = "process NS flavors"
                    _process_items(
                        target_list=indata.get("flavor") or [],
                        existing_list=db_nsr.get("flavor") or [],
                        db_record="nsrs:{}:flavor".format(nsr_id),
                        db_update=db_nsr_update,
                        db_path="flavor",
                        item="flavor",
                        process_params=_process_flavor_params,
                    )

                    step = "process NS Affinity Groups"
                    _process_items(
                        target_list=indata.get("affinity-or-anti-affinity-group") or [],
                        existing_list=db_nsr.get("affinity-or-anti-affinity-group")
                        or [],
                        db_record="nsrs:{}:affinity-or-anti-affinity-group".format(
                            nsr_id
                        ),
                        db_update=db_nsr_update,
                        db_path="affinity-or-anti-affinity-group",
                        item="affinity-or-anti-affinity-group",
                        process_params=_process_affinity_group_params,
                    )

                    # VNF.vld
                    for vnfr_id, vnfr in db_vnfrs.items():
                        # vnfr_id needs to be set as a nonlocal variable for, among others, the
                        # nested method _process_vdu_params
                        step = "process VNF={} VLDs".format(vnfr_id)
                        target_vnf = next(
                            (
                                vnf
                                for vnf in indata.get("vnf", ())
                                if vnf["_id"] == vnfr_id
                            ),
                            None,
                        )
                        target_list = target_vnf.get("vld") if target_vnf else None
                        _process_items(
                            target_list=target_list or [],
                            existing_list=vnfr.get("vld") or [],
                            db_record="vnfrs:{}:vld".format(vnfr_id),
                            db_update=db_vnfrs_update[vnfr["_id"]],
                            db_path="vld",
                            item="net",
                            process_params=_process_net_params,
                        )

                        target_list = target_vnf.get("vdur") if target_vnf else None
                        step = "process VNF={} VDUs".format(vnfr_id)
                        _process_items(
                            target_list=target_list or [],
                            existing_list=vnfr.get("vdur") or [],
                            db_record="vnfrs:{}:vdur".format(vnfr_id),
                            db_update=db_vnfrs_update[vnfr["_id"]],
                            db_path="vdur",
                            item="vdu",
                            process_params=_process_vdu_params,
                        )

                for db_task in db_new_tasks:
                    step = "Updating database, Appending tasks to ro_tasks"
                    target_id = db_task.pop("target_id")
                    common_id = db_task.get("common_id")

                    if common_id:
                        if self.db.set_one(
                            "ro_tasks",
                            q_filter={
                                "target_id": target_id,
                                "tasks.common_id": common_id,
                            },
                            update_dict={"to_check_at": now, "modified_at": now},
                            push={"tasks": db_task},
                            fail_on_empty=False,
                        ):
                            continue

                    if not self.db.set_one(
                        "ro_tasks",
                        q_filter={
                            "target_id": target_id,
                            "tasks.target_record": db_task["target_record"],
                        },
                        update_dict={"to_check_at": now, "modified_at": now},
                        push={"tasks": db_task},
                        fail_on_empty=False,
                    ):
                        # Create a ro_task
                        step = "Updating database, Creating ro_tasks"
                        db_ro_task = _create_ro_task(target_id, db_task)
                        nb_ro_tasks += 1
                        self.db.create("ro_tasks", db_ro_task)

                step = "Updating database, nsrs"
                if db_nsr_update:
                    self.db.set_one("nsrs", {"_id": nsr_id}, db_nsr_update)

                for vnfr_id, db_vnfr_update in db_vnfrs_update.items():
                    if db_vnfr_update:
                        step = "Updating database, vnfrs={}".format(vnfr_id)
                        self.db.set_one("vnfrs", {"_id": vnfr_id}, db_vnfr_update)

            self.logger.debug(
                logging_text
                + "Exit. Created {} ro_tasks; {} tasks".format(
                    nb_ro_tasks, len(db_new_tasks)
                )
            )

            return (
                {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
                action_id,
                True,
            )
        except Exception as e:
            if isinstance(e, (DbException, NsException)):
                self.logger.error(
                    logging_text + "Exit Exception while '{}': {}".format(step, e)
                )
            else:
                e = traceback_format_exc()
                self.logger.critical(
                    logging_text + "Exit Exception while '{}': {}".format(step, e),
                    exc_info=True,
                )

            raise NsException(e)

    def delete(self, session, indata, version, nsr_id, *args, **kwargs):
        self.logger.debug("ns.delete version={} nsr_id={}".format(version, nsr_id))
        # self.db.del_list({"_id": ro_task["_id"], "tasks.nsr_id.ne": nsr_id})

        with self.write_lock:
            try:
                NsWorker.delete_db_tasks(self.db, nsr_id, None)
            except NsWorkerException as e:
                raise NsException(e)

        return None, None, True

    def status(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
        # self.logger.debug("ns.status version={} nsr_id={}, action_id={} indata={}"
        #                   .format(version, nsr_id, action_id, indata))
        task_list = []
        done = 0
        total = 0
        ro_tasks = self.db.get_list("ro_tasks", {"tasks.action_id": action_id})
        global_status = "DONE"
        details = []

        for ro_task in ro_tasks:
            for task in ro_task["tasks"]:
                if task and task["action_id"] == action_id:
                    task_list.append(task)
                    total += 1

                    if task["status"] == "FAILED":
                        global_status = "FAILED"
                        error_text = "Error at {} {}: {}".format(
                            task["action"].lower(),
                            task["item"],
                            ro_task["vim_info"].get("vim_details") or "unknown",
                        )
                        details.append(error_text)
                    elif task["status"] in ("SCHEDULED", "BUILD"):
                        if global_status != "FAILED":
                            global_status = "BUILD"
                    else:
                        done += 1

        return_data = {
            "status": global_status,
            "details": ". ".join(details)
            if details
            else "progress {}/{}".format(done, total),
            "nsr_id": nsr_id,
            "action_id": action_id,
            "tasks": task_list,
        }

        return return_data, None, True
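
    # Illustrative status() result: with 5 matching tasks of which 3 are already DONE and 2 still
    # in BUILD, the method returns something like {"status": "BUILD", "details": "progress 3/5",
    # "nsr_id": ..., "action_id": ..., "tasks": [...]}.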

    def cancel(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
        print(
            "ns.cancel session={} indata={} version={} nsr_id={}, action_id={}".format(
                session, indata, version, nsr_id, action_id
            )
        )

        return None, None, True

    def get_deploy(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
        nsrs = self.db.get_list("nsrs", {})
        return_data = []

        for ns in nsrs:
            return_data.append({"_id": ns["_id"], "name": ns["name"]})

        return return_data, None, True

    def get_actions(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
        ro_tasks = self.db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id})
        return_data = []

        for ro_task in ro_tasks:
            for task in ro_task["tasks"]:
                if task["action_id"] not in return_data:
                    return_data.append(task["action_id"])

        return return_data, None, True