fix 1385: unload vim from ns_thread when it is no longer needed
[osm/RO.git] / NG-RO / osm_ng_ro / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2020 Telefonica Investigacion y Desarrollo, S.A.U.
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
14 # implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 ##
18
19 import logging
20 # import yaml
21 from traceback import format_exc as traceback_format_exc
22 from osm_ng_ro.ns_thread import NsWorker, NsWorkerException, deep_get
23 from osm_ng_ro.validation import validate_input, deploy_schema
24 from osm_common import dbmongo, dbmemory, fslocal, fsmongo, msglocal, msgkafka, version as common_version
25 from osm_common.dbbase import DbException
26 from osm_common.fsbase import FsException
27 from osm_common.msgbase import MsgException
28 from http import HTTPStatus
29 from uuid import uuid4
30 from threading import Lock
31 from random import choice as random_choice
32 from time import time
33 from jinja2 import Environment, TemplateError, TemplateNotFound, StrictUndefined, UndefinedError
34 from cryptography.hazmat.primitives import serialization as crypto_serialization
35 from cryptography.hazmat.primitives.asymmetric import rsa
36 from cryptography.hazmat.backends import default_backend as crypto_default_backend
37
38 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
39 min_common_version = "0.1.16"
40
41
42 class NsException(Exception):
43
44 def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST):
45 self.http_code = http_code
46 super(Exception, self).__init__(message)
47
48
49 def get_process_id():
50 """
51 Obtain a unique ID for this process. If running inside docker, it returns the docker container ID; if not,
52 it provides a random one
53 :return: Obtained ID
54 """
55 # Try getting the docker id. If it fails, fall back to a random id
56 try:
57 with open("/proc/self/cgroup", "r") as f:
58 text_id_ = f.readline()
59 _, _, text_id = text_id_.rpartition("/")
60 text_id = text_id.replace("\n", "")[:12]
61 if text_id:
62 return text_id
63 except Exception:
64 pass
65 # Return a random id
66 return "".join(random_choice("0123456789abcdef") for _ in range(12))
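# Illustrative sketch (not part of the module), assuming a standard docker cgroup layout: inside a
# container the last path segment of /proc/self/cgroup is the container id, so its first 12 hex
# characters are returned; anywhere else the fallback is a random 12-character hex string.
#
#     process_id = get_process_id()   # e.g. "3f2a9c81b04d" inside docker, random hex otherwise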
67
68
69 def versiontuple(v):
70 """Utility to compare dot-separated versions. Zero-fills each field so the tuple comparison behaves numerically"""
71 filled = []
72 for point in v.split("."):
73 filled.append(point.zfill(8))
74 return tuple(filled)
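# Illustrative sketch (not part of the module): zero-filling makes the tuple comparison behave like a
# numeric comparison, which a plain string comparison does not guarantee.
#
#     versiontuple("0.1.16") < versiontuple("0.2.1")    # True
#     versiontuple("0.9.0") < versiontuple("0.10.0")    # True, while "0.9.0" < "0.10.0" is False as strings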
75
76
77 class Ns(object):
78
79 def __init__(self):
80 self.db = None
81 self.fs = None
82 self.msg = None
83 self.config = None
84 # self.operations = None
85 self.logger = None
86 # ^ The logger is obtained inside self.start() because the parent logger (ro) is not available yet.
87 # If it were created here, it would not be linked to the parent and would miss its handler and level
88 self.map_topic = {}
89 self.write_lock = None
90 self.vims_assigned = {}
91 self.next_worker = 0
92 self.plugins = {}
93 self.workers = []
94
95 def init_db(self, target_version):
96 pass
97
98 def start(self, config):
99 """
100 Connect to database, filesystem storage, and messaging
101 :param config: two-level dictionary with the configuration. The top level must contain the 'database',
102 'storage' and 'message' sections, each holding its driver name and connection settings
103 :return: None
104 """
105 self.config = config
106 self.config["process_id"] = get_process_id() # used for HA identity
107 self.logger = logging.getLogger("ro.ns")
108 # check that the installed osm/common version is recent enough
109 if versiontuple(common_version) < versiontuple(min_common_version):
110 raise NsException("Not compatible osm/common version '{}'. Needed '{}' or higher".format(
111 common_version, min_common_version))
112
113 try:
114 if not self.db:
115 if config["database"]["driver"] == "mongo":
116 self.db = dbmongo.DbMongo()
117 self.db.db_connect(config["database"])
118 elif config["database"]["driver"] == "memory":
119 self.db = dbmemory.DbMemory()
120 self.db.db_connect(config["database"])
121 else:
122 raise NsException("Invalid configuration param '{}' at '[database]':'driver'".format(
123 config["database"]["driver"]))
124 if not self.fs:
125 if config["storage"]["driver"] == "local":
126 self.fs = fslocal.FsLocal()
127 self.fs.fs_connect(config["storage"])
128 elif config["storage"]["driver"] == "mongo":
129 self.fs = fsmongo.FsMongo()
130 self.fs.fs_connect(config["storage"])
131 elif config["storage"]["driver"] is None:
132 pass
133 else:
134 raise NsException("Invalid configuration param '{}' at '[storage]':'driver'".format(
135 config["storage"]["driver"]))
136 if not self.msg:
137 if config["message"]["driver"] == "local":
138 self.msg = msglocal.MsgLocal()
139 self.msg.connect(config["message"])
140 elif config["message"]["driver"] == "kafka":
141 self.msg = msgkafka.MsgKafka()
142 self.msg.connect(config["message"])
143 else:
144 raise NsException("Invalid configuration param '{}' at '[message]':'driver'".format(
145 config["message"]["driver"]))
146
147 # TODO load workers to deal with existing database tasks
148
149 self.write_lock = Lock()
150 except (DbException, FsException, MsgException) as e:
151 raise NsException(str(e), http_code=e.http_code)
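# Illustrative sketch (not part of the module), showing a minimal config dict accepted by Ns.start() with
# the in-memory/local drivers; the keys follow the checks above, while the "path" for msglocal and the
# "global" section (used later by _create_worker) are assumptions for a test-like setup, not defaults.
#
#     config = {
#         "database": {"driver": "memory"},
#         "storage": {"driver": None},
#         "message": {"driver": "local", "path": "/tmp/osm_ro_msg"},
#         "global": {"server.ns_threads": 1},
#     }
#     ns = Ns()
#     ns.start(config)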
152
153 def get_assigned_vims(self):
154 return list(self.vims_assigned.keys())
155
156 def stop(self):
157 try:
158 if self.db:
159 self.db.db_disconnect()
160 if self.fs:
161 self.fs.fs_disconnect()
162 if self.msg:
163 self.msg.disconnect()
164 self.write_lock = None
165 except (DbException, FsException, MsgException) as e:
166 raise NsException(str(e), http_code=e.http_code)
167 for worker in self.workers:
168 worker.insert_task(("terminate",))
169
170 def _create_worker(self):
171 """
172 Look for a worker thread in idle status. If none is found, create one unless the number of threads reaches the
173 limit set by the 'server.ns_threads' configuration; if the limit is reached, assign an existing thread round-robin.
174 :return: the index of the assigned worker thread. Worker threads are stored at self.workers
175 """
176 # Look for a thread in idle status
177 worker_id = next((i for i in range(len(self.workers)) if self.workers[i] and self.workers[i].idle), None)
178 if worker_id is not None:
179 # unset idle status to avoid race conditions
180 self.workers[worker_id].idle = False
181 else:
182 worker_id = len(self.workers)
183 if worker_id < self.config["global"]["server.ns_threads"]:
184 # create a new worker
185 self.workers.append(NsWorker(worker_id, self.config, self.plugins, self.db))
186 self.workers[worker_id].start()
187 else:
188 # reached maximum number of threads, assign VIM to an existing one
189 worker_id = self.next_worker
190 self.next_worker = (self.next_worker + 1) % self.config["global"]["server.ns_threads"]
191 return worker_id
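# Illustrative note (not part of the module): with "server.ns_threads" set to 2, the first two calls to
# _create_worker() while no worker is idle spawn worker 0 and worker 1; once both are busy, further calls
# simply alternate 0, 1, 0, ... through self.next_worker instead of creating new threads.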
192
193 def assign_vim(self, target_id):
194 with self.write_lock:
195 return self._assign_vim(target_id)
196
197 def _assign_vim(self, target_id):
198 if target_id not in self.vims_assigned:
199 worker_id = self.vims_assigned[target_id] = self._create_worker()
200 self.workers[worker_id].insert_task(("load_vim", target_id))
201
202 def reload_vim(self, target_id):
203 # send reload_vim to every non-idle worker thread to inform them that a VIM has changed; this is needed
204 # because the database VIM information is cached by the threads working with SDN
205 with self.write_lock:
206 for worker in self.workers:
207 if worker and not worker.idle:
208 worker.insert_task(("reload_vim", target_id))
209
210 def unload_vim(self, target_id):
211 with self.write_lock:
212 return self._unload_vim(target_id)
213
214 def _unload_vim(self, target_id):
215 if target_id in self.vims_assigned:
216 worker_id = self.vims_assigned[target_id]
217 self.workers[worker_id].insert_task(("unload_vim", target_id))
218 del self.vims_assigned[target_id]
219
220 def check_vim(self, target_id):
221 with self.write_lock:
222 if target_id in self.vims_assigned:
223 worker_id = self.vims_assigned[target_id]
224 else:
225 worker_id = self._create_worker()
226
227 worker = self.workers[worker_id]
228 worker.insert_task(("check_vim", target_id))
229
230 def unload_unused_vims(self):
231 with self.write_lock:
232 vims_to_unload = []
233 for target_id in self.vims_assigned:
234 if not self.db.get_one("ro_tasks",
235 q_filter={"target_id": target_id,
236 "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED']},
237 fail_on_empty=False):
238 vims_to_unload.append(target_id)
239 for target_id in vims_to_unload:
240 self._unload_vim(target_id)
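# Illustrative note (not part of the module): unload_unused_vims() is the helper behind this change and is
# presumably invoked periodically (e.g. from the main server loop). A VIM with no ro_task holding tasks in
# SCHEDULED, BUILD, DONE or FAILED state gets an "unload_vim" task queued to its worker thread and is
# removed from vims_assigned, so idle threads do not keep unneeded VIM connectors loaded.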
241
242 def _get_cloud_init(self, where):
243 """
244 Reads the cloud-init content from the package file or from the vnfd descriptor, for the cases where it is not provided in the http body
245 :param where: can be 'vnfd_id:file:file_name' or 'vnfd_id:vdu:vdu_index'
246 :return: the cloud-init content as a string
247 """
248 vnfd_id, _, other = where.partition(":")
249 _type, _, name = other.partition(":")
250 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
251 if _type == "file":
252 base_folder = vnfd["_admin"]["storage"]
253 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"], name)
254 if not self.fs:
255 raise NsException("Cannot read file '{}'. Filesystem not loaded, change configuration at storage.driver"
256 .format(cloud_init_file))
257 with self.fs.file_open(cloud_init_file, "r") as ci_file:
258 cloud_init_content = ci_file.read()
259 elif _type == "vdu":
260 cloud_init_content = vnfd["vdu"][int(name)]["cloud-init"]
261 else:
262 raise NsException("Mismatch descriptor for cloud init: {}".format(where))
263 return cloud_init_content
264
265 def _parse_jinja2(self, cloud_init_content, params, context):
266
267 try:
268 env = Environment(undefined=StrictUndefined)
269 template = env.from_string(cloud_init_content)
270 return template.render(params or {})
271 except UndefinedError as e:
272 raise NsException(
273 "Variable '{}' defined at vnfd='{}' must be provided in the instantiation parameters "
274 "inside the 'additionalParamsForVnf' block".format(e, context))
275 except (TemplateError, TemplateNotFound) as e:
276 raise NsException("Error parsing Jinja2 to cloud-init content at vnfd='{}': {}".format(context, e))
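# Illustrative sketch (not part of the module): because the environment uses StrictUndefined, a missing
# variable raises instead of silently rendering an empty string; the names below are made up for the example.
#
#     content = "#cloud-config\nhostname: {{ vm_name }}\n"
#     Ns()._parse_jinja2(content, {"vm_name": "vnf1-vdu1"}, context="my-vnfd")   # renders the hostname
#     Ns()._parse_jinja2(content, {}, context="my-vnfd")   # raises NsException about undefined 'vm_name'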
277
278 def _create_db_ro_nsrs(self, nsr_id, now):
279 try:
280 key = rsa.generate_private_key(
281 backend=crypto_default_backend(),
282 public_exponent=65537,
283 key_size=2048
284 )
285 private_key = key.private_bytes(
286 crypto_serialization.Encoding.PEM,
287 crypto_serialization.PrivateFormat.PKCS8,
288 crypto_serialization.NoEncryption())
289 public_key = key.public_key().public_bytes(
290 crypto_serialization.Encoding.OpenSSH,
291 crypto_serialization.PublicFormat.OpenSSH
292 )
293 private_key = private_key.decode('utf8')
294 # Change the first line because Paramiko needs an explicit start with 'BEGIN RSA PRIVATE KEY'
295 i = private_key.find("\n")
296 private_key = "-----BEGIN RSA PRIVATE KEY-----" + private_key[i:]
297 public_key = public_key.decode('utf8')
298 except Exception as e:
299 raise NsException("Cannot create ssh-keys: {}".format(e))
300
301 schema_version = "1.1"
302 private_key_encrypted = self.db.encrypt(private_key, schema_version=schema_version, salt=nsr_id)
303 db_content = {
304 "_id": nsr_id,
305 "_admin": {
306 "created": now,
307 "modified": now,
308 "schema_version": schema_version
309 },
310 "public_key": public_key,
311 "private_key": private_key_encrypted,
312 "actions": []
313 }
314 self.db.create("ro_nsrs", db_content)
315 return db_content
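# Illustrative sketch (not part of the module): shape of the "ro_nsrs" record created above. The public key
# is stored in OpenSSH format so it can be injected into VDUs through cloud-init "key-pairs", and the
# private key is stored PEM-encoded and encrypted with the nsr_id as salt.
#
#     {
#         "_id": "<nsr_id>",
#         "_admin": {"created": <timestamp>, "modified": <timestamp>, "schema_version": "1.1"},
#         "public_key": "ssh-rsa AAAA...",
#         "private_key": "<encrypted PEM>",
#         "actions": [],
#     }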
316
317 def deploy(self, session, indata, version, nsr_id, *args, **kwargs):
318 self.logger.debug("ns.deploy nsr_id={} indata={}".format(nsr_id, indata))
319 validate_input(indata, deploy_schema)
320 action_id = indata.get("action_id", str(uuid4()))
321 task_index = 0
322 # get current deployment
323 db_nsr_update = {} # update operation on nsrs
324 db_vnfrs_update = {}
325 db_vnfrs = {} # vnf's info indexed by _id
326 nb_ro_tasks = 0 # for logging
327 vdu2cloud_init = indata.get("cloud_init_content") or {}
328 step = ''
329 logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
330 self.logger.debug(logging_text + "Enter")
331 try:
332 step = "Getting ns and vnfr record from db"
333 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
334 db_new_tasks = []
335 tasks_by_target_record_id = {}
336 # read from db: vnf's of this ns
337 step = "Getting vnfrs from db"
338 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
339 if not db_vnfrs_list:
340 raise NsException("Cannot obtain associated VNF for ns")
341 for vnfr in db_vnfrs_list:
342 db_vnfrs[vnfr["_id"]] = vnfr
343 db_vnfrs_update[vnfr["_id"]] = {}
344 now = time()
345 db_ro_nsr = self.db.get_one("ro_nsrs", {"_id": nsr_id}, fail_on_empty=False)
346 if not db_ro_nsr:
347 db_ro_nsr = self._create_db_ro_nsrs(nsr_id, now)
348 ro_nsr_public_key = db_ro_nsr["public_key"]
349
350 # check that action_id is not already in the list of actions; if it is, suffix it with ':<index>'
351 if action_id in db_ro_nsr["actions"]:
352 index = 1
353 while True:
354 new_action_id = "{}:{}".format(action_id, index)
355 if new_action_id not in db_ro_nsr["actions"]:
356 action_id = new_action_id
357 self.logger.debug(logging_text + "Changing action_id in use to {}".format(action_id))
358 break
359 index += 1
360
361 def _create_task(target_id, item, action, target_record, target_record_id, extra_dict=None):
362 nonlocal task_index
363 nonlocal action_id
364 nonlocal nsr_id
365
366 task = {
367 "target_id": target_id, # it will be removed before pushing to the database
368 "action_id": action_id,
369 "nsr_id": nsr_id,
370 "task_id": "{}:{}".format(action_id, task_index),
371 "status": "SCHEDULED",
372 "action": action,
373 "item": item,
374 "target_record": target_record,
375 "target_record_id": target_record_id,
376 }
377 if extra_dict:
378 task.update(extra_dict) # params, find_params, depends_on
379 task_index += 1
380 return task
381
382 def _create_ro_task(target_id, task):
383 nonlocal action_id
384 nonlocal task_index
385 nonlocal now
386
387 _id = task["task_id"]
388 db_ro_task = {
389 "_id": _id,
390 "locked_by": None,
391 "locked_at": 0.0,
392 "target_id": target_id,
393 "vim_info": {
394 "created": False,
395 "created_items": None,
396 "vim_id": None,
397 "vim_name": None,
398 "vim_status": None,
399 "vim_details": None,
400 "refresh_at": None,
401 },
402 "modified_at": now,
403 "created_at": now,
404 "to_check_at": now,
405 "tasks": [task],
406 }
407 return db_ro_task
408
409 def _process_image_params(target_image, vim_info, target_record_id):
410 find_params = {}
411 if target_image.get("image"):
412 find_params["filter_dict"] = {"name": target_image.get("image")}
413 if target_image.get("vim_image_id"):
414 find_params["filter_dict"] = {"id": target_image.get("vim_image_id")}
415 if target_image.get("image_checksum"):
416 find_params["filter_dict"] = {"checksum": target_image.get("image_checksum")}
417 return {"find_params": find_params}
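# Illustrative note (not part of the module): images are never created by RO, only located at the VIM, so
# only "find_params" are produced; e.g. {"image": "ubuntu20.04"} becomes
# {"find_params": {"filter_dict": {"name": "ubuntu20.04"}}}. When several keys are present the last one
# checked (image_checksum) wins, because each assignment replaces filter_dict.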
418
419 def _process_flavor_params(target_flavor, vim_info, target_record_id):
420
421 def _get_resource_allocation_params(quota_descriptor):
422 """
423 read the quota_descriptor from vnfd and fetch the resource allocation properties from the
424 descriptor object
425 :param quota_descriptor: cpu/mem/vif/disk-io quota descriptor
426 :return: quota params for limit, reserve, shares from the descriptor object
427 """
428 quota = {}
429 if quota_descriptor.get("limit"):
430 quota["limit"] = int(quota_descriptor["limit"])
431 if quota_descriptor.get("reserve"):
432 quota["reserve"] = int(quota_descriptor["reserve"])
433 if quota_descriptor.get("shares"):
434 quota["shares"] = int(quota_descriptor["shares"])
435 return quota
436
437 flavor_data = {
438 "disk": int(target_flavor["storage-gb"]),
439 "ram": int(target_flavor["memory-mb"]),
440 "vcpus": int(target_flavor["vcpu-count"]),
441 }
442 numa = {}
443 extended = {}
444 if target_flavor.get("guest-epa"):
445 extended = {}
446 epa_vcpu_set = False
447 if target_flavor["guest-epa"].get("numa-node-policy"):
448 numa_node_policy = target_flavor["guest-epa"].get("numa-node-policy")
449 if numa_node_policy.get("node"):
450 numa_node = numa_node_policy["node"][0]
451 if numa_node.get("num-cores"):
452 numa["cores"] = numa_node["num-cores"]
453 epa_vcpu_set = True
454 if numa_node.get("paired-threads"):
455 if numa_node["paired-threads"].get("num-paired-threads"):
456 numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"])
457 epa_vcpu_set = True
458 if numa_node["paired-threads"].get("paired-thread-ids"):
459 numa["paired-threads-id"] = []
460 for pair in numa_node["paired-threads"]["paired-thread-ids"]:
461 numa["paired-threads-id"].append(
462 (str(pair["thread-a"]), str(pair["thread-b"]))
463 )
464 if numa_node.get("num-threads"):
465 numa["threads"] = int(numa_node["num-threads"])
466 epa_vcpu_set = True
467 if numa_node.get("memory-mb"):
468 numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1)
469 if target_flavor["guest-epa"].get("mempage-size"):
470 extended["mempage-size"] = target_flavor["guest-epa"].get("mempage-size")
471 if target_flavor["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set:
472 if target_flavor["guest-epa"]["cpu-pinning-policy"] == "DEDICATED":
473 if target_flavor["guest-epa"].get("cpu-thread-pinning-policy") and \
474 target_flavor["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER":
475 numa["cores"] = max(flavor_data["vcpus"], 1)
476 else:
477 numa["threads"] = max(flavor_data["vcpus"], 1)
478 epa_vcpu_set = True
479 if target_flavor["guest-epa"].get("cpu-quota") and not epa_vcpu_set:
480 cpuquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("cpu-quota"))
481 if cpuquota:
482 extended["cpu-quota"] = cpuquota
483 if target_flavor["guest-epa"].get("mem-quota"):
484 vduquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("mem-quota"))
485 if vduquota:
486 extended["mem-quota"] = vduquota
487 if target_flavor["guest-epa"].get("disk-io-quota"):
488 diskioquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("disk-io-quota"))
489 if diskioquota:
490 extended["disk-io-quota"] = diskioquota
491 if target_flavor["guest-epa"].get("vif-quota"):
492 vifquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("vif-quota"))
493 if vifquota:
494 extended["vif-quota"] = vifquota
495 if numa:
496 extended["numas"] = [numa]
497 if extended:
498 flavor_data["extended"] = extended
499
500 extra_dict = {"find_params": {"flavor_data": flavor_data}}
501 flavor_data_name = flavor_data.copy()
502 flavor_data_name["name"] = target_flavor["name"]
503 extra_dict["params"] = {"flavor_data": flavor_data_name}
504 return extra_dict
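# Illustrative sketch (not part of the module): a target flavor without guest-epa, such as
#     {"name": "small", "storage-gb": "10", "memory-mb": "1024", "vcpu-count": "1"}
# produces
#     {"find_params": {"flavor_data": {"disk": 10, "ram": 1024, "vcpus": 1}},
#      "params": {"flavor_data": {"disk": 10, "ram": 1024, "vcpus": 1, "name": "small"}}}
# so the worker first searches the VIM for a matching flavor and only creates one if nothing matches.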
505
506 def _ip_profile_2_ro(ip_profile):
507 if not ip_profile:
508 return None
509 ro_ip_profile = {
510 "ip_version": "IPv4" if "v4" in ip_profile.get("ip-version", "ipv4") else "IPv6",
511 "subnet_address": ip_profile.get("subnet-address"),
512 "gateway_address": ip_profile.get("gateway-address"),
513 "dhcp_enabled": ip_profile.get("dhcp-params", {}).get("enabled", True),
514 "dhcp_start_address": ip_profile.get("dhcp-params", {}).get("start-address"),
515 "dhcp_count": ip_profile.get("dhcp-params", {}).get("count"),
516
517 }
518 if ip_profile.get("dns-server"):
519 ro_ip_profile["dns_address"] = ";".join([v["address"] for v in ip_profile["dns-server"]])
520 if ip_profile.get('security-group'):
521 ro_ip_profile["security_group"] = ip_profile['security-group']
522 return ro_ip_profile
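# Illustrative sketch (not part of the module): mapping of an IM ip-profile to the RO connector format;
# the addresses below are made-up example values.
#
#     _ip_profile_2_ro({
#         "ip-version": "ipv4",
#         "subnet-address": "192.168.10.0/24",
#         "gateway-address": "192.168.10.1",
#         "dns-server": [{"address": "8.8.8.8"}],
#         "dhcp-params": {"enabled": True, "start-address": "192.168.10.10", "count": 50},
#     })
#     # -> {"ip_version": "IPv4", "subnet_address": "192.168.10.0/24", "gateway_address": "192.168.10.1",
#     #     "dhcp_enabled": True, "dhcp_start_address": "192.168.10.10", "dhcp_count": 50,
#     #     "dns_address": "8.8.8.8"}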
523
524 def _process_net_params(target_vld, vim_info, target_record_id):
525 nonlocal indata
526 extra_dict = {}
527
528 if vim_info.get("sdn"):
529 # vnf_preffix = "vnfrs:{}".format(vnfr_id)
530 # ns_preffix = "nsrs:{}".format(nsr_id)
531 vld_target_record_id, _, _ = target_record_id.rpartition(".") # remove the trailing ".sdn"
532 extra_dict["params"] = {k: vim_info[k] for k in ("sdn-ports", "target_vim", "vlds", "type")
533 if vim_info.get(k)}
534 # TODO needed to add target_id in the dependency.
535 if vim_info.get("target_vim"):
536 extra_dict["depends_on"] = [vim_info.get("target_vim") + " " + vld_target_record_id]
537 return extra_dict
538
539 if vim_info.get("vim_network_name"):
540 extra_dict["find_params"] = {"filter_dict": {"name": vim_info.get("vim_network_name")}}
541 elif vim_info.get("vim_network_id"):
542 extra_dict["find_params"] = {"filter_dict": {"id": vim_info.get("vim_network_id")}}
543 elif target_vld.get("mgmt-network"):
544 extra_dict["find_params"] = {"mgmt": True, "name": target_vld["id"]}
545 else:
546 # create
547 extra_dict["params"] = {
548 "net_name": "{}-{}".format(indata["name"][:16], target_vld.get("name", target_vld["id"])[:16]),
549 "ip_profile": _ip_profile_2_ro(vim_info.get('ip_profile')),
550 "provider_network_profile": vim_info.get('provider_network'),
551 }
552 if not target_vld.get("underlay"):
553 extra_dict["params"]["net_type"] = "bridge"
554 else:
555 extra_dict["params"]["net_type"] = "ptp" if target_vld.get("type") == "ELINE" else "data"
556 return extra_dict
557
558 def _process_vdu_params(target_vdu, vim_info, target_record_id):
559 nonlocal vnfr_id
560 nonlocal nsr_id
561 nonlocal indata
562 nonlocal vnfr
563 nonlocal vdu2cloud_init
564 nonlocal tasks_by_target_record_id
565 vnf_preffix = "vnfrs:{}".format(vnfr_id)
566 ns_preffix = "nsrs:{}".format(nsr_id)
567 image_text = ns_preffix + ":image." + target_vdu["ns-image-id"]
568 flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
569 extra_dict = {"depends_on": [image_text, flavor_text]}
570 net_list = []
571 for iface_index, interface in enumerate(target_vdu["interfaces"]):
572 if interface.get("ns-vld-id"):
573 net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
574 elif interface.get("vnf-vld-id"):
575 net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"]
576 else:
577 self.logger.error("Interface {} from vdu {} not connected to any vld".format(
578 iface_index, target_vdu["vdu-name"]))
579 continue # interface not connected to any vld
580 extra_dict["depends_on"].append(net_text)
581 net_item = {x: v for x, v in interface.items() if x in
582 ("name", "vpci", "port_security", "port_security_disable_strategy", "floating_ip")}
583 net_item["net_id"] = "TASK-" + net_text
584 net_item["type"] = "virtual"
585 # TODO mac_address: used for SR-IOV ifaces #TODO for other types
586 # TODO floating_ip: True/False (or it can be None)
587 if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
588 # mark the net create task as type data
589 if deep_get(tasks_by_target_record_id, net_text, "params", "net_type"):
590 tasks_by_target_record_id[net_text]["params"]["net_type"] = "data"
591 net_item["use"] = "data"
592 net_item["model"] = interface["type"]
593 net_item["type"] = interface["type"]
594 elif interface.get("type") == "OM-MGMT" or interface.get("mgmt-interface") or \
595 interface.get("mgmt-vnf"):
596 net_item["use"] = "mgmt"
597 else: # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
598 net_item["use"] = "bridge"
599 net_item["model"] = interface.get("type")
600 if interface.get("ip-address"):
601 net_item["ip_address"] = interface["ip-address"]
602 if interface.get("mac-address"):
603 net_item["mac_address"] = interface["mac-address"]
604 net_list.append(net_item)
605 if interface.get("mgmt-vnf"):
606 extra_dict["mgmt_vnf_interface"] = iface_index
607 elif interface.get("mgmt-interface"):
608 extra_dict["mgmt_vdu_interface"] = iface_index
609 # cloud config
610 cloud_config = {}
611 if target_vdu.get("cloud-init"):
612 if target_vdu["cloud-init"] not in vdu2cloud_init:
613 vdu2cloud_init[target_vdu["cloud-init"]] = self._get_cloud_init(target_vdu["cloud-init"])
614 cloud_content_ = vdu2cloud_init[target_vdu["cloud-init"]]
615 cloud_config["user-data"] = self._parse_jinja2(cloud_content_, target_vdu.get("additionalParams"),
616 target_vdu["cloud-init"])
617 if target_vdu.get("boot-data-drive"):
618 cloud_config["boot-data-drive"] = target_vdu.get("boot-data-drive")
619 ssh_keys = []
620 if target_vdu.get("ssh-keys"):
621 ssh_keys += target_vdu.get("ssh-keys")
622 if target_vdu.get("ssh-access-required"):
623 ssh_keys.append(ro_nsr_public_key)
624 if ssh_keys:
625 cloud_config["key-pairs"] = ssh_keys
626
627 extra_dict["params"] = {
628 "name": "{}-{}-{}-{}".format(indata["name"][:16], vnfr["member-vnf-index-ref"][:16],
629 target_vdu["vdu-name"][:32], target_vdu.get("count-index") or 0),
630 "description": target_vdu["vdu-name"],
631 "start": True,
632 "image_id": "TASK-" + image_text,
633 "flavor_id": "TASK-" + flavor_text,
634 "net_list": net_list,
635 "cloud_config": cloud_config or None,
636 "disk_list": None, # TODO
637 "availability_zone_index": None, # TODO
638 "availability_zone_list": None, # TODO
639 }
640 return extra_dict
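# Illustrative note (not part of the module): for a VDU with one interface attached to an NS vld, the
# resulting params reference the image, flavor and network through "TASK-<target_record_id>" placeholders,
# e.g. "image_id": "TASK-nsrs:<nsr_id>:image.0", which the worker thread resolves once the tasks listed in
# "depends_on" have completed.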
641
642 def _process_items(target_list, existing_list, db_record, db_update, db_path, item, process_params):
643 nonlocal db_new_tasks
644 nonlocal tasks_by_target_record_id
645 nonlocal task_index
646
647 # ensure all the target_list elements have an "id"; if not, assign the index as the id
648 for target_index, tl in enumerate(target_list):
649 if tl and not tl.get("id"):
650 tl["id"] = str(target_index)
651
652 # step 1 items (networks,vdus,...) to be deleted/updated
653 for item_index, existing_item in enumerate(existing_list):
654 target_item = next((t for t in target_list if t["id"] == existing_item["id"]), None)
655 for target_vim, existing_viminfo in existing_item.get("vim_info", {}).items():
656 if existing_viminfo is None:
657 continue
658 if target_item:
659 target_viminfo = target_item.get("vim_info", {}).get(target_vim)
660 else:
661 target_viminfo = None
662 if target_viminfo is None:
663 # must be deleted
664 self._assign_vim(target_vim)
665 target_record_id = "{}.{}".format(db_record, existing_item["id"])
666 item_ = item
667 if target_vim.startswith("sdn"):
668 # item must be "sdn_net" instead of "net" if target_vim is an sdn
669 item_ = "sdn_net"
670 target_record_id += ".sdn"
671 task = _create_task(
672 target_vim, item_, "DELETE",
673 target_record="{}.{}.vim_info.{}".format(db_record, item_index, target_vim),
674 target_record_id=target_record_id)
675 tasks_by_target_record_id[target_record_id] = task
676 db_new_tasks.append(task)
677 # TODO delete
678 # TODO check one by one the vims to be created/deleted
679
680 # step 2 items (networks,vdus,...) to be created
681 for target_item in target_list:
682 item_index = -1
683 for item_index, existing_item in enumerate(existing_list):
684 if existing_item["id"] == target_item["id"]:
685 break
686 else:
687 item_index += 1
688 db_update[db_path + ".{}".format(item_index)] = target_item
689 existing_list.append(target_item)
690 existing_item = None
691
692 for target_vim, target_viminfo in target_item.get("vim_info", {}).items():
693 existing_viminfo = None
694 if existing_item:
695 existing_viminfo = existing_item.get("vim_info", {}).get(target_vim)
696 # TODO check if different. Delete and create???
697 # TODO delete if not exist
698 if existing_viminfo is not None:
699 continue
700
701 target_record_id = "{}.{}".format(db_record, target_item["id"])
702 item_ = item
703 if target_vim.startswith("sdn"):
704 # item must be "sdn_net" instead of "net" if target_vim is an sdn
705 item_ = "sdn_net"
706 target_record_id += ".sdn"
707 extra_dict = process_params(target_item, target_viminfo, target_record_id)
708
709 self._assign_vim(target_vim)
710 task = _create_task(
711 target_vim, item_, "CREATE",
712 target_record="{}.{}.vim_info.{}".format(db_record, item_index, target_vim),
713 target_record_id=target_record_id,
714 extra_dict=extra_dict)
715 tasks_by_target_record_id[target_record_id] = task
716 db_new_tasks.append(task)
717 if target_item.get("common_id"):
718 task["common_id"] = target_item["common_id"]
719
720 db_update[db_path + ".{}".format(item_index)] = target_item
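# Illustrative note (not part of the module): _process_items() computes the difference between the target
# list and the existing database list; step 1 creates DELETE tasks for vim_info entries no longer present
# in the target, step 2 creates CREATE tasks for target entries without existing vim_info, and every task
# is indexed in tasks_by_target_record_id so that later tasks can declare dependencies on it.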
721
722 def _process_action(indata):
723 nonlocal db_new_tasks
724 nonlocal task_index
725 nonlocal db_vnfrs
726 nonlocal db_ro_nsr
727
728 if indata["action"]["action"] == "inject_ssh_key":
729 key = indata["action"].get("key")
730 user = indata["action"].get("user")
731 password = indata["action"].get("password")
732 for vnf in indata.get("vnf", ()):
733 if vnf["_id"] not in db_vnfrs:
734 raise NsException("Invalid vnf={}".format(vnf["_id"]))
735 db_vnfr = db_vnfrs[vnf["_id"]]
736 for target_vdu in vnf.get("vdur", ()):
737 vdu_index, vdur = next((i_v for i_v in enumerate(db_vnfr["vdur"]) if
738 i_v[1]["id"] == target_vdu["id"]), (None, None))
739 if not vdur:
740 raise NsException("Invalid vdu vnf={}.{}".format(vnf["_id"], target_vdu["id"]))
741 target_vim, vim_info = next(k_v for k_v in vdur["vim_info"].items())
742 self._assign_vim(target_vim)
743 target_record = "vnfrs:{}:vdur.{}.ssh_keys".format(vnf["_id"], vdu_index)
744 extra_dict = {
745 "depends_on": ["vnfrs:{}:vdur.{}".format(vnf["_id"], vdur["id"])],
746 "params": {
747 "ip_address": vdur.get("ip-address"),
748 "user": user,
749 "key": key,
750 "password": password,
751 "private_key": db_ro_nsr["private_key"],
752 "salt": db_ro_nsr["_id"],
753 "schema_version": db_ro_nsr["_admin"]["schema_version"]
754 }
755 }
756 task = _create_task(target_vim, "vdu", "EXEC",
757 target_record=target_record,
758 target_record_id=None,
759 extra_dict=extra_dict)
760 db_new_tasks.append(task)
761
762 with self.write_lock:
763 if indata.get("action"):
764 _process_action(indata)
765 else:
766 # compute network differences
767 # NS.vld
768 step = "process NS VLDs"
769 _process_items(target_list=indata["ns"]["vld"] or [], existing_list=db_nsr.get("vld") or [],
770 db_record="nsrs:{}:vld".format(nsr_id), db_update=db_nsr_update,
771 db_path="vld", item="net", process_params=_process_net_params)
772
773 step = "process NS images"
774 _process_items(target_list=indata.get("image") or [], existing_list=db_nsr.get("image") or [],
775 db_record="nsrs:{}:image".format(nsr_id),
776 db_update=db_nsr_update, db_path="image", item="image",
777 process_params=_process_image_params)
778
779 step = "process NS flavors"
780 _process_items(target_list=indata.get("flavor") or [], existing_list=db_nsr.get("flavor") or [],
781 db_record="nsrs:{}:flavor".format(nsr_id),
782 db_update=db_nsr_update, db_path="flavor", item="flavor",
783 process_params=_process_flavor_params)
784
785 # VNF.vld
786 for vnfr_id, vnfr in db_vnfrs.items():
787 # vnfr_id needs to be set as a nonlocal variable for, among others, the nested method _process_vdu_params
788 step = "process VNF={} VLDs".format(vnfr_id)
789 target_vnf = next((vnf for vnf in indata.get("vnf", ()) if vnf["_id"] == vnfr_id), None)
790 target_list = target_vnf.get("vld") if target_vnf else None
791 _process_items(target_list=target_list or [], existing_list=vnfr.get("vld") or [],
792 db_record="vnfrs:{}:vld".format(vnfr_id), db_update=db_vnfrs_update[vnfr["_id"]],
793 db_path="vld", item="net", process_params=_process_net_params)
794
795 target_list = target_vnf.get("vdur") if target_vnf else None
796 step = "process VNF={} VDUs".format(vnfr_id)
797 _process_items(target_list=target_list or [], existing_list=vnfr.get("vdur") or [],
798 db_record="vnfrs:{}:vdur".format(vnfr_id),
799 db_update=db_vnfrs_update[vnfr["_id"]], db_path="vdur", item="vdu",
800 process_params=_process_vdu_params)
801
802 for db_task in db_new_tasks:
803 step = "Updating database, Appending tasks to ro_tasks"
804 target_id = db_task.pop("target_id")
805 common_id = db_task.get("common_id")
806 if common_id:
807 if self.db.set_one("ro_tasks",
808 q_filter={"target_id": target_id,
809 "tasks.common_id": common_id},
810 update_dict={"to_check_at": now, "modified_at": now},
811 push={"tasks": db_task}, fail_on_empty=False):
812 continue
813 if not self.db.set_one("ro_tasks",
814 q_filter={"target_id": target_id,
815 "tasks.target_record": db_task["target_record"]},
816 update_dict={"to_check_at": now, "modified_at": now},
817 push={"tasks": db_task}, fail_on_empty=False):
818 # Create a ro_task
819 step = "Updating database, Creating ro_tasks"
820 db_ro_task = _create_ro_task(target_id, db_task)
821 nb_ro_tasks += 1
822 self.db.create("ro_tasks", db_ro_task)
823 step = "Updating database, nsrs"
824 if db_nsr_update:
825 self.db.set_one("nsrs", {"_id": nsr_id}, db_nsr_update)
826 for vnfr_id, db_vnfr_update in db_vnfrs_update.items():
827 if db_vnfr_update:
828 step = "Updating database, vnfrs={}".format(vnfr_id)
829 self.db.set_one("vnfrs", {"_id": vnfr_id}, db_vnfr_update)
830
831 self.logger.debug(logging_text + "Exit. Created {} ro_tasks; {} tasks".format(nb_ro_tasks,
832 len(db_new_tasks)))
833 return {"status": "ok", "nsr_id": nsr_id, "action_id": action_id}, action_id, True
834
835 except Exception as e:
836 if isinstance(e, (DbException, NsException)):
837 self.logger.error(logging_text + "Exit Exception while '{}': {}".format(step, e))
838 else:
839 e = traceback_format_exc()
840 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(step, e), exc_info=True)
841 raise NsException(e)
842
843 def delete(self, session, indata, version, nsr_id, *args, **kwargs):
844 self.logger.debug("ns.delete version={} nsr_id={}".format(version, nsr_id))
845 # self.db.del_list({"_id": ro_task["_id"], "tasks.nsr_id.ne": nsr_id})
846 with self.write_lock:
847 try:
848 NsWorker.delete_db_tasks(self.db, nsr_id, None)
849 except NsWorkerException as e:
850 raise NsException(e)
851 return None, None, True
852
853 def status(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
854 # self.logger.debug("ns.status version={} nsr_id={}, action_id={} indata={}"
855 # .format(version, nsr_id, action_id, indata))
856 task_list = []
857 done = 0
858 total = 0
859 ro_tasks = self.db.get_list("ro_tasks", {"tasks.action_id": action_id})
860 global_status = "DONE"
861 details = []
862 for ro_task in ro_tasks:
863 for task in ro_task["tasks"]:
864 if task and task["action_id"] == action_id:
865 task_list.append(task)
866 total += 1
867 if task["status"] == "FAILED":
868 global_status = "FAILED"
869 error_text = "Error at {} {}: {}".format(task["action"].lower(), task["item"],
870 ro_task["vim_info"].get("vim_details") or "unknown")
871 details.append(error_text)
872 elif task["status"] in ("SCHEDULED", "BUILD"):
873 if global_status != "FAILED":
874 global_status = "BUILD"
875 else:
876 done += 1
877 return_data = {
878 "status": global_status,
879 "details": ". ".join(details) if details else "progress {}/{}".format(done, total),
880 "nsr_id": nsr_id,
881 "action_id": action_id,
882 "tasks": task_list
883 }
884 return return_data, None, True
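# Illustrative note (not part of the module): status() aggregates the per-task states into one value: any
# FAILED task makes the global status FAILED, otherwise any SCHEDULED/BUILD task keeps it at BUILD, and it
# stays DONE only when every task has finished; "details" then reads e.g. "progress 3/3".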
885
886 def cancel(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
887 self.logger.debug("ns.cancel session={} indata={} version={} nsr_id={}, action_id={}".format(
888 session, indata, version, nsr_id, action_id))
889 return None, None, True
890
891 def get_deploy(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
892 nsrs = self.db.get_list("nsrs", {})
893 return_data = []
894 for ns in nsrs:
895 return_data.append({"_id": ns["_id"], "name": ns["name"]})
896 return return_data, None, True
897
898 def get_actions(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
899 ro_tasks = self.db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id})
900 return_data = []
901 for ro_task in ro_tasks:
902 for task in ro_task["tasks"]:
903 if task["action_id"] not in return_data:
904 return_data.append(task["action_id"])
905 return return_data, None, True