# Copyright (c) 2018 SONATA-NFV, 5GTANGO and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, 5GTANGO, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
#
# This work has also been performed in the framework of the 5GTANGO project,
# funded by the European Commission under Grant number 761493 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the 5GTANGO
# partner consortium (www.5gtango.eu).
import logging
import os
import uuid
import hashlib
import zipfile
import yaml
import threading
import datetime
from docker import DockerClient
from flask import Flask, request
import flask_restful as fr
from gevent.pywsgi import WSGIServer
from subprocess import Popen
import ipaddress
import copy
import time


LOG = logging.getLogger("5gtango.llcm")
LOG.setLevel(logging.INFO)


CORS_HEADER = {'Access-Control-Allow-Origin': '*',
               'Access-Control-Allow-Methods': 'GET,OPTIONS'}


GK_STORAGE = "/tmp/vim-emu-tango-llcm/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for
# integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if it is already available
FORCE_PULL = False

# flag to indicate if we use bidirectional forwarding rules in the
# automatic chaining process
BIDIRECTIONAL_CHAIN = True

# override the management interfaces in the descriptors with default
# docker0 interfaces in the containers
USE_DOCKER_MGMT = False

# automatically deploy uploaded packages (no need to execute son-access
# deploy --latest separately)
AUTO_DEPLOY = False

# and also automatically terminate any other running services
AUTO_DELETE = False

# global subnet definitions (see reset_subnets())
ELAN_SUBNETS = None
ELINE_SUBNETS = None

# Time in seconds to wait for VNF stop scripts to execute fully
VNF_STOP_WAIT_TIME = 5

# If services are instantiated multiple times, the public port
# mappings need to be adapted to avoid collisions. We use this
# offset for this: NEW_PORT = (SSIID * OFFSET) + ORIGINAL_PORT
MULTI_INSTANCE_PORT_OFFSET = 1000
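# Worked example (illustrative): with this offset, a container of the second
# service instance (SSIID=1) that originally publishes port 8080 is remapped
# to (1 * 1000) + 8080 = 9080 on the host; see
# update_port_mapping_multi_instance() below.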

# Selected Placement Algorithm: Points to the class of the selected
# placement algorithm.
PLACEMENT_ALGORITHM_OBJ = None
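# Example (illustrative): a caller could select round-robin placement with
# PLACEMENT_ALGORITHM_OBJ = RoundRobinDcPlacement()
# (the placement classes are defined further below in this module).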

# Path to folder with <container_name>.env.yml files that contain
# environment variables injected into the specific container
# when it is started.
PER_INSTANCE_ENV_CONFIGURATION_FOLDER = None
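# Example (assumption, derived from _load_instance_conf_envs() below): a file
# "<vnf_id>.<vdu_id>.<ssiid>.env.yml" in this folder is expected to hold a
# flat YAML mapping that is injected as environment variables, e.g.:
#   LOG_LEVEL: "debug"
#   MY_CONFIG_KEY: "some value"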


class OnBoardingException(BaseException):
    pass


class Gatekeeper(object):

    def __init__(self):
        self.services = dict()
        self.dcs = dict()
        self.net = None
        # used to generate short names for VNFs (Mininet limitation)
        self.vnf_counter = 0
        reset_subnets()
        LOG.info("Initialized 5GTANGO LLCM module.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package.
        :param service_uuid: UUID of the service
        :param service: service object
        """
        self.services[service_uuid] = service
        # let's perform all steps needed to on-board the service
        service.onboard()


class Service(object):
    """
    This class represents an NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(
            CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        self.instances = dict()
        self._instance_counter = 0
        self.created_at = str(datetime.datetime.now())

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated.
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        if self.nsd is None:
            raise OnBoardingException("No NSD found.")
        if len(self.vnfds) < 1:
            raise OnBoardingException("No VNFDs found.")
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        # 4. reserve subnets
        eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
        self.eline_subnets = [ELINE_SUBNETS.pop(0) for _ in eline_fwd_links]
        self.elan_subnets = [ELAN_SUBNETS.pop(0) for _ in elan_fwd_links]
        LOG.debug("Reserved subnets for service '{}': E-Line: {} / E-LAN: {}"
                  .format(self.manifest.get("name"),
                          self.eline_subnets, self.elan_subnets))
        LOG.info("On-boarded service: {}".format(self.manifest.get("name")))

    def start_service(self):
        """
        This method creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return:
        """
        LOG.info("Starting service {} ({})"
                 .format(get_triple_id(self.nsd), self.uuid))

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build an instances dict (a bit like an NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["uuid"] = self.uuid
        # SSIID = short service instance ID (to postfix container names)
        self.instances[instance_uuid]["ssiid"] = self._instance_counter
        self.instances[instance_uuid]["name"] = get_triple_id(self.nsd)
        self.instances[instance_uuid]["vnf_instances"] = list()
        self.instances[instance_uuid]["created_at"] = str(datetime.datetime.now())
        # increase for next instance
        self._instance_counter += 1

        # 3. start all VNFDs that we have in the service
        for vnf_id in self.vnfds:
            vnfd = self.vnfds[vnf_id]
            # attention: returns a list of started deployment units
            vnfis = self._start_vnfd(
                vnfd, vnf_id, self.instances[instance_uuid]["ssiid"])
            # add list of VNFIs to total VNFI list
            self.instances[instance_uuid]["vnf_instances"].extend(vnfis)

        # 4. Deploy E-Line, E-Tree and E-LAN links
        # Attention: Only done if a "forwarding_graphs" section exists in the
        # NSD, even if "forwarding_graphs" are not used directly.
        # Attention2: Copying *_subnets with list() is important here!
        eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
        # 5a. deploy E-Line links
        GK.net.deployed_elines.extend(eline_fwd_links)  # bookkeeping
        self._connect_elines(eline_fwd_links, instance_uuid, list(self.eline_subnets))
        # 5b. deploy E-Tree/E-LAN links
        GK.net.deployed_elans.extend(elan_fwd_links)  # bookkeeping
        self._connect_elans(elan_fwd_links, instance_uuid, list(self.elan_subnets))

        # 6. run the emulator specific entrypoint scripts in the VNFIs of this
        # service instance
        self._trigger_emulator_start_scripts_in_vnfis(
            self.instances[instance_uuid]["vnf_instances"])
        # done
        LOG.info("Service '{}' started. Instance id: {} SSIID: {}"
                 .format(self.instances[instance_uuid]["name"],
                         instance_uuid,
                         self.instances[instance_uuid]["ssiid"]))
        return instance_uuid

    def stop_service(self, instance_uuid):
        """
        This method stops a running service instance.
        It iterates over all VNF instances, stopping each of them
        and removing them from their data center.
        :param instance_uuid: the uuid of the service instance to be stopped
        """
        LOG.info("Stopping service %r" % self.uuid)
        # get relevant information
        # instance_uuid = str(self.uuid.uuid4())
        vnf_instances = self.instances[instance_uuid]["vnf_instances"]
        # trigger stop scripts in vnf instances and wait a few seconds for
        # completion
        self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
        time.sleep(VNF_STOP_WAIT_TIME)
        # stop all vnfs
        for v in vnf_instances:
            self._stop_vnfi(v)
        # last step: remove the instance from the list of all instances
        del self.instances[instance_uuid]

    def _get_elines_and_elans(self):
        """
        Get the E-Line, E-LAN, E-Tree links from the NSD.
        """
        # Attention: Only done if a "forwarding_graphs" section exists in the
        # NSD, even if "forwarding_graphs" are not used directly.
        eline_fwd_links = list()
        elan_fwd_links = list()
        if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
            vlinks = self.nsd["virtual_links"]
            # constituent virtual links are not checked
            eline_fwd_links = [l for l in vlinks if (
                l["connectivity_type"] == "E-Line")]
            elan_fwd_links = [l for l in vlinks if (
                l["connectivity_type"] == "E-LAN" or
                l["connectivity_type"] == "E-Tree")]  # Treat E-Tree as E-LAN
        return eline_fwd_links, elan_fwd_links

    def _get_resource_limits(self, deployment_unit):
        """
        Extract resource limits from deployment units.
        """
        # defaults
        cpu_list = None
        cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(1.0))
        mem_limit = None
        # update from descriptor
        if "resource_requirements" in deployment_unit:
            res_req = deployment_unit.get("resource_requirements")
            cpu_list = res_req.get("cpu").get("cpuset")
            if cpu_list is None:
                cpu_list = res_req.get("cpu").get("vcpus")
            if cpu_list is not None:
                # attention: docker expects list as string w/o spaces:
                cpu_list = str(cpu_list).replace(" ", "").strip()
            cpu_bw = res_req.get("cpu").get("cpu_bw")
            if cpu_bw is None:
                cpu_bw = 1.0
            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
            mem_limit = res_req.get("memory").get("size")
            mem_unit = str(res_req.get("memory").get("size_unit", "GB"))
            if mem_limit is not None:
                mem_limit = int(mem_limit)
                # to bytes
                if "G" in mem_unit:
                    mem_limit = mem_limit * 1024 * 1024 * 1024
                elif "M" in mem_unit:
                    mem_limit = mem_limit * 1024 * 1024
                elif "K" in mem_unit:
                    mem_limit = mem_limit * 1024
        return cpu_list, cpu_period, cpu_quota, mem_limit

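    # Illustrative 'resource_requirements' snippet (assumption, field names
    # taken from the parsing above) as it may appear in a V/CDU descriptor:
    #
    #   resource_requirements:
    #     cpu:
    #       vcpus: 2          # or 'cpuset'; passed to Docker as cpuset_cpus
    #       cpu_bw: 0.5       # CPU time fraction, mapped to CFS quota/period
    #     memory:
    #       size: 512
    #       size_unit: "MB"   # "GB"/"MB"/"KB", converted to bytes
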
    def _start_vnfd(self, vnfd, vnf_id, ssiid, **kwargs):
        """
        Start a single VNFD of this service.
        :param vnfd: vnfd descriptor dict
        :param vnf_id: unique id of this vnf in the nsd
        :return:
        """
        vnfis = list()
        # the vnf_name refers to the container image to be deployed
        vnf_name = vnfd.get("name")
        # combine VDUs and CDUs
        deployment_units = (vnfd.get("virtual_deployment_units", []) +
                            vnfd.get("cloudnative_deployment_units", []))
        # iterate over all deployment units within each VNFD
        for u in deployment_units:
            # 0. vnf_container_name = vnf_id.vdu_id
            vnf_container_name = get_container_name(vnf_id, u.get("id"))
            vnf_container_instance_name = get_container_name(vnf_id, u.get("id"), ssiid)
            # 1. get the name of the docker image to start
            if vnf_container_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_container_name)
            docker_image_name = self.remote_docker_image_urls.get(vnf_container_name)
            # 2. select datacenter to start the VNF in
            target_dc = self._place(vnfd, vnf_id, u, ssiid)
            # 3. perform some checks to ensure we can start the container
            assert(docker_image_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_image_name):
                raise Exception("Docker image {} not found. Abort."
                                .format(docker_image_name))

            # 4. get the resource limits
            cpu_list, cpu_period, cpu_quota, mem_limit = self._get_resource_limits(u)

            # get connection points defined for the DU
            intfs = u.get("connection_points", [])
            # do some re-naming of fields to be compatible to containernet
            for i in intfs:
                if i.get("address"):
                    LOG.info("Found static address for {}: {}"
                             .format(i.get("id"), i.get("address")))
                    i["ip"] = i.get("address")

            # get ports and port_bindings from the port and publish fields of CNFD
            # see: https://github.com/containernet/containernet/wiki/Exposing-and-mapping-network-ports
            ports = list()  # Containernet naming
            port_bindings = dict()
            for i in intfs:
                if i.get("port"):  # field with a single port
                    if not isinstance(i.get("port"), int):
                        LOG.info("Field 'port' is not an int in CP: {}".format(i))
                    else:
                        ports.append(i.get("port"))  # collect all ports
                if i.get("ports"):  # list with multiple ports
                    if not isinstance(i.get("ports"), list):
                        LOG.info("Field 'ports' is not a list in CP: {}".format(i))
                    else:
                        for p in i.get("ports"):
                            if not isinstance(p, int):
                                # do some parsing
                                try:
                                    if "/udp" in p:
                                        p = tuple(p.split("/"))
                                    else:
                                        p = int(p)
                                    ports.append(p)  # collect all ports
                                except BaseException as ex:
                                    LOG.error(
                                        "Could not parse ports list: {}".format(p))
                                    LOG.error(ex)
                            else:
                                ports.append(p)  # collect all ports
                if i.get("publish"):
                    if not isinstance(i.get("publish"), dict):
                        LOG.info("Field 'publish' is not a dict in CP: {}".format(i))
                    else:
                        port_bindings.update(i.get("publish"))
            # update port mapping for cases where service is deployed > 1 times
            port_bindings = update_port_mapping_multi_instance(ssiid, port_bindings)
            if len(ports) > 0:
                LOG.info("{} exposes ports: {}".format(vnf_container_instance_name, ports))
            if len(port_bindings) > 0:
                LOG.info("{} publishes ports: {}".format(vnf_container_instance_name, port_bindings))

            # 5. collect additional information to start container
            volumes = list()
            cenv = dict()
            # 5.1 inject descriptor based start/stop commands into env (overwrite)
            VNFD_CMD_START = u.get("vm_cmd_start")
            VNFD_CMD_STOP = u.get("vm_cmd_stop")
            if VNFD_CMD_START and not VNFD_CMD_START == "None":
                LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_START) +
                         " Overwriting SON_EMU_CMD.")
                cenv["SON_EMU_CMD"] = VNFD_CMD_START
            if VNFD_CMD_STOP and not VNFD_CMD_STOP == "None":
                LOG.info("Found 'vm_cmd_stop'='{}' in VNFD.".format(VNFD_CMD_STOP) +
                         " Overwriting SON_EMU_CMD_STOP.")
                cenv["SON_EMU_CMD_STOP"] = VNFD_CMD_STOP

            # 5.2 inject per instance configurations based on envs
            conf_envs = self._load_instance_conf_envs(vnf_container_instance_name)
            cenv.update(conf_envs)

            # 5.3 handle optional ipc_mode setting
            ipc_mode = u.get("ipc_mode", None)
            # 5.4 handle optional devices setting
            devices = u.get("devices", [])
            # 5.5 handle optional cap_add setting
            cap_add = u.get("cap_add", [])

            # 6. Start the container
            LOG.info("Starting %r as %r in DC %r" %
                     (vnf_name, vnf_container_instance_name, target_dc))
            LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
            # start the container
            vnfi = target_dc.startCompute(
                vnf_container_instance_name,
                network=intfs,
                image=docker_image_name,
                cpu_quota=cpu_quota,
                cpu_period=cpu_period,
                cpuset_cpus=cpu_list,
                mem_limit=mem_limit,
                volumes=volumes,
                properties=cenv,  # environment
                ports=ports,
                port_bindings=port_bindings,
                # only publish if explicitly stated in descriptor
                publish_all_ports=False,
                ipc_mode=ipc_mode,
                devices=devices,
                cap_add=cap_add,
                type=kwargs.get('type', 'docker'))
            # add vnfd reference to vnfi
            vnfi.vnfd = vnfd
            # add container name
            vnfi.vnf_container_name = vnf_container_name
            vnfi.vnf_container_instance_name = vnf_container_instance_name
            vnfi.ssiid = ssiid
            # store vnfi
            vnfis.append(vnfi)
        return vnfis

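    # Illustrative connection point snippet (assumption, field names taken
    # from the parsing above) as it may appear in a C/VDU descriptor:
    #
    #   connection_points:
    #     - id: "mgmt"
    #       interface: "ipv4"
    #       address: "10.30.0.5/24"    # optional static IP ("ip" also works)
    #       port: 8080                 # single exposed port
    #       ports: [9090, "5001/udp"]  # multiple ports, "/udp" strings parsed
    #       publish: {8080: 18080}     # container port -> host port mapping
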
    def _stop_vnfi(self, vnfi):
        """
        Stop a VNF instance.
        :param vnfi: vnf instance to be stopped
        """
        # Find the correct datacenter
        status = vnfi.getStatus()
        dc = vnfi.datacenter
        # stop the vnfi
        LOG.info("Stopping the vnf instance contained in %r in DC %r" %
                 (status["name"], dc))
        dc.stopCompute(status["name"])

    def _get_vnf_instance(self, instance_uuid, vnf_id):
        """
        Returns VNFI object for a given "vnf_id" or "vnf_container_name" taken from an NSD.
        :return: single object
        """
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if str(vnfi.name) == str(vnf_id):
                return vnfi
        LOG.warning("No container with name: {0} found.".format(vnf_id))
        return None

    def _get_vnf_instance_units(self, instance_uuid, vnf_id):
        """
        Returns a list of VNFI objects (all deployment units) for a given
        "vnf_id" taken from an NSD.
        :return: list
        """
        if vnf_id is None:
            return None
        r = list()
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnf_id in vnfi.name:
                r.append(vnfi)
        if len(r) > 0:
            LOG.debug("Found units: {} for vnf_id: {}"
                      .format([i.name for i in r], vnf_id))
            return r
        LOG.warning("No container(s) with name: {0} found.".format(vnf_id))
        return None

    @staticmethod
    def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instance
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :return:
        """
        # assign new ip address
        if net_str is not None:
            intf = vnfi.intf(intf=if_name)
            if intf is not None:
                intf.setIP(net_str)
                LOG.debug("Reconfigured network of %s:%s to %r" %
                          (vnfi.name, if_name, net_str))
            else:
                LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
                    vnfi.name, if_name))

        if new_name is not None:
            vnfi.cmd('ip link set', if_name, 'down')
            vnfi.cmd('ip link set', if_name, 'name', new_name)
            vnfi.cmd('ip link set', new_name, 'up')
            LOG.debug("Reconfigured interface name of %s:%s to %s" %
                      (vnfi.name, if_name, new_name))

    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
                if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
                    LOG.info("Executing script in '{}': {}={}"
                             .format(vnfi.name, var, cmd))
                    # execute command in new thread to ensure that GK is not
                    # blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()
                    break  # only execute one command

    def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
                if var == "SON_EMU_CMD_STOP" or var == "VIM_EMU_CMD_STOP":
                    LOG.info("Executing script in '{}': {}={}"
                             .format(vnfi.name, var, cmd))
                    # execute command in new thread to ensure that GK is not
                    # blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()
                    break  # only execute one command

    def _load_instance_conf_envs(self, cname):
        """
        Try to load an instance-specific env file. If not found,
        just return an empty dict.
        """
        if PER_INSTANCE_ENV_CONFIGURATION_FOLDER is None:
            return dict()
        try:
            path = os.path.expanduser(PER_INSTANCE_ENV_CONFIGURATION_FOLDER)
            path = os.path.join(path, "{}.env.yml".format(cname))
            res = load_yaml(path)
            LOG.info("Loaded instance-specific env file for '{}': {}"
                     .format(cname, res))
            return res
        except BaseException as ex:
            LOG.info("No instance-specific env file found for: {}"
                     .format(cname))
            del ex
        return dict()

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)

    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "TOSCA-Metadata/NAPD.yaml"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "package_content" in self.manifest:
            nsd_path = None
            for f in self.manifest.get("package_content"):
                if f.get("content-type") == "application/vnd.5gtango.nsd":
                    nsd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(f.get("source")))
                    break  # always use the first NSD for now
            if nsd_path is None:
                raise OnBoardingException("No NSD with type 'application/vnd.5gtango.nsd' found.")
            self.nsd = load_yaml(nsd_path)
            GK.net.deployed_nsds.append(self.nsd)  # TODO this seems strange (remove?)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
        else:
            raise OnBoardingException(
                "No 'package_content' section in package manifest:\n{}"
                .format(self.manifest))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
        :return:
        """
        # first make a list of all the vnfds in the package
        vnfd_set = dict()
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get(
                        "content-type") == "application/vnd.5gtango.vnfd":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("source")))
                    vnfd = load_yaml(vnfd_path)
                    vnfd_set[vnfd.get("name")] = vnfd
            if len(vnfd_set) < 1:
                raise OnBoardingException("No VNFDs found.")
            # then link each vnf_id in the nsd to its vnfd
            for v in self.nsd.get("network_functions"):
                if v.get("vnf_name") in vnfd_set:
                    self.vnfds[v.get("vnf_id")] = vnfd_set[v.get("vnf_name")]
                    LOG.debug("Loaded VNFD: {0} id: {1}"
                              .format(v.get("vnf_name"), v.get("vnf_id")))

    def _connect_elines(self, eline_fwd_links, instance_uuid, subnets):
        """
        Connect all E-Line links in the NSD.
        Attention: This method DOES NOT support multi V/CDU VNFs!
        :param eline_fwd_links: list of E-Line links in the NSD
        :param: instance_uuid of the service
        :param: subnets list of subnets to be used
        :return:
        """
        # the cookie is used as identifier for the flow rules installed by the
        # dummy gatekeeper, e.g. different services get a unique cookie for
        # their flow rules
        cookie = 1
        for link in eline_fwd_links:
            LOG.info("Found E-Line: {}".format(link))
            src_id, src_if_name = parse_interface(
                link["connection_points_reference"][0])
            dst_id, dst_if_name = parse_interface(
                link["connection_points_reference"][1])
            LOG.info("Searching C/VDU for E-Line: src={}, src_if={}, dst={}, dst_if={}"
                     .format(src_id, src_if_name, dst_id, dst_if_name))
            # handle C/VDUs (ugly hack, only one V/CDU per VNF for now)
            src_units = self._get_vnf_instance_units(instance_uuid, src_id)
            dst_units = self._get_vnf_instance_units(instance_uuid, dst_id)
            if src_units is None or dst_units is None:
                LOG.info("No VNF-VNF link. Skipping: src={}, src_if={}, dst={}, dst_if={}"
                         .format(src_id, src_if_name, dst_id, dst_if_name))
                return
            # we only support VNFs with one V/CDU right now
            if len(src_units) != 1 or len(dst_units) != 1:
                raise BaseException("LLCM does not support E-LINES for multi V/CDU VNFs.")
            # get the full name from that C/VDU and use it as src_id and dst_id
            src_id = src_units[0].name
            dst_id = dst_units[0].name
            # from here we have all info we need
            LOG.info("Creating E-Line for C/VDU: src={}, src_if={}, dst={}, dst_if={}"
                     .format(src_id, src_if_name, dst_id, dst_if_name))
            # get involved vnfis
            src_vnfi = src_units[0]
            dst_vnfi = dst_units[0]
            # proceed with chaining setup
            setChaining = False
            if src_vnfi is not None and dst_vnfi is not None:
                setChaining = True
                # re-configure the VNFs IP assignment and ensure that a new
                # subnet is used for each E-Line
                eline_net = subnets.pop(0)
                ip1 = "{0}/{1}".format(str(eline_net[1]),
                                       eline_net.prefixlen)
                ip2 = "{0}/{1}".format(str(eline_net[2]),
                                       eline_net.prefixlen)
                # check if VNFs have fixed IPs (ip/address field in VNFDs)
                if (self._get_vnfd_cp_from_vnfi(
                        src_vnfi, src_if_name).get("ip") is None and
                        self._get_vnfd_cp_from_vnfi(
                            src_vnfi, src_if_name).get("address") is None):
                    self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
                # check if VNFs have fixed IPs (ip field in VNFDs)
                if (self._get_vnfd_cp_from_vnfi(
                        dst_vnfi, dst_if_name).get("ip") is None and
                        self._get_vnfd_cp_from_vnfi(
                            dst_vnfi, dst_if_name).get("address") is None):
                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
            # set the chaining
            if setChaining:
                GK.net.setChain(
                    src_id, dst_id,
                    vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                    bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)

    def _get_vnfd_cp_from_vnfi(self, vnfi, ifname):
        """
        Gets the connection point data structure from the VNFD
        of the given VNFI using ifname.
        """
        if vnfi.vnfd is None:
            return {}
        cps = vnfi.vnfd.get("connection_points")
        for cp in cps:
            if cp.get("id") == ifname:
                return cp

    def _connect_elans(self, elan_fwd_links, instance_uuid, subnets):
        """
        Connect all E-LAN/E-Tree links in the NSD.
        This method supports multi-V/CDU VNFs if the connection
        point names of the DUs are the same as the ones in the NSD.
        :param elan_fwd_links: list of E-LAN links in the NSD
        :param: instance_uuid of the service
        :param: subnets list of subnets to be used
        :return:
        """
        for link in elan_fwd_links:
            # a new E-LAN/E-Tree
            elan_vnf_list = []
            lan_net = subnets.pop(0)
            lan_hosts = list(lan_net.hosts())

            # generate lan ip address for all interfaces (of all involved (V/CDUs))
            for intf_ref in link["connection_points_reference"]:
                vnf_id, intf_name = parse_interface(intf_ref)
                if vnf_id is None:
                    continue  # skip references to NS connection points
                units = self._get_vnf_instance_units(instance_uuid, vnf_id)
                if units is None:
                    continue  # skip if no deployment unit is present
                # iterate over all involved deployment units
                for uvnfi in units:
                    # Attention: we apply a simplification for multi DU VNFs here:
                    # the connection points of all involved DUs have to have the same
                    # name as the connection points of the surrounding VNF to be mapped.
                    # This is because we do not consider links specified in the VNFDs.
                    container_name = uvnfi.name

                    ip_address = None
                    # get the interface of the unit
                    intf = self._get_vnfd_cp_from_vnfi(uvnfi, intf_name)
                    # check if there is a manually assigned address
                    if intf is not None:
                        if intf.get("address"):
                            ip_address = intf.get("address")
                    if ip_address is None:
                        # automatically assign an IP from our pool
                        ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
                                                      lan_net.prefixlen)
                    LOG.debug(
                        "Setting up E-LAN/E-Tree interface. (%s:%s) -> %s" % (
                            container_name, intf_name, ip_address))
                    # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
                    # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                    # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is
                    # necessary.
                    vnfi = self._get_vnf_instance(instance_uuid, container_name)
                    if vnfi is not None:
                        self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                        # add this vnf and interface to the E-LAN for tagging
                        elan_vnf_list.append(
                            {'name': container_name, 'interface': intf_name})
            # install the VLAN tags for this E-LAN
            GK.net.setLAN(elan_vnf_list)

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from VNFDs and store them in dict.
        :return:
        """
        for vnf_id, v in list(self.vnfds.items()):
            for vu in v.get("virtual_deployment_units", []):
                vnf_container_name = get_container_name(vnf_id, vu.get("id"))
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[vnf_container_name] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
            for cu in v.get("cloudnative_deployment_units", []):
                vnf_container_name = get_container_name(vnf_id, cu.get("id"))
                image = cu.get("image")
                docker_path = os.path.join(
                    self.package_content_path,
                    make_relative_path(image))
                self.local_docker_files[vnf_container_name] = docker_path
                LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-built docker images in some repo.
        :return:
        """
        for vnf_id, v in list(self.vnfds.items()):
            for vu in v.get("virtual_deployment_units", []):
                vnf_container_name = get_container_name(vnf_id, vu.get("id"))
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[vnf_container_name] = url
                        LOG.debug("Found Docker image URL (%r): %r" %
                                  (vnf_container_name,
                                   self.remote_docker_image_urls[vnf_container_name]))
            for cu in v.get("cloudnative_deployment_units", []):
                vnf_container_name = get_container_name(vnf_id, cu.get("id"))
                url = cu.get("image")
                if url is not None:
                    url = url.replace("http://", "")
                    self.remote_docker_image_urls[vnf_container_name] = url
                    LOG.debug("Found Docker image URL (%r): %r" %
                              (vnf_container_name,
                               self.remote_docker_image_urls[vnf_container_name]))

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
            self.local_docker_files))
        for k, v in list(self.local_docker_files.items()):
            for line in dc.build(path=v.replace(
                    "Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-built Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in list(self.remote_docker_image_urls.values()):
            # only pull if not present (speedup for development)
            if not FORCE_PULL:
                if len(dc.images.list(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            # this seems to fail with latest docker api version 2.0.2
            # dc.images.pull(url,
            #                insecure_registry=True)
            # using docker cli instead
            cmd = ["docker",
                   "pull",
                   url,
                   ]
            Popen(cmd).wait()

    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists.
        :param image_name: name of the docker image
        :return:
        """
        return len(DockerClient().images.list(name=image_name)) > 0

    def _place(self, vnfd, vnfid, vdu, ssiid):
        """
        Do placement. Return the name of the DC to place
        the given VDU.
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        if PLACEMENT_ALGORITHM_OBJ is None:
            LOG.error("No placement algorithm given. Using FirstDcPlacement!")
            p = FirstDcPlacement()
        else:
            p = PLACEMENT_ALGORITHM_OBJ
        cname = get_container_name(vnfid, vdu.get("id"), ssiid)
        rdc = p.place(GK.dcs, vnfd, vnfid, vdu, ssiid, cname)
        LOG.info("Placement: '{}' --> '{}'".format(cname, rdc))
        return rdc

    def _calculate_cpu_cfs_values(self, cpu_time_percentage):
        """
        Calculate cpu period and quota for CFS.
        :param cpu_time_percentage: percentage of overall CPU to be used
        :return: cpu_period, cpu_quota
        """
        if cpu_time_percentage is None:
            return -1, -1
        if cpu_time_percentage < 0:
            return -1, -1
        # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
        # Attention: the minimum cpu_quota is 1ms (micro)
        cpu_period = 1000000  # let's consider a fixed period of 1000000 microseconds for now
        LOG.debug("cpu_period is %r, cpu_percentage is %r" %
                  (cpu_period, cpu_time_percentage))
        # calculate the fraction of cpu time for this container
        cpu_quota = cpu_period * cpu_time_percentage
        # ATTENTION >= 1000 to avoid an invalid argument system error ... no
        # idea why
        if cpu_quota < 1000:
            LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
            cpu_quota = 1000
            LOG.warning("Increased CPU quota to avoid system error.")
        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
                  (cpu_period, cpu_quota))
        return int(cpu_period), int(cpu_quota)

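    # Worked example (illustrative): cpu_time_percentage=0.25 yields
    # cpu_period=1000000 and cpu_quota=250000, i.e. the container may use
    # 25% of one CPU per CFS period; quotas below 1000 are raised to 1000.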

"""
Some (simple) placement algorithms
"""


class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """

    def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
        return list(dcs.values())[0]


class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """

    def __init__(self):
        self.count = 0

    def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
        dcs_list = list(dcs.values())
        rdc = dcs_list[self.count % len(dcs_list)]
        self.count += 1  # inc. count to use next DC
        return rdc


class StaticConfigPlacement(object):
    """
    Placement: Fixed assignment based on config file.
    """

    def __init__(self, path=None):
        if path is None:
            path = "static_placement.yml"
        path = os.path.expanduser(path)
        self.static_placement = dict()
        try:
            self.static_placement = load_yaml(path)
        except BaseException as ex:
            LOG.error(ex)
            LOG.error("Couldn't load placement from {}"
                      .format(path))
        LOG.info("Loaded static placement: {}"
                 .format(self.static_placement))

    def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
        # check for container name entry
        if cname not in self.static_placement:
            LOG.error("Couldn't find {} in placement".format(cname))
            LOG.error("Using first DC as fallback!")
            return list(dcs.values())[0]
        # lookup
        candidate_dc = self.static_placement.get(cname)
        # check if DC exists
        if candidate_dc not in dcs:
            LOG.error("Couldn't find DC {}".format(candidate_dc))
            LOG.error("Using first DC as fallback!")
            return list(dcs.values())[0]
        # return correct DC
        return dcs.get(candidate_dc)

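# Illustrative static_placement.yml content (assumption, format derived from
# the lookup in StaticConfigPlacement.place above): it maps container names
# ("<vnf_id>.<vdu_id>.<ssiid>") to data center names known by the gatekeeper:
#
#   vnf0.vdu01.0: "dc1"
#   vnf1.vdu01.0: "dc2"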

"""
Resource definitions and API endpoints
"""


class Packages(fr.Resource):

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect a request with a *.son file and store it in UPLOAD_FOLDER
        :return: UUID
        """
        try:
            # get file contents
            LOG.info("POST /packages called")
            # let's search for the package in the request
            is_file_object = False  # make API more robust: file can be in data or in files field
            if "package" in request.files:
                son_file = request.files["package"]
                is_file_object = True
            elif len(request.data) > 0:
                son_file = request.data
            else:
                return {"service_uuid": None, "size": 0, "sha1": None,
                        "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            file_hash = str(son_file)
            file_hash = hashlib.sha1(file_hash.encode())
            file_hash = file_hash.hexdigest()
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.\
                join(UPLOAD_FOLDER, "%s.tgo" % service_uuid)
            # store *.son file to disk
            if is_file_object:
                son_file.save(upload_path)
            else:
                with open(upload_path, 'wb') as f:
                    f.write(son_file)
            size = os.path.getsize(upload_path)

            # first stop and delete any other running services
            if AUTO_DELETE:
                service_list = copy.copy(GK.services)
                # use a dedicated loop variable so that the service_uuid
                # generated for the new upload is not overwritten
                for s_uuid in service_list:
                    instances_list = copy.copy(
                        GK.services[s_uuid].instances)
                    for instance_uuid in instances_list:
                        # valid service and instance UUID, stop service
                        GK.services.get(s_uuid).stop_service(
                            instance_uuid)
                        LOG.info("service instance with uuid %r stopped." %
                                 instance_uuid)

            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)

            # automatically deploy the service
            if AUTO_DEPLOY:
                # ok, we have a service uuid, let's start the service
                reset_subnets()
                GK.services.get(service_uuid).start_service()

            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size,
                    "sha1": file_hash, "error": None}, 201
        except BaseException:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0,
                    "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of package descriptor headers.
        Fakes the behavior of 5GTANGO's GK API to be
        compatible with tng-cli.
        :return: list
        """
        LOG.info("GET /packages")
        result = list()
        for suuid, sobj in GK.services.items():
            pkg = dict()
            pkg["pd"] = dict()
            pkg["uuid"] = suuid
            pkg["pd"]["name"] = sobj.manifest.get("name")
            pkg["pd"]["version"] = sobj.manifest.get("version")
            pkg["created_at"] = sobj.created_at
            result.append(pkg)
        return result, 200, CORS_HEADER


class Services(fr.Resource):

    def get(self):
        """
        Return a list of services.
        Fakes the behavior of 5GTANGO's GK API to be
        compatible with tng-cli.
        :return: list
        """
        LOG.info("GET /services")
        result = list()
        for suuid, sobj in GK.services.items():
            service = dict()
            service["nsd"] = dict()
            service["uuid"] = suuid
            service["nsd"]["name"] = sobj.nsd.get("name")
            service["nsd"]["version"] = sobj.nsd.get("version")
            service["created_at"] = sobj.created_at
            result.append(service)
        return result, 200, CORS_HEADER


class Instantiations(fr.Resource):

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        LOG.info("POST /instantiations (or /requests) called")
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        service_name = json_data.get("service_name")
        if service_name is None:
            # let's be fuzzy
            service_name = service_uuid
        # first try to find by service_name
        if service_name is not None:
            for s_uuid, s in GK.services.items():
                if s.manifest.get("name") == service_name:
                    LOG.info("Searched for: {}. Found service w. UUID: {}"
                             .format(service_name, s_uuid))
                    service_uuid = s_uuid
        # let's be a bit fuzzy here to make testing easier
        if (service_uuid is None or service_uuid ==
                "latest") and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service
            # in the list
            service_uuid = list(GK.services.keys())[0]
        if service_uuid in GK.services:
            # ok, we have a service uuid, let's start the service
            service_instance_uuid = GK.services.get(
                service_uuid).start_service()
            # multiple ID fields to be compatible with tng-bench and tng-cli
            return ({"service_instance_uuid": service_instance_uuid,
                     "id": service_instance_uuid}, 201)
        LOG.error("Service not found: {}/{}".format(service_uuid, service_name))
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.debug("GET /instantiations or /api/v3/records/services")
        # return {"service_instantiations_list": [
        #     list(s.instances.keys()) for s in GK.services.values()]}
        result = list()
        for suuid, sobj in GK.services.items():
            for iuuid, iobj in sobj.instances.items():
                inst = dict()
                inst["uuid"] = iobj.get("uuid")
                inst["instance_name"] = "{}-inst.{}".format(
                    iobj.get("name"), iobj.get("ssiid"))
                inst["status"] = "running"
                inst["created_at"] = iobj.get("created_at")
                result.append(inst)
        return result, 200, CORS_HEADER

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid_input = json_data.get("service_uuid")
        instance_uuid_input = json_data.get("service_instance_uuid")
        if len(GK.services) < 1:
            return "No service on-boarded.", 404
        # try to be fuzzy
        if service_uuid_input is None:
            # if we don't get a service uuid we stop all services
            service_uuid_list = list(GK.services.keys())
            LOG.info("No service_uuid given, stopping all.")
        else:
            service_uuid_list = [service_uuid_input]
        # for each service
        for service_uuid in service_uuid_list:
            if instance_uuid_input is None:
                instance_uuid_list = list(
                    GK.services[service_uuid].instances.keys())
            else:
                instance_uuid_list = [instance_uuid_input]
            # for all service instances
            for instance_uuid in instance_uuid_list:
                if (service_uuid in GK.services and
                        instance_uuid in GK.services[service_uuid].instances):
                    # valid service and instance UUID, stop service
                    GK.services.get(service_uuid).stop_service(instance_uuid)
                    LOG.info("Service instance with uuid %r stopped." % instance_uuid)
        return "Service(s) stopped.", 200


class Exit(fr.Resource):

    def put(self):
        """
        Stop the running Containernet instance regardless of data transmitted.
        """
        list(GK.dcs.values())[0].net.stop()


def generate_subnets(prefix, base, subnet_size=50, mask=24):
    # generate a list of subnets (ipaddress.ip_network objects)
    r = list()
    for net in range(base, base + subnet_size):
        subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
        # Python 3 strings are unicode, so ip_network() accepts them directly
        r.append(ipaddress.ip_network(subnet))
    return r

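# Example (illustrative): generate_subnets('10.0', 0, subnet_size=2, mask=24)
# returns [IPv4Network('10.0.0.0/24'), IPv4Network('10.0.1.0/24')].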

def reset_subnets():
    global ELINE_SUBNETS
    global ELAN_SUBNETS
    # private subnet definitions for the generated interfaces
    # 30.0.xxx.0/24
    ELAN_SUBNETS = generate_subnets('30.0', 0, subnet_size=50, mask=24)
    # 20.0.xxx.0/24
    ELINE_SUBNETS = generate_subnets('20.0', 0, subnet_size=50, mask=24)


def initialize_GK():
    global GK
    GK = Gatekeeper()


# create a single, global GK object
GK = None
initialize_GK()
# setup Flask
http_server = None
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages', '/api/v2/packages', '/api/v3/packages')
api.add_resource(Services, '/services', '/api/v2/services', '/api/v3/services')
api.add_resource(Instantiations, '/instantiations',
                 '/api/v2/instantiations', '/api/v2/requests', '/api/v3/requests',
                 '/api/v3/records/services')
api.add_resource(Exit, '/emulator/exit')

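# Example usage of the API (illustrative; host and port depend on how
# start_rest_api() is invoked, e.g. port 8000 in standalone mode):
#   curl -X POST -F package=@my_service.tgo http://127.0.0.1:8000/packages
#   curl -X POST -d '{"service_uuid": "latest"}' http://127.0.0.1:8000/instantiations
#   curl -X GET http://127.0.0.1:8000/instantiations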

def start_rest_api(host, port, datacenters=dict()):
    global http_server
    GK.dcs = datacenters
    GK.net = get_dc_network()
    # start the Flask server (not the best performance but ok for our use case)
    # app.run(host=host,
    #         port=port,
    #         debug=True,
    #         use_reloader=False  # this is needed to run Flask in a non-main thread
    #         )
    http_server = WSGIServer((host, port), app, log=open("/dev/null", "w"))
    http_server.serve_forever()


def stop_rest_api():
    if http_server:
        http_server.close()


def ensure_dir(name):
    if not os.path.exists(name):
        os.makedirs(name)


def load_yaml(path):
    with open(path, "r") as f:
        try:
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error: %r" % str(exc))
            r = dict()
    return r


def make_relative_path(path):
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    return path


def get_dc_network():
    """
    Retrieve the DCNetwork to which this dummy gatekeeper (GK) connects.
    Assume at least 1 datacenter is connected to this GK, and that all
    datacenters belong to the same DCNetwork.
    :return:
    """
    assert (len(GK.dcs) > 0)
    return list(GK.dcs.values())[0].net


def parse_interface(interface_name):
    """
    Convert the interface name in the NSD to the corresponding
    vnf_id, vnf_interface names.
    :param interface_name:
    :return:
    """
    if ':' in interface_name:
        vnf_id, vnf_interface = interface_name.split(':')
    else:
        vnf_id = None
        vnf_interface = interface_name
    return vnf_id, vnf_interface

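# Example (illustrative): parse_interface("vnf0:output") returns
# ("vnf0", "output"); parse_interface("mgmt") returns (None, "mgmt").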

def get_container_name(vnf_id, vdu_id, ssiid=None):
    if ssiid is not None:
        return "{}.{}.{}".format(vnf_id, vdu_id, ssiid)
    return "{}.{}".format(vnf_id, vdu_id)

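# Example (illustrative): get_container_name("vnf0", "vdu01") returns
# "vnf0.vdu01"; with ssiid=2 it returns "vnf0.vdu01.2".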

def get_triple_id(descr):
    return "{}.{}.{}".format(
        descr.get("vendor"), descr.get("name"), descr.get("version"))


def update_port_mapping_multi_instance(ssiid, port_bindings):
    """
    Port bindings are used to expose ports of the deployed containers.
    They would collide if we deploy multiple service instances.
    This function adds an offset to them which is based on the
    short service instance id (SSIID) and MULTI_INSTANCE_PORT_OFFSET.
    """
    def _offset(p):
        return p + MULTI_INSTANCE_PORT_OFFSET * ssiid

    port_bindings = {k: _offset(v) for k, v in port_bindings.items()}
    return port_bindings

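# Example (illustrative): update_port_mapping_multi_instance(2, {8080: 15001})
# returns {8080: 17001}, i.e. the host port is shifted by 2 * 1000.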

if __name__ == '__main__':
    """
    Let's allow running the API in standalone mode.
    """
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)