Feature: Support for ipc_mode and devices keywords in 5GTANGO LLCM.
osm/vim-emu.git: src/emuvim/api/tango/llcm.py
1
2 # Copyright (c) 2018 SONATA-NFV, 5GTANGO and Paderborn University
3 # ALL RIGHTS RESERVED.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 # Neither the name of the SONATA-NFV, 5GTANGO, Paderborn University
18 # nor the names of its contributors may be used to endorse or promote
19 # products derived from this software without specific prior written
20 # permission.
21 #
22 # This work has been performed in the framework of the SONATA project,
23 # funded by the European Commission under Grant number 671517 through
24 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 # acknowledge the contributions of their colleagues of the SONATA
26 # partner consortium (www.sonata-nfv.eu).
27 #
28 # This work has also been performed in the framework of the 5GTANGO project,
29 # funded by the European Commission under Grant number 761493 through
30 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
31 # acknowledge the contributions of their colleagues of the 5GTANGO
32 # partner consortium (www.5gtango.eu).
33 import logging
34 import os
35 import uuid
36 import hashlib
37 import zipfile
38 import yaml
39 import threading
40 import datetime
41 from docker import DockerClient
42 from flask import Flask, request
43 import flask_restful as fr
44 from gevent.pywsgi import WSGIServer
45 from subprocess import Popen
46 import ipaddress
47 import copy
48 import time
49
50
51 LOG = logging.getLogger("5gtango.llcm")
52 LOG.setLevel(logging.INFO)
53
54
55 CORS_HEADER = {'Access-Control-Allow-Origin': '*',
56 'Access-Control-Allow-Methods': 'GET,OPTIONS'}
57
58
59 GK_STORAGE = "/tmp/vim-emu-tango-llcm/"
60 UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
61 CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
62
63 # Enable Dockerfile build functionality
64 BUILD_DOCKERFILE = False
65
66 # flag to indicate that we run without the emulator (only the bare API for
67 # integration testing)
68 GK_STANDALONE_MODE = False
69
70 # should a new version of an image be pulled even if it is already available
71 FORCE_PULL = False
72
73 # flag to indicate if we use bidirectional forwarding rules in the
74 # automatic chaining process
75 BIDIRECTIONAL_CHAIN = True
76
77 # override the management interfaces in the descriptors with default
78 # docker0 interfaces in the containers
79 USE_DOCKER_MGMT = False
80
81 # automatically deploy uploaded packages (no need to execute son-access
82 # deploy --latest separately)
83 AUTO_DEPLOY = False
84
85 # and also automatically terminate any other running services
86 AUTO_DELETE = False
87
88 # global subnet definitions (see reset_subnets())
89 ELAN_SUBNETS = None
90 ELINE_SUBNETS = None
91
92 # Time in seconds to wait for vnf stop scripts to execute fully
93 VNF_STOP_WAIT_TIME = 5
94
95 # If services are instantiated multiple times, the public port
96 # mappings need to be adapted to avoid collisions. We use this
97 # offset for this: NEW_PORT = (SSIID * OFFSET) + ORIGINAL_PORT
98 MULTI_INSTANCE_PORT_OFFSET = 1000
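# Worked example (illustrative): the second instance of a service has
# SSIID = 1, so a container port published on host port 8080 is remapped
# to (1 * 1000) + 8080 = 9080, avoiding a clash with the first instance.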
99
100 # Selected Placement Algorithm: Points to the class of the selected
101 # placement algorithm.
102 PLACEMENT_ALGORITHM_OBJ = None
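# Illustrative usage (assumption: set by the code that embeds this module,
# before services are instantiated), e.g.:
#   PLACEMENT_ALGORITHM_OBJ = RoundRobinDcPlacement()
# The available placement classes are defined further below in this file.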
103
104 # Path to folder with <container_name>.env.yml files that contain
105 # environment variables injected into the specific container
106 # when it is started.
107 PER_INSTANCE_ENV_CONFIGURATION_FOLDER = None
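# Illustrative example (file name and variables are made up): a file
# "vnf0.vdu01.0.env.yml" in this folder with the content
#   SOME_VAR: "foo"
#   OTHER_VAR: "bar"
# would be injected as environment variables into the container
# "vnf0.vdu01.0" on startup (see _load_instance_conf_envs()).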
108
109
110 class OnBoardingException(BaseException):
111 pass
112
113
114 class Gatekeeper(object):
115
116 def __init__(self):
117 self.services = dict()
118 self.dcs = dict()
119 self.net = None
120 # used to generate short names for VNFs (Mininet limitation)
121 self.vnf_counter = 0
122 reset_subnets()
123 LOG.info("Initialized 5GTANGO LLCM module.")
124
125 def register_service_package(self, service_uuid, service):
126 """
127 register new service package
128 :param service_uuid
129 :param service object
130 """
131 self.services[service_uuid] = service
132 # let's perform all steps needed to onboard the service
133 service.onboard()
134
135
136 class Service(object):
137 """
138 This class represents an NS uploaded as a *.son package to the
139 dummy gatekeeper.
140 It can have multiple running instances of this service.
141 """
142
143 def __init__(self,
144 service_uuid,
145 package_file_hash,
146 package_file_path):
147 self.uuid = service_uuid
148 self.package_file_hash = package_file_hash
149 self.package_file_path = package_file_path
150 self.package_content_path = os.path.join(
151 CATALOG_FOLDER, "services/%s" % self.uuid)
152 self.manifest = None
153 self.nsd = None
154 self.vnfds = dict()
155 self.local_docker_files = dict()
156 self.remote_docker_image_urls = dict()
157 self.instances = dict()
158 self._instance_counter = 0
159 self.created_at = str(datetime.datetime.now())
160
161 def onboard(self):
162 """
163 Do all steps to prepare this service to be instantiated
164 :return:
165 """
166 # 1. extract the contents of the package and store them in our catalog
167 self._unpack_service_package()
168 # 2. read in all descriptor files
169 self._load_package_descriptor()
170 self._load_nsd()
171 self._load_vnfd()
172 if self.nsd is None:
173 raise OnBoardingException("No NSD found.")
174 if len(self.vnfds) < 1:
175 raise OnBoardingException("No VNFDs found.")
176 # 3. prepare container images (e.g. download or build Dockerfile)
177 if BUILD_DOCKERFILE:
178 self._load_docker_files()
179 self._build_images_from_dockerfiles()
180 else:
181 self._load_docker_urls()
182 self._pull_predefined_dockerimages()
183 # 4. reserve subnets
184 eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
185 self.eline_subnets = [ELINE_SUBNETS.pop(0) for _ in eline_fwd_links]
186 self.elan_subnets = [ELAN_SUBNETS.pop(0) for _ in elan_fwd_links]
187 LOG.debug("Reserved subnets for service '{}': E-Line: {} / E-LAN: {}"
188 .format(self.manifest.get("name"),
189 self.eline_subnets, self.elan_subnets))
190 LOG.info("On-boarded service: {}".format(self.manifest.get("name")))
191
192 def start_service(self):
193 """
194 This method creates and starts a new service instance.
195 It computes placements, iterates over all VNFDs, and starts
196 each VNFD as a Docker container in the data center selected
197 by the placement algorithm.
198 :return:
199 """
200 LOG.info("Starting service {} ({})"
201 .format(get_triple_id(self.nsd), self.uuid))
202
203 # 1. each service instance gets a new uuid to identify it
204 instance_uuid = str(uuid.uuid4())
205 # build an instance dict (a bit like an NSR :))
206 self.instances[instance_uuid] = dict()
207 self.instances[instance_uuid]["uuid"] = self.uuid
208 # SSIID = short service instance ID (to postfix Container names)
209 self.instances[instance_uuid]["ssiid"] = self._instance_counter
210 self.instances[instance_uuid]["name"] = get_triple_id(self.nsd)
211 self.instances[instance_uuid]["vnf_instances"] = list()
212 self.instances[instance_uuid]["created_at"] = str(datetime.datetime.now())
213 # increase for next instance
214 self._instance_counter += 1
215
216 # 3. start all vnfds that we have in the service
217 for vnf_id in self.vnfds:
218 vnfd = self.vnfds[vnf_id]
219 # attention: returns a list of started deployment units
220 vnfis = self._start_vnfd(
221 vnfd, vnf_id, self.instances[instance_uuid]["ssiid"])
222 # add list of VNFIs to total VNFI list
223 self.instances[instance_uuid]["vnf_instances"].extend(vnfis)
224
225 # 4. Deploy E-Line, E-Tree and E-LAN links
226 # Attention: Only done if a "forwarding_graphs" section exists in the NSD,
227 # even if "forwarding_graphs" are not used directly.
228 # Attention 2: Copying the *_subnets with list() is important here!
229 eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
230 # 5a. deploy E-Line links
231 GK.net.deployed_elines.extend(eline_fwd_links) # bookkeeping
232 self._connect_elines(eline_fwd_links, instance_uuid, list(self.eline_subnets))
233 # 5b. deploy E-Tree/E-LAN links
234 GK.net.deployed_elans.extend(elan_fwd_links) # bookkeeping
235 self._connect_elans(elan_fwd_links, instance_uuid, list(self.elan_subnets))
236
237 # 6. run the emulator specific entrypoint scripts in the VNFIs of this
238 # service instance
239 self._trigger_emulator_start_scripts_in_vnfis(
240 self.instances[instance_uuid]["vnf_instances"])
241 # done
242 LOG.info("Service '{}' started. Instance id: {} SSIID: {}"
243 .format(self.instances[instance_uuid]["name"],
244 instance_uuid,
245 self.instances[instance_uuid]["ssiid"]))
246 return instance_uuid
247
248 def stop_service(self, instance_uuid):
249 """
250 This method stops a running service instance.
251 It iterates over all VNF instances, stopping each of them
252 and removing them from their data centers.
253 :param instance_uuid: the uuid of the service instance to be stopped
254 """
255 LOG.info("Stopping service %r" % self.uuid)
256 # get relevant information
257 # instance_uuid = str(self.uuid.uuid4())
258 vnf_instances = self.instances[instance_uuid]["vnf_instances"]
259 # trigger stop scripts in vnf instances and wait a few seconds for
260 # completion
261 self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
262 time.sleep(VNF_STOP_WAIT_TIME)
263 # stop all vnfs
264 for v in vnf_instances:
265 self._stop_vnfi(v)
266 # last step: remove the instance from the list of all instances
267 del self.instances[instance_uuid]
268
269 def _get_elines_and_elans(self):
270 """
271 Get the E-Line, E-LAN, E-Tree links from the NSD.
272 """
273 # Attention: Only done if a "forwarding_graphs" section exists in the NSD,
274 # even if "forwarding_graphs" are not used directly.
275 eline_fwd_links = list()
276 elan_fwd_links = list()
277 if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
278 vlinks = self.nsd["virtual_links"]
279 # constituent virtual links are not checked
280 eline_fwd_links = [l for l in vlinks if (
281 l["connectivity_type"] == "E-Line")]
282 elan_fwd_links = [l for l in vlinks if (
283 l["connectivity_type"] == "E-LAN" or
284 l["connectivity_type"] == "E-Tree")] # Treat E-Tree as E-LAN
285 return eline_fwd_links, elan_fwd_links
286
287 def _get_resource_limits(self, deployment_unit):
288 """
289 Extract resource limits from a deployment unit.
290 """
291 # defaults
292 cpu_list = None
293 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(1.0))
294 mem_limit = None
295 # update from descriptor
296 if "resource_requirements" in deployment_unit:
297 res_req = deployment_unit.get("resource_requirements")
298 cpu_list = res_req.get("cpu").get("cpuset")
299 if cpu_list is None:
300 cpu_list = res_req.get("cpu").get("vcpus")
301 if cpu_list is not None:
302 # attention: docker expects list as string w/o spaces:
303 cpu_list = str(cpu_list).replace(" ", "").strip()
304 cpu_bw = res_req.get("cpu").get("cpu_bw")
305 if cpu_bw is None:
306 cpu_bw = 1.0
307 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
308 mem_limit = res_req.get("memory").get("size")
309 mem_unit = str(res_req.get("memory").get("size_unit", "GB"))
310 if mem_limit is not None:
311 mem_limit = int(mem_limit)
312 # to bytes
313 if "G" in mem_unit:
314 mem_limit = mem_limit * 1024 * 1024 * 1024
315 elif "M" in mem_unit:
316 mem_limit = mem_limit * 1024 * 1024
317 elif "K" in mem_unit:
318 mem_limit = mem_limit * 1024
319 return cpu_list, cpu_period, cpu_quota, mem_limit
320
321 def _start_vnfd(self, vnfd, vnf_id, ssiid, **kwargs):
322 """
323 Start a single VNFD of this service
324 :param vnfd: vnfd descriptor dict
325 :param vnf_id: unique id of this vnf in the nsd
326 :return:
327 """
328 vnfis = list()
329 # the vnf_name refers to the container image to be deployed
330 vnf_name = vnfd.get("name")
331 # combine VDUs and CDUs
332 deployment_units = (vnfd.get("virtual_deployment_units", []) +
333 vnfd.get("cloudnative_deployment_units", []))
334 # iterate over all deployment units of this VNFD
335 for u in deployment_units:
336 # 0. vnf_container_name = vnf_id.vdu_id
337 vnf_container_name = get_container_name(vnf_id, u.get("id"))
338 vnf_container_instance_name = get_container_name(vnf_id, u.get("id"), ssiid)
339 # 1. get the name of the docker image to start
340 if vnf_container_name not in self.remote_docker_image_urls:
341 raise Exception("No image name for %r found. Abort." % vnf_container_name)
342 docker_image_name = self.remote_docker_image_urls.get(vnf_container_name)
343 # 2. select datacenter to start the VNF in
344 target_dc = self._place(vnfd, vnf_id, u, ssiid)
345 # 3. perform some checks to ensure we can start the container
346 assert(docker_image_name is not None)
347 assert(target_dc is not None)
348 if not self._check_docker_image_exists(docker_image_name):
349 raise Exception("Docker image {} not found. Abort."
350 .format(docker_image_name))
351
352 # 4. get the resource limits
353 cpu_list, cpu_period, cpu_quota, mem_limit = self._get_resource_limits(u)
354
355 # get connection points defined for the DU
356 intfs = u.get("connection_points", [])
357 # do some re-naming of fields to be compatible with containernet
358 for i in intfs:
359 if i.get("address"):
360 LOG.info("Found static address for {}: {}"
361 .format(i.get("id"), i.get("address")))
362 i["ip"] = i.get("address")
363
364 # get ports and port_bindings from the port and publish fields of CNFD
365 # see: https://github.com/containernet/containernet/wiki/Exposing-and-mapping-network-ports
366 ports = list() # Containernet naming
367 port_bindings = dict()
368 for i in intfs:
369 if i.get("port"): # field with a single port
370 if not isinstance(i.get("port"), int):
371 LOG.info("Field 'port' is no int CP: {}".format(i))
372 else:
373 ports.append(i.get("port")) # collect all ports
374 if i.get("ports"): # list with multiple ports
375 if not isinstance(i.get("ports"), list):
376 LOG.info("Field 'port' is no list CP: {}".format(i))
377 else:
378 for p in i.get("ports"):
379 if not isinstance(p, int):
380 # do some parsing
381 try:
382 if "/udp" in p:
383 p = tuple(p.split("/"))
384 else:
385 p = int(p)
386 ports.append(p) # collect all ports
387 except BaseException as ex:
388 LOG.error(
389 "Could not parse ports list: {}".format(p))
390 LOG.error(ex)
391 else:
392 ports.append(p) # collect all ports
393 if i.get("publish"):
394 if not isinstance(i.get("publish"), dict):
395 LOG.info("Field 'publish' is no dict CP: {}".format(i))
396 else:
397 port_bindings.update(i.get("publish"))
398 # update port mapping for cases where service is deployed > 1 times
399 port_bindings = update_port_mapping_multi_instance(ssiid, port_bindings)
400 if len(ports) > 0:
401 LOG.info("{} exposes ports: {}".format(vnf_container_instance_name, ports))
402 if len(port_bindings) > 0:
403 LOG.info("{} publishes ports: {}".format(vnf_container_instance_name, port_bindings))
404
405 # 5. collect additional information to start container
406 volumes = list()
407 cenv = dict()
408 # 5.1 inject descriptor based start/stop commands into env (overwrite)
409 VNFD_CMD_START = u.get("vm_cmd_start")
410 VNFD_CMD_STOP = u.get("vm_cmd_stop")
411 if VNFD_CMD_START and not VNFD_CMD_START == "None":
412 LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_START) +
413 " Overwriting SON_EMU_CMD.")
414 cenv["SON_EMU_CMD"] = VNFD_CMD_START
415 if VNFD_CMD_STOP and not VNFD_CMD_STOP == "None":
416 LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_STOP) +
417 " Overwriting SON_EMU_CMD_STOP.")
418 cenv["SON_EMU_CMD_STOP"] = VNFD_CMD_STOP
419
420 # 5.2 inject per instance configurations based on envs
421 conf_envs = self._load_instance_conf_envs(vnf_container_instance_name)
422 cenv.update(conf_envs)
423
424 # 5.3 handle optional ipc_mode setting
425 ipc_mode = u.get("ipc_mode", None)
426 # 5.4 handle optional devices setting
427 devices = u.get("devices", [])
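# Illustrative V/CDU snippet for the two optional keywords handled above
# (values made up):
#   ipc_mode: "host"                              # share the host IPC namespace
#   devices: ["/dev/ttyUSB0:/dev/ttyUSB0:rwm"]    # host device mappings
# Both values are handed unchanged to Containernet's startCompute() below.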
428
429 # 6. Start the container
430 LOG.info("Starting %r as %r in DC %r" %
431 (vnf_name, vnf_container_instance_name, target_dc))
432 LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
433 # start the container
434 vnfi = target_dc.startCompute(
435 vnf_container_instance_name,
436 network=intfs,
437 image=docker_image_name,
438 cpu_quota=cpu_quota,
439 cpu_period=cpu_period,
440 cpuset_cpus=cpu_list,
441 mem_limit=mem_limit,
442 volumes=volumes,
443 properties=cenv, # environment
444 ports=ports,
445 port_bindings=port_bindings,
446 # only publish if explicitly stated in descriptor
447 publish_all_ports=False,
448 ipc_mode=ipc_mode,
449 devices=devices,
450 type=kwargs.get('type', 'docker'))
451 # add vnfd reference to vnfi
452 vnfi.vnfd = vnfd
453 # add container name
454 vnfi.vnf_container_name = vnf_container_name
455 vnfi.vnf_container_instance_name = vnf_container_instance_name
456 vnfi.ssiid = ssiid
457 # store vnfi
458 vnfis.append(vnfi)
459 return vnfis
460
461 def _stop_vnfi(self, vnfi):
462 """
463 Stop a VNF instance.
464 :param vnfi: vnf instance to be stopped
465 """
466 # Find the correct datacenter
467 status = vnfi.getStatus()
468 dc = vnfi.datacenter
469 # stop the vnfi
470 LOG.info("Stopping the vnf instance contained in %r in DC %r" %
471 (status["name"], dc))
472 dc.stopCompute(status["name"])
473
474 def _get_vnf_instance(self, instance_uuid, vnf_id):
475 """
476 Returns VNFI object for a given "vnf_id" or "vnf_container_name" taken from an NSD.
477 :return: single object
478 """
479 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
480 if str(vnfi.name) == str(vnf_id):
481 return vnfi
482 LOG.warning("No container with name: {0} found.".format(vnf_id))
483 return None
484
485 def _get_vnf_instance_units(self, instance_uuid, vnf_id):
486 """
487 Returns a list of VNFI objects (all deployment units) for a given
488 "vnf_id" taken from an NSD.
489 :return: list
490 """
491 if vnf_id is None:
492 return None
493 r = list()
494 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
495 if vnf_id in vnfi.name:
496 r.append(vnfi)
497 if len(r) > 0:
498 LOG.debug("Found units: {} for vnf_id: {}"
499 .format([i.name for i in r], vnf_id))
500 return r
501 LOG.warning("No container(s) with name: {0} found.".format(vnf_id))
502 return None
503
504 @staticmethod
505 def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
506 """
507 Reconfigure the network configuration of a specific interface
508 of a running container.
509 :param vnfi: container instance
510 :param if_name: interface name
511 :param net_str: network configuration string, e.g., 1.2.3.4/24
512 :return:
513 """
514 # assign new ip address
515 if net_str is not None:
516 intf = vnfi.intf(intf=if_name)
517 if intf is not None:
518 intf.setIP(net_str)
519 LOG.debug("Reconfigured network of %s:%s to %r" %
520 (vnfi.name, if_name, net_str))
521 else:
522 LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
523 vnfi.name, if_name))
524
525 if new_name is not None:
526 vnfi.cmd('ip link set', if_name, 'down')
527 vnfi.cmd('ip link set', if_name, 'name', new_name)
528 vnfi.cmd('ip link set', new_name, 'up')
529 LOG.debug("Reconfigured interface name of %s:%s to %s" %
530 (vnfi.name, if_name, new_name))
531
532 def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
533 for vnfi in vnfi_list:
534 config = vnfi.dcinfo.get("Config", dict())
535 env = config.get("Env", list())
536 for env_var in env:
537 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
538 if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
539 LOG.info("Executing script in '{}': {}={}"
540 .format(vnfi.name, var, cmd))
541 # execute command in new thread to ensure that GK is not
542 # blocked by VNF
543 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
544 t.daemon = True
545 t.start()
546 break # only execute one command
547
548 def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
549 for vnfi in vnfi_list:
550 config = vnfi.dcinfo.get("Config", dict())
551 env = config.get("Env", list())
552 for env_var in env:
553 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
554 if var == "SON_EMU_CMD_STOP" or var == "VIM_EMU_CMD_STOP":
555 LOG.info("Executing script in '{}': {}={}"
556 .format(vnfi.name, var, cmd))
557 # execute command in new thread to ensure that GK is not
558 # blocked by VNF
559 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
560 t.daemon = True
561 t.start()
562 break # only execute one command
563
564 def _load_instance_conf_envs(self, cname):
565 """
566 Try to load an instance-specific env file. If not found,
567 just return an empty dict.
568 """
569 if PER_INSTANCE_ENV_CONFIGURATION_FOLDER is None:
570 return dict()
571 try:
572 path = os.path.expanduser(PER_INSTANCE_ENV_CONFIGURATION_FOLDER)
573 path = os.path.join(path, "{}.env.yml".format(cname))
574 res = load_yaml(path)
575 LOG.info("Loaded instance-specific env file for '{}': {}"
576 .format(cname, res))
577 return res
578 except BaseException as ex:
579 LOG.info("No instance-specific env file found for: {}"
580 .format(cname))
581 del ex
582 return dict()
583
584 def _unpack_service_package(self):
585 """
586 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
587 """
588 LOG.info("Unzipping: %r" % self.package_file_path)
589 with zipfile.ZipFile(self.package_file_path, "r") as z:
590 z.extractall(self.package_content_path)
591
592 def _load_package_descriptor(self):
593 """
594 Load the main package descriptor YAML and keep it as dict.
595 :return:
596 """
597 self.manifest = load_yaml(
598 os.path.join(
599 self.package_content_path, "TOSCA-Metadata/NAPD.yaml"))
600
601 def _load_nsd(self):
602 """
603 Load the entry NSD YAML and keep it as dict.
604 :return:
605 """
606 if "package_content" in self.manifest:
607 nsd_path = None
608 for f in self.manifest.get("package_content"):
609 if f.get("content-type") == "application/vnd.5gtango.nsd":
610 nsd_path = os.path.join(
611 self.package_content_path,
612 make_relative_path(f.get("source")))
613 break # always use the first NSD for now
614 if nsd_path is None:
615 raise OnBoardingException("No NSD with type 'application/vnd.5gtango.nsd' found.")
616 self.nsd = load_yaml(nsd_path)
617 GK.net.deployed_nsds.append(self.nsd) # TODO this seems strange (remove?)
618 LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
619 else:
620 raise OnBoardingException(
621 "No 'package_content' section in package manifest:\n{}"
622 .format(self.manifest))
623
624 def _load_vnfd(self):
625 """
626 Load all VNFD YAML files referenced in the package manifest and keep them in a dict.
627 :return:
628 """
629 # first make a list of all the vnfds in the package
630 vnfd_set = dict()
631 if "package_content" in self.manifest:
632 for pc in self.manifest.get("package_content"):
633 if pc.get(
634 "content-type") == "application/vnd.5gtango.vnfd":
635 vnfd_path = os.path.join(
636 self.package_content_path,
637 make_relative_path(pc.get("source")))
638 vnfd = load_yaml(vnfd_path)
639 vnfd_set[vnfd.get("name")] = vnfd
640 if len(vnfd_set) < 1:
641 raise OnBoardingException("No VNFDs found.")
642 # then link each vnf_id in the nsd to its vnfd
643 for v in self.nsd.get("network_functions"):
644 if v.get("vnf_name") in vnfd_set:
645 self.vnfds[v.get("vnf_id")] = vnfd_set[v.get("vnf_name")]
646 LOG.debug("Loaded VNFD: {0} id: {1}"
647 .format(v.get("vnf_name"), v.get("vnf_id")))
648
649 def _connect_elines(self, eline_fwd_links, instance_uuid, subnets):
650 """
651 Connect all E-LINE links in the NSD
652 Attention: This method DOES NOT support multi V/CDU VNFs!
653 :param eline_fwd_links: list of E-LINE links in the NSD
654 :param: instance_uuid of the service
655 :param: subnets list of subnets to be used
656 :return:
657 """
658 # cookie is used as identifier for the flowrules installed by the dummygatekeeper
659 # eg. different services get a unique cookie for their flowrules
660 cookie = 1
661 for link in eline_fwd_links:
662 LOG.info("Found E-Line: {}".format(link))
663 src_id, src_if_name = parse_interface(
664 link["connection_points_reference"][0])
665 dst_id, dst_if_name = parse_interface(
666 link["connection_points_reference"][1])
667 LOG.info("Searching C/VDU for E-Line: src={}, src_if={}, dst={}, dst_if={}"
668 .format(src_id, src_if_name, dst_id, dst_if_name))
669 # handle C/VDUs (ugly hack, only one V/CDU per VNF for now)
670 src_units = self._get_vnf_instance_units(instance_uuid, src_id)
671 dst_units = self._get_vnf_instance_units(instance_uuid, dst_id)
672 if src_units is None or dst_units is None:
673 LOG.info("No VNF-VNF link. Skipping: src={}, src_if={}, dst={}, dst_if={}"
674 .format(src_id, src_if_name, dst_id, dst_if_name))
675 return
676 # we only support VNFs with one V/CDU right now
677 if len(src_units) != 1 or len(dst_units) != 1:
678 raise BaseException("LLCM does not support E-LINES for multi V/CDU VNFs.")
679 # get the full name from that C/VDU and use it as src_id and dst_id
680 src_id = src_units[0].name
681 dst_id = dst_units[0].name
682 # from here we have all info we need
683 LOG.info("Creating E-Line for C/VDU: src={}, src_if={}, dst={}, dst_if={}"
684 .format(src_id, src_if_name, dst_id, dst_if_name))
685 # get involved vnfis
686 src_vnfi = src_units[0]
687 dst_vnfi = dst_units[0]
688 # proceed with chaining setup
689 setChaining = False
690 if src_vnfi is not None and dst_vnfi is not None:
691 setChaining = True
692 # re-configure the VNFs IP assignment and ensure that a new
693 # subnet is used for each E-Link
694 eline_net = subnets.pop(0)
695 ip1 = "{0}/{1}".format(str(eline_net[1]),
696 eline_net.prefixlen)
697 ip2 = "{0}/{1}".format(str(eline_net[2]),
698 eline_net.prefixlen)
699 # check if VNFs have fixed IPs (ip/address field in VNFDs)
700 if (self._get_vnfd_cp_from_vnfi(
701 src_vnfi, src_if_name).get("ip") is None and
702 self._get_vnfd_cp_from_vnfi(
703 src_vnfi, src_if_name).get("address") is None):
704 self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
705 # check if VNFs have fixed IPs (ip field in VNFDs)
706 if (self._get_vnfd_cp_from_vnfi(
707 dst_vnfi, dst_if_name).get("ip") is None and
708 self._get_vnfd_cp_from_vnfi(
709 dst_vnfi, dst_if_name).get("address") is None):
710 self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
711 # set the chaining
712 if setChaining:
713 GK.net.setChain(
714 src_id, dst_id,
715 vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
716 bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
717
718 def _get_vnfd_cp_from_vnfi(self, vnfi, ifname):
719 """
720 Gets the connection point data structure from the VNFD
721 of the given VNFI using ifname.
722 """
723 if vnfi.vnfd is None:
724 return {}
725 cps = vnfi.vnfd.get("connection_points")
726 for cp in cps:
727 if cp.get("id") == ifname:
728 return cp
729
730 def _connect_elans(self, elan_fwd_links, instance_uuid, subnets):
731 """
732 Connect all E-LAN/E-Tree links in the NSD
733 This method supports multi-V/CDU VNFs if the connection
734 point names of the DUs are the same as the ones in the NSD.
735 :param elan_fwd_links: list of E-LAN links in the NSD
736 :param: instance_uuid of the service
737 :param: subnets list of subnets to be used
738 :return:
739 """
740 for link in elan_fwd_links:
741 # a new E-LAN/E-Tree
742 elan_vnf_list = []
743 lan_net = subnets.pop(0)
744 lan_hosts = list(lan_net.hosts())
745
746 # generate a LAN IP address for all interfaces (of all involved V/CDUs)
747 for intf_ref in link["connection_points_reference"]:
748 vnf_id, intf_name = parse_interface(intf_ref)
749 if vnf_id is None:
750 continue # skip references to NS connection points
751 units = self._get_vnf_instance_units(instance_uuid, vnf_id)
752 if units is None:
753 continue # skip if no deployment unit is present
754 # iterate over all involved deployment units
755 for uvnfi in units:
756 # Attention: we apply a simplification for multi DU VNFs here:
757 # the connection points of all involved DUs have to have the same
758 # name as the connection points of the surrounding VNF to be mapped.
759 # This is because we do not consider links specified in the VNFDs
760 container_name = uvnfi.name
761
762 ip_address = None
763 # get the interface of the unit
764 intf = self._get_vnfd_cp_from_vnfi(uvnfi, intf_name)
765 # check if there is a manually assigned address
766 if intf is not None:
767 if intf.get("address"):
768 ip_address = intf.get("address")
769 if ip_address is None:
770 # automatically assign an IP from our pool
771 ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
772 lan_net.prefixlen)
773 LOG.debug(
774 "Setting up E-LAN/E-Tree interface. (%s:%s) -> %s" % (
775 container_name, intf_name, ip_address))
776 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
777 # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
778 # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is
779 # necessary.
780 vnfi = self._get_vnf_instance(instance_uuid, container_name)
781 if vnfi is not None:
782 self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
783 # add this vnf and interface to the E-LAN for tagging
784 elan_vnf_list.append(
785 {'name': container_name, 'interface': intf_name})
786 # install the VLAN tags for this E-LAN
787 GK.net.setLAN(elan_vnf_list)
788
789 def _load_docker_files(self):
790 """
791 Get all paths to Dockerfiles from VNFDs and store them in dict.
792 :return:
793 """
794 for vnf_id, v in self.vnfds.iteritems():
795 for vu in v.get("virtual_deployment_units", []):
796 vnf_container_name = get_container_name(vnf_id, vu.get("id"))
797 if vu.get("vm_image_format") == "docker":
798 vm_image = vu.get("vm_image")
799 docker_path = os.path.join(
800 self.package_content_path,
801 make_relative_path(vm_image))
802 self.local_docker_files[vnf_container_name] = docker_path
803 LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
804 for cu in v.get("cloudnative_deployment_units", []):
805 vnf_container_name = get_container_name(vnf_id, cu.get("id"))
806 image = cu.get("image")
807 docker_path = os.path.join(
808 self.package_content_path,
809 make_relative_path(image))
810 self.local_docker_files[vnf_container_name] = docker_path
811 LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
812
813 def _load_docker_urls(self):
814 """
815 Get all URLs to pre-built Docker images in some repository.
816 :return:
817 """
818 for vnf_id, v in self.vnfds.iteritems():
819 for vu in v.get("virtual_deployment_units", []):
820 vnf_container_name = get_container_name(vnf_id, vu.get("id"))
821 if vu.get("vm_image_format") == "docker":
822 url = vu.get("vm_image")
823 if url is not None:
824 url = url.replace("http://", "")
825 self.remote_docker_image_urls[vnf_container_name] = url
826 LOG.debug("Found Docker image URL (%r): %r" %
827 (vnf_container_name,
828 self.remote_docker_image_urls[vnf_container_name]))
829 for cu in v.get("cloudnative_deployment_units", []):
830 vnf_container_name = get_container_name(vnf_id, cu.get("id"))
831 url = cu.get("image")
832 if url is not None:
833 url = url.replace("http://", "")
834 self.remote_docker_image_urls[vnf_container_name] = url
835 LOG.debug("Found Docker image URL (%r): %r" %
836 (vnf_container_name,
837 self.remote_docker_image_urls[vnf_container_name]))
838
839 def _build_images_from_dockerfiles(self):
840 """
841 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
842 """
843 if GK_STANDALONE_MODE:
844 return # do not build anything in standalone mode
845 dc = DockerClient()
846 LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
847 self.local_docker_files))
848 for k, v in self.local_docker_files.iteritems():
849 for line in dc.build(path=v.replace(
850 "Dockerfile", ""), tag=k, rm=False, nocache=False):
851 LOG.debug("DOCKER BUILD: %s" % line)
852 LOG.info("Docker image created: %s" % k)
853
854 def _pull_predefined_dockerimages(self):
855 """
856 If the package contains URLs to pre-built Docker images, we download them with this method.
857 """
858 dc = DockerClient()
859 for url in self.remote_docker_image_urls.itervalues():
860 # only pull if not present (speedup for development)
861 if not FORCE_PULL:
862 if len(dc.images.list(name=url)) > 0:
863 LOG.debug("Image %r present. Skipping pull." % url)
864 continue
865 LOG.info("Pulling image: %r" % url)
866 # this seems to fail with latest docker api version 2.0.2
867 # dc.images.pull(url,
868 # insecure_registry=True)
869 # using docker cli instead
870 cmd = ["docker",
871 "pull",
872 url,
873 ]
874 Popen(cmd).wait()
875
876 def _check_docker_image_exists(self, image_name):
877 """
878 Query the docker service and check if the given image exists
879 :param image_name: name of the docker image
880 :return:
881 """
882 return len(DockerClient().images.list(name=image_name)) > 0
883
884 def _place(self, vnfd, vnfid, vdu, ssiid):
885 """
886 Do placement. Return the DC object in which the given
887 VDU should be placed.
888 """
889 assert(len(self.vnfds) > 0)
890 assert(len(GK.dcs) > 0)
891 if PLACEMENT_ALGORITHM_OBJ is None:
892 LOG.error("No placement algorithm given. Using FirstDcPlacement!")
893 p = FirstDcPlacement()
894 else:
895 p = PLACEMENT_ALGORITHM_OBJ
896 cname = get_container_name(vnfid, vdu.get("id"), ssiid)
897 rdc = p.place(GK.dcs, vnfd, vnfid, vdu, ssiid, cname)
898 LOG.info("Placement: '{}' --> '{}'".format(cname, rdc))
899 return rdc
900
901 def _calculate_cpu_cfs_values(self, cpu_time_percentage):
902 """
903 Calculate cpu period and quota for CFS
904 :param cpu_time_percentage: percentage of overall CPU to be used
905 :return: cpu_period, cpu_quota
906 """
907 if cpu_time_percentage is None:
908 return -1, -1
909 if cpu_time_percentage < 0:
910 return -1, -1
911 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
912 # Attention: minimum cpu_quota is 1000 microseconds (1 ms)
913 cpu_period = 1000000 # let's use a fixed period of 1000000 microseconds for now
914 LOG.debug("cpu_period is %r, cpu_percentage is %r" %
915 (cpu_period, cpu_time_percentage))
916 # calculate the fraction of cpu time for this container
917 cpu_quota = cpu_period * cpu_time_percentage
918 # ATTENTION: must be >= 1000 to avoid an invalid argument system
919 # error ... no idea why
920 if cpu_quota < 1000:
921 LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
922 cpu_quota = 1000
923 LOG.warning("Increased CPU quota to avoid system error.")
924 LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
925 (cpu_period, cpu_quota))
926 return int(cpu_period), int(cpu_quota)
927
928
929 """
930 Some (simple) placement algorithms
931 """
932
933
934 class FirstDcPlacement(object):
935 """
936 Placement: Always use one and the same data center from the GK.dcs dict.
937 """
938
939 def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
940 return list(dcs.itervalues())[0]
941
942
943 class RoundRobinDcPlacement(object):
944 """
945 Placement: Distribute VNFs across all available DCs in a round robin fashion.
946 """
947
948 def __init__(self):
949 self.count = 0
950
951 def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
952 dcs_list = list(dcs.itervalues())
953 rdc = dcs_list[self.count % len(dcs_list)]
954 self.count += 1 # inc. count to use next DC
955 return rdc
956
957
958 class StaticConfigPlacement(object):
959 """
960 Placement: Fixed assignment based on config file.
961 """
962
963 def __init__(self, path=None):
964 if path is None:
965 path = "static_placement.yml"
966 path = os.path.expanduser(path)
967 self.static_placement = dict()
968 try:
969 self.static_placement = load_yaml(path)
970 except BaseException as ex:
971 LOG.error(ex)
972 LOG.error("Couldn't load placement from {}"
973 .format(path))
974 LOG.info("Loaded static placement: {}"
975 .format(self.static_placement))
976
977 def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
978 # check for container name entry
979 if cname not in self.static_placement:
980 LOG.error("Coudn't find {} in placement".format(cname))
981 LOG.error("Using first DC as fallback!")
982 return list(dcs.itervalues())[0]
983 # lookup
984 candidate_dc = self.static_placement.get(cname)
985 # check if the DC exists
986 if candidate_dc not in dcs:
987 LOG.error("Coudn't find DC {}".format(candidate_dc))
988 LOG.error("Using first DC as fallback!")
989 return list(dcs.itervalues())[0]
990 # return correct DC
991 return dcs.get(candidate_dc)
992
993
994 """
995 Resource definitions and API endpoints
996 """
997
998
999 class Packages(fr.Resource):
1000
1001 def post(self):
1002 """
1003 Upload a *.son service package to the dummy gatekeeper.
1004
1005 We expect a request with a *.son file and store it in UPLOAD_FOLDER.
1006 :return: UUID
1007 """
1008 try:
1009 # get file contents
1010 LOG.info("POST /packages called")
1011 # let's search for the package in the request
1012 is_file_object = False # make API more robust: file can be in data or in files field
1013 if "package" in request.files:
1014 son_file = request.files["package"]
1015 is_file_object = True
1016 elif len(request.data) > 0:
1017 son_file = request.data
1018 else:
1019 return {"service_uuid": None, "size": 0, "sha1": None,
1020 "error": "upload failed. file not found."}, 500
1021 # generate a uuid to reference this package
1022 service_uuid = str(uuid.uuid4())
1023 file_hash = hashlib.sha1(str(son_file)).hexdigest()
1024 # ensure that upload folder exists
1025 ensure_dir(UPLOAD_FOLDER)
1026 upload_path = os.path.join(UPLOAD_FOLDER, "%s.tgo" % service_uuid)
1027 # store *.son file to disk
1028 if is_file_object:
1029 son_file.save(upload_path)
1030 else:
1031 with open(upload_path, 'wb') as f:
1032 f.write(son_file)
1033 size = os.path.getsize(upload_path)
1034
1035 # first stop and delete any other running services
1036 if AUTO_DELETE:
1037 service_list = copy.copy(GK.services)
1038 for service_uuid in service_list:
1039 instances_list = copy.copy(
1040 GK.services[service_uuid].instances)
1041 for instance_uuid in instances_list:
1042 # valid service and instance UUID, stop service
1043 GK.services.get(service_uuid).stop_service(
1044 instance_uuid)
1045 LOG.info("service instance with uuid %r stopped." %
1046 instance_uuid)
1047
1048 # create a service object and register it
1049 s = Service(service_uuid, file_hash, upload_path)
1050 GK.register_service_package(service_uuid, s)
1051
1052 # automatically deploy the service
1053 if AUTO_DEPLOY:
1054 # ok, we have a service uuid, let's start the service
1055 reset_subnets()
1056 GK.services.get(service_uuid).start_service()
1057
1058 # generate the JSON result
1059 return {"service_uuid": service_uuid, "size": size,
1060 "sha1": file_hash, "error": None}, 201
1061 except BaseException:
1062 LOG.exception("Service package upload failed:")
1063 return {"service_uuid": None, "size": 0,
1064 "sha1": None, "error": "upload failed"}, 500
1065
1066 def get(self):
1067 """
1068 Return a list of package descriptor headers.
1069 Fakes the behavior of 5GTANGO's GK API to be
1070 compatible with tng-cli.
1071 :return: list
1072 """
1073 LOG.info("GET /packages")
1074 result = list()
1075 for suuid, sobj in GK.services.iteritems():
1076 pkg = dict()
1077 pkg["pd"] = dict()
1078 pkg["uuid"] = suuid
1079 pkg["pd"]["name"] = sobj.manifest.get("name")
1080 pkg["pd"]["version"] = sobj.manifest.get("version")
1081 pkg["created_at"] = sobj.created_at
1082 result.append(pkg)
1083 return result, 200, CORS_HEADER
1084
1085
1086 class Services(fr.Resource):
1087
1088 def get(self):
1089 """
1090 Return a list of services.
1091 Fakes the behavior of 5GTANGO's GK API to be
1092 compatible with tng-cli.
1093 :return: list
1094 """
1095 LOG.info("GET /services")
1096 result = list()
1097 for suuid, sobj in GK.services.iteritems():
1098 service = dict()
1099 service["nsd"] = dict()
1100 service["uuid"] = suuid
1101 service["nsd"]["name"] = sobj.nsd.get("name")
1102 service["nsd"]["version"] = sobj.nsd.get("version")
1103 service["created_at"] = sobj.created_at
1104 result.append(service)
1105 return result, 200, CORS_HEADER
1106
1107
1108 class Instantiations(fr.Resource):
1109
1110 def post(self):
1111 """
1112 Instantiate a service specified by its UUID.
1113 Will return a new UUID to identify the running service instance.
1114 :return: UUID
1115 """
1116 LOG.info("POST /instantiations (or /requests) called")
1117 # try to extract the service uuid from the request
1118 json_data = request.get_json(force=True)
1119 service_uuid = json_data.get("service_uuid")
1120 service_name = json_data.get("service_name")
1121 if service_name is None:
1122 # let's be fuzzy
1123 service_name = service_uuid
1124 # first try to find by service_name
1125 if service_name is not None:
1126 for s_uuid, s in GK.services.iteritems():
1127 if s.manifest.get("name") == service_name:
1128 LOG.info("Searched for: {}. Found service w. UUID: {}"
1129 .format(service_name, s_uuid))
1130 service_uuid = s_uuid
1131 # let's be a bit fuzzy here to make testing easier
1132 if (service_uuid is None or service_uuid ==
1133 "latest") and len(GK.services) > 0:
1134 # if we don't get a service uuid, we simply start the first service
1135 # in the list
1136 service_uuid = list(GK.services.iterkeys())[0]
1137 if service_uuid in GK.services:
1138 # ok, we have a service uuid, let's start the service
1139 service_instance_uuid = GK.services.get(
1140 service_uuid).start_service()
1141 # multiple ID fields to be compatible with tng-bench and tng-cli
1142 return ({"service_instance_uuid": service_instance_uuid,
1143 "id": service_instance_uuid}, 201)
1144 LOG.error("Service not found: {}/{}".format(service_uuid, service_name))
1145 return "Service not found", 404
1146
1147 def get(self):
1148 """
1149 Returns a list of UUIDs containing all running services.
1150 :return: dict / list
1151 """
1152 LOG.debug("GET /instantiations or /api/v3/records/services")
1153 # return {"service_instantiations_list": [
1154 # list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
1155 result = list()
1156 for suuid, sobj in GK.services.iteritems():
1157 for iuuid, iobj in sobj.instances.iteritems():
1158 inst = dict()
1159 inst["uuid"] = iobj.get("uuid")
1160 inst["instance_name"] = "{}-inst.{}".format(
1161 iobj.get("name"), iobj.get("ssiid"))
1162 inst["status"] = "running"
1163 inst["created_at"] = iobj.get("created_at")
1164 result.append(inst)
1165 return result, 200, CORS_HEADER
1166
1167 def delete(self):
1168 """
1169 Stops a running service specified by its service and instance UUID.
1170 """
1171 # try to extract the service and instance UUID from the request
1172 json_data = request.get_json(force=True)
1173 service_uuid_input = json_data.get("service_uuid")
1174 instance_uuid_input = json_data.get("service_instance_uuid")
1175 if len(GK.services) < 1:
1176 return "No service on-boarded.", 404
1177 # try to be fuzzy
1178 if service_uuid_input is None:
1179 # if we don't get a service uuid we stop all services
1180 service_uuid_list = list(GK.services.iterkeys())
1181 LOG.info("No service_uuid given, stopping all.")
1182 else:
1183 service_uuid_list = [service_uuid_input]
1184 # for each service
1185 for service_uuid in service_uuid_list:
1186 if instance_uuid_input is None:
1187 instance_uuid_list = list(
1188 GK.services[service_uuid].instances.iterkeys())
1189 else:
1190 instance_uuid_list = [instance_uuid_input]
1191 # for all service instances
1192 for instance_uuid in instance_uuid_list:
1193 if (service_uuid in GK.services and
1194 instance_uuid in GK.services[service_uuid].instances):
1195 # valid service and instance UUID, stop service
1196 GK.services.get(service_uuid).stop_service(instance_uuid)
1197 LOG.info("Service instance with uuid %r stopped." % instance_uuid)
1198 return "Service(s) stopped.", 200
1199
1200
1201 class Exit(fr.Resource):
1202
1203 def put(self):
1204 """
1205 Stop the running Containernet instance regardless of data transmitted
1206 """
1207 list(GK.dcs.values())[0].net.stop()
1208
1209
1210 def generate_subnets(prefix, base, subnet_size=50, mask=24):
1211 # Generate a list of ipaddress in subnets
1212 r = list()
1213 for net in range(base, base + subnet_size):
1214 subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
1215 r.append(ipaddress.ip_network(unicode(subnet)))
1216 return r
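# Illustrative call: generate_subnets('10.0', 0, subnet_size=2, mask=24)
# returns [IPv4Network(u'10.0.0.0/24'), IPv4Network(u'10.0.1.0/24')].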
1217
1218
1219 def reset_subnets():
1220 global ELINE_SUBNETS
1221 global ELAN_SUBNETS
1222 # private subnet definitions for the generated interfaces
1223 # 30.0.xxx.0/24
1224 ELAN_SUBNETS = generate_subnets('30.0', 0, subnet_size=50, mask=24)
1225 # 20.0.xxx.0/24
1226 ELINE_SUBNETS = generate_subnets('20.0', 0, subnet_size=50, mask=24)
1227
1228
1229 def initialize_GK():
1230 global GK
1231 GK = Gatekeeper()
1232
1233
1234 # create a single, global GK object
1235 GK = None
1236 initialize_GK()
1237 # setup Flask
1238 http_server = None
1239 app = Flask(__name__)
1240 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
1241 api = fr.Api(app)
1242 # define endpoints
1243 api.add_resource(Packages, '/packages', '/api/v2/packages', '/api/v3/packages')
1244 api.add_resource(Services, '/services', '/api/v2/services', '/api/v3/services')
1245 api.add_resource(Instantiations, '/instantiations',
1246 '/api/v2/instantiations', '/api/v2/requests', '/api/v3/requests',
1247 '/api/v3/records/services')
1248 api.add_resource(Exit, '/emulator/exit')
1249
1250
1251 def start_rest_api(host, port, datacenters=dict()):
1252 global http_server
1253 GK.dcs = datacenters
1254 GK.net = get_dc_network()
1255 # start the Flask server (not the best performance but ok for our use case)
1256 # app.run(host=host,
1257 # port=port,
1258 # debug=True,
1259 # use_reloader=False # this is needed to run Flask in a non-main thread
1260 # )
1261 http_server = WSGIServer((host, port), app, log=open("/dev/null", "w"))
1262 http_server.serve_forever()
1263
1264
1265 def stop_rest_api():
1266 if http_server:
1267 http_server.close()
1268
1269
1270 def ensure_dir(name):
1271 if not os.path.exists(name):
1272 os.makedirs(name)
1273
1274
1275 def load_yaml(path):
1276 with open(path, "r") as f:
1277 try:
1278 r = yaml.load(f)
1279 except yaml.YAMLError as exc:
1280 LOG.exception("YAML parse error: %r" % str(exc))
1281 r = dict()
1282 return r
1283
1284
1285 def make_relative_path(path):
1286 if path.startswith("file://"):
1287 path = path.replace("file://", "", 1)
1288 if path.startswith("/"):
1289 path = path.replace("/", "", 1)
1290 return path
1291
1292
1293 def get_dc_network():
1294 """
1295 Retrieve the DCNetwork this dummy gatekeeper (GK) connects to.
1296 Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
1297 :return:
1298 """
1299 assert (len(GK.dcs) > 0)
1300 return GK.dcs.values()[0].net
1301
1302
1303 def parse_interface(interface_name):
1304 """
1305 Convert an interface name from the NSD into the corresponding vnf_id and vnf_interface names.
1306 :param interface_name:
1307 :return:
1308 """
1309 if ':' in interface_name:
1310 vnf_id, vnf_interface = interface_name.split(':')
1311 else:
1312 vnf_id = None
1313 vnf_interface = interface_name
1314 return vnf_id, vnf_interface
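# Illustrative calls: parse_interface("vnf0:input") -> ("vnf0", "input"),
# parse_interface("mgmt") -> (None, "mgmt").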
1315
1316
1317 def get_container_name(vnf_id, vdu_id, ssiid=None):
1318 if ssiid is not None:
1319 return "{}.{}.{}".format(vnf_id, vdu_id, ssiid)
1320 return "{}.{}".format(vnf_id, vdu_id)
1321
1322
1323 def get_triple_id(descr):
1324 return "{}.{}.{}".format(
1325 descr.get("vendor"), descr.get("name"), descr.get("version"))
1326
1327
1328 def update_port_mapping_multi_instance(ssiid, port_bindings):
1329 """
1330 Port_bindings are used to expose ports of the deployed containers.
1331 They would collide if we deploy multiple service instances.
1332 This function adds an offset to them which is based on the
1333 short service instance ID (SSIID) and MULTI_INSTANCE_PORT_OFFSET.
1335 """
1336 def _offset(p):
1337 return p + MULTI_INSTANCE_PORT_OFFSET * ssiid
1338
1339 port_bindings = {k: _offset(v) for k, v in port_bindings.iteritems()}
1340 return port_bindings
1341
1342
1343 if __name__ == '__main__':
1344 """
1345 Allow running the API in standalone mode.
1346 """
1347 GK_STANDALONE_MODE = True
1348 logging.getLogger("werkzeug").setLevel(logging.INFO)
1349 start_rest_api("0.0.0.0", 8000)