5GTANGO LLCM: Made APIs compatible with tng-cli
[osm/vim-emu.git] / src / emuvim / api / tango / llcm.py
1
2 # Copyright (c) 2018 SONATA-NFV, 5GTANGO and Paderborn University
3 # ALL RIGHTS RESERVED.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 # Neither the name of the SONATA-NFV, 5GTANGO, Paderborn University
18 # nor the names of its contributors may be used to endorse or promote
19 # products derived from this software without specific prior written
20 # permission.
21 #
22 # This work has been performed in the framework of the SONATA project,
23 # funded by the European Commission under Grant number 671517 through
24 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 # acknowledge the contributions of their colleagues of the SONATA
26 # partner consortium (www.sonata-nfv.eu).
27 #
28 # This work has also been performed in the framework of the 5GTANGO project,
29 # funded by the European Commission under Grant number 761493 through
30 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
31 # acknowledge the contributions of their colleagues of the 5GTANGO
32 # partner consortium (www.5gtango.eu).
33 import logging
34 import os
35 import uuid
36 import hashlib
37 import zipfile
38 import yaml
39 import threading
40 import datetime
41 from docker import DockerClient
42 from flask import Flask, request
43 import flask_restful as fr
44 from gevent.pywsgi import WSGIServer
45 from subprocess import Popen
46 import ipaddress
47 import copy
48 import time
49
50
LOG = logging.getLogger("5gtango.llcm")
LOG.setLevel(logging.INFO)


# root folder for all files handled by the LLCM
GK_STORAGE = "/tmp/vim-emu-tango-llcm/"
# uploaded *.son/*.tgo packages are stored here
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
# extracted package contents are stored here (see Service.package_content_path)
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for
# integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
FORCE_PULL = False

# flag to indicate if we use bidirectional forwarding rules in the
# automatic chaining process
BIDIRECTIONAL_CHAIN = True

# override the management interfaces in the descriptors with default
# docker0 interfaces in the containers
USE_DOCKER_MGMT = False

# automatically deploy uploaded packages (no need to execute son-access
# deploy --latest separately)
AUTO_DEPLOY = False

# and also automatically terminate any other running services
AUTO_DELETE = False

# global subnet pools for E-LAN / E-Line links (see reset_subnets())
ELAN_SUBNETS = None
ELINE_SUBNETS = None

# Time in seconds to wait for vnf stop scripts to execute fully
VNF_STOP_WAIT_TIME = 5

# If services are instantiated multiple times, the public port
# mappings need to be adapted to avoid collisions. We use this
# offset for this: NEW_PORT = (SSIID * OFFSET) + ORIGINAL_PORT
MULTI_INSTANCE_PORT_OFFSET = 1000
95
96
class OnBoardingException(BaseException):
    """
    Raised when a service package cannot be on-boarded, e.g. when the
    package manifest, NSD, or VNFDs are missing or malformed.
    NOTE(review): inherits from BaseException, so a plain
    'except Exception' will NOT catch it — confirm this is intentional.
    """
    pass
99
100
class Gatekeeper(object):
    """
    Central state holder of the LLCM: keeps track of all on-boarded
    service packages, the known data centers, and the emulator network.
    """

    def __init__(self):
        # maps service_uuid -> Service object
        self.services = dict()
        # maps name -> data center object
        self.dcs = dict()
        # reference to the emulator network (set externally)
        self.net = None
        # used to generate short names for VNFs (Mininet limitation)
        self.vnf_counter = 0
        # (re-)initialize the global E-LAN/E-Line subnet pools
        reset_subnets()
        LOG.info("Initialized 5GTANGO LLCM module.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package and on-board it.
        :param service_uuid: UUID assigned to the uploaded package
        :param service: Service object wrapping the package
        """
        self.services[service_uuid] = service
        # on-boarding unpacks the package and loads all its descriptors
        service.onboard()
121
122
123 class Service(object):
124 """
125 This class represents a NS uploaded as a *.son package to the
126 dummy gatekeeper.
127 Can have multiple running instances of this service.
128 """
129
130 def __init__(self,
131 service_uuid,
132 package_file_hash,
133 package_file_path):
134 self.uuid = service_uuid
135 self.package_file_hash = package_file_hash
136 self.package_file_path = package_file_path
137 self.package_content_path = os.path.join(
138 CATALOG_FOLDER, "services/%s" % self.uuid)
139 self.manifest = None
140 self.nsd = None
141 self.vnfds = dict()
142 self.local_docker_files = dict()
143 self.remote_docker_image_urls = dict()
144 self.instances = dict()
145 self._instance_counter = 0
146 self.created_at = str(datetime.datetime.now())
147
    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated
        (unpack package, load descriptors, prepare container images,
        reserve subnets from the global pools).
        :raises OnBoardingException: if no NSD or no VNFDs are found
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        if self.nsd is None:
            raise OnBoardingException("No NSD found.")
        if len(self.vnfds) < 1:
            raise OnBoardingException("No VNFDs found.")
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        # 4. reserve subnets: pop one subnet per link from the global pools
        # (this mutates ELINE_SUBNETS / ELAN_SUBNETS shared by all services)
        eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
        self.eline_subnets = [ELINE_SUBNETS.pop(0) for _ in eline_fwd_links]
        self.elan_subnets = [ELAN_SUBNETS.pop(0) for _ in elan_fwd_links]
        LOG.debug("Reserved subnets for service '{}': E-Line: {} / E-LAN: {}"
                  .format(self.manifest.get("name"),
                          self.eline_subnets, self.elan_subnets))
        LOG.info("On-boarded service: {}".format(self.manifest.get("name")))
178
    def start_service(self):
        """
        This methods creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return: uuid (str) of the newly created service instance
        """
        LOG.info("Starting service {} ({})"
                 .format(get_triple_id(self.nsd), self.uuid))

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build a instances dict (a bit like a NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["uuid"] = self.uuid
        # SSIID = short service instance ID (to postfix Container names)
        self.instances[instance_uuid]["ssiid"] = self._instance_counter
        self.instances[instance_uuid]["name"] = get_triple_id(self.nsd)
        self.instances[instance_uuid]["vnf_instances"] = list()
        self.instances[instance_uuid]["created_at"] = str(datetime.datetime.now())
        # increase for next instance
        self._instance_counter += 1

        # 2. compute placement of this service instance (adds DC names to
        # VNFDs)
        # self._calculate_placement(FirstDcPlacement)
        self._calculate_placement(RoundRobinDcPlacement)
        # 3. start all vnfds that we have in the service
        for vnf_id in self.vnfds:
            vnfd = self.vnfds[vnf_id]
            # attention: returns a list of started deployment units
            vnfis = self._start_vnfd(
                vnfd, vnf_id, self.instances[instance_uuid]["ssiid"])
            # add list of VNFIs to total VNFI list
            self.instances[instance_uuid]["vnf_instances"].extend(vnfis)

        # 4. Deploy E-Line, E-Tree and E-LAN links
        # Attention: Only done if "forwarding_graphs" section in NSD exists,
        # even if "forwarding_graphs" are not used directly.
        # Attention2: Do a copy of *_subnets with list() is important here!
        eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
        # 5a. deploy E-Line links
        GK.net.deployed_elines.extend(eline_fwd_links)  # bookkeeping
        self._connect_elines(eline_fwd_links, instance_uuid, list(self.eline_subnets))
        # 5b. deploy E-Tree/E-LAN links
        GK.net.deployed_elans.extend(elan_fwd_links)  # bookkeeping
        self._connect_elans(elan_fwd_links, instance_uuid, list(self.elan_subnets))

        # 6. run the emulator specific entrypoint scripts in the VNFIs of this
        # service instance
        self._trigger_emulator_start_scripts_in_vnfis(
            self.instances[instance_uuid]["vnf_instances"])
        # done
        LOG.info("Service '{}' started. Instance id: {} SSIID: {}"
                 .format(self.instances[instance_uuid]["name"],
                         instance_uuid,
                         self.instances[instance_uuid]["ssiid"]))
        return instance_uuid
238
239 def stop_service(self, instance_uuid):
240 """
241 This method stops a running service instance.
242 It iterates over all VNF instances, stopping them each
243 and removing them from their data center.
244 :param instance_uuid: the uuid of the service instance to be stopped
245 """
246 LOG.info("Stopping service %r" % self.uuid)
247 # get relevant information
248 # instance_uuid = str(self.uuid.uuid4())
249 vnf_instances = self.instances[instance_uuid]["vnf_instances"]
250 # trigger stop skripts in vnf instances and wait a few seconds for
251 # completion
252 self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
253 time.sleep(VNF_STOP_WAIT_TIME)
254 # stop all vnfs
255 for v in vnf_instances:
256 self._stop_vnfi(v)
257 # last step: remove the instance from the list of all instances
258 del self.instances[instance_uuid]
259
260 def _get_elines_and_elans(self):
261 """
262 Get the E-Line, E-LAN, E-Tree links from the NSD.
263 """
264 # Attention: Only done if ""forwarding_graphs" section in NSD exists,
265 # even if "forwarding_graphs" are not used directly.
266 eline_fwd_links = list()
267 elan_fwd_links = list()
268 if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
269 vlinks = self.nsd["virtual_links"]
270 # constituent virtual links are not checked
271 eline_fwd_links = [l for l in vlinks if (
272 l["connectivity_type"] == "E-Line")]
273 elan_fwd_links = [l for l in vlinks if (
274 l["connectivity_type"] == "E-LAN" or
275 l["connectivity_type"] == "E-Tree")] # Treat E-Tree as E-LAN
276 return eline_fwd_links, elan_fwd_links
277
278 def _get_resource_limits(self, deployment_unit):
279 """
280 Extract resource limits from deployment units.
281 """
282 # defaults
283 cpu_list = None
284 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(1.0))
285 mem_limit = None
286 # update from descriptor
287 if "resource_requirements" in deployment_unit:
288 res_req = deployment_unit.get("resource_requirements")
289 cpu_list = res_req.get("cpu").get("cpuset")
290 if cpu_list is None:
291 cpu_list = res_req.get("cpu").get("vcpus")
292 if cpu_list is not None:
293 # attention: docker expects list as string w/o spaces:
294 cpu_list = str(cpu_list).replace(" ", "").strip()
295 cpu_bw = res_req.get("cpu").get("cpu_bw")
296 if cpu_bw is None:
297 cpu_bw = 1.0
298 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
299 mem_limit = res_req.get("memory").get("size")
300 mem_unit = str(res_req.get("memory").get("size_unit", "GB"))
301 if mem_limit is not None:
302 mem_limit = int(mem_limit)
303 # to bytes
304 if "G" in mem_unit:
305 mem_limit = mem_limit * 1024 * 1024 * 1024
306 elif "M" in mem_unit:
307 mem_limit = mem_limit * 1024 * 1024
308 elif "K" in mem_unit:
309 mem_limit = mem_limit * 1024
310 return cpu_list, cpu_period, cpu_quota, mem_limit
311
312 def _start_vnfd(self, vnfd, vnf_id, ssiid, **kwargs):
313 """
314 Start a single VNFD of this service
315 :param vnfd: vnfd descriptor dict
316 :param vnf_id: unique id of this vnf in the nsd
317 :return:
318 """
319 vnfis = list()
320 # the vnf_name refers to the container image to be deployed
321 vnf_name = vnfd.get("name")
322 # combine VDUs and CDUs
323 deployment_units = (vnfd.get("virtual_deployment_units", []) +
324 vnfd.get("cloudnative_deployment_units", []))
325 # iterate over all deployment units within each VNFDs
326 for u in deployment_units:
327 # 0. vnf_container_name = vnf_id.vdu_id
328 vnf_container_name = get_container_name(vnf_id, u.get("id"))
329 vnf_container_instance_name = get_container_name(vnf_id, u.get("id"), ssiid)
330 # 1. get the name of the docker image to star
331 if vnf_container_name not in self.remote_docker_image_urls:
332 raise Exception("No image name for %r found. Abort." % vnf_container_name)
333 docker_image_name = self.remote_docker_image_urls.get(vnf_container_name)
334 # 2. select datacenter to start the VNF in
335 target_dc = vnfd.get("dc")
336 # 3. perform some checks to ensure we can start the container
337 assert(docker_image_name is not None)
338 assert(target_dc is not None)
339 if not self._check_docker_image_exists(docker_image_name):
340 raise Exception("Docker image {} not found. Abort."
341 .format(docker_image_name))
342
343 # 4. get the resource limits
344 cpu_list, cpu_period, cpu_quota, mem_limit = self._get_resource_limits(u)
345
346 # get connection points defined for the DU
347 intfs = u.get("connection_points", [])
348 # do some re-naming of fields to be compatible to containernet
349 for i in intfs:
350 if i.get("address"):
351 i["ip"] = i.get("address")
352
353 # get ports and port_bindings from the port and publish fields of CNFD
354 # see: https://github.com/containernet/containernet/wiki/Exposing-and-mapping-network-ports
355 ports = list() # Containernet naming
356 port_bindings = dict()
357 for i in intfs:
358 if i.get("port"):
359 if not isinstance(i.get("port"), int):
360 LOG.info("Field 'port' is no int CP: {}".format(i))
361 else:
362 ports.append(i.get("port"))
363 if i.get("publish"):
364 if not isinstance(i.get("publish"), dict):
365 LOG.info("Field 'publish' is no dict CP: {}".format(i))
366 else:
367 port_bindings.update(i.get("publish"))
368 # update port mapping for cases where service is deployed > 1 times
369 port_bindings = update_port_mapping_multi_instance(ssiid, port_bindings)
370 if len(ports) > 0:
371 LOG.info("{} exposes ports: {}".format(vnf_container_instance_name, ports))
372 if len(port_bindings) > 0:
373 LOG.info("{} publishes ports: {}".format(vnf_container_instance_name, port_bindings))
374
375 # 5. collect additional information to start container
376 volumes = list()
377 cenv = dict()
378 # 5.1 inject descriptor based start/stop commands into env (overwrite)
379 VNFD_CMD_START = u.get("vm_cmd_start")
380 VNFD_CMD_STOP = u.get("vm_cmd_stop")
381 if VNFD_CMD_START and not VNFD_CMD_START == "None":
382 LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_START) +
383 " Overwriting SON_EMU_CMD.")
384 cenv["SON_EMU_CMD"] = VNFD_CMD_START
385 if VNFD_CMD_STOP and not VNFD_CMD_STOP == "None":
386 LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_STOP) +
387 " Overwriting SON_EMU_CMD_STOP.")
388 cenv["SON_EMU_CMD_STOP"] = VNFD_CMD_STOP
389
390 # 6. Start the container
391 LOG.info("Starting %r as %r in DC %r" %
392 (vnf_name, vnf_container_instance_name, vnfd.get("dc")))
393 LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
394 # start the container
395 vnfi = target_dc.startCompute(
396 vnf_container_instance_name,
397 network=intfs,
398 image=docker_image_name,
399 cpu_quota=cpu_quota,
400 cpu_period=cpu_period,
401 cpuset_cpus=cpu_list,
402 mem_limit=mem_limit,
403 volumes=volumes,
404 properties=cenv, # environment
405 ports=ports,
406 port_bindings=port_bindings,
407 # only publish if explicitly stated in descriptor
408 publish_all_ports=False,
409 type=kwargs.get('type', 'docker'))
410 # add vnfd reference to vnfi
411 vnfi.vnfd = vnfd
412 # add container name
413 vnfi.vnf_container_name = vnf_container_name
414 vnfi.vnf_container_instance_name = vnf_container_instance_name
415 vnfi.ssiid = ssiid
416 # store vnfi
417 vnfis.append(vnfi)
418 return vnfis
419
420 def _stop_vnfi(self, vnfi):
421 """
422 Stop a VNF instance.
423 :param vnfi: vnf instance to be stopped
424 """
425 # Find the correct datacenter
426 status = vnfi.getStatus()
427 dc = vnfi.datacenter
428 # stop the vnfi
429 LOG.info("Stopping the vnf instance contained in %r in DC %r" %
430 (status["name"], dc))
431 dc.stopCompute(status["name"])
432
433 def _get_vnf_instance(self, instance_uuid, vnf_id):
434 """
435 Returns VNFI object for a given "vnf_id" or "vnf_container_name" taken from an NSD.
436 :return: single object
437 """
438 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
439 if str(vnfi.name) == str(vnf_id):
440 return vnfi
441 LOG.warning("No container with name: {0} found.".format(vnf_id))
442 return None
443
444 def _get_vnf_instance_units(self, instance_uuid, vnf_id):
445 """
446 Returns a list of VNFI objects (all deployment units) for a given
447 "vnf_id" taken from an NSD.
448 :return: list
449 """
450 if vnf_id is None:
451 return None
452 r = list()
453 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
454 if vnf_id in vnfi.name:
455 r.append(vnfi)
456 if len(r) > 0:
457 LOG.debug("Found units: {} for vnf_id: {}"
458 .format([i.name for i in r], vnf_id))
459 return r
460 LOG.warning("No container(s) with name: {0} found.".format(vnf_id))
461 return None
462
463 @staticmethod
464 def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
465 """
466 Reconfigure the network configuration of a specific interface
467 of a running container.
468 :param vnfi: container instance
469 :param if_name: interface name
470 :param net_str: network configuration string, e.g., 1.2.3.4/24
471 :return:
472 """
473 # assign new ip address
474 if net_str is not None:
475 intf = vnfi.intf(intf=if_name)
476 if intf is not None:
477 intf.setIP(net_str)
478 LOG.debug("Reconfigured network of %s:%s to %r" %
479 (vnfi.name, if_name, net_str))
480 else:
481 LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
482 vnfi.name, if_name))
483
484 if new_name is not None:
485 vnfi.cmd('ip link set', if_name, 'down')
486 vnfi.cmd('ip link set', if_name, 'name', new_name)
487 vnfi.cmd('ip link set', new_name, 'up')
488 LOG.debug("Reconfigured interface name of %s:%s to %s" %
489 (vnfi.name, if_name, new_name))
490
491 def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
492 for vnfi in vnfi_list:
493 config = vnfi.dcinfo.get("Config", dict())
494 env = config.get("Env", list())
495 for env_var in env:
496 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
497 if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
498 LOG.info("Executing script in '{}': {}={}"
499 .format(vnfi.name, var, cmd))
500 # execute command in new thread to ensure that GK is not
501 # blocked by VNF
502 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
503 t.daemon = True
504 t.start()
505 break # only execute one command
506
507 def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
508 for vnfi in vnfi_list:
509 config = vnfi.dcinfo.get("Config", dict())
510 env = config.get("Env", list())
511 for env_var in env:
512 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
513 if var == "SON_EMU_CMD_STOP" or var == "VIM_EMU_CMD_STOP":
514 LOG.info("Executing script in '{}': {}={}"
515 .format(vnfi.name, var, cmd))
516 # execute command in new thread to ensure that GK is not
517 # blocked by VNF
518 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
519 t.daemon = True
520 t.start()
521 break # only execute one command
522
523 def _unpack_service_package(self):
524 """
525 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
526 """
527 LOG.info("Unzipping: %r" % self.package_file_path)
528 with zipfile.ZipFile(self.package_file_path, "r") as z:
529 z.extractall(self.package_content_path)
530
531 def _load_package_descriptor(self):
532 """
533 Load the main package descriptor YAML and keep it as dict.
534 :return:
535 """
536 self.manifest = load_yaml(
537 os.path.join(
538 self.package_content_path, "TOSCA-Metadata/NAPD.yaml"))
539
540 def _load_nsd(self):
541 """
542 Load the entry NSD YAML and keep it as dict.
543 :return:
544 """
545 if "package_content" in self.manifest:
546 nsd_path = None
547 for f in self.manifest.get("package_content"):
548 if f.get("content-type") == "application/vnd.5gtango.nsd":
549 nsd_path = os.path.join(
550 self.package_content_path,
551 make_relative_path(f.get("source")))
552 break # always use the first NSD for now
553 if nsd_path is None:
554 raise OnBoardingException("No NSD with type 'application/vnd.5gtango.nsd' found.")
555 self.nsd = load_yaml(nsd_path)
556 GK.net.deployed_nsds.append(self.nsd) # TODO this seems strange (remove?)
557 LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
558 else:
559 raise OnBoardingException(
560 "No 'package_content' section in package manifest:\n{}"
561 .format(self.manifest))
562
563 def _load_vnfd(self):
564 """
565 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
566 :return:
567 """
568 # first make a list of all the vnfds in the package
569 vnfd_set = dict()
570 if "package_content" in self.manifest:
571 for pc in self.manifest.get("package_content"):
572 if pc.get(
573 "content-type") == "application/vnd.5gtango.vnfd":
574 vnfd_path = os.path.join(
575 self.package_content_path,
576 make_relative_path(pc.get("source")))
577 vnfd = load_yaml(vnfd_path)
578 vnfd_set[vnfd.get("name")] = vnfd
579 if len(vnfd_set) < 1:
580 raise OnBoardingException("No VNFDs found.")
581 # then link each vnf_id in the nsd to its vnfd
582 for v in self.nsd.get("network_functions"):
583 if v.get("vnf_name") in vnfd_set:
584 self.vnfds[v.get("vnf_id")] = vnfd_set[v.get("vnf_name")]
585 LOG.debug("Loaded VNFD: {0} id: {1}"
586 .format(v.get("vnf_name"), v.get("vnf_id")))
587
588 def _connect_elines(self, eline_fwd_links, instance_uuid, subnets):
589 """
590 Connect all E-LINE links in the NSD
591 Attention: This method DOES NOT support multi V/CDU VNFs!
592 :param eline_fwd_links: list of E-LINE links in the NSD
593 :param: instance_uuid of the service
594 :param: subnets list of subnets to be used
595 :return:
596 """
597 # cookie is used as identifier for the flowrules installed by the dummygatekeeper
598 # eg. different services get a unique cookie for their flowrules
599 cookie = 1
600 for link in eline_fwd_links:
601 LOG.info("Found E-Line: {}".format(link))
602 src_id, src_if_name = parse_interface(
603 link["connection_points_reference"][0])
604 dst_id, dst_if_name = parse_interface(
605 link["connection_points_reference"][1])
606 LOG.info("Searching C/VDU for E-Line: src={}, src_if={}, dst={}, dst_if={}"
607 .format(src_id, src_if_name, dst_id, dst_if_name))
608 # handle C/VDUs (ugly hack, only one V/CDU per VNF for now)
609 src_units = self._get_vnf_instance_units(instance_uuid, src_id)
610 dst_units = self._get_vnf_instance_units(instance_uuid, dst_id)
611 if src_units is None or dst_units is None:
612 LOG.info("No VNF-VNF link. Skipping: src={}, src_if={}, dst={}, dst_if={}"
613 .format(src_id, src_if_name, dst_id, dst_if_name))
614 return
615 # we only support VNFs with one V/CDU right now
616 if len(src_units) != 1 or len(dst_units) != 1:
617 raise BaseException("LLCM does not support E-LINES for multi V/CDU VNFs.")
618 # get the full name from that C/VDU and use it as src_id and dst_id
619 src_id = src_units[0].name
620 dst_id = dst_units[0].name
621 # from here we have all info we need
622 LOG.info("Creating E-Line for C/VDU: src={}, src_if={}, dst={}, dst_if={}"
623 .format(src_id, src_if_name, dst_id, dst_if_name))
624 # get involved vnfis
625 src_vnfi = src_units[0]
626 dst_vnfi = dst_units[0]
627 # proceed with chaining setup
628 setChaining = False
629 if src_vnfi is not None and dst_vnfi is not None:
630 setChaining = True
631 # re-configure the VNFs IP assignment and ensure that a new
632 # subnet is used for each E-Link
633 eline_net = subnets.pop(0)
634 ip1 = "{0}/{1}".format(str(eline_net[1]),
635 eline_net.prefixlen)
636 ip2 = "{0}/{1}".format(str(eline_net[2]),
637 eline_net.prefixlen)
638 # check if VNFs have fixed IPs (ip/address field in VNFDs)
639 if (self._get_vnfd_cp_from_vnfi(
640 src_vnfi, src_if_name).get("ip") is None and
641 self._get_vnfd_cp_from_vnfi(
642 src_vnfi, src_if_name).get("address") is None):
643 self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
644 # check if VNFs have fixed IPs (ip field in VNFDs)
645 if (self._get_vnfd_cp_from_vnfi(
646 dst_vnfi, dst_if_name).get("ip") is None and
647 self._get_vnfd_cp_from_vnfi(
648 dst_vnfi, dst_if_name).get("address") is None):
649 self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
650 # set the chaining
651 if setChaining:
652 GK.net.setChain(
653 src_id, dst_id,
654 vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
655 bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
656
657 def _get_vnfd_cp_from_vnfi(self, vnfi, ifname):
658 """
659 Gets the connection point data structure from the VNFD
660 of the given VNFI using ifname.
661 """
662 if vnfi.vnfd is None:
663 return {}
664 cps = vnfi.vnfd.get("connection_points")
665 for cp in cps:
666 if cp.get("id") == ifname:
667 return cp
668
    def _connect_elans(self, elan_fwd_links, instance_uuid, subnets):
        """
        Connect all E-LAN/E-Tree links in the NSD
        This method supports multi-V/CDU VNFs if the connection
        point names of the DUs are the same as the ones in the NSD.
        :param elan_fwd_links: list of E-LAN links in the NSD
        :param instance_uuid: instance_uuid of the service
        :param subnets: list of subnets to be used (one per E-LAN)
        :return:
        """
        for link in elan_fwd_links:
            # a new E-LAN/E-Tree: take the next reserved subnet and
            # pre-compute the usable host addresses in it
            elan_vnf_list = []
            lan_net = subnets.pop(0)
            lan_hosts = list(lan_net.hosts())

            # generate lan ip address for all interfaces (of all involved (V/CDUs))
            for intf in link["connection_points_reference"]:
                vnf_id, intf_name = parse_interface(intf)
                if vnf_id is None:
                    continue  # skip references to NS connection points
                units = self._get_vnf_instance_units(instance_uuid, vnf_id)
                if units is None:
                    continue  # skip if no deployment unit is present
                # iterate over all involved deployment units
                for uvnfi in units:
                    # Attention: we apply a simplification for multi DU VNFs here:
                    # the connection points of all involved DUs have to have the same
                    # name as the connection points of the surrounding VNF to be mapped.
                    # This is because we do not consider links specified in the VNFds
                    container_name = uvnfi.name
                    # each DU interface gets the next free host IP of the LAN
                    ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
                                                  lan_net.prefixlen)
                    LOG.debug(
                        "Setting up E-LAN/E-Tree interface. (%s:%s) -> %s" % (
                            container_name, intf_name, ip_address))
                    # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
                    # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                    # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is
                    # necessary.
                    vnfi = self._get_vnf_instance(instance_uuid, container_name)
                    if vnfi is not None:
                        self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                        # add this vnf and interface to the E-LAN for tagging
                        elan_vnf_list.append(
                            {'name': container_name, 'interface': intf_name})
            # install the VLAN tags for this E-LAN
            GK.net.setLAN(elan_vnf_list)
717
718 def _load_docker_files(self):
719 """
720 Get all paths to Dockerfiles from VNFDs and store them in dict.
721 :return:
722 """
723 for vnf_id, v in self.vnfds.iteritems():
724 for vu in v.get("virtual_deployment_units", []):
725 vnf_container_name = get_container_name(vnf_id, vu.get("id"))
726 if vu.get("vm_image_format") == "docker":
727 vm_image = vu.get("vm_image")
728 docker_path = os.path.join(
729 self.package_content_path,
730 make_relative_path(vm_image))
731 self.local_docker_files[vnf_container_name] = docker_path
732 LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
733 for cu in v.get("cloudnative_deployment_units", []):
734 vnf_container_name = get_container_name(vnf_id, cu.get("id"))
735 image = cu.get("image")
736 docker_path = os.path.join(
737 self.package_content_path,
738 make_relative_path(image))
739 self.local_docker_files[vnf_container_name] = docker_path
740 LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
741
742 def _load_docker_urls(self):
743 """
744 Get all URLs to pre-build docker images in some repo.
745 :return:
746 """
747 for vnf_id, v in self.vnfds.iteritems():
748 for vu in v.get("virtual_deployment_units", []):
749 vnf_container_name = get_container_name(vnf_id, vu.get("id"))
750 if vu.get("vm_image_format") == "docker":
751 url = vu.get("vm_image")
752 if url is not None:
753 url = url.replace("http://", "")
754 self.remote_docker_image_urls[vnf_container_name] = url
755 LOG.debug("Found Docker image URL (%r): %r" %
756 (vnf_container_name,
757 self.remote_docker_image_urls[vnf_container_name]))
758 for cu in v.get("cloudnative_deployment_units", []):
759 vnf_container_name = get_container_name(vnf_id, cu.get("id"))
760 url = cu.get("image")
761 if url is not None:
762 url = url.replace("http://", "")
763 self.remote_docker_image_urls[vnf_container_name] = url
764 LOG.debug("Found Docker image URL (%r): %r" %
765 (vnf_container_name,
766 self.remote_docker_image_urls[vnf_container_name]))
767
768 def _build_images_from_dockerfiles(self):
769 """
770 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
771 """
772 if GK_STANDALONE_MODE:
773 return # do not build anything in standalone mode
774 dc = DockerClient()
775 LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
776 self.local_docker_files))
777 for k, v in self.local_docker_files.iteritems():
778 for line in dc.build(path=v.replace(
779 "Dockerfile", ""), tag=k, rm=False, nocache=False):
780 LOG.debug("DOCKER BUILD: %s" % line)
781 LOG.info("Docker image created: %s" % k)
782
783 def _pull_predefined_dockerimages(self):
784 """
785 If the package contains URLs to pre-build Docker images, we download them with this method.
786 """
787 dc = DockerClient()
788 for url in self.remote_docker_image_urls.itervalues():
789 # only pull if not present (speedup for development)
790 if not FORCE_PULL:
791 if len(dc.images.list(name=url)) > 0:
792 LOG.debug("Image %r present. Skipping pull." % url)
793 continue
794 LOG.info("Pulling image: %r" % url)
795 # this seems to fail with latest docker api version 2.0.2
796 # dc.images.pull(url,
797 # insecure_registry=True)
798 # using docker cli instead
799 cmd = ["docker",
800 "pull",
801 url,
802 ]
803 Popen(cmd).wait()
804
805 def _check_docker_image_exists(self, image_name):
806 """
807 Query the docker service and check if the given image exists
808 :param image_name: name of the docker image
809 :return:
810 """
811 return len(DockerClient().images.list(name=image_name)) > 0
812
813 def _calculate_placement(self, algorithm):
814 """
815 Do placement by adding the a field "dc" to
816 each VNFD that points to one of our
817 data center objects known to the gatekeeper.
818 """
819 assert(len(self.vnfds) > 0)
820 assert(len(GK.dcs) > 0)
821 # instantiate algorithm an place
822 p = algorithm()
823 p.place(self.nsd, self.vnfds, GK.dcs)
824 LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
825 # lets print the placement result
826 for name, vnfd in self.vnfds.iteritems():
827 LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
828
829 def _calculate_cpu_cfs_values(self, cpu_time_percentage):
830 """
831 Calculate cpu period and quota for CFS
832 :param cpu_time_percentage: percentage of overall CPU to be used
833 :return: cpu_period, cpu_quota
834 """
835 if cpu_time_percentage is None:
836 return -1, -1
837 if cpu_time_percentage < 0:
838 return -1, -1
839 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
840 # Attention minimum cpu_quota is 1ms (micro)
841 cpu_period = 1000000 # lets consider a fixed period of 1000000 microseconds for now
842 LOG.debug("cpu_period is %r, cpu_percentage is %r" %
843 (cpu_period, cpu_time_percentage))
844 # calculate the fraction of cpu time for this container
845 cpu_quota = cpu_period * cpu_time_percentage
846 # ATTENTION >= 1000 to avoid a invalid argument system error ... no
847 # idea why
848 if cpu_quota < 1000:
849 LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
850 cpu_quota = 1000
851 LOG.warning("Increased CPU quota to avoid system error.")
852 LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
853 (cpu_period, cpu_quota))
854 return int(cpu_period), int(cpu_quota)
855
856
857 """
858 Some (simple) placement algorithms
859 """
860
861
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """

    def place(self, nsd, vnfds, dcs):
        """
        Annotate every VNFD with a "dc" field pointing to the first DC.
        :param nsd: network service descriptor (unused by this algorithm)
        :param vnfds: dict of VNFDs to place (mutated in place)
        :param dcs: dict of available datacenters
        """
        # .values()/.items() instead of py2-only itervalues()/iteritems();
        # hoist the loop-invariant DC lookup and avoid shadowing builtin 'id'
        first_dc = list(dcs.values())[0]
        for _, vnfd in vnfds.items():
            vnfd["dc"] = first_dc
870
871
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """

    def place(self, nsd, vnfds, dcs):
        """
        Annotate every VNFD with a "dc" field, cycling through the DCs.
        :param nsd: network service descriptor (unused by this algorithm)
        :param vnfds: dict of VNFDs to place (mutated in place)
        :param dcs: dict of available datacenters
        """
        dcs_list = list(dcs.values())
        # enumerate replaces the manual counter; .items() instead of
        # py2-only iteritems() (works on py2 and py3)
        for c, (_, vnfd) in enumerate(vnfds.items()):
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
883
884
885 """
886 Resource definitions and API endpoints
887 """
888
889
class Packages(fr.Resource):
    """
    Package upload / listing endpoint of the dummy gatekeeper
    (fakes the 5GTANGO GK packages API for tng-cli compatibility).
    """

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: UUID
        """
        try:
            # get file contents
            LOG.info("POST /packages called")
            # lets search for the package in the request
            is_file_object = False  # make API more robust: file can be in data or in files field
            if "package" in request.files:
                son_file = request.files["package"]
                is_file_object = True
            elif len(request.data) > 0:
                son_file = request.data
            else:
                return {"service_uuid": None, "size": 0, "sha1": None,
                        "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # NOTE(review): hashes str() of the upload object, not the file
            # contents — looks intentional as a cheap pseudo-checksum; verify
            file_hash = hashlib.sha1(str(son_file)).hexdigest()
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.tgo" % service_uuid)
            # store *.son file to disk
            if is_file_object:
                son_file.save(upload_path)
            else:
                with open(upload_path, 'wb') as f:
                    f.write(son_file)
            size = os.path.getsize(upload_path)

            # first stop and delete any other running services
            if AUTO_DELETE:
                # iterate over list() snapshots so GK.services may be mutated
                # while looping (replaces copy.copy, which relied on an import
                # not present in this module's header);
                # BUGFIX: use fresh loop names so the freshly generated
                # service_uuid of the new package is not clobbered
                for old_suuid in list(GK.services):
                    for old_iuuid in list(GK.services[old_suuid].instances):
                        # valid service and instance UUID, stop service
                        GK.services.get(old_suuid).stop_service(old_iuuid)
                        LOG.info("service instance with uuid %r stopped." %
                                 old_iuuid)

            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)

            # automatically deploy the service
            if AUTO_DEPLOY:
                # ok, we have a service uuid, lets start the service
                reset_subnets()
                GK.services.get(service_uuid).start_service()

            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size,
                    "sha1": file_hash, "error": None}, 201
        except BaseException:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0,
                    "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of package descriptor headers.
        Fakes the behavior of 5GTANGO's GK API to be
        compatible with tng-cli.
        :return: list
        """
        LOG.info("GET /packages")
        result = list()
        # .items() instead of py2-only .iteritems() (works on py2 and py3)
        for suuid, sobj in GK.services.items():
            pkg = dict()
            pkg["pd"] = dict()
            pkg["uuid"] = suuid
            pkg["pd"]["name"] = sobj.manifest.get("name")
            pkg["pd"]["version"] = sobj.manifest.get("version")
            pkg["created_at"] = sobj.created_at
            result.append(pkg)
        return result, 200
975
976
class Services(fr.Resource):
    """
    Read-only service listing endpoint of the dummy gatekeeper.
    """

    def get(self):
        """
        Return a list of services.
        Fakes the behavior of 5GTANGO's GK API to be
        compatible with tng-cli.
        :return: list
        """
        LOG.info("GET /services")
        result = list()
        # .items() instead of py2-only .iteritems() (works on py2 and py3)
        for suuid, sobj in GK.services.items():
            service = dict()
            service["nsd"] = dict()
            service["uuid"] = suuid
            service["nsd"]["name"] = sobj.nsd.get("name")
            service["nsd"]["version"] = sobj.nsd.get("version")
            service["created_at"] = sobj.created_at
            result.append(service)
        return result, 200
997
998
class Instantiations(fr.Resource):
    """
    Service instantiation endpoint: start, list, and stop service instances.
    """

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        LOG.info("POST /instantiations (or /requests) called")
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        service_name = json_data.get("service_name")
        if service_name is None:
            # lets be fuzzy
            service_name = service_uuid
        # first try to find by service_name
        if service_name is not None:
            # .items() instead of py2-only .iteritems()
            for s_uuid, s in GK.services.items():
                if s.manifest.get("name") == service_name:
                    LOG.info("Searched for: {}. Found service w. UUID: {}"
                             .format(service_name, s_uuid))
                    service_uuid = s_uuid
        # lets be a bit fuzzy here to make testing easier
        if (service_uuid is None or service_uuid ==
                "latest") and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service
            # in the list
            service_uuid = list(GK.services.keys())[0]
        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(
                service_uuid).start_service()
            # multiple ID fields to be compatible with tng-bench and tng-cli
            return ({"service_instance_uuid": service_instance_uuid,
                     "id": service_instance_uuid}, 201)
        LOG.error("Service not found: {}/{}".format(service_uuid, service_name))
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations or /api/v3/records/services")
        result = list()
        # .items() instead of py2-only .iteritems() (works on py2 and py3)
        for suuid, sobj in GK.services.items():
            for iuuid, iobj in sobj.instances.items():
                inst = dict()
                inst["uuid"] = iobj.get("uuid")
                inst["instance_name"] = "{}-inst.{}".format(
                    iobj.get("name"), iobj.get("ssiid"))
                inst["status"] = "running"
                inst["created_at"] = iobj.get("created_at")
                result.append(inst)
        return result, 200

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid_input = json_data.get("service_uuid")
        instance_uuid_input = json_data.get("service_instance_uuid")
        if len(GK.services) < 1:
            return "No service on-boarded.", 404
        # try to be fuzzy
        if service_uuid_input is None:
            # if we don't get a service uuid we stop all services
            service_uuid_list = list(GK.services.keys())
            LOG.info("No service_uuid given, stopping all.")
        else:
            service_uuid_list = [service_uuid_input]
        # for each service
        for service_uuid in service_uuid_list:
            if instance_uuid_input is None:
                instance_uuid_list = list(
                    GK.services[service_uuid].instances.keys())
            else:
                instance_uuid_list = [instance_uuid_input]
            # for all service instances
            for instance_uuid in instance_uuid_list:
                if (service_uuid in GK.services and
                        instance_uuid in GK.services[service_uuid].instances):
                    # valid service and instance UUID, stop service
                    GK.services.get(service_uuid).stop_service(instance_uuid)
                    LOG.info("Service instance with uuid %r stopped." % instance_uuid)
        return "Service(s) stopped.", 200
1090
1091
class Exit(fr.Resource):
    """
    Endpoint to terminate the whole emulator from the outside.
    """

    def put(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        # all DCs share one network, so stopping via the first DC suffices
        datacenters = list(GK.dcs.values())
        datacenters[0].net.stop()
1099
1100
def generate_subnets(prefix, base, subnet_size=50, mask=24):
    """
    Generate a list of consecutive IPv4 networks:
    "<prefix>.<base>.0/<mask>" ... "<prefix>.<base+subnet_size-1>.0/<mask>".
    :param prefix: first two octets, e.g. "30.0"
    :param base: starting value of the third octet
    :param subnet_size: number of subnets to generate
    :param mask: prefix length of each subnet
    :return: list of ipaddress network objects
    """
    r = list()
    for net in range(base, base + subnet_size):
        # build a u"" (text) literal directly: ipaddress requires text input,
        # and the builtin unicode() used before does not exist on Python 3
        subnet = u"{0}.{1}.0/{2}".format(prefix, net, mask)
        r.append(ipaddress.ip_network(subnet))
    return r
1108
1109
def reset_subnets():
    """
    (Re-)initialize the global subnet pools used to assign private
    addresses to generated interfaces.
    """
    global ELINE_SUBNETS
    global ELAN_SUBNETS
    # E-Line links get addresses from 20.0.xxx.0/24
    ELINE_SUBNETS = generate_subnets('20.0', 0, subnet_size=50, mask=24)
    # E-LAN links get addresses from 30.0.xxx.0/24
    ELAN_SUBNETS = generate_subnets('30.0', 0, subnet_size=50, mask=24)
1118
1119
def initialize_GK():
    """
    (Re-)create the module-level Gatekeeper singleton ``GK``.
    """
    global GK
    GK = Gatekeeper()
1123
1124
# create a single, global GK object
GK = None
initialize_GK()
# setup Flask
# http_server is set by start_rest_api() and used by stop_rest_api()
http_server = None
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
# each resource is registered under legacy, v2, and v3 paths to stay
# compatible with older tools as well as tng-cli/tng-bench
api.add_resource(Packages, '/packages', '/api/v2/packages', '/api/v3/packages')
api.add_resource(Services, '/services', '/api/v2/services', '/api/v3/services')
api.add_resource(Instantiations, '/instantiations',
                 '/api/v2/instantiations', '/api/v2/requests', '/api/v3/requests',
                 '/api/v3/records/services')
api.add_resource(Exit, '/emulator/exit')
1140
1141
def start_rest_api(host, port, datacenters=None):
    """
    Start the LLCM REST API (blocking call).
    :param host: interface to bind to
    :param port: TCP port to listen on
    :param datacenters: dict of emulator datacenters made known to the GK
        (default None -> empty dict; avoids the mutable-default-argument
        pitfall of the previous ``datacenters=dict()`` signature)
    """
    global http_server
    GK.dcs = datacenters if datacenters is not None else dict()
    GK.net = get_dc_network()
    # use gevent's WSGIServer instead of Flask's debug server so the API can
    # run in a non-main thread (not the best performance but ok for our use
    # case); discard request logging portably via os.devnull
    http_server = WSGIServer((host, port), app, log=open(os.devnull, "w"))
    # blocks until stop_rest_api() closes the server
    http_server.serve_forever()
1154
1155
def stop_rest_api():
    """
    Close the global WSGI server, if one was started.
    """
    if not http_server:
        return
    http_server.close()
1159
1160
def ensure_dir(name):
    """
    Create directory *name* (including parents) if it does not exist yet.
    EAFP: attempt the creation and tolerate "already exists", which also
    removes the race between an exists() check and makedirs().
    :param name: directory path to create
    """
    try:
        os.makedirs(name)
    except OSError:
        # re-raise unless the directory is present now (pre-existing or
        # created concurrently); other errors (e.g. permissions) propagate
        if not os.path.isdir(name):
            raise
1164
1165
def load_yaml(path):
    """
    Parse the YAML file at *path*.
    :param path: path to a YAML file
    :return: parsed content as dict/list; empty dict on parse errors
    """
    with open(path, "r") as f:
        try:
            # safe_load instead of yaml.load without a Loader: uploaded
            # package descriptors are untrusted input, and full yaml.load
            # can instantiate arbitrary Python objects
            r = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error: %r" % str(exc))
            r = dict()
        return r
1174
1175
def make_relative_path(path):
    """
    Strip an optional leading "file://" scheme and a single leading "/"
    so the result can be used as a relative path.
    :param path: path string, possibly a file:// URL or absolute path
    :return: relative path string
    """
    scheme = "file://"
    if path.startswith(scheme):
        path = path[len(scheme):]
    if path.startswith("/"):
        path = path[1:]
    return path
1182
1183
def get_dc_network():
    """
    retrieve the DCnetwork where this dummygatekeeper (GK) connects to.
    Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
    :return: the shared DCNetwork object
    """
    assert (len(GK.dcs) > 0)
    # wrap in list(): dict views are not indexable on Python 3; this also
    # matches the list(GK.dcs.values())[0] pattern used elsewhere in this file
    return list(GK.dcs.values())[0].net
1192
1193
def parse_interface(interface_name):
    """
    convert the interface name in the nsd to the according vnf_id, vnf_interface names
    :param interface_name: e.g. "vnf1:eth0" or a plain interface name
    :return: tuple (vnf_id or None, vnf_interface)
    """
    # guard clause: plain interface name without a VNF prefix
    if ':' not in interface_name:
        return None, interface_name
    vnf_id, vnf_interface = interface_name.split(':')
    return vnf_id, vnf_interface
1206
1207
def get_container_name(vnf_id, vdu_id, ssiid=None):
    """
    Build the emulator container name "<vnf_id>.<vdu_id>[.<ssiid>]".
    :param vnf_id: VNF identifier
    :param vdu_id: VDU identifier
    :param ssiid: optional short service instance id suffix
    :return: container name string
    """
    if ssiid is None:
        return "{}.{}".format(vnf_id, vdu_id)
    return "{}.{}.{}".format(vnf_id, vdu_id, ssiid)
1212
1213
def get_triple_id(descr):
    """
    Return the "<vendor>.<name>.<version>" identifier of a descriptor.
    :param descr: descriptor dict with vendor/name/version keys
    :return: dotted identifier string (missing keys render as "None")
    """
    return ".".join(
        "{}".format(descr.get(field))
        for field in ("vendor", "name", "version"))
1217
1218
def update_port_mapping_multi_instance(ssiid, port_bindings):
    """
    Port_bindings are used to expose ports of the deployed containers.
    They would collide if we deploy multiple service instances.
    This function adds a offset to them which is based on the
    short service instance id (SSIID).
    MULTI_INSTANCE_PORT_OFFSET
    :param ssiid: short service instance id (multiplier for the offset)
    :param port_bindings: dict mapping container ports to host ports
    :return: new dict with every host port shifted by the instance offset
    """
    def _offset(p):
        # shift each host port by a per-instance offset
        return p + MULTI_INSTANCE_PORT_OFFSET * ssiid

    # .items() instead of py2-only .iteritems() (works on py2 and py3)
    port_bindings = {k: _offset(v) for k, v in port_bindings.items()}
    return port_bindings
1232
1233
if __name__ == '__main__':
    """
    Lets allow to run the API in standalone mode.
    """
    GK_STANDALONE_MODE = True
    # silence per-request logging of the development server
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    # blocking call: serves the REST API until interrupted
    start_rest_api("0.0.0.0", 8000)