Merge pull request #167 from mpeuster/master
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """
34
35 import logging
36 import os
37 import uuid
38 import hashlib
39 import zipfile
40 import yaml
41 import threading
42 from docker import Client as DockerClient
43 from flask import Flask, request
44 import flask_restful as fr
45 from collections import defaultdict
46 import pkg_resources
47
# basic logging setup for this module
logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
# silence werkzeug's (Flask's WSGI server) per-request logging
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# base folder for uploaded packages and their extracted contents
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if it's already available
FORCE_PULL = False

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
DEPLOY_SAP = False
69
class Gatekeeper(object):
    """
    Dummy gatekeeper.

    Keeps track of the on-boarded service packages and of the data
    centers (DCs) that are available as deployment targets.
    """

    def __init__(self):
        # service_uuid -> Service object
        self.services = {}
        # DC name -> DC object (filled in later by start_rest_api)
        self.dcs = {}
        # counter used to generate short names for VNFs (Mininet limitation)
        self.vnf_counter = 0
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid
        :param service object
        """
        self.services[service_uuid] = service
        # trigger all steps needed to on-board the service
        service.onboard()

    def get_next_vnf_name(self):
        """Return the next short, unique VNF name ('vnf1', 'vnf2', ...)."""
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
91
92
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        """
        :param service_uuid: UUID assigned to the uploaded *.son package
        :param package_file_hash: sha1 hash of the uploaded package file
        :param package_file_path: path of the *.son file on disk
        """
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None  # parsed META-INF/MANIFEST.MF (dict)
        self.nsd = None  # parsed network service descriptor (dict)
        self.vnfds = dict()  # vnf name -> parsed VNFD (dict)
        self.local_docker_files = dict()  # vnf name -> Dockerfile path inside package
        self.remote_docker_image_urls = dict()  # vnf name -> docker image URL
        self.instances = dict()  # instance uuid -> instance record (a bit like a NSR)
        self.vnf_name2docker_name = dict()  # vnf name -> container (docker) name
        self.sap_identifiers = set()  # connection point ids of SAPs found in the NSD
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        if DEPLOY_SAP:
            self._load_saps()
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("name"))

    def start_service(self):
        """
        This methods creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return: UUID of the new service instance
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build an instances dict (a bit like a NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
        # mapping: vnf_id (used in the NSD) -> vnf_name (used in the VNFDs)
        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                 self.nsd["network_functions"])))

        # 3. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            self._calculate_placement(FirstDcPlacement)
        # iterate over all vnfds that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        # only links that are part of the (first) forwarding graph are deployed
        vlinks = self.nsd["virtual_links"]
        fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
        eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
        elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

        # 4a. deploy E-Line links
        # cookie is used as identifier for the flowrules installed by the dummygatekeeper
        # eg. different services get a unique cookie for their flowrules
        # NOTE(review): cookie is never incremented below, so all rules use cookie=1 - confirm intended
        cookie = 1
        for link in eline_fwd_links:
            src_id, src_if_name = link["connection_points_reference"][0].split(":")
            dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

            # check if there is a SAP in the link
            if src_id in self.sap_identifiers:
                src_docker_name = "{0}_{1}".format(src_id, src_if_name)
                src_id = src_docker_name
            else:
                src_docker_name = src_id

            if dst_id in self.sap_identifiers:
                dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
                dst_id = dst_docker_name
            else:
                dst_docker_name = dst_id

            src_name = vnf_id2vnf_name[src_id]
            dst_name = vnf_id2vnf_name[dst_id]

            LOG.debug(
                "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                    src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

            if (src_name in self.vnfds) and (dst_name in self.vnfds):
                network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                LOG.debug(src_docker_name)
                ret = network.setChain(
                    src_docker_name, dst_docker_name,
                    vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                    bidirectional=True, cmd="add-flow", cookie=cookie, priority=10)

                # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
                src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                if src_vnfi is not None:
                    self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                if dst_vnfi is not None:
                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

        # 4b. deploy E-LAN links
        base = 10
        for link in elan_fwd_links:
            # generate lan ip address
            ip = 1
            for intf in link["connection_points_reference"]:
                ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                vnf_id, intf_name = intf.split(":")
                if vnf_id in self.sap_identifiers:
                    src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                    vnf_id = src_docker_name
                vnf_name = vnf_id2vnf_name[vnf_id]
                LOG.debug(
                    "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                        vnf_name, vnf_id, intf_name, ip_address))

                if vnf_name in self.vnfds:
                    # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
                    # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                    # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
                    vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                    if vnfi is not None:
                        self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                    # increase for the next ip address on this E-LAN
                    ip += 1
            # increase the base ip address for the next E-LAN
            base += 1

        # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def _start_vnfd(self, vnfd):
        """
        Start a single VNFD of this service
        :param vnfd: vnfd descriptor dict
        :return: compute instance (container) object of the started VNF
        """
        # iterate over all deployment units within each VNFDs
        for u in vnfd.get("virtual_deployment_units"):
            # 1. get the name of the docker image to start and the assigned DC
            vnf_name = vnfd.get("name")
            if vnf_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_name)
            docker_name = self.remote_docker_image_urls.get(vnf_name)
            target_dc = vnfd.get("dc")
            # 2. perform some checks to ensure we can start the container
            assert(docker_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_name):
                raise Exception("Docker image %r not found. Abort." % docker_name)
            # 3. do the dc.startCompute(name="foobar") call to run the container
            # TODO consider flavors, and other annotations
            intfs = vnfd.get("connection_points")

            # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
            # use the vnf_id in the nsd as docker name
            # so deployed containers can be easily mapped back to the nsd
            vnf_name2id = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_name"]: d["vnf_id"]},
                                                 self.nsd["network_functions"])))
            self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
            # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()

            LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
            LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
            vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small")
            # NOTE(review): returning inside the loop deploys only the first
            # deployment unit of the VNFD - confirm this is intended
            return vnfi

    def _get_vnf_instance(self, instance_uuid, name):
        """
        Returns the Docker object for the given VNF name (or Docker name).
        :param instance_uuid: UUID of the service instance to search in.
        :param name: VNF name or Docker name. We are fuzzy here.
        :return: container object or None if not found
        """
        dn = name
        if name in self.vnf_name2docker_name:
            dn = self.vnf_name2docker_name[name]
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnfi.name == dn:
                return vnfi
        # FIX: the original log call had a %r placeholder but no argument
        LOG.warning("No container with name: %r found." % dn)
        return None

    @staticmethod
    def _vnf_reconfigure_network(vnfi, if_name, net_str):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instance
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :return:
        """
        intf = vnfi.intf(intf=if_name)
        if intf is not None:
            intf.setIP(net_str)
            LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
        else:
            LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))

    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        """
        Run the emulator-specific entry point command (SON_EMU_CMD env var)
        inside each given VNF instance, each in its own daemon thread.
        :param vnfi_list: list of container instances (may contain None entries in standalone mode)
        """
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                if "SON_EMU_CMD=" in env_var:
                    cmd = str(env_var.split("=")[1])
                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                    # execute command in new thread to ensure that GK is not blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)

    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))

    def _load_saps(self):
        """
        Create a VNFD for each Service Access Point (SAP) of the NSD so that
        each endpoint is deployed as its own container later on.
        """
        # Each Service Access Point (connection_point) in the nsd is getting its own container
        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
        for sap in SAPs:
            # endpoints needed in this service
            sap_vnf_id, sap_vnf_interface = sap.split(':')
            # set of the connection_point ids found in the nsd (in the examples this is 'ns')
            self.sap_identifiers.add(sap_vnf_id)

            sap_docker_name = sap.replace(':', '_')

            # add SAP to self.vnfds (based on the packaged sap_vnfd.yml template)
            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
            sap_vnfd = load_yaml(sapfile)
            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
            sap_vnfd["name"] = sap_docker_name
            self.vnfds[sap_docker_name] = sap_vnfd
            # add SAP vnf to list in the NSD so it is deployed later on
            # each SAP get a unique VNFD and vnf_id in the NSD
            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from VNFDs and store them in dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-build docker images in some repo.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        # strip the scheme; the docker client expects a plain registry path
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[k] = url
                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-build Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            dc.pull(url,
                    insecure_registry=True)

    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists
        :param image_name: name of the docker image
        :return: True if at least one matching image is present
        """
        return len(DockerClient().images(image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do placement by adding the a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        :param algorithm: placement algorithm class (e.g. FirstDcPlacement)
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate algorithm and place
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # lets print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
489
490
491 """
492 Some (simple) placement algorithms
493 """
494
495
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        # pin every VNF of the service to the first DC we can find
        first_dc = list(dcs.itervalues())[0]
        for _, vnfd in vnfds.iteritems():
            vnfd["dc"] = first_dc
503
504
505 """
506 Resource definitions and API endpoints
507 """
508
509
class Packages(fr.Resource):
    """REST endpoint to upload and list *.son service packages."""

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: UUID
        """
        try:
            # lets search for the package in the request
            LOG.debug("POST /packages, files: %r" % request.files)
            if "package" in request.files:
                son_file = request.files["package"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # FIX: hash the stored file's bytes; the original hashed
            # str(son_file), i.e. the repr of the FileStorage object,
            # which is not a hash of the package content at all
            with open(upload_path, "rb") as f:
                file_hash = hashlib.sha1(f.read()).hexdigest()
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
        except Exception:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict/list
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.iterkeys())}
554
555
class Instantiations(fr.Resource):
    """REST endpoint to start service instances and list running ones."""

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        # try to extract the service uuid from the request
        payload = request.get_json(force=True)
        requested_uuid = payload.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier: if no uuid is
        # given, simply fall back to the first known service
        if requested_uuid is None and len(GK.services) > 0:
            requested_uuid = list(GK.services.iterkeys())[0]

        if requested_uuid not in GK.services:
            return "Service not found", 404
        # ok, we have a known service uuid, lets start the service
        service_instance_uuid = GK.services.get(requested_uuid).start_service()
        return {"service_instance_uuid": service_instance_uuid}

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
587
588
# create a single, global GK object shared by all endpoints
GK = Gatekeeper()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
598
599
def start_rest_api(host, port, datacenters=None):
    """
    Start the dummy gatekeeper REST API (blocking call).
    :param host: interface to bind to, e.g. "0.0.0.0"
    :param port: TCP port to listen on
    :param datacenters: dict of DC name -> DC object made known to the GK
    """
    # FIX: avoid a shared mutable default argument (was datacenters=dict())
    GK.dcs = dict() if datacenters is None else datacenters
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
608
609
def ensure_dir(name):
    """
    Create directory *name* (including parents) if it does not exist yet.
    FIX: the original exists-check followed by makedirs was racy; now we
    simply try to create and ignore the "already exists" case.
    """
    try:
        os.makedirs(name)
    except OSError:
        # re-raise anything that is not "directory already exists"
        # (e.g. permission errors)
        if not os.path.isdir(name):
            raise
613
614
def load_yaml(path):
    """
    Load a YAML file and return its content (usually a dict).
    Returns an empty dict if parsing fails.
    :param path: path of the YAML file
    """
    with open(path, "r") as f:
        try:
            # FIX: use safe_load; package contents are untrusted input and
            # plain yaml.load would allow arbitrary Python object construction
            r = yaml.safe_load(f)
        except yaml.YAMLError:
            LOG.exception("YAML parse error")
            r = dict()
    return r
623
624
def make_relative_path(path):
    """
    Turn a descriptor reference into a path relative to the package root:
    strips a leading 'file://' scheme and, after that, a leading slash.
    """
    for prefix in ("file://", "/"):
        if path.startswith(prefix):
            path = path[len(prefix):]
    return path
631
632
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Build a single '<prefix>.<base>.<ip>/<subnet_size>' network
    configuration string (e.g. '10.0.10.1/24').
    """
    return "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
639
640
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Build a list of n '<i>.0.0.<ip>/<subnet_size>' network configuration
    strings, with i running from start to start + n - 1.
    """
    return ["%d.0.0.%d/%d" % (i, ip, subnet_size)
            for i in range(start, start + n)]
649
650
if __name__ == '__main__':
    """
    Lets allow to run the API in standalone mode.
    """
    # NOTE: this rebinds the module-level flag (we are at module scope here)
    GK_STANDALONE_MODE = True
    # show werkzeug request logs when run interactively
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)
658