"""
Copyright (c) 2015 SONATA-NFV and Paderborn University
ALL RIGHTS RESERVED.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.

This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
"""
This module implements a simple REST API that behaves like SONATA's gatekeeper.

It is only used to support the development of SONATA's SDK tools and to demonstrate
the year 1 version of the emulator until the integration with WP4's orchestrator is done.
"""
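# Example interaction with this API (illustrative only; host and port depend on
# how start_rest_api() is invoked, e.g. port 8000 in the standalone mode at the
# bottom of this file):
#
#   curl -X POST -F package=@service.son http://127.0.0.1:8000/packages
#   # -> {"service_uuid": "<uuid>", "size": ..., "sha1": "...", "error": null}
#
#   curl -X POST -d '{"service_uuid": "<uuid>"}' http://127.0.0.1:8000/instantiations
#   # -> {"service_instance_uuid": "<uuid>"}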

import logging
import os
import uuid
import hashlib
import zipfile
import yaml
import threading
from docker import Client as DockerClient
from flask import Flask, request
import flask_restful as fr
from collections import defaultdict
import pkg_resources

logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if it is already available locally
FORCE_PULL = False

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
DEPLOY_SAP = False


class Gatekeeper(object):

    def __init__(self):
        self.services = dict()
        self.dcs = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid
        :param service object
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        service.onboard()

    def get_next_vnf_name(self):
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter


class Service(object):
    """
    This class represents an NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        self.instances = dict()
        self.vnf_name2docker_name = dict()
        self.sap_identifiers = set()
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        if DEPLOY_SAP:
            self._load_saps()
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("name"))

    def start_service(self):
        """
        This method creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return:
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build an instance dict (a bit like an NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
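        # Build a {vnf_id: vnf_name} lookup from the NSD's network_functions list;
        # unknown ids fall back to "NotExistingNode" via the defaultdict.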
        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                 self.nsd["network_functions"])))

        # 3. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            # self._calculate_placement(FirstDcPlacement)
            self._calculate_placement(RoundRobinDcPlacement)
        # iterate over all vnfds that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        vlinks = self.nsd["virtual_links"]
        fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
        eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
        elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

        # 4a. deploy E-Line links
        # cookie is used as identifier for the flow rules installed by the dummygatekeeper
        # e.g. different services get a unique cookie for their flow rules
        cookie = 1
        for link in eline_fwd_links:
            src_id, src_if_name = link["connection_points_reference"][0].split(":")
            dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

            # check if there is a SAP in the link
            if src_id in self.sap_identifiers:
                src_docker_name = "{0}_{1}".format(src_id, src_if_name)
                src_id = src_docker_name
            else:
                src_docker_name = src_id

            if dst_id in self.sap_identifiers:
                dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
                dst_id = dst_docker_name
            else:
                dst_docker_name = dst_id

            src_name = vnf_id2vnf_name[src_id]
            dst_name = vnf_id2vnf_name[dst_id]

            LOG.debug(
                "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                    src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

            if (src_name in self.vnfds) and (dst_name in self.vnfds):
                network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                LOG.debug(src_docker_name)
                ret = network.setChain(
                    src_docker_name, dst_docker_name,
                    vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                    bidirectional=True, cmd="add-flow", cookie=cookie, priority=10)

                # re-configure the VNF's IP assignment and ensure that a new subnet is used for each E-Line link
                src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                if src_vnfi is not None:
                    self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                if dst_vnfi is not None:
                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

        # 4b. deploy E-LAN links
        base = 10
        for link in elan_fwd_links:
            # generate lan ip address
            ip = 1
            for intf in link["connection_points_reference"]:
                ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                vnf_id, intf_name = intf.split(":")
                if vnf_id in self.sap_identifiers:
                    src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                    vnf_id = src_docker_name
                vnf_name = vnf_id2vnf_name[vnf_id]
                LOG.debug(
                    "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                        vnf_name, vnf_id, intf_name, ip_address))

                if vnf_name in self.vnfds:
                    # re-configure the VNF's IP assignment and ensure that a new subnet is used for each E-LAN
                    # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                    # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
                    vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                    if vnfi is not None:
                        self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                # increase for the next ip address on this E-LAN
                ip += 1
            # increase the base ip address for the next E-LAN
            base += 1

        # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def _start_vnfd(self, vnfd):
        """
        Start a single VNFD of this service
        :param vnfd: vnfd descriptor dict
        :return:
        """
        # iterate over all deployment units within the VNFD
        for u in vnfd.get("virtual_deployment_units"):
            # 1. get the name of the docker image to start and the assigned DC
            vnf_name = vnfd.get("name")
            if vnf_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_name)
            docker_name = self.remote_docker_image_urls.get(vnf_name)
            target_dc = vnfd.get("dc")
            # 2. perform some checks to ensure we can start the container
            assert(docker_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_name):
                raise Exception("Docker image %r not found. Abort." % docker_name)
            # 3. do the dc.startCompute(name="foobar") call to run the container
            # TODO consider flavors, and other annotations
            intfs = vnfd.get("connection_points")

            # TODO: get all vnf ids from the nsd for this vnfd and use those as docker names
            # use the vnf_id in the nsd as docker name
            # so deployed containers can be easily mapped back to the nsd
            vnf_name2id = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_name"]: d["vnf_id"]},
                                                 self.nsd["network_functions"])))
            self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
            # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()

            LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
            LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
            vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small")
            return vnfi

    def _get_vnf_instance(self, instance_uuid, name):
        """
        Returns the Docker object for the given VNF name (or Docker name).
        :param instance_uuid: UUID of the service instance to search in.
        :param name: VNF name or Docker name. We are fuzzy here.
        :return:
        """
        dn = name
        if name in self.vnf_name2docker_name:
            dn = self.vnf_name2docker_name[name]
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnfi.name == dn:
                return vnfi
        LOG.warning("No container with name: %r found." % dn)
        return None

    @staticmethod
    def _vnf_reconfigure_network(vnfi, if_name, net_str):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instance
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :return:
        """
        intf = vnfi.intf(intf=if_name)
        if intf is not None:
            intf.setIP(net_str)
            LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
        else:
            LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))

    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
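        # Looks for a SON_EMU_CMD entry in each container's environment and runs it,
        # e.g. an image built with "ENV SON_EMU_CMD ./start.sh" gets ./start.sh
        # executed right after deployment (illustrative example value).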
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                if "SON_EMU_CMD=" in env_var:
                    cmd = str(env_var.split("=")[1])
                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                    # execute command in new thread to ensure that GK is not blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)

    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))

    def _load_saps(self):
        # Each Service Access Point (connection_point) in the nsd is getting its own container
        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
        for sap in SAPs:
            # endpoints needed in this service
            sap_vnf_id, sap_vnf_interface = sap.split(':')
            # set of the connection_point ids found in the nsd (in the examples this is 'ns')
            self.sap_identifiers.add(sap_vnf_id)

            sap_docker_name = sap.replace(':', '_')

            # add SAP to self.vnfds
            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
            sap_vnfd = load_yaml(sapfile)
            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
            sap_vnfd["name"] = sap_docker_name
            self.vnfds[sap_docker_name] = sap_vnfd
            # add SAP vnf to list in the NSD so it is deployed later on
            # each SAP gets a unique VNFD and vnf_id in the NSD
            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from VNFDs and store them in dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-built Docker images in some repo.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[k] = url
                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-built Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            dc.pull(url,
                    insecure_registry=True)

    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists
        :param image_name: name of the docker image
        :return:
        """
        return len(DockerClient().images(image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do placement by adding a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate algorithm and place
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # lets print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))


"""
Some (simple) placement algorithms
"""


class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        for name, vnfd in vnfds.iteritems():
            vnfd["dc"] = list(dcs.itervalues())[0]


class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    def place(self, nsd, vnfds, dcs):
        c = 0
        dcs_list = list(dcs.itervalues())
        for name, vnfd in vnfds.iteritems():
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
            c += 1  # inc. c to use next DC
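
# Any object exposing a place(nsd, vnfds, dcs) method can be passed to
# Service._calculate_placement(); it is expected to attach a "dc" entry to
# each VNFD. A minimal sketch of a custom algorithm (illustrative only, not
# used anywhere in this module; "dc1" is a hypothetical DC name):
#
#   class StaticDcPlacement(object):
#       def place(self, nsd, vnfds, dcs):
#           for name, vnfd in vnfds.iteritems():
#               vnfd["dc"] = dcs["dc1"]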


"""
Resource definitions and API endpoints
"""


class Packages(fr.Resource):

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect a request with a *.son file and store it in UPLOAD_FOLDER
        :return: UUID
        """
        try:
            # get file contents
            print(request.files)
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #     son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            file_hash = hashlib.sha1(str(son_file)).hexdigest()
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
        except Exception as ex:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUIDs of uploaded service packages.
        :return: dict/list
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.iterkeys())}


class Instantiations(fr.Resource):

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}


# create a single, global GK object
GK = Gatekeeper()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')


def start_rest_api(host, port, datacenters=dict()):
    GK.dcs = datacenters
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
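
# Note: start_rest_api() is typically run in a separate thread next to the
# emulator (hence use_reloader=False above). A minimal sketch, illustrative
# only; in practice the datacenters dict comes from the emulator topology:
#
#   t = threading.Thread(target=start_rest_api, args=("0.0.0.0", 8000, dict()))
#   t.daemon = True
#   t.start()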


def ensure_dir(name):
    if not os.path.exists(name):
        os.makedirs(name)


def load_yaml(path):
    with open(path, "r") as f:
        try:
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r


def make_relative_path(path):
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    return path


def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    """
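    # Example: generate_lan_string("10.0", 10, subnet_size=24, ip=1) -> "10.0.10.1/24"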
    r = "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
    return r


def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    """
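    # Example: generate_subnet_strings(2, start=200, subnet_size=24, ip=1)
    #          -> ["200.0.0.1/24", "201.0.0.1/24"]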
    r = list()
    for i in range(start, start + n):
        r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
    return r


if __name__ == '__main__':
    """
    Allow running the API in standalone mode.
    """
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)