Fix: Lets keep the SAP naming scheme and just change the NSDs used for the demo....
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """
34
35 import logging
36 import os
37 import uuid
38 import hashlib
39 import zipfile
40 import yaml
41 import threading
42 from docker import Client as DockerClient
43 from flask import Flask, request
44 import flask_restful as fr
45 from collections import defaultdict
46 import pkg_resources
47
48 logging.basicConfig()
49 LOG = logging.getLogger("sonata-dummy-gatekeeper")
50 LOG.setLevel(logging.DEBUG)
51 logging.getLogger("werkzeug").setLevel(logging.WARNING)
52
53 GK_STORAGE = "/tmp/son-dummy-gk/"
54 UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
55 CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
56
57 # Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if it's already available locally
FORCE_PULL = False

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
DEPLOY_SAP = False

# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
BIDIRECTIONAL_CHAIN = False
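# Illustrative note (an assumption about typical usage, not part of the original
# file): the flags above are module-level globals, so code that embeds this
# gatekeeper can toggle them before the REST API is started, e.g.:
#
#   import emuvim.api.sonata.dummygatekeeper as dgk
#   dgk.DEPLOY_SAP = True
#   dgk.BIDIRECTIONAL_CHAIN = True
#   dgk.start_rest_api("0.0.0.0", 8000)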
class Gatekeeper(object):

    def __init__(self):
        self.services = dict()
        self.dcs = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package.
        :param service_uuid
        :param service object
        """
        self.services[service_uuid] = service
        # let's perform all steps needed to on-board the service
        service.onboard()

    def get_next_vnf_name(self):
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter


class Service(object):
    """
    This class represents an NS uploaded as a *.son package to the
    dummy gatekeeper. A service can have multiple running instances.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        self.instances = dict()
        self.vnf_name2docker_name = dict()
        self.sap_identifiers = set()
        # let's generate a set of subnet configurations used for E-Line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)

    def onboard(self):
125 """
126 Do all steps to prepare this service to be instantiated
127 :return:
128 """
129 # 1. extract the contents of the package and store them in our catalog
130 self._unpack_service_package()
131 # 2. read in all descriptor files
132 self._load_package_descriptor()
133 self._load_nsd()
134 self._load_vnfd()
135 if DEPLOY_SAP:
136 self._load_saps()
137 # 3. prepare container images (e.g. download or build Dockerfile)
138 if BUILD_DOCKERFILE:
139 self._load_docker_files()
140 self._build_images_from_dockerfiles()
141 else:
142 self._load_docker_urls()
143 self._pull_predefined_dockerimages()
144 LOG.info("On-boarded service: %r" % self.manifest.get("name"))
145
146 def start_service(self):
147 """
148 This methods creates and starts a new service instance.
149 It computes placements, iterates over all VNFDs, and starts
150 each VNFD as a Docker container in the data center selected
151 by the placement algorithm.
152 :return:
153 """
154 LOG.info("Starting service %r" % self.uuid)
155
156 # 1. each service instance gets a new uuid to identify it
157 instance_uuid = str(uuid.uuid4())
158 # build a instances dict (a bit like a NSR :))
159 self.instances[instance_uuid] = dict()
160 self.instances[instance_uuid]["vnf_instances"] = list()
161
162 # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
163 vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
164 reduce(lambda x, y: dict(x, **y),
165 map(lambda d: {d["vnf_id"]: d["vnf_name"]},
166 self.nsd["network_functions"])))
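        # Illustrative note (example values, not taken from a real descriptor):
        # for an NSD with
        #   network_functions:
        #     - {vnf_id: "vnf_fw1",    vnf_name: "firewall-vnf"}
        #     - {vnf_id: "vnf_iperf1", vnf_name: "iperf-vnf"}
        # the expression above builds the mapping
        #   {"vnf_fw1": "firewall-vnf", "vnf_iperf1": "iperf-vnf"}
        # and returns "NotExistingNode" for any unknown vnf_id.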

        # 3. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            # self._calculate_placement(FirstDcPlacement)
            self._calculate_placement(RoundRobinDcPlacement)
        # iterate over all VNFDs that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        vlinks = self.nsd["virtual_links"]
        fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
        eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
        elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

        # 4a. deploy E-Line links
        # the cookie is used as an identifier for the flow rules installed by the dummygatekeeper,
        # e.g., different services get a unique cookie for their flow rules
        cookie = 1
        for link in eline_fwd_links:
            src_id, src_if_name = link["connection_points_reference"][0].split(":")
            dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

            # check if there is a SAP in the link
            if src_id in self.sap_identifiers:
                src_docker_name = "{0}_{1}".format(src_id, src_if_name)
                src_id = src_docker_name
            else:
                src_docker_name = src_id

            if dst_id in self.sap_identifiers:
                dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
                dst_id = dst_docker_name
            else:
                dst_docker_name = dst_id

            src_name = vnf_id2vnf_name[src_id]
            dst_name = vnf_id2vnf_name[dst_id]

            LOG.debug(
                "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                    src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

            if (src_name in self.vnfds) and (dst_name in self.vnfds):
                network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                LOG.debug(src_docker_name)
                ret = network.setChain(
                    src_docker_name, dst_docker_name,
                    vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                    bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)

                # re-configure the VNF's IP assignment and ensure that a new subnet is used for each E-Line link
                src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                if src_vnfi is not None:
                    self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                if dst_vnfi is not None:
                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

        # 4b. deploy E-LAN links
        base = 10
        for link in elan_fwd_links:
            # generate LAN ip address
            ip = 1
            for intf in link["connection_points_reference"]:
                ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                vnf_id, intf_name = intf.split(":")
                if vnf_id in self.sap_identifiers:
                    src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                    vnf_id = src_docker_name
                vnf_name = vnf_id2vnf_name[vnf_id]
                LOG.debug(
                    "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                        vnf_name, vnf_id, intf_name, ip_address))

                if vnf_name in self.vnfds:
                    # re-configure the VNF's IP assignment and ensure that a new subnet is used for each E-LAN
                    # E-LAN relies on the learning switch capability of Ryu, which has to be turned on in the topology
                    # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary
                    vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                    if vnfi is not None:
                        self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                # increase for the next ip address on this E-LAN
                ip += 1
            # increase the base ip address for the next E-LAN
            base += 1

        # 5. run the emulator-specific entry point scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def _start_vnfd(self, vnfd):
263 """
264 Start a single VNFD of this service
265 :param vnfd: vnfd descriptor dict
266 :return:
267 """
268 # iterate over all deployment units within each VNFDs
269 for u in vnfd.get("virtual_deployment_units"):
270 # 1. get the name of the docker image to start and the assigned DC
271 vnf_name = vnfd.get("name")
272 if vnf_name not in self.remote_docker_image_urls:
273 raise Exception("No image name for %r found. Abort." % vnf_name)
274 docker_name = self.remote_docker_image_urls.get(vnf_name)
275 target_dc = vnfd.get("dc")
276 # 2. perform some checks to ensure we can start the container
277 assert(docker_name is not None)
278 assert(target_dc is not None)
279 if not self._check_docker_image_exists(docker_name):
280 raise Exception("Docker image %r not found. Abort." % docker_name)
281 # 3. do the dc.startCompute(name="foobar") call to run the container
282 # TODO consider flavors, and other annotations
283 intfs = vnfd.get("connection_points")
284
285 # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
286 # use the vnf_id in the nsd as docker name
287 # so deployed containers can be easily mapped back to the nsd
288 vnf_name2id = defaultdict(lambda: "NotExistingNode",
289 reduce(lambda x, y: dict(x, **y),
290 map(lambda d: {d["vnf_name"]: d["vnf_id"]},
291 self.nsd["network_functions"])))
292 self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
293 # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()
294
295 LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
296 LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
297 vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small")
298 return vnfi
299
300 def _get_vnf_instance(self, instance_uuid, name):
301 """
302 Returns the Docker object for the given VNF name (or Docker name).
303 :param instance_uuid: UUID of the service instance to search in.
304 :param name: VNF name or Docker name. We are fuzzy here.
305 :return:
306 """
307 dn = name
308 if name in self.vnf_name2docker_name:
309 dn = self.vnf_name2docker_name[name]
310 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
311 if vnfi.name == dn:
312 return vnfi
313 LOG.warning("No container with name: %r found.")
314 return None
315
316 @staticmethod
317 def _vnf_reconfigure_network(vnfi, if_name, net_str):
318 """
319 Reconfigure the network configuration of a specific interface
320 of a running container.
321 :param vnfi: container instacne
322 :param if_name: interface name
323 :param net_str: network configuration string, e.g., 1.2.3.4/24
324 :return:
325 """
326 intf = vnfi.intf(intf=if_name)
327 if intf is not None:
328 intf.setIP(net_str)
329 LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
330 else:
331 LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))
332
333
334 def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                if "SON_EMU_CMD=" in env_var:
                    cmd = str(env_var.split("=")[1])
                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                    # execute the command in a new thread to ensure that the GK is not blocked by the VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()
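    # Illustrative note (an assumption about typical VNF packaging, not taken
    # from this file): a VNF container image can announce its entry point
    # script through an environment variable in its Dockerfile, e.g.
    #
    #   ENV SON_EMU_CMD ./start.sh
    #
    # The loop above picks up that variable and runs "./start.sh" inside the
    # container once the service instance has been started.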

    def _unpack_service_package(self):
        """
        Unzip the *.son file and store its contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)

    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
366 """
367 Load the entry NSD YAML and keep it as dict.
368 :return:
369 """
370 if "entry_service_template" in self.manifest:
371 nsd_path = os.path.join(
372 self.package_content_path,
373 make_relative_path(self.manifest.get("entry_service_template")))
374 self.nsd = load_yaml(nsd_path)
375 LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
376
377 def _load_vnfd(self):
378 """
379 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
380 :return:
381 """
382 if "package_content" in self.manifest:
383 for pc in self.manifest.get("package_content"):
384 if pc.get("content-type") == "application/sonata.function_descriptor":
385 vnfd_path = os.path.join(
386 self.package_content_path,
387 make_relative_path(pc.get("name")))
388 vnfd = load_yaml(vnfd_path)
389 self.vnfds[vnfd.get("name")] = vnfd
390 LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))
391
392 def _load_saps(self):
        # Each Service Access Point (connection_point) in the NSD gets its own container
        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
        for sap in SAPs:
            # endpoints needed in this service
            sap_vnf_id, sap_vnf_interface = sap.split(':')
            # set of the connection_point ids found in the NSD (in the examples this is 'ns')
            self.sap_identifiers.add(sap_vnf_id)

            sap_docker_name = "%s_%s" % (sap_vnf_id, sap_vnf_interface)

            # add the SAP to self.vnfds
            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
            sap_vnfd = load_yaml(sapfile)
            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
            sap_vnfd["name"] = sap_docker_name
            self.vnfds[sap_docker_name] = sap_vnfd
            # add the SAP VNF to the list in the NSD so it is deployed later on;
            # each SAP gets a unique VNFD and vnf_id in the NSD
            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from the VNFDs and store them in a dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
430 """
431 Get all URLs to pre-build docker images in some repo.
432 :return:
433 """
434 for k, v in self.vnfds.iteritems():
435 for vu in v.get("virtual_deployment_units"):
436 if vu.get("vm_image_format") == "docker":
437 url = vu.get("vm_image")
438 if url is not None:
439 url = url.replace("http://", "")
440 self.remote_docker_image_urls[k] = url
441 LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))
442
443 def _build_images_from_dockerfiles(self):
444 """
445 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
446 """
447 if GK_STANDALONE_MODE:
448 return # do not build anything in standalone mode
449 dc = DockerClient()
450 LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
451 for k, v in self.local_docker_files.iteritems():
452 for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
453 LOG.debug("DOCKER BUILD: %s" % line)
454 LOG.info("Docker image created: %s" % k)
455
456 def _pull_predefined_dockerimages(self):
457 """
458 If the package contains URLs to pre-build Docker images, we download them with this method.
459 """
460 dc = DockerClient()
461 for url in self.remote_docker_image_urls.itervalues():
462 if not FORCE_PULL: # only pull if not present (speedup for development)
463 if len(dc.images(name=url)) > 0:
464 LOG.debug("Image %r present. Skipping pull." % url)
465 continue
466 LOG.info("Pulling image: %r" % url)
467 dc.pull(url,
468 insecure_registry=True)
469
470 def _check_docker_image_exists(self, image_name):
471 """
472 Query the docker service and check if the given image exists
473 :param image_name: name of the docker image
474 :return:
475 """
476 return len(DockerClient().images(image_name)) > 0
477
478 def _calculate_placement(self, algorithm):
479 """
480 Do placement by adding the a field "dc" to
481 each VNFD that points to one of our
482 data center objects known to the gatekeeper.
483 """
484 assert(len(self.vnfds) > 0)
485 assert(len(GK.dcs) > 0)
486 # instantiate algorithm an place
487 p = algorithm()
488 p.place(self.nsd, self.vnfds, GK.dcs)
489 LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
490 # lets print the placement result
491 for name, vnfd in self.vnfds.iteritems():
492 LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
493
494
495 """
496 Some (simple) placement algorithms
497 """
498
499
500 class FirstDcPlacement(object):
501 """
502 Placement: Always use one and the same data center from the GK.dcs dict.
503 """
504 def place(self, nsd, vnfds, dcs):
505 for name, vnfd in vnfds.iteritems():
506 vnfd["dc"] = list(dcs.itervalues())[0]
507
508
509 class RoundRobinDcPlacement(object):
510 """
511 Placement: Distribute VNFs across all available DCs in a round robin fashion.
512 """
513 def place(self, nsd, vnfds, dcs):
514 c = 0
515 dcs_list = list(dcs.itervalues())
516 for name, vnfd in vnfds.iteritems():
517 vnfd["dc"] = dcs_list[c % len(dcs_list)]
518 c += 1 # inc. c to use next DC
519
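# Illustrative sketch (not part of the original module): a placement algorithm
# only needs a place(nsd, vnfds, dcs) method that assigns a data center object
# to vnfd["dc"] for every VNFD. A hypothetical variant that packs up to two
# VNFs per DC before moving on could look like this:
#
#   class PackTwoPerDcPlacement(object):
#       def place(self, nsd, vnfds, dcs):
#           dcs_list = list(dcs.itervalues())
#           for i, (name, vnfd) in enumerate(vnfds.iteritems()):
#               vnfd["dc"] = dcs_list[min(i // 2, len(dcs_list) - 1)]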


"""
Resource definitions and API endpoints
"""


class Packages(fr.Resource):

    def post(self):
531 """
532 Upload a *.son service package to the dummy gatekeeper.
533
534 We expect request with a *.son file and store it in UPLOAD_FOLDER
535 :return: UUID
536 """
537 try:
538 # get file contents
539 print(request.files)
540 # lets search for the package in the request
541 if "package" in request.files:
542 son_file = request.files["package"]
543 # elif "file" in request.files:
544 # son_file = request.files["file"]
545 else:
546 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
547 # generate a uuid to reference this package
548 service_uuid = str(uuid.uuid4())
549 file_hash = hashlib.sha1(str(son_file)).hexdigest()
550 # ensure that upload folder exists
551 ensure_dir(UPLOAD_FOLDER)
552 upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
553 # store *.son file to disk
554 son_file.save(upload_path)
555 size = os.path.getsize(upload_path)
556 # create a service object and register it
557 s = Service(service_uuid, file_hash, upload_path)
558 GK.register_service_package(service_uuid, s)
559 # generate the JSON result
560 return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
561 except Exception as ex:
562 LOG.exception("Service package upload failed:")
563 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
564
565 def get(self):
566 """
567 Return a list of UUID's of uploaded service packages.
568 :return: dict/list
569 """
570 LOG.info("GET /packages")
571 return {"service_uuid_list": list(GK.services.iterkeys())}
572
573
574 class Instantiations(fr.Resource):
575
576 def post(self):
577 """
578 Instantiate a service specified by its UUID.
579 Will return a new UUID to identify the running service instance.
580 :return: UUID
581 """
582 # try to extract the service uuid from the request
583 json_data = request.get_json(force=True)
584 service_uuid = json_data.get("service_uuid")
585
586 # lets be a bit fuzzy here to make testing easier
587 if service_uuid is None and len(GK.services) > 0:
588 # if we don't get a service uuid, we simple start the first service in the list
589 service_uuid = list(GK.services.iterkeys())[0]
590
591 if service_uuid in GK.services:
592 # ok, we have a service uuid, lets start the service
593 service_instance_uuid = GK.services.get(service_uuid).start_service()
594 return {"service_instance_uuid": service_instance_uuid}
595 return "Service not found", 404
596
597 def get(self):
598 """
599 Returns a list of UUIDs containing all running services.
600 :return: dict / list
601 """
602 LOG.info("GET /instantiations")
603 return {"service_instantiations_list": [
604 list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
605
606
607 # create a single, global GK object
608 GK = Gatekeeper()
609 # setup Flask
610 app = Flask(__name__)
611 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
612 api = fr.Api(app)
613 # define endpoints
614 api.add_resource(Packages, '/packages')
615 api.add_resource(Instantiations, '/instantiations')
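# Example interaction (illustrative only; it assumes the standalone defaults
# used in the __main__ block below, i.e. the API listening on port 8000, and
# "example.son" is a placeholder package name):
#
#   curl -X POST -F package=@example.son http://127.0.0.1:8000/packages
#   curl -X POST -d '{"service_uuid": "<uuid from the previous reply>"}' \
#        http://127.0.0.1:8000/instantiations
#
# The first call on-boards the package and returns its service_uuid; the
# second call starts a service instance and returns a service_instance_uuid.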


def start_rest_api(host, port, datacenters=dict()):
    GK.dcs = datacenters
    # start the Flask server (not the best performance, but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )


def ensure_dir(name):
    if not os.path.exists(name):
        os.makedirs(name)


def load_yaml(path):
    with open(path, "r") as f:
        try:
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r


def make_relative_path(path):
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    return path


def generate_lan_string(prefix, base, subnet_size=24, ip=0):
652 """
653 Helper to generate different network configuration strings.
654 """
655 r = "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
656 return r
657
658
659 def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
660 """
661 Helper to generate different network configuration strings.
662 """
663 r = list()
664 for i in range(start, start + n):
665 r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
666 return r
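

# Illustrative examples (added for clarity, not in the original code) of what
# the two helpers above produce:
#
#   generate_lan_string("10.0", base=10, subnet_size=24, ip=1)
#       -> "10.0.10.1/24"
#   generate_subnet_strings(3, start=200, subnet_size=24, ip=1)
#       -> ["200.0.0.1/24", "201.0.0.1/24", "202.0.0.1/24"]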


if __name__ == '__main__':
    """
    Allow running the API in standalone mode.
    """
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)