1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """

import logging
import os
import uuid
import hashlib
import zipfile
import yaml
import threading
from docker import DockerClient
from flask import Flask, request
import flask_restful as fr
from collections import defaultdict
import pkg_resources

logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if it is already available locally
FORCE_PULL = False

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
DEPLOY_SAP = False

# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
BIDIRECTIONAL_CHAIN = False
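
# The module-level flags above are meant to be set before start_rest_api() is called,
# e.g. from a topology script (illustrative usage; the import path is an assumption):
#   import emuvim.api.sonata.dummygatekeeper as dummygatekeeper
#   dummygatekeeper.FORCE_PULL = True
#   dummygatekeeper.BIDIRECTIONAL_CHAIN = True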

class Gatekeeper(object):

    def __init__(self):
        self.services = dict()
        self.dcs = dict()
        self.net = None
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package.
        :param service_uuid: UUID of the service package
        :param service: service object
        """
        self.services[service_uuid] = service
        # let's perform all steps needed to on-board the service
        service.onboard()

    def get_next_vnf_name(self):
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter

class Service(object):
    """
    This class represents an NS uploaded as a *.son package to the
    dummy gatekeeper.
    There can be multiple running instances of this service.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        self.instances = dict()
        self.vnf_name2docker_name = dict()
        self.sap_identifiers = set()
        # let's generate a set of subnet configurations used for E-Line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)
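        # e.g. eline_subnets_src starts with "200.0.0.1/24", "201.0.0.1/24", ... and
        # eline_subnets_dst with "200.0.0.2/24", "201.0.0.2/24", ... (see generate_subnet_strings())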

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        if DEPLOY_SAP:
            self._load_saps()
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("name"))

    def start_service(self):
        """
        This method creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return:
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build an instances dict (a bit like an NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                 self.nsd["network_functions"])))
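        # e.g. (illustrative names) [{"vnf_id": "vnf_fw", "vnf_name": "firewall-vnf"}, ...] in the NSD
        # becomes {"vnf_fw": "firewall-vnf", ...}; unknown ids default to "NotExistingNode"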

        # 3. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            # self._calculate_placement(FirstDcPlacement)
            self._calculate_placement(RoundRobinDcPlacement)
        # iterate over all vnfds that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        vlinks = self.nsd["virtual_links"]
        fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
        eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
        elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

        GK.net.deployed_elines.extend(eline_fwd_links)
        GK.net.deployed_elans.extend(elan_fwd_links)

        # 4a. deploy E-Line links
        # the cookie is used as identifier for the flow rules installed by the dummy gatekeeper,
        # e.g. each service gets a unique cookie for its flow rules
        cookie = 1
        for link in eline_fwd_links:
            src_id, src_if_name = link["connection_points_reference"][0].split(":")
            dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

            # check if there is a SAP in the link
            if src_id in self.sap_identifiers:
                src_docker_name = "{0}_{1}".format(src_id, src_if_name)
                src_id = src_docker_name
            else:
                src_docker_name = src_id

            if dst_id in self.sap_identifiers:
                dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
                dst_id = dst_docker_name
            else:
                dst_docker_name = dst_id

            src_name = vnf_id2vnf_name[src_id]
            dst_name = vnf_id2vnf_name[dst_id]

            LOG.debug(
                "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                    src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

            if (src_name in self.vnfds) and (dst_name in self.vnfds):
                network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                LOG.debug(src_docker_name)
                ret = network.setChain(
                    src_docker_name, dst_docker_name,
                    vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                    bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)

                # re-configure the VNF's IP assignment and ensure that a new subnet is used for each E-Line
                src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                if src_vnfi is not None:
                    self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                if dst_vnfi is not None:
                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

        # 4b. deploy E-LAN links
        base = 10
        for link in elan_fwd_links:

            elan_vnf_list = []

            # generate LAN ip addresses
            ip = 1
            for intf in link["connection_points_reference"]:
                ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                vnf_id, intf_name = intf.split(":")
                if vnf_id in self.sap_identifiers:
                    src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                    vnf_id = src_docker_name
                else:
                    src_docker_name = vnf_id
                vnf_name = vnf_id2vnf_name[vnf_id]
                LOG.debug(
                    "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                        vnf_name, vnf_id, intf_name, ip_address))

                if vnf_name in self.vnfds:
                    # re-configure the VNF's IP assignment and ensure that a new subnet is used for each E-LAN
                    # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                    # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
                    vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                    if vnfi is not None:
                        self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                        # increase for the next ip address on this E-LAN
                        ip += 1

                        # add this vnf and interface to the E-LAN for tagging
                        network = self.vnfds[vnf_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                        elan_vnf_list.append({'name': src_docker_name, 'interface': intf_name})

            # install the VLAN tags for this E-LAN
            network.setLAN(elan_vnf_list)
            # increase the base ip address for the next E-LAN
            base += 1

        # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def stop_service(self, instance_uuid):
        """
        This method stops a running service instance.
        It iterates over all VNF instances, stopping each one and
        removing it from its data center.

        :param instance_uuid: the uuid of the service instance to be stopped
        """
        LOG.info("Stopping service %r" % self.uuid)
        # get relevant information
        # instance_uuid = str(self.uuid.uuid4())
        vnf_instances = self.instances[instance_uuid]["vnf_instances"]

        for v in vnf_instances:
            self._stop_vnfi(v)

        if not GK_STANDALONE_MODE:
            # remove placement?
            # self._remove_placement(RoundRobinPlacement)
            pass

        # last step: remove the instance from the list of all instances
        del self.instances[instance_uuid]

    def _start_vnfd(self, vnfd):
        """
        Start a single VNFD of this service
        :param vnfd: vnfd descriptor dict
        :return:
        """
        # iterate over all deployment units within each VNFD
        for u in vnfd.get("virtual_deployment_units"):
            # 1. get the name of the docker image to start and the assigned DC
            vnf_name = vnfd.get("name")
            if vnf_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_name)
            docker_name = self.remote_docker_image_urls.get(vnf_name)
            target_dc = vnfd.get("dc")
            # 2. perform some checks to ensure we can start the container
            assert(docker_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_name):
                raise Exception("Docker image %r not found. Abort." % docker_name)
            # 3. do the dc.startCompute(name="foobar") call to run the container
            # TODO consider flavors, and other annotations
            intfs = vnfd.get("connection_points")

            # TODO: get all vnf ids from the nsd for this vnfd and use those as docker names
            # use the vnf_id in the nsd as docker name
            # so deployed containers can be easily mapped back to the nsd
            vnf_name2id = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_name"]: d["vnf_id"]},
                                                 self.nsd["network_functions"])))
            self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
            # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()

            LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
            LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
            vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small")
            return vnfi

    def _stop_vnfi(self, vnfi):
        """
        Stop a VNF instance.

        :param vnfi: vnf instance to be stopped
        """
        # Find the correct datacenter
        status = vnfi.getStatus()
        dc = vnfi.datacenter
        # stop the vnfi
        LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
        dc.stopCompute(status["name"])

    def _get_vnf_instance(self, instance_uuid, name):
        """
        Returns the Docker object for the given VNF name (or Docker name).
        :param instance_uuid: UUID of the service instance to search in.
        :param name: VNF name or Docker name. We are fuzzy here.
        :return:
        """
        dn = name
        if name in self.vnf_name2docker_name:
            dn = self.vnf_name2docker_name[name]
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnfi.name == dn:
                return vnfi
        LOG.warning("No container with name: %r found." % dn)
        return None

    @staticmethod
    def _vnf_reconfigure_network(vnfi, if_name, net_str):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instance
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :return:
        """
        intf = vnfi.intf(intf=if_name)
        if intf is not None:
            intf.setIP(net_str)
            LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
        else:
            LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))
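        # e.g. _vnf_reconfigure_network(vnfi, "input", "10.0.10.1/24") assigns that address to the
        # container interface named "input" (the interface name is chosen only for illustration)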

    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                if "SON_EMU_CMD=" in env_var:
                    cmd = str(env_var.split("=")[1])
                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                    # execute command in new thread to ensure that GK is not blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()
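
    # A VNF image announces its emulator entry point via the SON_EMU_CMD environment variable,
    # e.g. set in its Dockerfile (illustrative): ENV SON_EMU_CMD ./start.sh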

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)

    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            GK.net.deployed_nsds.append(self.nsd)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in a dict.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))
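
    # Illustrative MANIFEST.MF excerpt showing the fields consumed by the loaders above
    # (field names match the code; the values are made-up examples):
    #     name: sonata-demo
    #     entry_service_template: /service_descriptors/nsd.yml
    #     package_content:
    #       - name: /function_descriptors/firewall-vnfd.yml
    #         content-type: application/sonata.function_descriptor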

    def _load_saps(self):
        # Each Service Access Point (connection_point) in the NSD gets its own container
        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
        for sap in SAPs:
            # endpoints needed in this service
            sap_vnf_id, sap_vnf_interface = sap.split(':')
            # set of the connection_point ids found in the nsd (in the examples this is 'ns')
            self.sap_identifiers.add(sap_vnf_id)

            sap_docker_name = "%s_%s" % (sap_vnf_id, sap_vnf_interface)

            # add SAP to self.vnfds
            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
            sap_vnfd = load_yaml(sapfile)
            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
            sap_vnfd["name"] = sap_docker_name
            self.vnfds[sap_docker_name] = sap_vnfd
            # add the SAP VNF to the list in the NSD so it is deployed later on
            # each SAP gets a unique VNFD and vnf_id in the NSD
            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from VNFDs and store them in a dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-built Docker images in some repository.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[k] = url
                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            # use the low-level API client (dc.api) to get the streamed build output
            # (DockerClient itself has no build() method in docker-py >= 2.0)
            for line in dc.api.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-built Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images.list(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            # docker-py >= 2.0: pull via the images collection
            # (the old insecure_registry flag is no longer supported)
            dc.images.pull(url)

    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists
        :param image_name: name of the docker image
        :return:
        """
        return len(DockerClient().images.list(name=image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do placement by adding a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate the algorithm and do the placement
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # let's print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))


"""
Some (simple) placement algorithms
"""


class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        for name, vnfd in vnfds.iteritems():
            vnfd["dc"] = list(dcs.itervalues())[0]


class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    def place(self, nsd, vnfds, dcs):
        c = 0
        dcs_list = list(dcs.itervalues())
        for name, vnfd in vnfds.iteritems():
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
            c += 1  # inc. c to use next DC


"""
Resource definitions and API endpoints
"""


class Packages(fr.Resource):

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect a request with a *.son file and store it in UPLOAD_FOLDER.
        :return: UUID
        """
        try:
            # get file contents
            print(request.files)
            # let's search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #     son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # hash the stored package file (not the repr of the upload object)
            with open(upload_path, "rb") as f:
                file_hash = hashlib.sha1(f.read()).hexdigest()
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
        except Exception as ex:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUIDs of the uploaded service packages.
        :return: dict/list
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.iterkeys())}


class Instantiations(fr.Resource):

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # let's be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, let's start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}, 201
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        instance_uuid = json_data.get("service_instance_uuid")

        # try to be fuzzy
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply stop the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]
        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
            instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]

        if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
            # valid service and instance UUID, stop the service
            # (stop_service() also removes the instance from the service's instance dict)
            GK.services.get(service_uuid).stop_service(instance_uuid)
            return
        return "Service not found", 404

class Exit(fr.Resource):

    def put(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        GK.net.stop()


def initialize_GK():
    global GK
    GK = Gatekeeper()


# create a single, global GK object
GK = None
initialize_GK()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
api.add_resource(Exit, '/emulator/exit')
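
# Example interaction with this API (illustrative; assumes the default standalone port 8000):
#   curl -X POST -F package=@example.son http://127.0.0.1:8000/packages
#   curl -X POST -d '{"service_uuid": "<uuid from the previous call>"}' http://127.0.0.1:8000/instantiations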


# def initialize_GK():
#     global GK
#     GK = Gatekeeper()


def start_rest_api(host, port, datacenters=dict()):
    GK.dcs = datacenters
    GK.net = get_dc_network()
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )

def ensure_dir(name):
    if not os.path.exists(name):
        os.makedirs(name)


def load_yaml(path):
    with open(path, "r") as f:
        try:
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r


def make_relative_path(path):
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    return path
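    # e.g. make_relative_path("file:///function_descriptors/firewall-vnfd.yml")
    # returns "function_descriptors/firewall-vnfd.yml" (the path is an illustrative example)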


def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    """
    r = "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
    return r
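    # e.g. generate_lan_string("10.0", 10, subnet_size=24, ip=1) -> "10.0.10.1/24"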


def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    """
    r = list()
    for i in range(start, start + n):
        r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
    return r
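    # e.g. generate_subnet_strings(2, start=200, subnet_size=24, ip=1) -> ["200.0.0.1/24", "201.0.0.1/24"]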


def get_dc_network():
    """
    Retrieve the DCNetwork this dummy gatekeeper (GK) is connected to.
    We assume at least one data center is connected to this GK and that all
    data centers belong to the same DCNetwork.
    :return:
    """
    assert (len(GK.dcs) > 0)
    return GK.dcs.values()[0].net

if __name__ == '__main__':
    """
    Allow running the API in standalone mode.
    """
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)