"""
Copyright (c) 2015 SONATA-NFV and Paderborn University

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.

This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).

This module implements a simple REST API that behaves like SONATA's gatekeeper.

It is only used to support the development of SONATA's SDK tools and to
demonstrate the year 1 version of the emulator until the integration with
WP4's orchestrator is done.
"""
import hashlib
import logging
import os
import threading
import uuid
import zipfile
from collections import defaultdict
from functools import reduce
from subprocess import Popen

import pkg_resources
import yaml
from docker import DockerClient, APIClient
from flask import Flask, request
import flask_restful as fr
# Module-wide logger; werkzeug (Flask's WSGI layer) is quieted to WARNING
# so request logging does not drown our own DEBUG output.
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# On-disk layout used by the gatekeeper: uploaded *.son packages go to
# UPLOAD_FOLDER, extracted package contents to CATALOG_FOLDER.
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
FORCE_PULL = False

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
DEPLOY_SAP = False

# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
BIDIRECTIONAL_CHAIN = False
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps track of all uploaded service packages and of
    the emulator data centers that services can be deployed to.
    """

    def __init__(self):
        # service_uuid -> Service object for every uploaded package
        self.services = dict()
        # dc name -> data center object; populated by start_rest_api()
        self.dcs = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package and on-board it.
        :param service_uuid: UUID of the package
        :param service: Service object created from the package
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        service.onboard()

    def get_next_vnf_name(self):
        """
        Return a new, unique short VNF name (Mininet limits name length).
        """
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self, service_uuid, package_file_hash, package_file_path):
        """
        Create a new service representation.
        :param service_uuid: UUID assigned to the uploaded package
        :param package_file_hash: SHA1 of the uploaded package
        :param package_file_path: path of the stored *.son file
        """
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        # folder the package contents get extracted to
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        # descriptors, filled during on-boarding
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        # instance_uuid -> instance record (a bit like a NSR)
        self.instances = dict()
        # vnf name (VNFD) -> docker container name used for deployment
        self.vnf_name2docker_name = dict()
        # connection_point ids of the NSD that are service access points
        self.sap_identifiers = set()
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)
127 Do all steps to prepare this service to be instantiated
130 # 1. extract the contents of the package and store them in our catalog
131 self
._unpack
_service
_package
()
132 # 2. read in all descriptor files
133 self
._load
_package
_descriptor
()
138 # 3. prepare container images (e.g. download or build Dockerfile)
140 self
._load
_docker
_files
()
141 self
._build
_images
_from
_dockerfiles
()
143 self
._load
_docker
_urls
()
144 self
._pull
_predefined
_dockerimages
()
145 LOG
.info("On-boarded service: %r" % self
.manifest
.get("name"))
147 def start_service(self
):
149 This methods creates and starts a new service instance.
150 It computes placements, iterates over all VNFDs, and starts
151 each VNFD as a Docker container in the data center selected
152 by the placement algorithm.
155 LOG
.info("Starting service %r" % self
.uuid
)
157 # 1. each service instance gets a new uuid to identify it
158 instance_uuid
= str(uuid
.uuid4())
159 # build a instances dict (a bit like a NSR :))
160 self
.instances
[instance_uuid
] = dict()
161 self
.instances
[instance_uuid
]["vnf_instances"] = list()
163 # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
164 vnf_id2vnf_name
= defaultdict(lambda: "NotExistingNode",
165 reduce(lambda x
, y
: dict(x
, **y
),
166 map(lambda d
: {d
["vnf_id"]: d
["vnf_name"]},
167 self
.nsd
["network_functions"])))
169 # 3. compute placement of this service instance (adds DC names to VNFDs)
170 if not GK_STANDALONE_MODE
:
171 #self._calculate_placement(FirstDcPlacement)
172 self
._calculate
_placement
(RoundRobinDcPlacement
)
173 # iterate over all vnfds that we have to start
174 for vnfd
in self
.vnfds
.itervalues():
176 if not GK_STANDALONE_MODE
:
177 vnfi
= self
._start
_vnfd
(vnfd
)
178 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
180 if "virtual_links" in self
.nsd
:
181 vlinks
= self
.nsd
["virtual_links"]
182 fwd_links
= self
.nsd
["forwarding_graphs"][0]["constituent_virtual_links"]
183 eline_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-Line")]
184 elan_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-LAN")]
186 GK
.net
.deployed_elines
.extend(eline_fwd_links
)
187 GK
.net
.deployed_elans
.extend(elan_fwd_links
)
189 # 4a. deploy E-Line links
190 # cookie is used as identifier for the flowrules installed by the dummygatekeeper
191 # eg. different services get a unique cookie for their flowrules
193 for link
in eline_fwd_links
:
194 src_id
, src_if_name
= link
["connection_points_reference"][0].split(":")
195 dst_id
, dst_if_name
= link
["connection_points_reference"][1].split(":")
197 # check if there is a SAP in the link
198 if src_id
in self
.sap_identifiers
:
199 src_docker_name
= "{0}_{1}".format(src_id
, src_if_name
)
200 src_id
= src_docker_name
202 src_docker_name
= src_id
204 if dst_id
in self
.sap_identifiers
:
205 dst_docker_name
= "{0}_{1}".format(dst_id
, dst_if_name
)
206 dst_id
= dst_docker_name
208 dst_docker_name
= dst_id
210 src_name
= vnf_id2vnf_name
[src_id
]
211 dst_name
= vnf_id2vnf_name
[dst_id
]
214 "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
215 src_name
, src_id
, src_if_name
, dst_name
, dst_id
, dst_if_name
))
217 if (src_name
in self
.vnfds
) and (dst_name
in self
.vnfds
):
218 network
= self
.vnfds
[src_name
].get("dc").net
# there should be a cleaner way to find the DCNetwork
219 LOG
.debug(src_docker_name
)
220 ret
= network
.setChain(
221 src_docker_name
, dst_docker_name
,
222 vnf_src_interface
=src_if_name
, vnf_dst_interface
=dst_if_name
,
223 bidirectional
=BIDIRECTIONAL_CHAIN
, cmd
="add-flow", cookie
=cookie
, priority
=10)
225 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
226 src_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, src_name
)
227 if src_vnfi
is not None:
228 self
._vnf
_reconfigure
_network
(src_vnfi
, src_if_name
, self
.eline_subnets_src
.pop(0))
229 dst_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, dst_name
)
230 if dst_vnfi
is not None:
231 self
._vnf
_reconfigure
_network
(dst_vnfi
, dst_if_name
, self
.eline_subnets_dst
.pop(0))
233 # 4b. deploy E-LAN links
235 for link
in elan_fwd_links
:
239 # generate lan ip address
241 for intf
in link
["connection_points_reference"]:
242 ip_address
= generate_lan_string("10.0", base
, subnet_size
=24, ip
=ip
)
243 vnf_id
, intf_name
= intf
.split(":")
244 if vnf_id
in self
.sap_identifiers
:
245 src_docker_name
= "{0}_{1}".format(vnf_id
, intf_name
)
246 vnf_id
= src_docker_name
248 src_docker_name
= vnf_id
249 vnf_name
= vnf_id2vnf_name
[vnf_id
]
251 "Setting up E-LAN link. %s(%s:%s) -> %s" % (
252 vnf_name
, vnf_id
, intf_name
, ip_address
))
254 if vnf_name
in self
.vnfds
:
255 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
256 # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
257 # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
258 vnfi
= self
._get
_vnf
_instance
(instance_uuid
, vnf_name
)
260 self
._vnf
_reconfigure
_network
(vnfi
, intf_name
, ip_address
)
261 # increase for the next ip address on this E-LAN
264 # add this vnf and interface to the E-LAN for tagging
265 network
= self
.vnfds
[vnf_name
].get("dc").net
# there should be a cleaner way to find the DCNetwork
266 elan_vnf_list
.append({'name':src_docker_name
,'interface':intf_name
})
269 # install the VLAN tags for this E-LAN
270 network
.setLAN(elan_vnf_list
)
271 # increase the base ip address for the next E-LAN
274 # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
275 self
._trigger
_emulator
_start
_scripts
_in
_vnfis
(self
.instances
[instance_uuid
]["vnf_instances"])
277 LOG
.info("Service started. Instance id: %r" % instance_uuid
)
280 def stop_service(self
, instance_uuid
):
282 This method stops a running service instance.
283 It iterates over all VNF instances, stopping them each
284 and removing them from their data center.
286 :param instance_uuid: the uuid of the service instance to be stopped
288 LOG
.info("Stopping service %r" % self
.uuid
)
289 # get relevant information
290 # instance_uuid = str(self.uuid.uuid4())
291 vnf_instances
= self
.instances
[instance_uuid
]["vnf_instances"]
293 for v
in vnf_instances
:
296 if not GK_STANDALONE_MODE
:
298 # self._remove_placement(RoundRobinPlacement)
301 # last step: remove the instance from the list of all instances
302 del self
.instances
[instance_uuid
]
304 def _start_vnfd(self
, vnfd
):
306 Start a single VNFD of this service
307 :param vnfd: vnfd descriptor dict
310 # iterate over all deployment units within each VNFDs
311 for u
in vnfd
.get("virtual_deployment_units"):
312 # 1. get the name of the docker image to start and the assigned DC
313 vnf_name
= vnfd
.get("name")
314 if vnf_name
not in self
.remote_docker_image_urls
:
315 raise Exception("No image name for %r found. Abort." % vnf_name
)
316 docker_name
= self
.remote_docker_image_urls
.get(vnf_name
)
317 target_dc
= vnfd
.get("dc")
318 # 2. perform some checks to ensure we can start the container
319 assert(docker_name
is not None)
320 assert(target_dc
is not None)
321 if not self
._check
_docker
_image
_exists
(docker_name
):
322 raise Exception("Docker image %r not found. Abort." % docker_name
)
324 # 3. get the resource limits
325 res_req
= u
.get("resource_requirements")
326 cpu_list
= res_req
.get("cpu").get("cores")
327 if not cpu_list
or len(cpu_list
)==0:
329 cpu_bw
= res_req
.get("cpu").get("cpu_bw")
332 mem_num
= str(res_req
.get("memory").get("size"))
335 mem_unit
= str(res_req
.get("memory").get("size_unit"))
338 mem_limit
= float(mem_num
)
340 mem_limit
=mem_limit
*1024*1024*1024
342 mem_limit
=mem_limit
*1024*1024
344 mem_limit
=mem_limit
*1024
345 mem_lim
= int(mem_limit
)
346 cpu_period
, cpu_quota
= self
._calculate
_cpu
_cfs
_values
(float(cpu_bw
))
348 # 4. do the dc.startCompute(name="foobar") call to run the container
349 # TODO consider flavors, and other annotations
350 intfs
= vnfd
.get("connection_points")
352 # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
353 # use the vnf_id in the nsd as docker name
354 # so deployed containers can be easily mapped back to the nsd
355 vnf_name2id
= defaultdict(lambda: "NotExistingNode",
356 reduce(lambda x
, y
: dict(x
, **y
),
357 map(lambda d
: {d
["vnf_name"]: d
["vnf_id"]},
358 self
.nsd
["network_functions"])))
359 self
.vnf_name2docker_name
[vnf_name
] = vnf_name2id
[vnf_name
]
360 # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()
362 LOG
.info("Starting %r as %r in DC %r" % (vnf_name
, self
.vnf_name2docker_name
[vnf_name
], vnfd
.get("dc")))
363 LOG
.debug("Interfaces for %r: %r" % (vnf_name
, intfs
))
364 vnfi
= target_dc
.startCompute(self
.vnf_name2docker_name
[vnf_name
], network
=intfs
, image
=docker_name
, flavor_name
="small",
365 cpu_quota
=cpu_quota
, cpu_period
=cpu_period
, cpuset
=cpu_list
, mem_limit
=mem_lim
)
368 def _stop_vnfi(self
, vnfi
):
372 :param vnfi: vnf instance to be stopped
374 # Find the correct datacenter
375 status
= vnfi
.getStatus()
378 LOG
.info("Stopping the vnf instance contained in %r in DC %r" % (status
["name"], dc
))
379 dc
.stopCompute(status
["name"])
381 def _get_vnf_instance(self
, instance_uuid
, name
):
383 Returns the Docker object for the given VNF name (or Docker name).
384 :param instance_uuid: UUID of the service instance to search in.
385 :param name: VNF name or Docker name. We are fuzzy here.
389 if name
in self
.vnf_name2docker_name
:
390 dn
= self
.vnf_name2docker_name
[name
]
391 for vnfi
in self
.instances
[instance_uuid
]["vnf_instances"]:
394 LOG
.warning("No container with name: %r found.")
398 def _vnf_reconfigure_network(vnfi
, if_name
, net_str
):
400 Reconfigure the network configuration of a specific interface
401 of a running container.
402 :param vnfi: container instacne
403 :param if_name: interface name
404 :param net_str: network configuration string, e.g., 1.2.3.4/24
407 intf
= vnfi
.intf(intf
=if_name
)
410 LOG
.debug("Reconfigured network of %s:%s to %r" % (vnfi
.name
, if_name
, net_str
))
412 LOG
.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi
.name
, if_name
))
415 def _trigger_emulator_start_scripts_in_vnfis(self
, vnfi_list
):
416 for vnfi
in vnfi_list
:
417 config
= vnfi
.dcinfo
.get("Config", dict())
418 env
= config
.get("Env", list())
420 var
, cmd
= map(str.strip
, map(str, env_var
.split('=', 1)))
421 LOG
.debug("%r = %r" % (var
, cmd
))
422 if var
=="SON_EMU_CMD":
423 LOG
.info("Executing entry point script in %r: %r" % (vnfi
.name
, cmd
))
424 # execute command in new thread to ensure that GK is not blocked by VNF
425 t
= threading
.Thread(target
=vnfi
.cmdPrint
, args
=(cmd
,))
429 def _unpack_service_package(self
):
431 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
433 LOG
.info("Unzipping: %r" % self
.package_file_path
)
434 with zipfile
.ZipFile(self
.package_file_path
, "r") as z
:
435 z
.extractall(self
.package_content_path
)
438 def _load_package_descriptor(self
):
440 Load the main package descriptor YAML and keep it as dict.
443 self
.manifest
= load_yaml(
445 self
.package_content_path
, "META-INF/MANIFEST.MF"))
449 Load the entry NSD YAML and keep it as dict.
452 if "entry_service_template" in self
.manifest
:
453 nsd_path
= os
.path
.join(
454 self
.package_content_path
,
455 make_relative_path(self
.manifest
.get("entry_service_template")))
456 self
.nsd
= load_yaml(nsd_path
)
457 GK
.net
.deployed_nsds
.append(self
.nsd
)
458 LOG
.debug("Loaded NSD: %r" % self
.nsd
.get("name"))
460 def _load_vnfd(self
):
462 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
465 if "package_content" in self
.manifest
:
466 for pc
in self
.manifest
.get("package_content"):
467 if pc
.get("content-type") == "application/sonata.function_descriptor":
468 vnfd_path
= os
.path
.join(
469 self
.package_content_path
,
470 make_relative_path(pc
.get("name")))
471 vnfd
= load_yaml(vnfd_path
)
472 self
.vnfds
[vnfd
.get("name")] = vnfd
473 LOG
.debug("Loaded VNFD: %r" % vnfd
.get("name"))
475 def _load_saps(self
):
476 # Each Service Access Point (connection_point) in the nsd is getting its own container
477 SAPs
= [p
["id"] for p
in self
.nsd
["connection_points"] if p
["type"] == "interface"]
479 # endpoints needed in this service
480 sap_vnf_id
, sap_vnf_interface
= sap
.split(':')
481 # set of the connection_point ids found in the nsd (in the examples this is 'ns')
482 self
.sap_identifiers
.add(sap_vnf_id
)
484 sap_docker_name
= "%s_%s" % (sap_vnf_id
, sap_vnf_interface
)
486 # add SAP to self.vnfds
487 sapfile
= pkg_resources
.resource_filename(__name__
, "sap_vnfd.yml")
488 sap_vnfd
= load_yaml(sapfile
)
489 sap_vnfd
["connection_points"][0]["id"] = sap_vnf_interface
490 sap_vnfd
["name"] = sap_docker_name
491 self
.vnfds
[sap_docker_name
] = sap_vnfd
492 # add SAP vnf to list in the NSD so it is deployed later on
493 # each SAP get a unique VNFD and vnf_id in the NSD
494 self
.nsd
["network_functions"].append({"vnf_id": sap_docker_name
, "vnf_name": sap_docker_name
})
495 LOG
.debug("Loaded SAP: %r" % sap_vnfd
.get("name"))
497 def _load_docker_files(self
):
499 Get all paths to Dockerfiles from VNFDs and store them in dict.
502 for k
, v
in self
.vnfds
.iteritems():
503 for vu
in v
.get("virtual_deployment_units"):
504 if vu
.get("vm_image_format") == "docker":
505 vm_image
= vu
.get("vm_image")
506 docker_path
= os
.path
.join(
507 self
.package_content_path
,
508 make_relative_path(vm_image
))
509 self
.local_docker_files
[k
] = docker_path
510 LOG
.debug("Found Dockerfile (%r): %r" % (k
, docker_path
))
512 def _load_docker_urls(self
):
514 Get all URLs to pre-build docker images in some repo.
517 for k
, v
in self
.vnfds
.iteritems():
518 for vu
in v
.get("virtual_deployment_units"):
519 if vu
.get("vm_image_format") == "docker":
520 url
= vu
.get("vm_image")
522 url
= url
.replace("http://", "")
523 self
.remote_docker_image_urls
[k
] = url
524 LOG
.debug("Found Docker image URL (%r): %r" % (k
, self
.remote_docker_image_urls
[k
]))
526 def _build_images_from_dockerfiles(self
):
528 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
530 if GK_STANDALONE_MODE
:
531 return # do not build anything in standalone mode
533 LOG
.info("Building %d Docker images (this may take several minutes) ..." % len(self
.local_docker_files
))
534 for k
, v
in self
.local_docker_files
.iteritems():
535 for line
in dc
.build(path
=v
.replace("Dockerfile", ""), tag
=k
, rm
=False, nocache
=False):
536 LOG
.debug("DOCKER BUILD: %s" % line
)
537 LOG
.info("Docker image created: %s" % k
)
539 def _pull_predefined_dockerimages(self
):
541 If the package contains URLs to pre-build Docker images, we download them with this method.
544 for url
in self
.remote_docker_image_urls
.itervalues():
545 if not FORCE_PULL
: # only pull if not present (speedup for development)
546 if len(dc
.images
.list(name
=url
)) > 0:
547 LOG
.debug("Image %r present. Skipping pull." % url
)
549 LOG
.info("Pulling image: %r" % url
)
550 # this seems to fail with latest docker api version 2.0.2
551 # dc.images.pull(url,
552 # insecure_registry=True)
553 #using docker cli instead
563 def _check_docker_image_exists(self
, image_name
):
565 Query the docker service and check if the given image exists
566 :param image_name: name of the docker image
569 return len(DockerClient().images
.list(name
=image_name
)) > 0
571 def _calculate_placement(self
, algorithm
):
573 Do placement by adding the a field "dc" to
574 each VNFD that points to one of our
575 data center objects known to the gatekeeper.
577 assert(len(self
.vnfds
) > 0)
578 assert(len(GK
.dcs
) > 0)
579 # instantiate algorithm an place
581 p
.place(self
.nsd
, self
.vnfds
, GK
.dcs
)
582 LOG
.info("Using placement algorithm: %r" % p
.__class
__.__name
__)
583 # lets print the placement result
584 for name
, vnfd
in self
.vnfds
.iteritems():
585 LOG
.info("Placed VNF %r on DC %r" % (name
, str(vnfd
.get("dc"))))
587 def _calculate_cpu_cfs_values(self
, cpu_time_percentage
):
589 Calculate cpu period and quota for CFS
590 :param cpu_time_percentage: percentage of overall CPU to be used
591 :return: cpu_period, cpu_quota
593 if cpu_time_percentage
is None:
595 if cpu_time_percentage
< 0:
597 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
598 # Attention minimum cpu_quota is 1ms (micro)
599 cpu_period
= 1000000 # lets consider a fixed period of 1000000 microseconds for now
600 LOG
.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period
, cpu_time_percentage
))
601 cpu_quota
= cpu_period
* cpu_time_percentage
# calculate the fraction of cpu time for this container
602 # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
604 LOG
.debug("cpu_quota before correcting: %r" % cpu_quota
)
606 LOG
.warning("Increased CPU quota to avoid system error.")
607 LOG
.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period
, cpu_quota
))
608 return int(cpu_period
), int(cpu_quota
)
612 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        # .values()/.items() instead of py2-only iter* (works on py2 and py3)
        first_dc = list(dcs.values())[0]
        for name, vnfd in vnfds.items():
            vnfd["dc"] = first_dc
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    def place(self, nsd, vnfds, dcs):
        c = 0  # running index into the DC list
        # .values()/.items() instead of py2-only iter* (works on py2 and py3)
        dcs_list = list(dcs.values())
        for name, vnfd in vnfds.items():
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
            c += 1  # inc. c to use next DC
640 Resource definitions and API endpoints
class Packages(fr.Resource):

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: dict with service_uuid, size, sha1, error (+ HTTP status)
        """
        try:
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #     son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # fix: hash the stored package bytes; the original hashed
            # str(son_file), i.e. the repr of the upload object
            with open(upload_path, "rb") as f:
                file_hash = hashlib.sha1(f.read()).hexdigest()
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
        except Exception as ex:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict with service_uuid_list
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.keys())}
class Instantiations(fr.Resource):

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: dict with service_instance_uuid (+ HTTP status)
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service in the list
            service_uuid = list(GK.services.keys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}, 201
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict with service_instantiations_list
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.keys()) for s in GK.services.values()]}

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        instance_uuid = json_data.get("service_instance_uuid")

        # try to be fuzzy to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply stop the last service in the list
            service_uuid = list(GK.services.keys())[0]
        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
            instance_uuid = list(GK.services[service_uuid].instances.keys())[0]

        if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
            # valid service and instance UUID, stop service
            GK.services.get(service_uuid).stop_service(instance_uuid)
            return "service instance with uuid %r stopped." % instance_uuid, 200
        return "Service not found", 404
class Exit(fr.Resource):

    def put(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        # all DCs share one network; stop it via the first DC
        first_dc = list(GK.dcs.values())[0]
        first_dc.net.stop()
# create a single, global GK object
GK = Gatekeeper()

# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
api.add_resource(Exit, '/emulator/exit')


#def initialize_GK():
def start_rest_api(host, port, datacenters=None):
    """
    Start the gatekeeper REST API.
    :param host: interface to bind the Flask server to
    :param port: TCP port to listen on
    :param datacenters: dict of data centers the GK can deploy to
    """
    # fix: avoid a mutable default argument (was datacenters=dict())
    GK.dcs = datacenters if datacenters is not None else dict()
    GK.net = get_dc_network()
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """
    Create the directory *name* (including parents) if it does not exist.
    :param name: directory path
    """
    if not os.path.exists(name):
        try:
            os.makedirs(name)
        except OSError:
            # tolerate a concurrent creation between the check and makedirs;
            # re-raise only if the directory is really missing
            if not os.path.isdir(name):
                raise
def load_yaml(path):
    """
    Load a YAML file and return its content as dict.
    Returns an empty dict if the file cannot be parsed.
    :param path: path of the YAML file
    :return: dict
    """
    with open(path, "r") as f:
        try:
            # NOTE(review): yaml.load without an explicit Loader executes
            # arbitrary YAML tags; acceptable for trusted packages only —
            # prefer yaml.safe_load for untrusted input.
            r = yaml.load(f)
        except yaml.YAMLError:
            LOG.exception("YAML parse error")
            r = dict()
    return r
def make_relative_path(path):
    """
    Strip a leading 'file://' scheme and a leading '/' so the result can be
    joined onto the package content folder.
    :param path: descriptor path as found in the manifest
    :return: relative path string
    """
    scheme = "file://"
    if path.startswith(scheme):
        path = path[len(scheme):]
    if path.startswith("/"):
        path = path[1:]
    return path
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Result has the form '<prefix>.<base>.<ip>/<subnet_size>'.
    """
    return "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Returns n strings of the form '<i>.0.0.<ip>/<subnet_size>' with
    i running from start to start + n - 1.
    """
    return ["%d.0.0.%d/%d" % (i, ip, subnet_size)
            for i in range(start, start + n)]
def get_dc_network():
    """
    retrieve the DCnetwork where this dummygatekeeper (GK) connects to.
    Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
    :return: the shared DCNetwork object
    """
    assert (len(GK.dcs) > 0)
    # fix: dict.values()[0] is py2-only; list(...) works on py2 and py3
    return list(GK.dcs.values())[0].net
if __name__ == '__main__':
    """
    Lets allow to run the API in standalone mode.
    """
    GK_STANDALONE_MODE = True
    # in standalone mode, show werkzeug's request log
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)