2 Copyright (c) 2015 SONATA-NFV and Paderborn University
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
9 http://www.apache.org/licenses/LICENSE-2.0
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
import hashlib
import logging
import os
import threading
import uuid
import zipfile

import pkg_resources
import yaml
from collections import defaultdict
from subprocess import Popen

from docker import DockerClient, APIClient
from flask import Flask, request
import flask_restful as fr
# Module-level logger; werkzeug (Flask's HTTP layer) is silenced to WARNING
# so request logs do not flood the gatekeeper's DEBUG output.
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# base directory for uploaded *.son packages and the extracted catalog
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
FORCE_PULL = False

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
DEPLOY_SAP = False

# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
BIDIRECTIONAL_CHAIN = False
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps track of uploaded service packages and the
    emulator data centers, and hands out short VNF names.
    """

    def __init__(self):
        self.services = dict()      # service_uuid -> Service
        self.dcs = dict()           # datacenter name -> emulator DC object
        self.net = None             # DCNetwork this gatekeeper is attached to
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package and onboard it.
        :param service_uuid: UUID string used as key for the service
        :param service: Service object wrapping the uploaded package
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        service.onboard()

    def get_next_vnf_name(self):
        """
        Return the next short VNF name, e.g. "vnf1", "vnf2", ...
        (Mininet limits container name length, hence the short names.)
        """
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self, service_uuid, package_file_hash, package_file_path):
        """
        :param service_uuid: UUID assigned to this service package
        :param package_file_hash: SHA1 hash of the uploaded *.son file
        :param package_file_path: path of the uploaded *.son file on disk
        """
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        # extraction target for the package contents
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None        # parsed META-INF/MANIFEST.MF
        self.nsd = None             # parsed network service descriptor
        self.vnfds = dict()         # vnf name -> parsed VNFD
        self.local_docker_files = dict()        # vnf name -> Dockerfile path
        self.remote_docker_image_urls = dict()  # vnf name -> docker image URL
        self.instances = dict()                 # instance uuid -> instance record
        self.vnf_name2docker_name = dict()      # vnf name -> docker container name
        self.sap_identifiers = set()            # connection point ids that are SAPs
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)
127 Do all steps to prepare this service to be instantiated
130 # 1. extract the contents of the package and store them in our catalog
131 self
._unpack
_service
_package
()
132 # 2. read in all descriptor files
133 self
._load
_package
_descriptor
()
138 # 3. prepare container images (e.g. download or build Dockerfile)
140 self
._load
_docker
_files
()
141 self
._build
_images
_from
_dockerfiles
()
143 self
._load
_docker
_urls
()
144 self
._pull
_predefined
_dockerimages
()
145 LOG
.info("On-boarded service: %r" % self
.manifest
.get("name"))
147 def start_service(self
):
149 This methods creates and starts a new service instance.
150 It computes placements, iterates over all VNFDs, and starts
151 each VNFD as a Docker container in the data center selected
152 by the placement algorithm.
155 LOG
.info("Starting service %r" % self
.uuid
)
157 # 1. each service instance gets a new uuid to identify it
158 instance_uuid
= str(uuid
.uuid4())
159 # build a instances dict (a bit like a NSR :))
160 self
.instances
[instance_uuid
] = dict()
161 self
.instances
[instance_uuid
]["vnf_instances"] = list()
163 # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
164 vnf_id2vnf_name
= defaultdict(lambda: "NotExistingNode",
165 reduce(lambda x
, y
: dict(x
, **y
),
166 map(lambda d
: {d
["vnf_id"]: d
["vnf_name"]},
167 self
.nsd
["network_functions"])))
169 # 3. compute placement of this service instance (adds DC names to VNFDs)
170 if not GK_STANDALONE_MODE
:
171 #self._calculate_placement(FirstDcPlacement)
172 self
._calculate
_placement
(RoundRobinDcPlacement
)
173 # iterate over all vnfds that we have to start
174 for vnfd
in self
.vnfds
.itervalues():
176 if not GK_STANDALONE_MODE
:
177 vnfi
= self
._start
_vnfd
(vnfd
)
178 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
180 if "virtual_links" in self
.nsd
:
181 vlinks
= self
.nsd
["virtual_links"]
182 fwd_links
= self
.nsd
["forwarding_graphs"][0]["constituent_virtual_links"]
183 eline_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-Line")]
184 elan_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-LAN")]
186 GK
.net
.deployed_elines
.extend(eline_fwd_links
)
187 GK
.net
.deployed_elans
.extend(elan_fwd_links
)
189 # 4a. deploy E-Line links
190 # cookie is used as identifier for the flowrules installed by the dummygatekeeper
191 # eg. different services get a unique cookie for their flowrules
193 for link
in eline_fwd_links
:
194 src_id
, src_if_name
= link
["connection_points_reference"][0].split(":")
195 dst_id
, dst_if_name
= link
["connection_points_reference"][1].split(":")
197 # check if there is a SAP in the link
198 if src_id
in self
.sap_identifiers
:
199 src_docker_name
= "{0}_{1}".format(src_id
, src_if_name
)
200 src_id
= src_docker_name
202 src_docker_name
= src_id
204 if dst_id
in self
.sap_identifiers
:
205 dst_docker_name
= "{0}_{1}".format(dst_id
, dst_if_name
)
206 dst_id
= dst_docker_name
208 dst_docker_name
= dst_id
210 src_name
= vnf_id2vnf_name
[src_id
]
211 dst_name
= vnf_id2vnf_name
[dst_id
]
214 "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
215 src_name
, src_id
, src_if_name
, dst_name
, dst_id
, dst_if_name
))
217 if (src_name
in self
.vnfds
) and (dst_name
in self
.vnfds
):
218 network
= self
.vnfds
[src_name
].get("dc").net
# there should be a cleaner way to find the DCNetwork
219 LOG
.debug(src_docker_name
)
220 ret
= network
.setChain(
221 src_docker_name
, dst_docker_name
,
222 vnf_src_interface
=src_if_name
, vnf_dst_interface
=dst_if_name
,
223 bidirectional
=BIDIRECTIONAL_CHAIN
, cmd
="add-flow", cookie
=cookie
, priority
=10)
225 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
226 src_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, src_name
)
227 if src_vnfi
is not None:
228 self
._vnf
_reconfigure
_network
(src_vnfi
, src_if_name
, self
.eline_subnets_src
.pop(0))
229 dst_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, dst_name
)
230 if dst_vnfi
is not None:
231 self
._vnf
_reconfigure
_network
(dst_vnfi
, dst_if_name
, self
.eline_subnets_dst
.pop(0))
233 # 4b. deploy E-LAN links
235 for link
in elan_fwd_links
:
239 # generate lan ip address
241 for intf
in link
["connection_points_reference"]:
242 ip_address
= generate_lan_string("10.0", base
, subnet_size
=24, ip
=ip
)
243 vnf_id
, intf_name
= intf
.split(":")
244 if vnf_id
in self
.sap_identifiers
:
245 src_docker_name
= "{0}_{1}".format(vnf_id
, intf_name
)
246 vnf_id
= src_docker_name
248 src_docker_name
= vnf_id
249 vnf_name
= vnf_id2vnf_name
[vnf_id
]
251 "Setting up E-LAN link. %s(%s:%s) -> %s" % (
252 vnf_name
, vnf_id
, intf_name
, ip_address
))
254 if vnf_name
in self
.vnfds
:
255 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
256 # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
257 # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
258 vnfi
= self
._get
_vnf
_instance
(instance_uuid
, vnf_name
)
260 self
._vnf
_reconfigure
_network
(vnfi
, intf_name
, ip_address
)
261 # increase for the next ip address on this E-LAN
264 # add this vnf and interface to the E-LAN for tagging
265 network
= self
.vnfds
[vnf_name
].get("dc").net
# there should be a cleaner way to find the DCNetwork
266 elan_vnf_list
.append({'name':src_docker_name
,'interface':intf_name
})
269 # install the VLAN tags for this E-LAN
270 network
.setLAN(elan_vnf_list
)
271 # increase the base ip address for the next E-LAN
274 # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
275 self
._trigger
_emulator
_start
_scripts
_in
_vnfis
(self
.instances
[instance_uuid
]["vnf_instances"])
277 LOG
.info("Service started. Instance id: %r" % instance_uuid
)
280 def stop_service(self
, instance_uuid
):
282 This method stops a running service instance.
283 It iterates over all VNF instances, stopping them each
284 and removing them from their data center.
286 :param instance_uuid: the uuid of the service instance to be stopped
288 LOG
.info("Stopping service %r" % self
.uuid
)
289 # get relevant information
290 # instance_uuid = str(self.uuid.uuid4())
291 vnf_instances
= self
.instances
[instance_uuid
]["vnf_instances"]
293 for v
in vnf_instances
:
296 if not GK_STANDALONE_MODE
:
298 # self._remove_placement(RoundRobinPlacement)
301 # last step: remove the instance from the list of all instances
302 del self
.instances
[instance_uuid
]
304 def _start_vnfd(self
, vnfd
):
306 Start a single VNFD of this service
307 :param vnfd: vnfd descriptor dict
310 # iterate over all deployment units within each VNFDs
311 for u
in vnfd
.get("virtual_deployment_units"):
312 # 1. get the name of the docker image to start and the assigned DC
313 vnf_name
= vnfd
.get("name")
314 if vnf_name
not in self
.remote_docker_image_urls
:
315 raise Exception("No image name for %r found. Abort." % vnf_name
)
316 docker_name
= self
.remote_docker_image_urls
.get(vnf_name
)
317 target_dc
= vnfd
.get("dc")
318 # 2. perform some checks to ensure we can start the container
319 assert(docker_name
is not None)
320 assert(target_dc
is not None)
321 if not self
._check
_docker
_image
_exists
(docker_name
):
322 raise Exception("Docker image %r not found. Abort." % docker_name
)
324 # 3. get the resource limits
325 res_req
= u
.get("resource_requirements")
326 cpu_list
= res_req
.get("cpu").get("cores")
327 if not cpu_list
or len(cpu_list
)==0:
329 cpu_bw
= res_req
.get("cpu").get("cpu_bw")
332 mem_num
= str(res_req
.get("memory").get("size"))
335 mem_unit
= str(res_req
.get("memory").get("size_unit"))
338 mem_limit
= float(mem_num
)
340 mem_limit
=mem_limit
*1024*1024*1024
342 mem_limit
=mem_limit
*1024*1024
344 mem_limit
=mem_limit
*1024
345 mem_lim
= int(mem_limit
)
346 cpu_period
, cpu_quota
= self
._calculate
_cpu
_cfs
_values
(float(cpu_bw
))
348 # 4. generate the volume paths for the docker container
350 # a volume to extract log files
351 docker_log_path
= "/tmp/results/%s/%s"%(self
.uuid
,vnf_name
)
352 LOG
.debug("LOG path for vnf %s is %s."%(vnf_name
,docker_log_path
))
353 if not os
.path
.exists(docker_log_path
):
354 LOG
.debug("Creating folder %s"%docker
_log
_path
)
355 os
.makedirs(docker_log_path
)
357 volumes
.append(docker_log_path
+":/mnt/share/")
360 # 5. do the dc.startCompute(name="foobar") call to run the container
361 # TODO consider flavors, and other annotations
362 intfs
= vnfd
.get("connection_points")
364 # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
365 # use the vnf_id in the nsd as docker name
366 # so deployed containers can be easily mapped back to the nsd
367 vnf_name2id
= defaultdict(lambda: "NotExistingNode",
368 reduce(lambda x
, y
: dict(x
, **y
),
369 map(lambda d
: {d
["vnf_name"]: d
["vnf_id"]},
370 self
.nsd
["network_functions"])))
371 self
.vnf_name2docker_name
[vnf_name
] = vnf_name2id
[vnf_name
]
372 # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()
374 LOG
.info("Starting %r as %r in DC %r" % (vnf_name
, self
.vnf_name2docker_name
[vnf_name
], vnfd
.get("dc")))
375 LOG
.debug("Interfaces for %r: %r" % (vnf_name
, intfs
))
376 vnfi
= target_dc
.startCompute(
377 self
.vnf_name2docker_name
[vnf_name
],
382 cpu_period
=cpu_period
,
388 def _stop_vnfi(self
, vnfi
):
392 :param vnfi: vnf instance to be stopped
394 # Find the correct datacenter
395 status
= vnfi
.getStatus()
399 LOG
.info("Stopping the vnf instance contained in %r in DC %r" % (status
["name"], dc
))
400 dc
.stopCompute(status
["name"])
402 def _get_vnf_instance(self
, instance_uuid
, name
):
404 Returns the Docker object for the given VNF name (or Docker name).
405 :param instance_uuid: UUID of the service instance to search in.
406 :param name: VNF name or Docker name. We are fuzzy here.
410 if name
in self
.vnf_name2docker_name
:
411 dn
= self
.vnf_name2docker_name
[name
]
412 for vnfi
in self
.instances
[instance_uuid
]["vnf_instances"]:
415 LOG
.warning("No container with name: %r found.")
419 def _vnf_reconfigure_network(vnfi
, if_name
, net_str
):
421 Reconfigure the network configuration of a specific interface
422 of a running container.
423 :param vnfi: container instacne
424 :param if_name: interface name
425 :param net_str: network configuration string, e.g., 1.2.3.4/24
428 intf
= vnfi
.intf(intf
=if_name
)
431 LOG
.debug("Reconfigured network of %s:%s to %r" % (vnfi
.name
, if_name
, net_str
))
433 LOG
.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi
.name
, if_name
))
436 def _trigger_emulator_start_scripts_in_vnfis(self
, vnfi_list
):
437 for vnfi
in vnfi_list
:
438 config
= vnfi
.dcinfo
.get("Config", dict())
439 env
= config
.get("Env", list())
441 var
, cmd
= map(str.strip
, map(str, env_var
.split('=', 1)))
442 LOG
.debug("%r = %r" % (var
, cmd
))
443 if var
=="SON_EMU_CMD":
444 LOG
.info("Executing entry point script in %r: %r" % (vnfi
.name
, cmd
))
445 # execute command in new thread to ensure that GK is not blocked by VNF
446 t
= threading
.Thread(target
=vnfi
.cmdPrint
, args
=(cmd
,))
450 def _unpack_service_package(self
):
452 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
454 LOG
.info("Unzipping: %r" % self
.package_file_path
)
455 with zipfile
.ZipFile(self
.package_file_path
, "r") as z
:
456 z
.extractall(self
.package_content_path
)
459 def _load_package_descriptor(self
):
461 Load the main package descriptor YAML and keep it as dict.
464 self
.manifest
= load_yaml(
466 self
.package_content_path
, "META-INF/MANIFEST.MF"))
470 Load the entry NSD YAML and keep it as dict.
473 if "entry_service_template" in self
.manifest
:
474 nsd_path
= os
.path
.join(
475 self
.package_content_path
,
476 make_relative_path(self
.manifest
.get("entry_service_template")))
477 self
.nsd
= load_yaml(nsd_path
)
478 GK
.net
.deployed_nsds
.append(self
.nsd
)
479 LOG
.debug("Loaded NSD: %r" % self
.nsd
.get("name"))
481 def _load_vnfd(self
):
483 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
486 if "package_content" in self
.manifest
:
487 for pc
in self
.manifest
.get("package_content"):
488 if pc
.get("content-type") == "application/sonata.function_descriptor":
489 vnfd_path
= os
.path
.join(
490 self
.package_content_path
,
491 make_relative_path(pc
.get("name")))
492 vnfd
= load_yaml(vnfd_path
)
493 self
.vnfds
[vnfd
.get("name")] = vnfd
494 LOG
.debug("Loaded VNFD: %r" % vnfd
.get("name"))
496 def _load_saps(self
):
497 # Each Service Access Point (connection_point) in the nsd is getting its own container
498 SAPs
= [p
["id"] for p
in self
.nsd
["connection_points"] if p
["type"] == "interface"]
500 # endpoints needed in this service
501 sap_vnf_id
, sap_vnf_interface
= sap
.split(':')
502 # set of the connection_point ids found in the nsd (in the examples this is 'ns')
503 self
.sap_identifiers
.add(sap_vnf_id
)
505 sap_docker_name
= "%s_%s" % (sap_vnf_id
, sap_vnf_interface
)
507 # add SAP to self.vnfds
508 sapfile
= pkg_resources
.resource_filename(__name__
, "sap_vnfd.yml")
509 sap_vnfd
= load_yaml(sapfile
)
510 sap_vnfd
["connection_points"][0]["id"] = sap_vnf_interface
511 sap_vnfd
["name"] = sap_docker_name
512 self
.vnfds
[sap_docker_name
] = sap_vnfd
513 # add SAP vnf to list in the NSD so it is deployed later on
514 # each SAP get a unique VNFD and vnf_id in the NSD
515 self
.nsd
["network_functions"].append({"vnf_id": sap_docker_name
, "vnf_name": sap_docker_name
})
516 LOG
.debug("Loaded SAP: %r" % sap_vnfd
.get("name"))
518 def _load_docker_files(self
):
520 Get all paths to Dockerfiles from VNFDs and store them in dict.
523 for k
, v
in self
.vnfds
.iteritems():
524 for vu
in v
.get("virtual_deployment_units"):
525 if vu
.get("vm_image_format") == "docker":
526 vm_image
= vu
.get("vm_image")
527 docker_path
= os
.path
.join(
528 self
.package_content_path
,
529 make_relative_path(vm_image
))
530 self
.local_docker_files
[k
] = docker_path
531 LOG
.debug("Found Dockerfile (%r): %r" % (k
, docker_path
))
533 def _load_docker_urls(self
):
535 Get all URLs to pre-build docker images in some repo.
538 for k
, v
in self
.vnfds
.iteritems():
539 for vu
in v
.get("virtual_deployment_units"):
540 if vu
.get("vm_image_format") == "docker":
541 url
= vu
.get("vm_image")
543 url
= url
.replace("http://", "")
544 self
.remote_docker_image_urls
[k
] = url
545 LOG
.debug("Found Docker image URL (%r): %r" % (k
, self
.remote_docker_image_urls
[k
]))
547 def _build_images_from_dockerfiles(self
):
549 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
551 if GK_STANDALONE_MODE
:
552 return # do not build anything in standalone mode
554 LOG
.info("Building %d Docker images (this may take several minutes) ..." % len(self
.local_docker_files
))
555 for k
, v
in self
.local_docker_files
.iteritems():
556 for line
in dc
.build(path
=v
.replace("Dockerfile", ""), tag
=k
, rm
=False, nocache
=False):
557 LOG
.debug("DOCKER BUILD: %s" % line
)
558 LOG
.info("Docker image created: %s" % k
)
560 def _pull_predefined_dockerimages(self
):
562 If the package contains URLs to pre-build Docker images, we download them with this method.
565 for url
in self
.remote_docker_image_urls
.itervalues():
566 if not FORCE_PULL
: # only pull if not present (speedup for development)
567 if len(dc
.images
.list(name
=url
)) > 0:
568 LOG
.debug("Image %r present. Skipping pull." % url
)
570 LOG
.info("Pulling image: %r" % url
)
571 # this seems to fail with latest docker api version 2.0.2
572 # dc.images.pull(url,
573 # insecure_registry=True)
574 #using docker cli instead
584 def _check_docker_image_exists(self
, image_name
):
586 Query the docker service and check if the given image exists
587 :param image_name: name of the docker image
590 return len(DockerClient().images
.list(name
=image_name
)) > 0
592 def _calculate_placement(self
, algorithm
):
594 Do placement by adding the a field "dc" to
595 each VNFD that points to one of our
596 data center objects known to the gatekeeper.
598 assert(len(self
.vnfds
) > 0)
599 assert(len(GK
.dcs
) > 0)
600 # instantiate algorithm an place
602 p
.place(self
.nsd
, self
.vnfds
, GK
.dcs
)
603 LOG
.info("Using placement algorithm: %r" % p
.__class
__.__name
__)
604 # lets print the placement result
605 for name
, vnfd
in self
.vnfds
.iteritems():
606 LOG
.info("Placed VNF %r on DC %r" % (name
, str(vnfd
.get("dc"))))
608 def _calculate_cpu_cfs_values(self
, cpu_time_percentage
):
610 Calculate cpu period and quota for CFS
611 :param cpu_time_percentage: percentage of overall CPU to be used
612 :return: cpu_period, cpu_quota
614 if cpu_time_percentage
is None:
616 if cpu_time_percentage
< 0:
618 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
619 # Attention minimum cpu_quota is 1ms (micro)
620 cpu_period
= 1000000 # lets consider a fixed period of 1000000 microseconds for now
621 LOG
.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period
, cpu_time_percentage
))
622 cpu_quota
= cpu_period
* cpu_time_percentage
# calculate the fraction of cpu time for this container
623 # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
625 LOG
.debug("cpu_quota before correcting: %r" % cpu_quota
)
627 LOG
.warning("Increased CPU quota to avoid system error.")
628 LOG
.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period
, cpu_quota
))
629 return int(cpu_period
), int(cpu_quota
)
633 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign the first known DC to every VNFD.
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict of vnf name -> VNFD (mutated: adds "dc" key)
        :param dcs: dict of DC name -> DC object
        """
        for name, vnfd in vnfds.iteritems():
            vnfd["dc"] = list(dcs.itervalues())[0]
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign DCs to VNFDs in round-robin order.
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict of vnf name -> VNFD (mutated: adds "dc" key)
        :param dcs: dict of DC name -> DC object
        """
        c = 0  # bug fix: counter was never initialized in the corrupted source
        dcs_list = list(dcs.itervalues())
        for name, vnfd in vnfds.iteritems():
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
            c += 1  # inc. c to use next DC
661 Resource definitions and API endpoints
class Packages(fr.Resource):
    """
    REST resource for *.son package upload and listing.
    """

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: dict with service_uuid/size/sha1/error + HTTP status code
        """
        try:
            LOG.info("POST /packages called")
            # lets search for the package in the request
            is_file_object = False  # make API more robust: file can be in data or in files field
            if "package" in request.files:
                son_file = request.files["package"]
                is_file_object = True
            elif len(request.data) > 0:
                son_file = request.data
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            file_hash = hashlib.sha1(str(son_file)).hexdigest()
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            if is_file_object:
                son_file.save(upload_path)
            else:
                with open(upload_path, 'wb') as f:
                    f.write(son_file)
            size = os.path.getsize(upload_path)
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
        except Exception as ex:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict with list of all service package UUIDs
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.iterkeys())}
class Instantiations(fr.Resource):
    """
    REST resource to start, list and stop service instances.
    """

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: instance UUID dict + HTTP status code
        """
        LOG.info("POST /instantiations (or /requests) called")
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if (service_uuid is None or service_uuid == "latest") and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]
        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}, 201
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict with list of instance UUID lists (one per service)
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        :return: status message + HTTP status code
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        instance_uuid = json_data.get("service_instance_uuid")

        # try to be fuzzy to simplify testing
        if service_uuid is None and len(GK.services) > 0:
            #if we don't get a service uuid, we simply stop the last service in the list
            service_uuid = list(GK.services.iterkeys())[0]
        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
            instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]

        if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
            # valid service and instance UUID, stop service
            GK.services.get(service_uuid).stop_service(instance_uuid)
            return "service instance with uuid %r stopped." % instance_uuid, 200
        return "Service not found", 404
class Exit(fr.Resource):
    """
    REST resource to shut down the emulator network.
    """

    def get(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        list(GK.dcs.values())[0].net.stop()
# create a single, global GK object
GK = Gatekeeper()
# setup the Flask application and the REST API
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints (v2 aliases kept for SDK compatibility)
api.add_resource(Packages, '/packages', '/api/v2/packages')
api.add_resource(Instantiations, '/instantiations', '/api/v2/instantiations', '/api/v2/requests')
api.add_resource(Exit, '/emulator/exit')
def start_rest_api(host, port, datacenters=None):
    """
    Start the gatekeeper REST API (blocking).
    :param host: interface to bind to
    :param port: TCP port to listen on
    :param datacenters: dict of DC name -> emulator DC object (default: empty)
    """
    # avoid a shared mutable default argument; behavior is unchanged for callers
    GK.dcs = datacenters if datacenters is not None else dict()
    GK.net = get_dc_network()
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """
    Create directory `name` (including parents) if it does not exist yet.
    :param name: directory path
    """
    if not os.path.exists(name):
        os.makedirs(name)
def load_yaml(path):
    """
    Load a YAML file and return its contents as dict.
    Returns an empty dict on parse errors (logged, not raised).
    :param path: path of the YAML file
    """
    with open(path, "r") as f:
        try:
            # NOTE(review): yaml.load without a safe loader executes arbitrary
            # tags — consider yaml.safe_load if packages are untrusted.
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r
def make_relative_path(path):
    """
    Strip a leading "file://" scheme and a leading "/" so the result can be
    joined below another directory.
    :param path: path or file:// URL
    :return: relative path string
    """
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    return path
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    :param prefix: first two octets, e.g. "10.0"
    :param base: third octet
    :param subnet_size: CIDR prefix length
    :param ip: host octet
    :return: e.g. "10.0.1.5/24"
    """
    r = "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
    return r
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    :param n: number of subnet strings to generate
    :param start: first octet of the first subnet
    :param subnet_size: CIDR prefix length
    :param ip: host octet
    :return: list of n strings like "200.0.0.1/24"
    """
    r = list()
    for i in range(start, start + n):
        r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
    return r
def get_dc_network():
    """
    retrieve the DCnetwork where this dummygatekeeper (GK) connects to.
    Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
    :return: the DCNetwork object of the first known datacenter
    """
    assert (len(GK.dcs) > 0)
    return GK.dcs.values()[0].net
if __name__ == '__main__':
    # Lets allow to run the API in standalone mode.
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)