29ebc0bd7118bbd1aa5019c6c600407c440fd391
This module implements a simple REST API that behaves like SONATA's gatekeeper.
It is only used to support the development of SONATA's SDK tools and to demonstrate
the year 1 version of the emulator until the integration with WP4's orchestrator is done.
14 from docker
import Client
as DockerClient
15 from flask
import Flask
, request
16 import flask_restful
as fr
# module-wide logger for the dummy gatekeeper
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
# silence werkzeug's per-request logging by default
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# where uploaded *.son packages are stored
UPLOAD_FOLDER = "/tmp/son-dummy-gk/uploads/"
# where extracted package contents (the "catalog") are stored
CATALOG_FOLDER = "/tmp/son-dummy-gk/catalog/"
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps track of on-boarded service packages and
    hands out short, unique VNF container names.
    """

    def __init__(self):
        # service_uuid -> Service object (on-boarded packages)
        self.services = dict()
        # data centers made known to the GK
        # NOTE(review): reconstructed from usage (GK.dcs is read in
        # _calculate_placement and set by start_rest_api) — confirm upstream
        self.dcs = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package and on-board it.
        :param service_uuid: UUID string identifying the package
        :param service: Service object built from the uploaded package
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        service.onboard()

    def get_next_vnf_name(self):
        """
        Return a new short container name like 'sonvnf1'.
        Increment first so names start at 1 and never repeat.
        """
        self.vnf_counter += 1
        return "sonvnf%d" % self.vnf_counter
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self, service_uuid, package_file_hash, package_file_path):
        """
        :param service_uuid: UUID assigned to the uploaded package
        :param package_file_hash: sha1 of the uploaded *.son file
        :param package_file_path: on-disk path of the uploaded *.son file
        """
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        # extraction target inside the catalog
        self.package_content_path = os.path.join(
            CATALOG_FOLDER, "services/%s" % self.uuid)
        # descriptors; filled in by onboard()
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        # docker image name -> path of local Dockerfile found in the package
        self.local_docker_files = dict()
        # instance_uuid -> record about a running instance (a bit like a NSR)
        self.instances = dict()

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated.
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        self._load_docker_files()
        # 3. prepare container images (e.g. download or build Dockerfile)
        self._build_images_from_dockerfiles()
        self._download_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("package_name"))

    def start_service(self):
        """
        This method creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return: UUID of the new service instance
        """
        LOG.info("Starting service %r" % self.uuid)
        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build a instances dict (a bit like a NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()
        # 2. compute placement of this service instance (adds DC names to VNFDs)
        self._calculate_placement(FirstDcPlacement)
        # iterate over all vnfds that we have to start
        # FIX: .values()/.items() instead of Py2-only itervalues()/iteritems();
        # identical behavior on Python 2, also works on Python 3.
        for vnfd in self.vnfds.values():
            # iterate over all deployment units within each VNFD
            for u in vnfd.get("virtual_deployment_units"):
                # 3. get the name of the docker image to start and the assigned DC
                docker_name = u.get("vm_image")
                target_dc = vnfd.get("dc")
                # 4. perform some checks to ensure we can start the container
                assert(docker_name is not None)
                assert(target_dc is not None)
                if not self._check_docker_image_exists(docker_name):
                    raise Exception("Docker image %r not found. Abort." % docker_name)
                # 5. do the dc.startCompute(name="foobar") call to run the container
                # TODO consider flavors, and other annotations
                vnfi = target_dc.startCompute(
                    GK.get_next_vnf_name(), image=docker_name, flavor_name="small")
                # 6. store references to the compute objects in self.instances
                self.instances[instance_uuid]["vnf_instances"].append(vnfi)
        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)

    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML (META-INF/MANIFEST.MF) and keep it as dict.
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("ns_name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in a
        dict keyed by vnf_name.
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("vnf_name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("vnf_name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from MANIFEST.MF and store them in a dict
        keyed by the docker image name they should be built as.
        """
        if "package_content" in self.manifest:
            for df in self.manifest.get("package_content"):
                if df.get("content-type") == "application/sonata.docker_files":
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(df.get("name")))
                    # FIXME: Mapping to docker image names is hardcoded because of the missing mapping in the example package
                    self.local_docker_files[helper_map_docker_name(df.get("name"))] = docker_path
                    LOG.debug("Found Dockerfile: %r" % docker_path)

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        # NOTE(review): reconstructed — 'dc' must be a docker client here
        # (dc.build is called below); confirm against upstream source.
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for image_tag, dockerfile_path in self.local_docker_files.items():
            # docker-py builds from the directory containing the Dockerfile
            for line in dc.build(path=dockerfile_path.replace("Dockerfile", ""),
                                 tag=image_tag, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % image_tag)

    def _download_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-build Docker images, we download them with this method.
        """
        # TODO implement this if we want to be able to download docker images instead of building them
        pass

    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists.
        :param image_name: name of the docker image
        :return: True iff at least one local image matches the name
        """
        return len(DockerClient().images(image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do placement by adding a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        :param algorithm: placement class to instantiate, e.g. FirstDcPlacement
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate algorithm and place
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # lets print the placement result
        for name, vnfd in self.vnfds.items():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
227 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """

    def place(self, nsd, vnfds, dcs):
        """
        Assign the first DC in *dcs* to every VNFD by adding a 'dc' field.
        :param nsd: network service descriptor (unused by this strategy)
        :param vnfds: dict vnf_name -> VNFD dict; mutated in place
        :param dcs: non-empty dict of data center objects
        """
        # FIX: dict.itervalues()/iteritems() are Python-2-only; .values()/.items()
        # behave identically here and also work on Python 3.
        # Hoisted out of the loop: the chosen DC is the same for every VNFD.
        first_dc = list(dcs.values())[0]
        for name, vnfd in vnfds.items():
            vnfd["dc"] = first_dc
241 Resource definitions and API endpoints
class Packages(fr.Resource):
    """REST resource for uploading and listing *.son service packages."""

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect a request with a *.son file and store it in UPLOAD_FOLDER.
        :return: dict with service_uuid, size, sha1 and error fields
        """
        try:
            # get the package file from the request
            son_file = request.files['file']
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # FIX: hash the stored package bytes. The original hashed
            # str(son_file), i.e. the repr of the FileStorage object (which
            # contains a memory address), not the package contents.
            with open(upload_path, "rb") as f:
                file_hash = hashlib.sha1(f.read()).hexdigest()
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
        except Exception:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict with a list of service UUIDs
        """
        # FIX: iterkeys() is Python-2-only; list(dict) yields the same keys
        return {"service_uuid_list": list(GK.services)}
class Instantiations(fr.Resource):
    """REST resource for starting and listing service instances."""

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: dict with the instance UUID, or ("Service not found", 404)
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service in the list
            # FIX: iterkeys() is Python-2-only; list(dict) yields the same keys
            service_uuid = list(GK.services)[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict with a list of lists of instance UUIDs
        """
        # FIX: iterkeys()/itervalues() are Python-2-only; the portable forms
        # iterate the same elements.
        return {"service_instance_list": [
            list(s.instances) for s in GK.services.values()]}
# create a single, global GK object
# NOTE(review): reconstructed — the global instance is referenced as GK
# throughout this module; confirm against upstream source.
GK = Gatekeeper()

# setup Flask and the REST API endpoints
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
# NOTE(review): reconstructed — 'api' must be a flask_restful Api bound to
# 'app' for the add_resource calls below; confirm against upstream source.
api = fr.Api(app)
api.add_resource(Packages, '/api/packages')
api.add_resource(Instantiations, '/api/instantiations')
def start_rest_api(host, port, datacenters=None):
    """
    Run the gatekeeper's Flask app (blocking).
    :param host: interface to bind to, e.g. "0.0.0.0"
    :param port: TCP port for the API
    :param datacenters: dict of data center objects made known to the GK
    """
    # FIX: avoid a shared mutable default argument (was datacenters=dict())
    # NOTE(review): the GK.dcs assignment is reconstructed from usage
    # (GK.dcs is read in _calculate_placement) — confirm upstream.
    GK.dcs = dict() if datacenters is None else datacenters
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """
    Create directory *name* (including parents) if it does not exist yet.
    :param name: directory path
    """
    if not os.path.exists(name):
        os.makedirs(name)
def load_yaml(path):
    """
    Read the YAML file at *path* and return the parsed result,
    or None if parsing failed.
    :param path: path of the YAML file
    """
    with open(path, "r") as f:
        # FIX: initialize r — it was unbound when parsing raised,
        # turning a logged parse error into a NameError on return.
        r = None
        try:
            # NOTE(review): yaml.load on uploaded package contents can execute
            # arbitrary constructors via YAML tags; consider yaml.safe_load.
            r = yaml.load(f)
        except yaml.YAMLError:
            LOG.exception("YAML parse error")
        return r
def make_relative_path(path):
    """
    Strip a single leading '/' so an absolute-looking package path can be
    os.path.join()-ed below the package content folder.
    :param path: path string from a package descriptor
    :return: the path without one leading slash (unchanged if already relative)
    """
    if path.startswith("/"):
        return path.replace("/", "", 1)
    # FIX: restore the fall-through return; without it relative inputs
    # yielded None, which would crash the os.path.join at every call site.
    return path
def helper_map_docker_name(name):
    """
    Quick hack to fix missing dependency in example package.
    Maps a Dockerfile path from MANIFEST.MF to the docker image tag to build.
    :param name: Dockerfile path as listed in the package manifest
    :return: image name string, or None for unknown paths
    """
    # FIXME remove this when package description is fixed
    mapping = {
        "/docker_files/iperf/Dockerfile": "iperf_docker",
        "/docker_files/firewall/Dockerfile": "fw_docker",
        "/docker_files/tcpdump/Dockerfile": "tcpdump_docker"
    }
    return mapping.get(name)
if __name__ == '__main__':
    # Allow running the dummy gatekeeper REST API in standalone mode.
    # Show werkzeug request logging when run interactively.
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)