First steps towards pulling pre-built Docker images in the fake gatekeeper
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 This module implements a simple REST API that behaves like SONATA's gatekeeper.
3
4 It is only used to support the development of SONATA's SDK tools and to demonstrate
5 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
6 """
7
8 import logging
9 import os
10 import uuid
11 import hashlib
12 import zipfile
13 import yaml
14 from docker import Client as DockerClient
15 from flask import Flask, request
16 import flask_restful as fr
17
18 logging.basicConfig()
19 LOG = logging.getLogger("sonata-dummy-gatekeeper")
20 LOG.setLevel(logging.DEBUG)
21 logging.getLogger("werkzeug").setLevel(logging.WARNING)
22
23 GK_STORAGE = "/tmp/son-dummy-gk/"
24 UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
25 CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
26
27 # If True, build container images from the Dockerfiles in the package; if False, pull pre-built images referenced in the VNFDs
28 BUILD_DOCKERFILE = False
29
30 # flag to indicate that we run without the emulator (only the bare API for integration testing)
31 GK_STANDALONE_MODE = False
32
33
34 class Gatekeeper(object):
35
36 def __init__(self):
37 self.services = dict()
38 self.dcs = dict()
39 self.vnf_counter = 0 # used to generate short names for VNFs (Mininet limitation)
40 LOG.info("Create SONATA dummy gatekeeper.")
41
42 def register_service_package(self, service_uuid, service):
43 """
44 Register a new service package.
45 :param service_uuid: UUID of the uploaded service package
46 :param service: Service object representing the package
47 """
48 self.services[service_uuid] = service
49 # let's perform all steps needed to onboard the service
50 service.onboard()
51
52 def get_next_vnf_name(self):
53 self.vnf_counter += 1
54 return "vnf%d" % self.vnf_counter
55
56
57 class Service(object):
58 """
59 This class represents a NS uploaded as a *.son package to the
60 dummy gatekeeper.
61 Can have multiple running instances of this service.
62 """
63
64 def __init__(self,
65 service_uuid,
66 package_file_hash,
67 package_file_path):
68 self.uuid = service_uuid
69 self.package_file_hash = package_file_hash
70 self.package_file_path = package_file_path
71 self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
72 self.manifest = None
73 self.nsd = None
74 self.vnfds = dict()
75 self.local_docker_files = dict()
76 self.remote_docker_image_urls = dict()
77 self.instances = dict()
78
79 def onboard(self):
80 """
81 Do all steps to prepare this service to be instantiated
82 :return:
83 """
84 # 1. extract the contents of the package and store them in our catalog
85 self._unpack_service_package()
86 # 2. read in all descriptor files
87 self._load_package_descriptor()
88 self._load_nsd()
89 self._load_vnfd()
90 # 3. prepare container images (e.g. download or build Dockerfile)
91 if BUILD_DOCKERFILE:
92 self._load_docker_files()
93 self._build_images_from_dockerfiles()
94 else:
95 self._load_docker_urls()
96 self._pull_predefined_dockerimages()
97 LOG.info("On-boarded service: %r" % self.manifest.get("package_name"))
98
99 def start_service(self):
100 """
101 This method creates and starts a new service instance.
102 It computes placements, iterates over all VNFDs, and starts
103 each VNFD as a Docker container in the data center selected
104 by the placement algorithm.
105 :return:
106 """
107 LOG.info("Starting service %r" % self.uuid)
108 # 1. each service instance gets a new uuid to identify it
109 instance_uuid = str(uuid.uuid4())
110 # build an instances dict (a bit like an NSR :))
111 self.instances[instance_uuid] = dict()
112 self.instances[instance_uuid]["vnf_instances"] = list()
113 # 2. compute placement of this service instance (adds DC names to VNFDs)
114 if not GK_STANDALONE_MODE:
115 self._calculate_placement(FirstDcPlacement)
116 # 3. iterate over all VNFDs that we have to start
117 for vnfd in self.vnfds.itervalues():
118 vnfi = None
119 if not GK_STANDALONE_MODE:
120 vnfi = self._start_vnfd(vnfd)
121 self.instances[instance_uuid]["vnf_instances"].append(vnfi)
122 LOG.info("Service started. Instance id: %r" % instance_uuid)
123 return instance_uuid
124
125 def _start_vnfd(self, vnfd):
126 """
127 Start a single VNFD of this service
128 :param vnfd: vnfd descriptor dict
129 :return:
130 """
131 # iterate over the deployment units of this VNFD (we return inside the loop, so only the first unit is started)
132 for u in vnfd.get("virtual_deployment_units"):
133 # 1. get the name of the docker image to start and the assigned DC
134 docker_name = vnfd.get("name")
135 target_dc = vnfd.get("dc")
136 # 2. perform some checks to ensure we can start the container
137 assert(docker_name is not None)
138 assert(target_dc is not None)
139 if not self._check_docker_image_exists(docker_name):
140 raise Exception("Docker image %r not found. Abort." % docker_name)
141 # 3. do the dc.startCompute(name="foobar") call to run the container
142 # TODO: consider flavors and other annotations
143 vnfi = target_dc.startCompute(GK.get_next_vnf_name(), image=docker_name, flavor_name="small")
144 # 4. return the compute object so the caller can store it in self.instances
145 return vnfi
146
147 def _unpack_service_package(self):
148 """
149 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
150 """
151 LOG.info("Unzipping: %r" % self.package_file_path)
152 with zipfile.ZipFile(self.package_file_path, "r") as z:
153 z.extractall(self.package_content_path)
154
155
156 def _load_package_descriptor(self):
157 """
158 Load the main package descriptor YAML and keep it as dict.
159 :return:
160 """
161 self.manifest = load_yaml(
162 os.path.join(
163 self.package_content_path, "META-INF/MANIFEST.MF"))
164
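# Illustrative sketch (assumed example, not taken from a real package): the
# loaders below (_load_nsd() and _load_vnfd()) expect the MANIFEST.MF to be
# plain YAML with fields roughly like the following; file names are made up:
#
#   package_name: sonata-demo
#   entry_service_template: file:///service_descriptors/nsd.yml
#   package_content:
#     - name: file:///function_descriptors/firewall-vnfd.yml
#       content-type: application/sonata.function_descriptor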
165 def _load_nsd(self):
166 """
167 Load the entry NSD YAML and keep it as dict.
168 :return:
169 """
170 if "entry_service_template" in self.manifest:
171 nsd_path = os.path.join(
172 self.package_content_path,
173 make_relative_path(self.manifest.get("entry_service_template")))
174 self.nsd = load_yaml(nsd_path)
175 LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
176
177 def _load_vnfd(self):
178 """
179 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
180 :return:
181 """
182 if "package_content" in self.manifest:
183 for pc in self.manifest.get("package_content"):
184 if pc.get("content-type") == "application/sonata.function_descriptor":
185 vnfd_path = os.path.join(
186 self.package_content_path,
187 make_relative_path(pc.get("name")))
188 vnfd = load_yaml(vnfd_path)
189 self.vnfds[vnfd.get("name")] = vnfd
190 LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))
191
192 def _load_docker_files(self):
193 """
194 Get all paths to Dockerfiles from VNFDs and store them in dict.
195 :return:
196 """
197 for k, v in self.vnfds.iteritems():
198 for vu in v.get("virtual_deployment_units"):
199 if vu.get("vm_image_format") == "docker":
200 vm_image = vu.get("vm_image")
201 docker_path = os.path.join(
202 self.package_content_path,
203 make_relative_path(vm_image))
204 self.local_docker_files[k] = docker_path
205 LOG.debug("Found Dockerfile: %r" % docker_path)
206
207 def _load_docker_urls(self):
208 """
209 Get all URLs to pre-built Docker images in some repo.
210 :return:
211 """
212 for k, v in self.vnfds.iteritems():
213 for vu in v.get("virtual_deployment_units"):
214 if vu.get("vm_image_format") == "docker":
215 self.remote_docker_image_urls[k] = vu.get("vm_image")
216 LOG.debug("Found Docker image URL: %r" % self.remote_docker_image_urls[k])
217
218 def _build_images_from_dockerfiles(self):
219 """
220 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
221 """
222 if GK_STANDALONE_MODE:
223 return # do not build anything in standalone mode
224 dc = DockerClient()
225 LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
226 for k, v in self.local_docker_files.iteritems():
227 for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
228 LOG.debug("DOCKER BUILD: %s" % line)
229 LOG.info("Docker image created: %s" % k)
230
231 def _pull_predefined_dockerimages(self):
232 """
233 If the package contains URLs to pre-built Docker images, we download them with this method.
234 """
235 # TODO implement this
236 pass
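# A possible first step (sketch only, not active code): iterate over the URLs
# collected by _load_docker_urls() and pull each one via docker-py's
# Client.pull(), assuming vm_image holds a pullable repository string:
#
#   dc = DockerClient()
#   for url in self.remote_docker_image_urls.itervalues():
#       LOG.info("Pulling image: %r" % url)
#       dc.pull(url)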
237
238 def _check_docker_image_exists(self, image_name):
239 """
240 Query the docker service and check if the given image exists
241 :param image_name: name of the docker image
242 :return:
243 """
244 return len(DockerClient().images(image_name)) > 0
245
246 def _calculate_placement(self, algorithm):
247 """
248 Do placement by adding a field "dc" to
249 each VNFD that points to one of our
250 data center objects known to the gatekeeper.
251 """
252 assert(len(self.vnfds) > 0)
253 assert(len(GK.dcs) > 0)
254 # instantiate the algorithm and compute the placement
255 p = algorithm()
256 p.place(self.nsd, self.vnfds, GK.dcs)
257 LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
258 # let's print the placement result
259 for name, vnfd in self.vnfds.iteritems():
260 LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
261
262
263 """
264 Some (simple) placement algorithms
265 """
266
267
268 class FirstDcPlacement(object):
269 """
270 Placement: always place all VNFs on the first data center in the GK.dcs dict.
271 """
272 def place(self, nsd, vnfds, dcs):
273 for name, vnfd in vnfds.iteritems():
274 vnfd["dc"] = list(dcs.itervalues())[0]
275
276
277 """
278 Resource definitions and API endpoints
279 """
280
281
282 class Packages(fr.Resource):
283
284 def post(self):
285 """
286 Upload a *.son service package to the dummy gatekeeper.
287
288 We expect a request containing a *.son file and store it in UPLOAD_FOLDER
289 :return: UUID
290 """
291 try:
292 # get file contents
293 LOG.debug("Uploaded files: %r" % request.files)
294 # let's search for the package in the request
295 if "package" in request.files:
296 son_file = request.files["package"]
297 # elif "file" in request.files:
298 # son_file = request.files["file"]
299 else:
300 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
301 # generate a uuid to reference this package
302 service_uuid = str(uuid.uuid4())
303 file_hash = hashlib.sha1(str(son_file)).hexdigest()  # note: hashes the str() of the file object, not the file contents
304 # ensure that upload folder exists
305 ensure_dir(UPLOAD_FOLDER)
306 upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
307 # store *.son file to disk
308 son_file.save(upload_path)
309 size = os.path.getsize(upload_path)
310 # create a service object and register it
311 s = Service(service_uuid, file_hash, upload_path)
312 GK.register_service_package(service_uuid, s)
313 # generate the JSON result
314 return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
315 except Exception as ex:
316 LOG.exception("Service package upload failed:")
317 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
318
319 def get(self):
320 """
321 Return a list of UUIDs of uploaded service packages.
322 :return: dict/list
323 """
324 return {"service_uuid_list": list(GK.services.iterkeys())}
325
326
327 class Instantiations(fr.Resource):
328
329 def post(self):
330 """
331 Instantiate a service specified by its UUID.
332 Will return a new UUID to identify the running service instance.
333 :return: UUID
334 """
335 # try to extract the service uuid from the request
336 json_data = request.get_json(force=True)
337 service_uuid = json_data.get("service_uuid")
338
339 # let's be a bit fuzzy here to make testing easier
340 if service_uuid is None and len(GK.services) > 0:
341 # if we don't get a service uuid, we simply start the first service in the list
342 service_uuid = list(GK.services.iterkeys())[0]
343
344 if service_uuid in GK.services:
345 # ok, we have a service uuid, let's start the service
346 service_instance_uuid = GK.services.get(service_uuid).start_service()
347 return {"service_instance_uuid": service_instance_uuid}
348 return "Service not found", 404
349
350 def get(self):
351 """
352 Return the instance UUIDs of all running services (one list per service).
353 :return: dict / list
354 """
355 return {"service_instance_list": [
356 list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
357
358
359 # create a single, global GK object
360 GK = Gatekeeper()
361 # setup Flask
362 app = Flask(__name__)
363 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
364 api = fr.Api(app)
365 # define endpoints
366 api.add_resource(Packages, '/packages')
367 api.add_resource(Instantiations, '/instantiations')
368
369
370 def start_rest_api(host, port, datacenters=dict()):
371 GK.dcs = datacenters
372 # start the Flask server (not the best performance but ok for our use case)
373 app.run(host=host,
374 port=port,
375 debug=True,
376 use_reloader=False # this is needed to run Flask in a non-main thread
377 )
378
379
380 def ensure_dir(name):
381 if not os.path.exists(name):
382 os.makedirs(name)
383
384
385 def load_yaml(path):
386 with open(path, "r") as f:
387 try:
388 r = yaml.safe_load(f)  # safe_load is sufficient for plain descriptor YAML
389 except yaml.YAMLError as exc:
390 LOG.exception("YAML parse error")
391 r = dict()
392 return r
393
394
395 def make_relative_path(path):
396 if path.startswith("file://"):
397 path = path.replace("file://", "", 1)
398 if path.startswith("/"):
399 path = path.replace("/", "", 1)
400 return path
401
402
403 if __name__ == '__main__':
404 """
405 Allow running the API in standalone mode.
406 """
407 GK_STANDALONE_MODE = True
408 logging.getLogger("werkzeug").setLevel(logging.INFO)
409 start_rest_api("0.0.0.0", 8000)
410