Fix: Ensure that interface names are RTNETLINK compatible
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
"""
Copyright (c) 2015 SONATA-NFV and Paderborn University
ALL RIGHTS RESERVED.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.

This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
"""
This module implements a simple REST API that behaves like SONATA's gatekeeper.

It is only used to support the development of SONATA's SDK tools and to demonstrate
the year 1 version of the emulator until the integration with WP4's orchestrator is done.
"""

import logging
import os
import uuid
import hashlib
import zipfile
import yaml
from docker import Client as DockerClient
from flask import Flask, request
import flask_restful as fr
from collections import defaultdict

logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if it is already available?
FORCE_PULL = False

class Gatekeeper(object):

    def __init__(self):
        self.services = dict()
        self.dcs = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package.
        :param service_uuid: UUID of the uploaded package
        :param service: Service object
        """
        self.services[service_uuid] = service
        # let's perform all steps needed to onboard the service
        service.onboard()

    def get_next_vnf_name(self):
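        # Containers and their network interfaces are named after these short
        # VNF names; keeping the names short helps to keep the derived
        # interface names RTNETLINK compatible (Linux limits interface names
        # to 15 characters).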
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter


class Service(object):
    """
    This class represents a network service (NS) uploaded as a *.son package
    to the dummy gatekeeper. A service can have multiple running instances.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        self.instances = dict()
        self.vnfname2num = dict()
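        # maps VNF names from the descriptors to the short names used inside
        # the emulator (see GK.get_next_vnf_name())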

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("package_name"))

    def start_service(self):
        """
        This method creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return:
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build an instances dict (a bit like an NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            self._calculate_placement(FirstDcPlacement)
        # iterate over all VNFDs that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        # 3. Configure the chaining of the network functions (currently only E-Line links supported)
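        # build a lookup table from the VNF ids used in the NSD links to the
        # corresponding VNF names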
        nfid2name = defaultdict(lambda: "NotExistingNode")
        for f in self.nsd["network_functions"]:
            nfid2name[f["vnf_id"]] = f["vnf_name"]

        vlinks = self.nsd["virtual_links"]
        fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
        eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]

        cookie = 1  # not clear why this is needed - to check with Steven
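        # (OpenFlow cookies are opaque identifiers attached to the installed
        # flow entries; a distinct cookie per E-Line link makes the resulting
        # flows easier to identify or remove later)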
        for link in eline_fwd_links:
            src_node, src_port = link["connection_points_reference"][0].split(":")
            dst_node, dst_port = link["connection_points_reference"][1].split(":")

            srcname = nfid2name[src_node]
            dstname = nfid2name[dst_node]
            LOG.debug("src name: " + srcname + " dst name: " + dstname)

            if (srcname in self.vnfds) and (dstname in self.vnfds):
                network = self.vnfds[srcname].get("dc").net  # there should be a cleaner way to find the DCNetwork
                src_vnf = self.vnfname2num[srcname]
                dst_vnf = self.vnfname2num[dstname]
                network.setChain(src_vnf, dst_vnf,
                                 vnf_src_interface=src_port, vnf_dst_interface=dst_port,
                                 bidirectional=True, cmd="add-flow", cookie=cookie)
                cookie += 1

        # 4. run the emulator specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def _start_vnfd(self, vnfd):
        """
        Start a single VNFD of this service.
        :param vnfd: VNFD descriptor dict
        :return:
        """
        # iterate over all deployment units within each VNFD
        for u in vnfd.get("virtual_deployment_units"):
            # 1. get the name of the docker image to start and the assigned DC
            vnf_name = vnfd.get("name")
            if vnf_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_name)
            docker_name = self.remote_docker_image_urls.get(vnf_name)
            target_dc = vnfd.get("dc")
            # 2. perform some checks to ensure we can start the container
            assert(docker_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_name):
                raise Exception("Docker image %r not found. Abort." % docker_name)
            # 3. do the dc.startCompute(name="foobar") call to run the container
            # TODO consider flavors, and other annotations
            intfs = vnfd.get("connection_points")
            self.vnfname2num[vnf_name] = GK.get_next_vnf_name()
            LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnfname2num[vnf_name], vnfd.get("dc")))
            LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
            vnfi = target_dc.startCompute(self.vnfname2num[vnf_name], network=intfs, image=docker_name, flavor_name="small")
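            # note: the method returns after the first deployment unit, so any
            # additional deployment units of this VNFD are not started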
            return vnfi

    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                if "SON_EMU_CMD=" in env_var:
                    cmd = str(env_var.split("=", 1)[1])
                    LOG.info("Executing entrypoint script in %r: %r" % (vnfi.name, cmd))
                    vnfi.cmdPrint(cmd)

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)


    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in a dict.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from the VNFDs and store them in a dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-built Docker images in some repository.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[k] = url
                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-built Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            dc.pull(url,
                    insecure_registry=True)

    def _check_docker_image_exists(self, image_name):
        """
        Query the Docker service and check if the given image exists.
        :param image_name: name of the Docker image
        :return:
        """
        return len(DockerClient().images(image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do placement by adding a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate the algorithm and place
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # let's print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))


"""
Some (simple) placement algorithms
"""


class FirstDcPlacement(object):
    """
    Placement: Always use the same data center (the first entry of the GK.dcs dict) for all VNFs.
    """
    def place(self, nsd, vnfds, dcs):
        for name, vnfd in vnfds.iteritems():
            vnfd["dc"] = list(dcs.itervalues())[0]

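
# A minimal sketch of an alternative placement strategy (hypothetical, not
# referenced anywhere above): distribute the VNFs of a service round-robin
# over all data centers known to the gatekeeper. It only relies on the
# place(nsd, vnfds, dcs) interface used by _calculate_placement().
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute the VNFs of a service round-robin over all DCs.
    """
    def place(self, nsd, vnfds, dcs):
        dc_list = list(dcs.itervalues())
        for i, (name, vnfd) in enumerate(vnfds.iteritems()):
            vnfd["dc"] = dc_list[i % len(dc_list)]
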

"""
Resource definitions and API endpoints
"""


class Packages(fr.Resource):

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect a request with a *.son file and store it in UPLOAD_FOLDER.
        :return: UUID
        """
        try:
            # get file contents
            print(request.files)
            # let's search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #    son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            file_hash = hashlib.sha1(str(son_file)).hexdigest()
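            # note: this hashes the string representation of the FileStorage
            # object, not the content of the uploaded file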
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
        except Exception as ex:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUIDs of all uploaded service packages.
        :return: dict/list
        """
        return {"service_uuid_list": list(GK.services.iterkeys())}


class Instantiations(fr.Resource):

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # let's be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, let's start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}
        return "Service not found", 404

    def get(self):
        """
        Return a list of the instance UUIDs of all running services.
        :return: dict / list
        """
        return {"service_instance_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}


# create a single, global GK object
GK = Gatekeeper()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
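
# Example interaction with this API (a sketch, assuming the standalone defaults
# used in __main__ below; host, port, and the file name are placeholders):
#
#   curl -X POST -F package=@my_service.son http://127.0.0.1:8000/packages
#   curl -X POST -d '{"service_uuid": "<uuid returned by the previous call>"}' \
#        http://127.0.0.1:8000/instantiations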


def start_rest_api(host, port, datacenters=dict()):
    GK.dcs = datacenters
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )


def ensure_dir(name):
    if not os.path.exists(name):
        os.makedirs(name)


def load_yaml(path):
    with open(path, "r") as f:
        try:
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r


def make_relative_path(path):
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    return path


if __name__ == '__main__':
    """
    Let's allow running the API in standalone mode.
    """
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)