1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """

import logging
import os
import uuid
import hashlib
import zipfile
import yaml
from docker import Client as DockerClient
from flask import Flask, request
import flask_restful as fr
from collections import defaultdict

logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# flag to enable the Dockerfile build functionality (build container images
# from the Dockerfiles shipped in the package instead of pulling pre-built images)
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if it is already available locally
FORCE_PULL = False


class Gatekeeper(object):

    def __init__(self):
        self.services = dict()
        self.dcs = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package.
        :param service_uuid: UUID of the uploaded package
        :param service: Service object
        """
        self.services[service_uuid] = service
        # let's perform all steps needed to on-board the service
        service.onboard()

    def get_next_vnf_name(self):
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter


class Service(object):
    """
    This class represents a network service (NS) that was uploaded as a *.son
    package to the dummy gatekeeper. A service can have multiple running
    instances.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        self.instances = dict()
        self.vnf_name2docker_name = dict()
        # let's generate a set of subnet configurations used for E-Line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)

    def onboard(self):
        """
        Do all steps needed to prepare this service for instantiation.
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        # 3. prepare container images (either build them from Dockerfiles or pull pre-built images)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("name"))

    def start_service(self):
        """
        This method creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return:
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build an instances dict (a bit like an NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            self._calculate_placement(FirstDcPlacement)
        # iterate over all vnfds that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        # 3. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
        # map the vnf_ids used in the NSD to the vnf_names of the corresponding VNFDs
        vnf_id2vnf_name = defaultdict(
            lambda: "NotExistingNode",
            {d["vnf_id"]: d["vnf_name"] for d in self.nsd["network_functions"]})
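        # e.g., with network_functions = [{"vnf_id": "vnf_fw", "vnf_name": "firewall-vnf"}]
        # (hypothetical descriptor entries, for illustration only),
        # vnf_id2vnf_name["vnf_fw"] == "firewall-vnf" and unknown ids map to "NotExistingNode".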

        vlinks = self.nsd["virtual_links"]
        fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
        eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
        elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

        # 3a. deploy E-Line links
        # the cookie is used as an identifier for the flow rules installed by the dummygatekeeper,
        # e.g., different services get a unique cookie for their flow rules
        cookie = 1
        for link in eline_fwd_links:
            src_id, src_if_name = link["connection_points_reference"][0].split(":")
            dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

            src_name = vnf_id2vnf_name[src_id]
            dst_name = vnf_id2vnf_name[dst_id]

            LOG.debug(
                "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                    src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

            if (src_name in self.vnfds) and (dst_name in self.vnfds):
                network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                src_docker_name = self.vnf_name2docker_name[src_name]
                dst_docker_name = self.vnf_name2docker_name[dst_name]
                LOG.debug("E-Line source container: %r" % src_docker_name)
                ret = network.setChain(
                    src_docker_name, dst_docker_name,
                    vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                    bidirectional=True, cmd="add-flow", cookie=cookie, priority=10)

                # re-configure the VNF's IP assignment and ensure that a new subnet is used for each E-Line link
                src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                if src_vnfi is not None:
                    self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                if dst_vnfi is not None:
                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

        # 3b. deploy E-LAN links
        base = 10
        for link in elan_fwd_links:
            # generate a LAN IP address for each connection point on this E-LAN
            ip = 1
            for intf in link["connection_points_reference"]:
                ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                vnf_id, intf_name = intf.split(":")
                vnf_name = vnf_id2vnf_name[vnf_id]
                LOG.debug(
                    "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                        vnf_name, vnf_id, intf_name, ip_address))

                if vnf_name in self.vnfds:
                    # re-configure the VNF's IP assignment and ensure that a new subnet is used for each E-LAN
                    # E-LAN relies on the learning switch capability of the infrastructure switch in dockernet,
                    # so no explicit chaining is necessary
                    vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                    if vnfi is not None:
                        self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                    # increase for the next IP address on this E-LAN
                    ip += 1
            # increase the base IP address for the next E-LAN
            base += 1

        # 4. run the emulator-specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def _start_vnfd(self, vnfd):
        """
        Start a single VNFD of this service.
        :param vnfd: vnfd descriptor dict
        :return:
        """
        # iterate over all deployment units within the VNFD
        # (note: only the first deployment unit is deployed; the method returns after it)
        for u in vnfd.get("virtual_deployment_units"):
            # 1. get the name of the docker image to start and the assigned DC
            vnf_name = vnfd.get("name")
            if vnf_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_name)
            docker_name = self.remote_docker_image_urls.get(vnf_name)
            target_dc = vnfd.get("dc")
            # 2. perform some checks to ensure we can start the container
            assert(docker_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_name):
                raise Exception("Docker image %r not found. Abort." % docker_name)
            # 3. do the dc.startCompute(name="foobar") call to run the container
            # TODO consider flavors and other annotations
            intfs = vnfd.get("connection_points")

            # use the vnf_id from the NSD as the docker name
            # so deployed containers can be easily mapped back to the NSD
            vnf_name2id = defaultdict(
                lambda: "NotExistingNode",
                {d["vnf_name"]: d["vnf_id"] for d in self.nsd["network_functions"]})
            self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
            # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()

            LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
            LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
            vnfi = target_dc.startCompute(
                self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small")
            return vnfi

    def _get_vnf_instance(self, instance_uuid, name):
        """
        Returns the Docker object for the given VNF name (or Docker name).
        :param instance_uuid: UUID of the service instance to search in.
        :param name: VNF name or Docker name. We are fuzzy here.
        :return:
        """
        dn = name
        if name in self.vnf_name2docker_name:
            dn = self.vnf_name2docker_name[name]
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnfi.name == dn:
                return vnfi
        LOG.warning("No container with name: %r found." % dn)
        return None

    @staticmethod
    def _vnf_reconfigure_network(vnfi, if_name, net_str):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instance
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :return:
        """
        intf = vnfi.intf(intf=if_name)
        if intf is not None:
            intf.setIP(net_str)
            LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
        else:
            LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))

    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                if "SON_EMU_CMD=" in env_var:
                    cmd = str(env_var.split("=")[1])
                    LOG.info("Executing entrypoint script in %r: %r" % (vnfi.name, cmd))
                    vnfi.cmdPrint(cmd)

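    # Example (hypothetical): a VNF container image whose Dockerfile contains
    #   ENV SON_EMU_CMD ./start.sh
    # will get "./start.sh" executed inside the running container right after
    # deployment by _trigger_emulator_start_scripts_in_vnfis().
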
    def _unpack_service_package(self):
        """
        Unzip the *.son file and store its contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)

    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in a dict.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from the VNFDs and store them in a dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-built Docker images referenced in the VNFDs.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[k] = url
                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))

    def _build_images_from_dockerfiles(self):
        """
        Build a Docker image for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-built Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            dc.pull(url, insecure_registry=True)

    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists
        :param image_name: name of the docker image
        :return:
        """
        return len(DockerClient().images(image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do the placement by adding a field "dc" to
        each VNFD that points to one of the
        data center objects known to the gatekeeper.
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate the algorithm and compute the placement
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # let's print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))


444 """
445 Some (simple) placement algorithms
446 """
447
448
449 class FirstDcPlacement(object):
450 """
451 Placement: Always use one and the same data center from the GK.dcs dict.
452 """
453 def place(self, nsd, vnfds, dcs):
454 for name, vnfd in vnfds.iteritems():
455 vnfd["dc"] = list(dcs.itervalues())[0]
456
457
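# A minimal sketch of an alternative placement strategy (not part of the
# original dummy gatekeeper, purely illustrative): distribute the VNFs
# round-robin over all data centers known to the gatekeeper.
class RoundRobinDcPlacement(object):
    """
    Placement: cycle through the available data centers.
    """
    def place(self, nsd, vnfds, dcs):
        dc_list = list(dcs.itervalues())
        for i, (name, vnfd) in enumerate(vnfds.iteritems()):
            vnfd["dc"] = dc_list[i % len(dc_list)]

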
458 """
459 Resource definitions and API endpoints
460 """
461
462
463 class Packages(fr.Resource):
464
465 def post(self):
466 """
467 Upload a *.son service package to the dummy gatekeeper.
468
469 We expect request with a *.son file and store it in UPLOAD_FOLDER
470 :return: UUID
471 """
472 try:
473 # get file contents
474 print(request.files)
475 # lets search for the package in the request
476 if "package" in request.files:
477 son_file = request.files["package"]
478 # elif "file" in request.files:
479 # son_file = request.files["file"]
480 else:
481 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
482 # generate a uuid to reference this package
483 service_uuid = str(uuid.uuid4())
484 file_hash = hashlib.sha1(str(son_file)).hexdigest()
485 # ensure that upload folder exists
486 ensure_dir(UPLOAD_FOLDER)
487 upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
488 # store *.son file to disk
489 son_file.save(upload_path)
490 size = os.path.getsize(upload_path)
491 # create a service object and register it
492 s = Service(service_uuid, file_hash, upload_path)
493 GK.register_service_package(service_uuid, s)
494 # generate the JSON result
495 return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
496 except Exception as ex:
497 LOG.exception("Service package upload failed:")
498 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
499
500 def get(self):
501 """
502 Return a list of UUID's of uploaded service packages.
503 :return: dict/list
504 """
505 LOG.info("GET /packages")
506 return {"service_uuid_list": list(GK.services.iterkeys())}
507
508
class Instantiations(fr.Resource):

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # let's be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, let's start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}
        return "Service not found", 404

    def get(self):
        """
        Return a list of UUIDs of all running service instances.
        :return: dict/list
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}


# create a single, global GK object
GK = Gatekeeper()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
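# Example interaction (illustrative; assumes the API listens on port 8000 as in
# the standalone __main__ below and that "my_service.son" is a valid package):
#   curl -X POST -F package=@my_service.son http://127.0.0.1:8000/packages
#   curl -X POST -d '{"service_uuid": "<uuid returned by the upload>"}' http://127.0.0.1:8000/instantiations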


def start_rest_api(host, port, datacenters=dict()):
    GK.dcs = datacenters
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )


def ensure_dir(name):
    if not os.path.exists(name):
        os.makedirs(name)


def load_yaml(path):
    with open(path, "r") as f:
        try:
            r = yaml.safe_load(f)
        except yaml.YAMLError:
            LOG.exception("YAML parse error")
            r = dict()
    return r


def make_relative_path(path):
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    return path
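# Example for make_relative_path (illustrative path):
#   make_relative_path("file:///docker_files/firewall/Dockerfile")
#   returns "docker_files/firewall/Dockerfile"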


def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    """
    r = "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
    return r
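# e.g., generate_lan_string("10.0", 10, subnet_size=24, ip=1) returns "10.0.10.1/24"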


def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    """
    r = list()
    for i in range(start, start + n):
        r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
    return r
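# e.g., generate_subnet_strings(3, start=200, ip=1)
# returns ["200.0.0.1/24", "201.0.0.1/24", "202.0.0.1/24"]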


if __name__ == '__main__':
    """
    Let's allow running the API in standalone mode.
    """
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)