merge with latest upstream status
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """
34
import errno
import hashlib
import logging
import os
import threading
import uuid
import zipfile
from collections import defaultdict

import pkg_resources
import yaml
from docker import DockerClient
from flask import Flask, request
import flask_restful as fr
47
48 logging.basicConfig()
49 LOG = logging.getLogger("sonata-dummy-gatekeeper")
50 LOG.setLevel(logging.DEBUG)
51 logging.getLogger("werkzeug").setLevel(logging.WARNING)
52
53 GK_STORAGE = "/tmp/son-dummy-gk/"
54 UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
55 CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
56
57 # Enable Dockerfile build functionality
58 BUILD_DOCKERFILE = False
59
60 # flag to indicate that we run without the emulator (only the bare API for integration testing)
61 GK_STANDALONE_MODE = False
62
63 # should a new version of an image be pulled even if its available
64 FORCE_PULL = False
65
66 # Automatically deploy SAPs (endpoints) of the service as new containers
67 # Attention: This is not a configuration switch but a global variable! Don't change its default value.
68 DEPLOY_SAP = False
69
70 # flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
71 BIDIRECTIONAL_CHAIN = False
72
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps track of on-boarded service packages and of
    the data centers / emulator network it is attached to.
    """

    def __init__(self):
        # service_uuid -> Service object
        self.services = dict()
        # data center name -> data center object (filled in by start_rest_api)
        self.dcs = dict()
        # the emulator's DCNetwork (set by start_rest_api)
        self.net = None
        # counter used to generate short names for VNFs (Mininet limitation)
        self.vnf_counter = 0
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package and trigger its on-boarding.
        :param service_uuid: UUID identifying the uploaded package
        :param service: Service object
        """
        self.services[service_uuid] = service
        # perform all steps needed to on-board the service
        service.onboard()

    def get_next_vnf_name(self):
        """Return a fresh short VNF name of the form 'vnf<N>'."""
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
95
96
97 class Service(object):
98 """
99 This class represents a NS uploaded as a *.son package to the
100 dummy gatekeeper.
101 Can have multiple running instances of this service.
102 """
103
    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        """
        Create a service object for an uploaded *.son package.
        :param service_uuid: UUID assigned to this package by the gatekeeper
        :param package_file_hash: SHA1 hash reported for the uploaded file
        :param package_file_path: path of the stored *.son file on disk
        """
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        # location where the extracted package contents will be stored
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None  # parsed META-INF/MANIFEST.MF (dict)
        self.nsd = None  # parsed network service descriptor (dict)
        self.vnfds = dict()  # vnf name -> parsed VNFD (dict)
        self.local_docker_files = dict()  # vnf name -> path of local Dockerfile
        self.remote_docker_image_urls = dict()  # vnf name -> docker image URL
        self.instances = dict()  # instance uuid -> instance record
        self.vnf_name2docker_name = dict()  # vnf name -> container name used at deployment
        self.sap_identifiers = set()  # connection point ids of SAPs found in the NSD
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)
123
    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated:
        unpack the package, parse all descriptors and prepare the
        container images (build or pull).
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        if DEPLOY_SAP:
            self._load_saps()
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("name"))
145
    def start_service(self):
        """
        This methods creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm. Afterwards the E-Line and E-LAN
        links of the NSD's first forwarding graph are set up.
        :return: UUID of the new service instance
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build a instances dict (a bit like a NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
        # mapping vnf_id -> vnf_name built from the NSD ("NotExistingNode" for unknown ids)
        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                 self.nsd["network_functions"])))

        # 3. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            #self._calculate_placement(FirstDcPlacement)
            self._calculate_placement(RoundRobinDcPlacement)
        # iterate over all vnfds that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        if "virtual_links" in self.nsd:
            vlinks = self.nsd["virtual_links"]
            # NOTE(review): only the first forwarding graph of the NSD is considered here
            fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
            eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
            elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

            GK.net.deployed_elines.extend(eline_fwd_links)
            GK.net.deployed_elans.extend(elan_fwd_links)

            # 4a. deploy E-Line links
            # cookie is used as identifier for the flowrules installed by the dummygatekeeper
            # eg. different services get a unique cookie for their flowrules
            # NOTE(review): cookie is fixed to 1 here although the comment above
            # suggests a unique cookie per service -- confirm intended behavior
            cookie = 1
            for link in eline_fwd_links:
                src_id, src_if_name = link["connection_points_reference"][0].split(":")
                dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

                # check if there is a SAP in the link
                if src_id in self.sap_identifiers:
                    src_docker_name = "{0}_{1}".format(src_id, src_if_name)
                    src_id = src_docker_name
                else:
                    src_docker_name = src_id

                if dst_id in self.sap_identifiers:
                    dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
                    dst_id = dst_docker_name
                else:
                    dst_docker_name = dst_id

                src_name = vnf_id2vnf_name[src_id]
                dst_name = vnf_id2vnf_name[dst_id]

                LOG.debug(
                    "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                        src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

                if (src_name in self.vnfds) and (dst_name in self.vnfds):
                    network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                    LOG.debug(src_docker_name)
                    ret = network.setChain(
                        src_docker_name, dst_docker_name,
                        vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                        bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)

                    # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
                    src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                    if src_vnfi is not None:
                        self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                    dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                    if dst_vnfi is not None:
                        self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

            # 4b. deploy E-LAN links
            base = 10
            for link in elan_fwd_links:

                elan_vnf_list=[]

                # generate lan ip address
                ip = 1
                for intf in link["connection_points_reference"]:
                    ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                    vnf_id, intf_name = intf.split(":")
                    if vnf_id in self.sap_identifiers:
                        src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                        vnf_id = src_docker_name
                    else:
                        src_docker_name = vnf_id
                    vnf_name = vnf_id2vnf_name[vnf_id]
                    LOG.debug(
                        "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                            vnf_name, vnf_id, intf_name, ip_address))

                    if vnf_name in self.vnfds:
                        # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
                        # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                        # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
                        vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                        if vnfi is not None:
                            self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                            # increase for the next ip address on this E-LAN
                            ip += 1

                            # add this vnf and interface to the E-LAN for tagging
                            network = self.vnfds[vnf_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                            elan_vnf_list.append({'name':src_docker_name,'interface':intf_name})


                # install the VLAN tags for this E-LAN
                network.setLAN(elan_vnf_list)
                # increase the base ip address for the next E-LAN
                base += 1

        # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid
278
279 def stop_service(self, instance_uuid):
280 """
281 This method stops a running service instance.
282 It iterates over all VNF instances, stopping them each
283 and removing them from their data center.
284
285 :param instance_uuid: the uuid of the service instance to be stopped
286 """
287 LOG.info("Stopping service %r" % self.uuid)
288 # get relevant information
289 # instance_uuid = str(self.uuid.uuid4())
290 vnf_instances = self.instances[instance_uuid]["vnf_instances"]
291
292 for v in vnf_instances:
293 self._stop_vnfi(v)
294
295 if not GK_STANDALONE_MODE:
296 # remove placement?
297 # self._remove_placement(RoundRobinPlacement)
298 None
299
300 # last step: remove the instance from the list of all instances
301 del self.instances[instance_uuid]
302
303 def _start_vnfd(self, vnfd):
304 """
305 Start a single VNFD of this service
306 :param vnfd: vnfd descriptor dict
307 :return:
308 """
309 # iterate over all deployment units within each VNFDs
310 for u in vnfd.get("virtual_deployment_units"):
311 # 1. get the name of the docker image to start and the assigned DC
312 vnf_name = vnfd.get("name")
313 if vnf_name not in self.remote_docker_image_urls:
314 raise Exception("No image name for %r found. Abort." % vnf_name)
315 docker_name = self.remote_docker_image_urls.get(vnf_name)
316 target_dc = vnfd.get("dc")
317 # 2. perform some checks to ensure we can start the container
318 assert(docker_name is not None)
319 assert(target_dc is not None)
320 if not self._check_docker_image_exists(docker_name):
321 raise Exception("Docker image %r not found. Abort." % docker_name)
322
323 # 3. get the resource limits
324 res_req = u.get("resource_requirements")
325 cpu_list = res_req.get("cpu").get("cores")
326 if not cpu_list or len(cpu_list)==0:
327 cpu_list="1"
328 cpu_bw = res_req.get("cpu").get("cpu_bw")
329 if not cpu_bw:
330 cpu_bw=1
331 mem_num = str(res_req.get("memory").get("size"))
332 if len(mem_num)==0:
333 mem_num="2"
334 mem_unit = str(res_req.get("memory").get("size_unit"))
335 if str(mem_unit)==0:
336 mem_unit="GB"
337 mem_limit = float(mem_num)
338 if mem_unit=="GB":
339 mem_limit=mem_limit*1024*1024*1024
340 elif mem_unit=="MB":
341 mem_limit=mem_limit*1024*1024
342 elif mem_unit=="KB":
343 mem_limit=mem_limit*1024
344 mem_lim = int(mem_limit)
345 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
346
347 # 4. do the dc.startCompute(name="foobar") call to run the container
348 # TODO consider flavors, and other annotations
349 intfs = vnfd.get("connection_points")
350
351 # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
352 # use the vnf_id in the nsd as docker name
353 # so deployed containers can be easily mapped back to the nsd
354 vnf_name2id = defaultdict(lambda: "NotExistingNode",
355 reduce(lambda x, y: dict(x, **y),
356 map(lambda d: {d["vnf_name"]: d["vnf_id"]},
357 self.nsd["network_functions"])))
358 self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
359 # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()
360
361 LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
362 LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
363 vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small",
364 cpu_quota=cpu_quota, cpu_period=cpu_period, cpuset=cpu_list, mem_limit=mem_lim)
365 return vnfi
366
367 def _stop_vnfi(self, vnfi):
368 """
369 Stop a VNF instance.
370
371 :param vnfi: vnf instance to be stopped
372 """
373 # Find the correct datacenter
374 status = vnfi.getStatus()
375 dc = vnfi.datacenter
376 # stop the vnfi
377 LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
378 dc.stopCompute(status["name"])
379
380 def _get_vnf_instance(self, instance_uuid, name):
381 """
382 Returns the Docker object for the given VNF name (or Docker name).
383 :param instance_uuid: UUID of the service instance to search in.
384 :param name: VNF name or Docker name. We are fuzzy here.
385 :return:
386 """
387 dn = name
388 if name in self.vnf_name2docker_name:
389 dn = self.vnf_name2docker_name[name]
390 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
391 if vnfi.name == dn:
392 return vnfi
393 LOG.warning("No container with name: %r found.")
394 return None
395
396 @staticmethod
397 def _vnf_reconfigure_network(vnfi, if_name, net_str):
398 """
399 Reconfigure the network configuration of a specific interface
400 of a running container.
401 :param vnfi: container instacne
402 :param if_name: interface name
403 :param net_str: network configuration string, e.g., 1.2.3.4/24
404 :return:
405 """
406 intf = vnfi.intf(intf=if_name)
407 if intf is not None:
408 intf.setIP(net_str)
409 LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
410 else:
411 LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))
412
413
    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        """
        Execute the entry point command of each VNF instance, if one is
        defined in the container's SON_EMU_CMD environment variable.
        :param vnfi_list: list of vnf instances (compute objects)
        """
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                # docker env entries have the form "KEY=VALUE"
                var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
                LOG.debug("%r = %r" % (var , cmd))
                if var=="SON_EMU_CMD":
                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                    # execute command in new thread to ensure that GK is not blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()
427
428 def _unpack_service_package(self):
429 """
430 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
431 """
432 LOG.info("Unzipping: %r" % self.package_file_path)
433 with zipfile.ZipFile(self.package_file_path, "r") as z:
434 z.extractall(self.package_content_path)
435
436
    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML (META-INF/MANIFEST.MF)
        and keep it as dict in self.manifest.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))
445
    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        The NSD's location inside the package is taken from the manifest's
        'entry_service_template' field.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            # make the NSD known to the emulator network for later inspection
            GK.net.deployed_nsds.append(self.nsd)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
458
    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
        Keyed by the VNFD's own 'name' field.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))
473
    def _load_saps(self):
        """
        Create a VNFD (based on the packaged sap_vnfd.yml template) for each
        service access point found in the NSD, so SAPs are deployed as
        regular containers later on.
        """
        # Each Service Access Point (connection_point) in the nsd is getting its own container
        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
        for sap in SAPs:
            # endpoints needed in this service
            sap_vnf_id, sap_vnf_interface = sap.split(':')
            # set of the connection_point ids found in the nsd (in the examples this is 'ns')
            self.sap_identifiers.add(sap_vnf_id)

            sap_docker_name = "%s_%s" % (sap_vnf_id, sap_vnf_interface)

            # add SAP to self.vnfds
            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
            sap_vnfd = load_yaml(sapfile)
            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
            sap_vnfd["name"] = sap_docker_name
            self.vnfds[sap_docker_name] = sap_vnfd
            # add SAP vnf to list in the NSD so it is deployed later on
            # each SAP get a unique VNFD and vnf_id in the NSD
            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))
495
    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from VNFDs and store them in dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    # Dockerfile locations are given relative to the package root
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))
510
    def _load_docker_urls(self):
        """
        Get all URLs to pre-build docker images in some repo.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        # strip the scheme so the string can be used as a docker image name
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[k] = url
                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))
524
    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return # do not build anything in standalone mode
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            # strip the trailing "Dockerfile" so the directory is used as build context
            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)
537
    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-build Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images.list(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            dc.pull(url,
                    insecure_registry=True)
551
552 def _check_docker_image_exists(self, image_name):
553 """
554 Query the docker service and check if the given image exists
555 :param image_name: name of the docker image
556 :return:
557 """
558 return len(DockerClient().images.list(name=image_name)) > 0
559
    def _calculate_placement(self, algorithm):
        """
        Do placement by adding the a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        :param algorithm: placement algorithm class (instantiated here)
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate algorithm an place
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # lets print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
575
576 def _calculate_cpu_cfs_values(self, cpu_time_percentage):
577 """
578 Calculate cpu period and quota for CFS
579 :param cpu_time_percentage: percentage of overall CPU to be used
580 :return: cpu_period, cpu_quota
581 """
582 if cpu_time_percentage is None:
583 return -1, -1
584 if cpu_time_percentage < 0:
585 return -1, -1
586 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
587 # Attention minimum cpu_quota is 1ms (micro)
588 cpu_period = 1000000 # lets consider a fixed period of 1000000 microseconds for now
589 LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
590 cpu_quota = cpu_period * cpu_time_percentage # calculate the fraction of cpu time for this container
591 # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
592 if cpu_quota < 1000:
593 LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
594 cpu_quota = 1000
595 LOG.warning("Increased CPU quota to avoid system error.")
596 LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
597 return int(cpu_period), int(cpu_quota)
598
599
600 """
601 Some (simple) placement algorithms
602 """
603
604
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign the first known DC to every VNFD (adds a "dc" field).
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict vnf name -> VNFD dict (mutated in place)
        :param dcs: dict dc name -> data center object
        """
        # FIX: use values() instead of Python-2-only iteritems()/itervalues()
        # (identical behavior on py2, keeps the code py3-ready) and hoist the
        # loop-invariant DC lookup out of the loop
        first_dc = list(dcs.values())[0]
        for vnfd in vnfds.values():
            vnfd["dc"] = first_dc
612
613
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign DCs to VNFDs in round-robin order (adds a "dc" field).
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict vnf name -> VNFD dict (mutated in place)
        :param dcs: dict dc name -> data center object
        """
        # FIX: use values()/enumerate() instead of Python-2-only
        # itervalues()/iteritems() and a manual counter (same behavior on py2,
        # keeps the code py3-ready)
        dcs_list = list(dcs.values())
        for c, vnfd in enumerate(vnfds.values()):
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
624
625
626
627
628 """
629 Resource definitions and API endpoints
630 """
631
632
class Packages(fr.Resource):
    """REST endpoint to upload (*.son) and list service packages."""

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: UUID
        """
        try:
            # get file contents
            print(request.files)
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # BUGFIX: hash the actual file contents; the original hashed
            # str(son_file), i.e. the repr of the FileStorage object
            with open(upload_path, "rb") as f:
                file_hash = hashlib.sha1(f.read()).hexdigest()
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
        except Exception:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict/list
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.iterkeys())}
677
678
class Instantiations(fr.Resource):
    """REST endpoint to start, list and stop service instances."""

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}, 201
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        instance_uuid = json_data.get("service_instance_uuid")

        # try to be fuzzy
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply stop the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]
        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
            # likewise, fall back to the first known instance of that service
            instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]

        if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
            # valid service and instance UUID, stop service
            GK.services.get(service_uuid).stop_service(instance_uuid)
            return "service instance with uuid %r stopped." % instance_uuid,200
        return "Service not found", 404
732
class Exit(fr.Resource):

    def put(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        # all DCs share the same DCNetwork, so any DC's net reference will do
        list(GK.dcs.values())[0].net.stop()
740
741
def initialize_GK():
    """(Re)create the module-global Gatekeeper instance."""
    global GK
    GK = Gatekeeper()
745
746
747
# create a single, global GK object
GK = None
initialize_GK()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
api.add_resource(Exit, '/emulator/exit')
759
760
761 #def initialize_GK():
762 # global GK
763 # GK = Gatekeeper()
764
765
def start_rest_api(host, port, datacenters=None):
    """
    Attach the given data centers to the gatekeeper and run the REST API.
    :param host: interface to bind the Flask server to
    :param port: TCP port of the REST API
    :param datacenters: dict of data center objects known to the emulator
    """
    # BUGFIX: avoid the mutable default argument `datacenters=dict()`
    # (one shared dict across all calls); behavior for callers is unchanged
    GK.dcs = datacenters if datacenters is not None else dict()
    GK.net = get_dc_network()
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
775
776
def ensure_dir(name):
    """
    Create directory *name* (including parents) if it does not exist yet.
    FIX: EAFP instead of the racy exists()/makedirs() pair -- a concurrent
    creation between the check and the call no longer raises.
    """
    try:
        os.makedirs(name)
    except OSError as e:
        # an already-existing directory is fine; re-raise anything else
        if e.errno != errno.EEXIST:
            raise
780
781
def load_yaml(path):
    """
    Load a YAML file and return its content (empty dict on parse error).
    :param path: path of the YAML file
    :return: parsed content as dict
    """
    with open(path, "r") as f:
        try:
            # SECURITY FIX: safe_load instead of yaml.load -- descriptors are
            # plain data and must not be able to instantiate arbitrary objects
            r = yaml.safe_load(f)
        except yaml.YAMLError:
            LOG.exception("YAML parse error")
            r = dict()
    return r
790
791
def make_relative_path(path):
    """
    Strip a leading 'file://' scheme and a leading slash so the
    result can be joined onto the package content directory.
    """
    if path.startswith("file://"):
        path = path[len("file://"):]
    if path.startswith("/"):
        path = path[1:]
    return path
798
799
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Build a network configuration string of the form
    '<prefix>.<base>.<ip>/<subnet_size>'.
    """
    return "{0}.{1}.{2}/{3}".format(prefix, base, ip, subnet_size)
806
807
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Generate n network configuration strings of the form
    '<i>.0.0.<ip>/<subnet_size>' for i in [start, start + n).
    """
    return ["%d.0.0.%d/%d" % (i, ip, subnet_size)
            for i in range(start, start + n)]
816
def get_dc_network():
    """
    retrieve the DCnetwork where this dummygatekeeper (GK) connects to.
    Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
    :return:
    """
    assert (len(GK.dcs) > 0)
    # Python 2: dict.values() returns a list, so direct indexing works here
    return GK.dcs.values()[0].net
825
if __name__ == '__main__':
    """
    Lets allow to run the API in standalone mode.
    """
    # no emulator attached: only the bare REST API for integration testing
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)
833