fix unittest issue with docker pull
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """
34
35 import logging
36 import os
37 import uuid
38 import hashlib
39 import zipfile
40 import yaml
41 import threading
42 from docker import DockerClient, APIClient
43 from flask import Flask, request
44 import flask_restful as fr
45 from collections import defaultdict
46 import pkg_resources
47 from subprocess import Popen
48
logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
# silence Flask/werkzeug per-request logging (too noisy at INFO)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# all gatekeeper state (uploaded packages, extracted catalogs) lives below this folder
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality (if False, pre-built images are pulled instead)
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
FORCE_PULL = False

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
DEPLOY_SAP = False

# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
BIDIRECTIONAL_CHAIN = False
73
class Gatekeeper(object):
    """
    Dummy gatekeeper: tracks on-boarded service packages, the known
    data centers, and the emulator network it is attached to.
    """

    def __init__(self):
        self.services = dict()  # service_uuid -> Service object
        self.dcs = dict()       # DC name -> data center object
        self.net = None         # DCNetwork this GK connects to
        # used to generate short names for VNFs (Mininet limitation)
        self.vnf_counter = 0
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid
        :param service object
        """
        self.services[service_uuid] = service
        # perform every step needed to on-board the service right away
        service.onboard()

    def get_next_vnf_name(self):
        """Return a fresh, short VNF name: 'vnf1', 'vnf2', ..."""
        self.vnf_counter += 1
        return "vnf{0}".format(self.vnf_counter)
96
97
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.

    NOTE(review): this class relies on Python 2-only idioms
    (dict.iteritems()/itervalues() and the builtin reduce()); a Python 3
    port has to touch those call sites.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        """
        Create a service record for one uploaded *.son package.
        :param service_uuid: UUID assigned to this package by the GK
        :param package_file_hash: sha1 reported for the uploaded package
        :param package_file_path: path of the stored *.son file
        """
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        # folder into which the package contents get extracted
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None  # parsed META-INF/MANIFEST.MF
        self.nsd = None  # parsed network service descriptor
        self.vnfds = dict()  # vnf name -> parsed VNFD
        self.local_docker_files = dict()  # vnf name -> path of its Dockerfile
        self.remote_docker_image_urls = dict()  # vnf name -> Docker image URL
        self.instances = dict()  # instance uuid -> instance record (NSR-like)
        self.vnf_name2docker_name = dict()  # vnf name -> container name
        self.sap_identifiers = set()  # connection point ids that are SAPs
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        if DEPLOY_SAP:
            self._load_saps()
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("name"))

    def start_service(self):
        """
        This methods creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return: UUID of the newly created service instance
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build a instances dict (a bit like a NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
        # map vnf_id -> vnf_name from the NSD; unknown ids map to "NotExistingNode"
        # NOTE(review): reduce() is a builtin only on Python 2 (functools.reduce on Py3)
        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                 self.nsd["network_functions"])))

        # 3. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            #self._calculate_placement(FirstDcPlacement)
            self._calculate_placement(RoundRobinDcPlacement)
        # iterate over all vnfds that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        if "virtual_links" in self.nsd:
            vlinks = self.nsd["virtual_links"]
            # only links that are part of the first forwarding graph are chained
            fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
            eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
            elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

            GK.net.deployed_elines.extend(eline_fwd_links)
            GK.net.deployed_elans.extend(elan_fwd_links)

            # 4a. deploy E-Line links
            # cookie is used as identifier for the flowrules installed by the dummygatekeeper
            # eg. different services get a unique cookie for their flowrules
            # NOTE(review): cookie is never incremented here, so every E-Line
            # rule of this instance shares cookie 1 — verify intent
            cookie = 1
            for link in eline_fwd_links:
                src_id, src_if_name = link["connection_points_reference"][0].split(":")
                dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

                # check if there is a SAP in the link
                if src_id in self.sap_identifiers:
                    src_docker_name = "{0}_{1}".format(src_id, src_if_name)
                    src_id = src_docker_name
                else:
                    src_docker_name = src_id

                if dst_id in self.sap_identifiers:
                    dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
                    dst_id = dst_docker_name
                else:
                    dst_docker_name = dst_id

                src_name = vnf_id2vnf_name[src_id]
                dst_name = vnf_id2vnf_name[dst_id]

                LOG.debug(
                    "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                        src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

                if (src_name in self.vnfds) and (dst_name in self.vnfds):
                    network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                    LOG.debug(src_docker_name)
                    ret = network.setChain(
                        src_docker_name, dst_docker_name,
                        vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                        bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)

                    # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
                    src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                    if src_vnfi is not None:
                        self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                    dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                    if dst_vnfi is not None:
                        self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

            # 4b. deploy E-LAN links
            base = 10
            for link in elan_fwd_links:

                elan_vnf_list=[]

                # generate lan ip address
                ip = 1
                for intf in link["connection_points_reference"]:
                    ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                    vnf_id, intf_name = intf.split(":")
                    if vnf_id in self.sap_identifiers:
                        src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                        vnf_id = src_docker_name
                    else:
                        src_docker_name = vnf_id
                    vnf_name = vnf_id2vnf_name[vnf_id]
                    LOG.debug(
                        "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                            vnf_name, vnf_id, intf_name, ip_address))

                    if vnf_name in self.vnfds:
                        # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
                        # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                        # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
                        vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                        if vnfi is not None:
                            self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                            # increase for the next ip address on this E-LAN
                            ip += 1

                            # add this vnf and interface to the E-LAN for tagging
                            network = self.vnfds[vnf_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                            elan_vnf_list.append({'name':src_docker_name,'interface':intf_name})


                # install the VLAN tags for this E-LAN
                network.setLAN(elan_vnf_list)
                # increase the base ip address for the next E-LAN
                base += 1

        # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def stop_service(self, instance_uuid):
        """
        This method stops a running service instance.
        It iterates over all VNF instances, stopping them each
        and removing them from their data center.

        :param instance_uuid: the uuid of the service instance to be stopped
        """
        LOG.info("Stopping service %r" % self.uuid)
        # get relevant information
        # instance_uuid = str(self.uuid.uuid4())
        vnf_instances = self.instances[instance_uuid]["vnf_instances"]

        for v in vnf_instances:
            self._stop_vnfi(v)

        if not GK_STANDALONE_MODE:
            # remove placement?
            # self._remove_placement(RoundRobinPlacement)
            # NOTE(review): bare "None" is a no-op placeholder statement
            None

        # last step: remove the instance from the list of all instances
        del self.instances[instance_uuid]

    def _start_vnfd(self, vnfd):
        """
        Start a single VNFD of this service
        :param vnfd: vnfd descriptor dict
        :return: the compute instance object started for this VNFD
        """
        # iterate over all deployment units within each VNFDs
        for u in vnfd.get("virtual_deployment_units"):
            # 1. get the name of the docker image to start and the assigned DC
            vnf_name = vnfd.get("name")
            if vnf_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_name)
            docker_name = self.remote_docker_image_urls.get(vnf_name)
            target_dc = vnfd.get("dc")
            # 2. perform some checks to ensure we can start the container
            assert(docker_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_name):
                raise Exception("Docker image %r not found. Abort." % docker_name)

            # 3. get the resource limits
            res_req = u.get("resource_requirements")
            cpu_list = res_req.get("cpu").get("cores")
            if not cpu_list or len(cpu_list)==0:
                cpu_list="1"
            cpu_bw = res_req.get("cpu").get("cpu_bw")
            if not cpu_bw:
                cpu_bw=1
            mem_num = str(res_req.get("memory").get("size"))
            if len(mem_num)==0:
                mem_num="2"
            mem_unit = str(res_req.get("memory").get("size_unit"))
            # NOTE(review): compares a string to the int 0, so this branch can
            # never be taken — probably meant len(mem_unit) == 0
            if str(mem_unit)==0:
                mem_unit="GB"
            # convert the memory limit to bytes for Docker
            mem_limit = float(mem_num)
            if mem_unit=="GB":
                mem_limit=mem_limit*1024*1024*1024
            elif mem_unit=="MB":
                mem_limit=mem_limit*1024*1024
            elif mem_unit=="KB":
                mem_limit=mem_limit*1024
            mem_lim = int(mem_limit)
            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))

            # 4. do the dc.startCompute(name="foobar") call to run the container
            # TODO consider flavors, and other annotations
            intfs = vnfd.get("connection_points")

            # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
            # use the vnf_id in the nsd as docker name
            # so deployed containers can be easily mapped back to the nsd
            vnf_name2id = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_name"]: d["vnf_id"]},
                                                 self.nsd["network_functions"])))
            self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
            # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()

            LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
            LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
            vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small",
                                          cpu_quota=cpu_quota, cpu_period=cpu_period, cpuset=cpu_list, mem_limit=mem_lim)
            # NOTE(review): returning inside the loop means only the first
            # virtual deployment unit is ever started — verify intent
            return vnfi

    def _stop_vnfi(self, vnfi):
        """
        Stop a VNF instance.

        :param vnfi: vnf instance to be stopped
        """
        # Find the correct datacenter
        status = vnfi.getStatus()
        dc = vnfi.datacenter
        # stop the vnfi
        LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
        dc.stopCompute(status["name"])

    def _get_vnf_instance(self, instance_uuid, name):
        """
        Returns the Docker object for the given VNF name (or Docker name).
        :param instance_uuid: UUID of the service instance to search in.
        :param name: VNF name or Docker name. We are fuzzy here.
        :return: the matching vnf instance, or None when not found
        """
        dn = name
        if name in self.vnf_name2docker_name:
            dn = self.vnf_name2docker_name[name]
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnfi.name == dn:
                return vnfi
        # NOTE(review): the format string has a %r placeholder but no
        # argument — probably meant LOG.warning(..., dn)
        LOG.warning("No container with name: %r found.")
        return None

    @staticmethod
    def _vnf_reconfigure_network(vnfi, if_name, net_str):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instacne
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :return:
        """
        intf = vnfi.intf(intf=if_name)
        if intf is not None:
            intf.setIP(net_str)
            LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
        else:
            LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))


    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        """
        Execute the SON_EMU_CMD entry point (if present in the container's
        environment) inside each given VNF instance.
        :param vnfi_list: list of vnf instances (may contain None in standalone mode)
        """
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
                LOG.debug("%r = %r" % (var , cmd))
                if var=="SON_EMU_CMD":
                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                    # execute command in new thread to ensure that GK is not blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)


    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            GK.net.deployed_nsds.append(self.nsd)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))

    def _load_saps(self):
        """
        Create a VNFD (based on the bundled sap_vnfd.yml template) for every
        SAP connection point of the NSD so each SAP is deployed as a container.
        """
        # Each Service Access Point (connection_point) in the nsd is getting its own container
        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
        for sap in SAPs:
            # endpoints needed in this service
            sap_vnf_id, sap_vnf_interface = sap.split(':')
            # set of the connection_point ids found in the nsd (in the examples this is 'ns')
            self.sap_identifiers.add(sap_vnf_id)

            sap_docker_name = "%s_%s" % (sap_vnf_id, sap_vnf_interface)

            # add SAP to self.vnfds
            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
            sap_vnfd = load_yaml(sapfile)
            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
            sap_vnfd["name"] = sap_docker_name
            self.vnfds[sap_docker_name] = sap_vnfd
            # add SAP vnf to list in the NSD so it is deployed later on
            # each SAP get a unique VNFD and vnf_id in the NSD
            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from VNFDs and store them in dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-build docker images in some repo.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        # strip the scheme; Docker image references have no http:// prefix
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[k] = url
                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        # NOTE(review): docker-py 2.x exposes build() on APIClient, not on
        # DockerClient (which uses dc.images.build) — verify against the
        # docker-py version pinned by this project
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-build Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images.list(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            # this seems to fail with latest docker api version 2.0.2
            # dc.images.pull(url,
            #        insecure_registry=True)
            # using docker cli instead (list form, no shell involved)
            cmd = ["docker",
                   "pull",
                   url,
                   ]
            Popen(cmd).wait()




    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists
        :param image_name: name of the docker image
        :return: True iff at least one local image matches the name
        """
        return len(DockerClient().images.list(name=image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do placement by adding the a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        :param algorithm: placement class to instantiate (e.g. RoundRobinDcPlacement)
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate algorithm an place
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # lets print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))

    def _calculate_cpu_cfs_values(self, cpu_time_percentage):
        """
        Calculate cpu period and quota for CFS
        :param cpu_time_percentage: percentage of overall CPU to be used
        :return: cpu_period, cpu_quota (both -1 to disable limiting)
        """
        if cpu_time_percentage is None:
            return -1, -1
        if cpu_time_percentage < 0:
            return -1, -1
        # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
        # Attention minimum cpu_quota is 1ms (micro)
        cpu_period = 1000000  # lets consider a fixed period of 1000000 microseconds for now
        LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
        cpu_quota = cpu_period * cpu_time_percentage  # calculate the fraction of cpu time for this container
        # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
        if cpu_quota < 1000:
            LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
            cpu_quota = 1000
            LOG.warning("Increased CPU quota to avoid system error.")
        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
        return int(cpu_period), int(cpu_quota)
609
610
611 """
612 Some (simple) placement algorithms
613 """
614
615
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign the first known DC to every VNFD (adds a "dc" key in place).
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict of vnf name -> VNFD dict (mutated)
        :param dcs: dict of DC name -> data center object
        """
        # list()/values() instead of itervalues()/iteritems(): those methods
        # were removed in Python 3; this form works on Py2 and Py3
        first_dc = list(dcs.values())[0]
        for name, vnfd in vnfds.items():
            vnfd["dc"] = first_dc
623
624
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign DCs to VNFDs round-robin (adds a "dc" key in place).
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict of vnf name -> VNFD dict (mutated)
        :param dcs: dict of DC name -> data center object
        """
        # list()/values()/items() instead of itervalues()/iteritems(): those
        # methods were removed in Python 3; this form works on Py2 and Py3
        dcs_list = list(dcs.values())
        for c, (name, vnfd) in enumerate(vnfds.items()):
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
635
636
637
638
639 """
640 Resource definitions and API endpoints
641 """
642
643
class Packages(fr.Resource):
    """
    REST endpoint to upload (*POST*) and list (*GET*) *.son service packages.
    """

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: dict with service UUID, package size and sha1, plus HTTP status
        """
        try:
            # get file contents
            print(request.files)
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # hash the actual package bytes: the old code hashed
            # str(son_file), i.e. the repr of the FileStorage object, which
            # is not a content hash at all
            with open(upload_path, "rb") as f:
                file_hash = hashlib.sha1(f.read()).hexdigest()
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
        except Exception:
            # broad catch is deliberate: this is the API boundary and must
            # always answer with a JSON error instead of a stack trace
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict/list
        """
        LOG.info("GET /packages")
        # list(dict) instead of iterkeys(): works on Python 2 and 3
        return {"service_uuid_list": list(GK.services)}
688
689
class Instantiations(fr.Resource):
    """
    REST endpoint to start (*POST*), list (*GET*) and stop (*DELETE*)
    service instances.
    """

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service in the list
            # list(dict) instead of iterkeys(): works on Python 2 and 3
            service_uuid = list(GK.services)[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}, 201
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations")
        # list()/values() instead of iterkeys()/itervalues(): Py2/Py3 compatible
        return {"service_instantiations_list": [
            list(s.instances) for s in GK.services.values()]}

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        instance_uuid = json_data.get("service_instance_uuid")

        # try to be fuzzy
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply stop the last service in the list
            service_uuid = list(GK.services)[0]
        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
            instance_uuid = list(GK.services[service_uuid].instances)[0]

        if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
            # valid service and instance UUID, stop service
            GK.services.get(service_uuid).stop_service(instance_uuid)
            return "service instance with uuid %r stopped." % instance_uuid, 200
        return "Service not found", 404
743
class Exit(fr.Resource):
    """
    REST endpoint that shuts down the whole emulator network.
    """

    def put(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        # any DC will do: all data centers share the same network object
        some_dc = list(GK.dcs.values())[0]
        some_dc.net.stop()
751
752
def initialize_GK():
    """
    (Re-)create the module-level Gatekeeper singleton (GK).
    """
    global GK
    GK = Gatekeeper()
756
757
758
759 # create a single, global GK object
760 GK = None
761 initialize_GK()
762 # setup Flask
763 app = Flask(__name__)
764 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
765 api = fr.Api(app)
766 # define endpoints
767 api.add_resource(Packages, '/packages')
768 api.add_resource(Instantiations, '/instantiations')
769 api.add_resource(Exit, '/emulator/exit')
770
771
772 #def initialize_GK():
773 # global GK
774 # GK = Gatekeeper()
775
776
def start_rest_api(host, port, datacenters=None):
    """
    Attach the given data centers to the GK and run the REST API (blocking).

    :param host: interface to bind the Flask server to
    :param port: TCP port to listen on
    :param datacenters: dict of DC name -> data center object (default: empty);
        the old signature used a mutable default argument (datacenters=dict()),
        which is a classic Python pitfall — None is used as sentinel instead
    """
    GK.dcs = datacenters if datacenters is not None else dict()
    GK.net = get_dc_network()
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
786
787
def ensure_dir(name):
    """
    Create directory *name* (including parents) if it does not exist yet.

    Uses EAFP instead of the old exists()-then-makedirs() sequence, which
    raced with concurrent creation of the same directory.
    :param name: directory path to create
    """
    try:
        os.makedirs(name)
    except OSError:
        # the directory may have been created concurrently; only re-raise
        # when it is still missing (i.e. a real error such as EACCES)
        if not os.path.isdir(name):
            raise
791
792
def load_yaml(path):
    """
    Parse a YAML file and return its content as a dict.

    Returns an empty dict when the file cannot be parsed.
    :param path: path of the YAML file
    """
    with open(path, "r") as f:
        try:
            # safe_load instead of load: uploaded package descriptors are
            # untrusted input and yaml.load() can instantiate arbitrary
            # Python objects
            r = yaml.safe_load(f)
        except yaml.YAMLError:
            LOG.exception("YAML parse error")
            r = dict()
    return r
801
802
def make_relative_path(path):
    """
    Turn a descriptor reference (possibly file://-prefixed, possibly
    absolute) into a path relative to the package root.
    """
    # strip at most one scheme prefix and one leading slash, in this order
    for prefix in ("file://", "/"):
        if path.startswith(prefix):
            path = path[len(prefix):]
    return path
809
810
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Example: generate_lan_string("10.0", 10, 24, 1) -> "10.0.10.1/24"
    """
    return "{0}.{1:d}.{2:d}/{3:d}".format(prefix, base, ip, subnet_size)
817
818
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Example: generate_subnet_strings(2, 5, 16, 9) -> ["5.0.0.9/16", "6.0.0.9/16"]
    """
    return ["{0:d}.0.0.{1:d}/{2:d}".format(i, ip, subnet_size)
            for i in range(start, start + n)]
827
def get_dc_network():
    """
    retrieve the DCnetwork where this dummygatekeeper (GK) connects to.
    Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
    :return:
    """
    assert (len(GK.dcs) > 0)
    # dict.values() is a non-indexable view on Python 3; wrap in list()
    # (consistent with the Exit endpoint, which already does this)
    return list(GK.dcs.values())[0].net
836
if __name__ == '__main__':
    """
    Lets allow to run the API in standalone mode.
    """
    # standalone: no emulator attached, only the bare REST API for testing
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)
844