attempt to add a log file directory to every vnf created
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """
34
35 import logging
36 import os
37 import uuid
38 import hashlib
39 import zipfile
40 import yaml
41 import threading
42 from docker import DockerClient, APIClient
43 from flask import Flask, request
44 import flask_restful as fr
45 from collections import defaultdict
46 import pkg_resources
47 from subprocess import Popen
48 import tempfile
49
50 logging.basicConfig()
51 LOG = logging.getLogger("sonata-dummy-gatekeeper")
52 LOG.setLevel(logging.DEBUG)
53 logging.getLogger("werkzeug").setLevel(logging.WARNING)
54
55 GK_STORAGE = "/tmp/son-dummy-gk/"
56 UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
57 CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
58
59 # Enable Dockerfile build functionality
60 BUILD_DOCKERFILE = False
61
62 # flag to indicate that we run without the emulator (only the bare API for integration testing)
63 GK_STANDALONE_MODE = False
64
# should a new version of an image be pulled even if it is already available locally
66 FORCE_PULL = False
67
68 # Automatically deploy SAPs (endpoints) of the service as new containers
69 # Attention: This is not a configuration switch but a global variable! Don't change its default value.
70 DEPLOY_SAP = False
71
72 # flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
73 BIDIRECTIONAL_CHAIN = False
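
# Note (editorial sketch, not from the original file): these module-level flags
# are plain globals, so a topology script that imports this module can toggle
# them before the REST API is started, e.g.:
#
#   from emuvim.api.sonata import dummygatekeeper as dgk
#   dgk.FORCE_PULL = True
#   dgk.BIDIRECTIONAL_CHAIN = True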
74
75 class Gatekeeper(object):
76
77 def __init__(self):
78 self.services = dict()
79 self.dcs = dict()
80 self.net = None
81 self.vnf_counter = 0 # used to generate short names for VNFs (Mininet limitation)
82 LOG.info("Create SONATA dummy gatekeeper.")
83
84 def register_service_package(self, service_uuid, service):
85 """
        Register a new service package.
        :param service_uuid: UUID of the uploaded package
        :param service: Service object created for this package
89 """
90 self.services[service_uuid] = service
91 # lets perform all steps needed to onboard the service
92 service.onboard()
93
94 def get_next_vnf_name(self):
95 self.vnf_counter += 1
96 return "vnf%d" % self.vnf_counter
97
98
99 class Service(object):
100 """
101 This class represents a NS uploaded as a *.son package to the
102 dummy gatekeeper.
103 Can have multiple running instances of this service.
104 """
105
106 def __init__(self,
107 service_uuid,
108 package_file_hash,
109 package_file_path):
110 self.uuid = service_uuid
111 self.package_file_hash = package_file_hash
112 self.package_file_path = package_file_path
113 self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
114 self.manifest = None
115 self.nsd = None
116 self.vnfds = dict()
117 self.local_docker_files = dict()
118 self.remote_docker_image_urls = dict()
119 self.instances = dict()
120 self.vnf_name2docker_name = dict()
121 self.sap_identifiers = set()
122 # lets generate a set of subnet configurations used for e-line chaining setup
123 self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
124 self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)
125
126 def onboard(self):
127 """
128 Do all steps to prepare this service to be instantiated
129 :return:
130 """
131 # 1. extract the contents of the package and store them in our catalog
132 self._unpack_service_package()
133 # 2. read in all descriptor files
134 self._load_package_descriptor()
135 self._load_nsd()
136 self._load_vnfd()
137 if DEPLOY_SAP:
138 self._load_saps()
139 # 3. prepare container images (e.g. download or build Dockerfile)
140 if BUILD_DOCKERFILE:
141 self._load_docker_files()
142 self._build_images_from_dockerfiles()
143 else:
144 self._load_docker_urls()
145 self._pull_predefined_dockerimages()
146 LOG.info("On-boarded service: %r" % self.manifest.get("name"))
147
148 def start_service(self):
149 """
150 This methods creates and starts a new service instance.
151 It computes placements, iterates over all VNFDs, and starts
152 each VNFD as a Docker container in the data center selected
153 by the placement algorithm.
154 :return:
155 """
156 LOG.info("Starting service %r" % self.uuid)
157
158 # 1. each service instance gets a new uuid to identify it
159 instance_uuid = str(uuid.uuid4())
        # build an instances dict (a bit like an NSR :))
161 self.instances[instance_uuid] = dict()
162 self.instances[instance_uuid]["vnf_instances"] = list()
163
164 # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
165 vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
166 reduce(lambda x, y: dict(x, **y),
167 map(lambda d: {d["vnf_id"]: d["vnf_name"]},
168 self.nsd["network_functions"])))
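        # the defaultdict above maps each vnf_id from the NSD's network_functions
        # list to its vnf_name; ids that are not declared in the NSD resolve to
        # the placeholder "NotExistingNode" instead of raising a KeyError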
169
170 # 3. compute placement of this service instance (adds DC names to VNFDs)
171 if not GK_STANDALONE_MODE:
172 #self._calculate_placement(FirstDcPlacement)
173 self._calculate_placement(RoundRobinDcPlacement)
174 # iterate over all vnfds that we have to start
175 for vnfd in self.vnfds.itervalues():
176 vnfi = None
177 if not GK_STANDALONE_MODE:
178 vnfi = self._start_vnfd(vnfd)
179 self.instances[instance_uuid]["vnf_instances"].append(vnfi)
180
181 if "virtual_links" in self.nsd:
182 vlinks = self.nsd["virtual_links"]
183 fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
184 eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
185 elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]
186
187 GK.net.deployed_elines.extend(eline_fwd_links)
188 GK.net.deployed_elans.extend(elan_fwd_links)
189
190 # 4a. deploy E-Line links
191 # cookie is used as identifier for the flowrules installed by the dummygatekeeper
            # e.g. different services get a unique cookie for their flow rules
193 cookie = 1
194 for link in eline_fwd_links:
195 src_id, src_if_name = link["connection_points_reference"][0].split(":")
196 dst_id, dst_if_name = link["connection_points_reference"][1].split(":")
197
198 # check if there is a SAP in the link
199 if src_id in self.sap_identifiers:
200 src_docker_name = "{0}_{1}".format(src_id, src_if_name)
201 src_id = src_docker_name
202 else:
203 src_docker_name = src_id
204
205 if dst_id in self.sap_identifiers:
206 dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
207 dst_id = dst_docker_name
208 else:
209 dst_docker_name = dst_id
210
211 src_name = vnf_id2vnf_name[src_id]
212 dst_name = vnf_id2vnf_name[dst_id]
213
214 LOG.debug(
215 "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
216 src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))
217
218 if (src_name in self.vnfds) and (dst_name in self.vnfds):
219 network = self.vnfds[src_name].get("dc").net # there should be a cleaner way to find the DCNetwork
220 LOG.debug(src_docker_name)
221 ret = network.setChain(
222 src_docker_name, dst_docker_name,
223 vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
224 bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
225
                # re-configure the VNF's IP assignment and ensure that a new subnet is used for each E-Line link
227 src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
228 if src_vnfi is not None:
229 self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
230 dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
231 if dst_vnfi is not None:
232 self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))
233
234 # 4b. deploy E-LAN links
235 base = 10
236 for link in elan_fwd_links:
237
238 elan_vnf_list=[]
239
240 # generate lan ip address
241 ip = 1
242 for intf in link["connection_points_reference"]:
243 ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
244 vnf_id, intf_name = intf.split(":")
245 if vnf_id in self.sap_identifiers:
246 src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
247 vnf_id = src_docker_name
248 else:
249 src_docker_name = vnf_id
250 vnf_name = vnf_id2vnf_name[vnf_id]
251 LOG.debug(
252 "Setting up E-LAN link. %s(%s:%s) -> %s" % (
253 vnf_name, vnf_id, intf_name, ip_address))
254
255 if vnf_name in self.vnfds:
256 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
257 # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
258 # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
259 vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
260 if vnfi is not None:
261 self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
262 # increase for the next ip address on this E-LAN
263 ip += 1
264
265 # add this vnf and interface to the E-LAN for tagging
266 network = self.vnfds[vnf_name].get("dc").net # there should be a cleaner way to find the DCNetwork
267 elan_vnf_list.append({'name':src_docker_name,'interface':intf_name})
268
269
270 # install the VLAN tags for this E-LAN
271 network.setLAN(elan_vnf_list)
272 # increase the base ip address for the next E-LAN
273 base += 1
274
275 # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
276 self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])
277
278 LOG.info("Service started. Instance id: %r" % instance_uuid)
279 return instance_uuid
280
281 def stop_service(self, instance_uuid):
282 """
283 This method stops a running service instance.
284 It iterates over all VNF instances, stopping them each
285 and removing them from their data center.
286
287 :param instance_uuid: the uuid of the service instance to be stopped
288 """
289 LOG.info("Stopping service %r" % self.uuid)
290 # get relevant information
291 # instance_uuid = str(self.uuid.uuid4())
292 vnf_instances = self.instances[instance_uuid]["vnf_instances"]
293
294 for v in vnf_instances:
295 self._stop_vnfi(v)
296
297 if not GK_STANDALONE_MODE:
298 # remove placement?
299 # self._remove_placement(RoundRobinPlacement)
            pass
301
302 # last step: remove the instance from the list of all instances
303 del self.instances[instance_uuid]
304
305 def _start_vnfd(self, vnfd):
306 """
307 Start a single VNFD of this service
308 :param vnfd: vnfd descriptor dict
309 :return:
310 """
        # iterate over all deployment units within this VNFD
312 for u in vnfd.get("virtual_deployment_units"):
313 # 1. get the name of the docker image to start and the assigned DC
314 vnf_name = vnfd.get("name")
315 if vnf_name not in self.remote_docker_image_urls:
316 raise Exception("No image name for %r found. Abort." % vnf_name)
317 docker_name = self.remote_docker_image_urls.get(vnf_name)
318 target_dc = vnfd.get("dc")
319 # 2. perform some checks to ensure we can start the container
320 assert(docker_name is not None)
321 assert(target_dc is not None)
322 if not self._check_docker_image_exists(docker_name):
323 raise Exception("Docker image %r not found. Abort." % docker_name)
324
325 # 3. get the resource limits
            res_req = u.get("resource_requirements")
            cpu_list = res_req.get("cpu").get("cores")
            if not cpu_list or len(cpu_list) == 0:
                cpu_list = "1"
            cpu_bw = res_req.get("cpu").get("cpu_bw")
            if not cpu_bw:
                cpu_bw = 1
            mem_num = res_req.get("memory").get("size")
            if mem_num is None:
                mem_num = 2
            mem_unit = res_req.get("memory").get("size_unit")
            if not mem_unit:
                mem_unit = "GB"
            # convert the requested memory to bytes for the container limit,
            # e.g. size: 2, size_unit: "GB" -> mem_limit = 2 * 1024 * 1024 * 1024 bytes
            mem_limit = float(mem_num)
            if mem_unit == "GB":
                mem_limit = mem_limit * 1024 * 1024 * 1024
            elif mem_unit == "MB":
                mem_limit = mem_limit * 1024 * 1024
            elif mem_unit == "KB":
                mem_limit = mem_limit * 1024
            mem_lim = int(mem_limit)
            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
348
            # 4. generate the volume paths for the docker container
            volumes = list()
            # bind-mount a host directory to extract log files from the VNF;
            # inside the container this directory shows up under /mnt/share/
            # tempfile.mkdtemp(dir="/tmp/results/%s/%s" % (self.uuid, vnf_name))
            docker_log_path = "/tmp/results/%s/%s" % (self.uuid, vnf_name)
            LOG.debug("LOG path for vnf %s is %s." % (vnf_name, docker_log_path))
            # docker_log_path = tempfile.mkdtemp(dir=docker_log_path)
            if not os.path.exists(docker_log_path):
                os.makedirs(docker_log_path)
            # the with statement closes the file; no explicit close() needed
            with open(os.path.join(docker_log_path, "testfile"), "w") as tf:
                tf.write("placeholder")

            volumes.append(docker_log_path + ":/mnt/share/")
363
364
365 # 5. do the dc.startCompute(name="foobar") call to run the container
366 # TODO consider flavors, and other annotations
367 intfs = vnfd.get("connection_points")
368
369 # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
370 # use the vnf_id in the nsd as docker name
371 # so deployed containers can be easily mapped back to the nsd
372 vnf_name2id = defaultdict(lambda: "NotExistingNode",
373 reduce(lambda x, y: dict(x, **y),
374 map(lambda d: {d["vnf_name"]: d["vnf_id"]},
375 self.nsd["network_functions"])))
376 self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
377 # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()
378
379 LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
380 LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
381 vnfi = target_dc.startCompute(
382 self.vnf_name2docker_name[vnf_name],
383 network=intfs,
384 image=docker_name,
385 flavor_name="small",
386 cpu_quota=cpu_quota,
387 cpu_period=cpu_period,
388 cpuset=cpu_list,
389 mem_limit=mem_lim,
390 volumes=volumes)
391 return vnfi
392
393 def _stop_vnfi(self, vnfi):
394 """
395 Stop a VNF instance.
396
397 :param vnfi: vnf instance to be stopped
398 """
399 # Find the correct datacenter
400 status = vnfi.getStatus()
401 dc = vnfi.datacenter
402 # stop the vnfi
        LOG.info("Stopping the VNF instance %r in DC %r" % (status["name"], dc))
404 dc.stopCompute(status["name"])
405
406 def _get_vnf_instance(self, instance_uuid, name):
407 """
408 Returns the Docker object for the given VNF name (or Docker name).
409 :param instance_uuid: UUID of the service instance to search in.
410 :param name: VNF name or Docker name. We are fuzzy here.
411 :return:
412 """
413 dn = name
414 if name in self.vnf_name2docker_name:
415 dn = self.vnf_name2docker_name[name]
416 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
417 if vnfi.name == dn:
418 return vnfi
        LOG.warning("No container with name: %r found." % dn)
420 return None
421
422 @staticmethod
423 def _vnf_reconfigure_network(vnfi, if_name, net_str):
424 """
425 Reconfigure the network configuration of a specific interface
426 of a running container.
        :param vnfi: container instance
428 :param if_name: interface name
429 :param net_str: network configuration string, e.g., 1.2.3.4/24
430 :return:
431 """
432 intf = vnfi.intf(intf=if_name)
433 if intf is not None:
434 intf.setIP(net_str)
435 LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
436 else:
437 LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))
438
439
440 def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
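        """
        Check the environment variables of each deployed VNF container and, if
        a SON_EMU_CMD variable is set (e.g. via an ENV entry in the VNF's
        Dockerfile), execute that command inside the container in a separate
        thread so that the gatekeeper itself is not blocked.
        """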
441 for vnfi in vnfi_list:
442 config = vnfi.dcinfo.get("Config", dict())
443 env = config.get("Env", list())
444 for env_var in env:
445 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
446 LOG.debug("%r = %r" % (var , cmd))
447 if var=="SON_EMU_CMD":
448 LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
449 # execute command in new thread to ensure that GK is not blocked by VNF
450 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
451 t.daemon = True
452 t.start()
453
454 def _unpack_service_package(self):
455 """
456 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
457 """
458 LOG.info("Unzipping: %r" % self.package_file_path)
459 with zipfile.ZipFile(self.package_file_path, "r") as z:
460 z.extractall(self.package_content_path)
461
462
463 def _load_package_descriptor(self):
464 """
465 Load the main package descriptor YAML and keep it as dict.
466 :return:
467 """
468 self.manifest = load_yaml(
469 os.path.join(
470 self.package_content_path, "META-INF/MANIFEST.MF"))
471
472 def _load_nsd(self):
473 """
474 Load the entry NSD YAML and keep it as dict.
475 :return:
476 """
477 if "entry_service_template" in self.manifest:
478 nsd_path = os.path.join(
479 self.package_content_path,
480 make_relative_path(self.manifest.get("entry_service_template")))
481 self.nsd = load_yaml(nsd_path)
482 GK.net.deployed_nsds.append(self.nsd)
483 LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
484
485 def _load_vnfd(self):
486 """
487 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
488 :return:
489 """
490 if "package_content" in self.manifest:
491 for pc in self.manifest.get("package_content"):
492 if pc.get("content-type") == "application/sonata.function_descriptor":
493 vnfd_path = os.path.join(
494 self.package_content_path,
495 make_relative_path(pc.get("name")))
496 vnfd = load_yaml(vnfd_path)
497 self.vnfds[vnfd.get("name")] = vnfd
498 LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))
499
500 def _load_saps(self):
501 # Each Service Access Point (connection_point) in the nsd is getting its own container
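        # e.g. an NSD connection point "ns:input" of type "interface" becomes a
        # dedicated SAP container named "ns_input" (see sap_docker_name below)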
502 SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
503 for sap in SAPs:
504 # endpoints needed in this service
505 sap_vnf_id, sap_vnf_interface = sap.split(':')
506 # set of the connection_point ids found in the nsd (in the examples this is 'ns')
507 self.sap_identifiers.add(sap_vnf_id)
508
509 sap_docker_name = "%s_%s" % (sap_vnf_id, sap_vnf_interface)
510
511 # add SAP to self.vnfds
512 sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
513 sap_vnfd = load_yaml(sapfile)
514 sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
515 sap_vnfd["name"] = sap_docker_name
516 self.vnfds[sap_docker_name] = sap_vnfd
517 # add SAP vnf to list in the NSD so it is deployed later on
518 # each SAP get a unique VNFD and vnf_id in the NSD
519 self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
520 LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))
521
522 def _load_docker_files(self):
523 """
524 Get all paths to Dockerfiles from VNFDs and store them in dict.
525 :return:
526 """
527 for k, v in self.vnfds.iteritems():
528 for vu in v.get("virtual_deployment_units"):
529 if vu.get("vm_image_format") == "docker":
530 vm_image = vu.get("vm_image")
531 docker_path = os.path.join(
532 self.package_content_path,
533 make_relative_path(vm_image))
534 self.local_docker_files[k] = docker_path
535 LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))
536
537 def _load_docker_urls(self):
538 """
539 Get all URLs to pre-build docker images in some repo.
540 :return:
541 """
542 for k, v in self.vnfds.iteritems():
543 for vu in v.get("virtual_deployment_units"):
544 if vu.get("vm_image_format") == "docker":
545 url = vu.get("vm_image")
546 if url is not None:
547 url = url.replace("http://", "")
548 self.remote_docker_image_urls[k] = url
549 LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))
550
551 def _build_images_from_dockerfiles(self):
552 """
553 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
554 """
555 if GK_STANDALONE_MODE:
556 return # do not build anything in standalone mode
557 dc = DockerClient()
558 LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
559 for k, v in self.local_docker_files.iteritems():
560 for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
561 LOG.debug("DOCKER BUILD: %s" % line)
562 LOG.info("Docker image created: %s" % k)
563
564 def _pull_predefined_dockerimages(self):
565 """
566 If the package contains URLs to pre-build Docker images, we download them with this method.
567 """
568 dc = DockerClient()
569 for url in self.remote_docker_image_urls.itervalues():
570 if not FORCE_PULL: # only pull if not present (speedup for development)
571 if len(dc.images.list(name=url)) > 0:
572 LOG.debug("Image %r present. Skipping pull." % url)
573 continue
574 LOG.info("Pulling image: %r" % url)
575 # this seems to fail with latest docker api version 2.0.2
576 # dc.images.pull(url,
577 # insecure_registry=True)
578 #using docker cli instead
579 cmd = ["docker",
580 "pull",
581 url,
582 ]
583 Popen(cmd).wait()
584
585
586
587
588 def _check_docker_image_exists(self, image_name):
589 """
590 Query the docker service and check if the given image exists
591 :param image_name: name of the docker image
592 :return:
593 """
594 return len(DockerClient().images.list(name=image_name)) > 0
595
596 def _calculate_placement(self, algorithm):
597 """
        Do placement by adding a field "dc" to
599 each VNFD that points to one of our
600 data center objects known to the gatekeeper.
601 """
602 assert(len(self.vnfds) > 0)
603 assert(len(GK.dcs) > 0)
        # instantiate the algorithm and do the placement
605 p = algorithm()
606 p.place(self.nsd, self.vnfds, GK.dcs)
607 LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
608 # lets print the placement result
609 for name, vnfd in self.vnfds.iteritems():
610 LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
611
612 def _calculate_cpu_cfs_values(self, cpu_time_percentage):
613 """
614 Calculate cpu period and quota for CFS
615 :param cpu_time_percentage: percentage of overall CPU to be used
616 :return: cpu_period, cpu_quota
617 """
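        # worked example (illustration only): cpu_time_percentage=0.25 together
        # with the fixed 1000000 us period below yields cpu_quota = 250000 us,
        # i.e. the container may use 250 ms of CPU time per 1 s period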
618 if cpu_time_percentage is None:
619 return -1, -1
620 if cpu_time_percentage < 0:
621 return -1, -1
622 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
        # Attention: the minimum cpu_quota is 1000 microseconds (1 ms)
624 cpu_period = 1000000 # lets consider a fixed period of 1000000 microseconds for now
625 LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
626 cpu_quota = cpu_period * cpu_time_percentage # calculate the fraction of cpu time for this container
        # ATTENTION: must be >= 1000 to avoid an "invalid argument" system error (reason unclear)
628 if cpu_quota < 1000:
629 LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
630 cpu_quota = 1000
631 LOG.warning("Increased CPU quota to avoid system error.")
632 LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
633 return int(cpu_period), int(cpu_quota)
634
635
636 """
637 Some (simple) placement algorithms
638 """
639
640
641 class FirstDcPlacement(object):
642 """
643 Placement: Always use one and the same data center from the GK.dcs dict.
644 """
645 def place(self, nsd, vnfds, dcs):
646 for name, vnfd in vnfds.iteritems():
647 vnfd["dc"] = list(dcs.itervalues())[0]
648
649
650 class RoundRobinDcPlacement(object):
651 """
652 Placement: Distribute VNFs across all available DCs in a round robin fashion.
653 """
654 def place(self, nsd, vnfds, dcs):
655 c = 0
656 dcs_list = list(dcs.itervalues())
657 for name, vnfd in vnfds.iteritems():
658 vnfd["dc"] = dcs_list[c % len(dcs_list)]
659 c += 1 # inc. c to use next DC
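
# Editorial sketch (not part of the original module): a custom placement
# algorithm only needs a place(nsd, vnfds, dcs) method that writes a "dc"
# entry into each VNFD and could then be passed to _calculate_placement(),
# e.g. (illustration only):
#
# class RandomDcPlacement(object):
#     def place(self, nsd, vnfds, dcs):
#         import random
#         dcs_list = list(dcs.itervalues())
#         for name, vnfd in vnfds.iteritems():
#             vnfd["dc"] = random.choice(dcs_list)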
660
661
662
663
664 """
665 Resource definitions and API endpoints
666 """
667
668
669 class Packages(fr.Resource):
670
671 def post(self):
672 """
673 Upload a *.son service package to the dummy gatekeeper.
674
675 We expect request with a *.son file and store it in UPLOAD_FOLDER
676 :return: UUID
677 """
678 try:
679 # get file contents
680 LOG.info("POST /packages called")
681 # lets search for the package in the request
682 is_file_object = False # make API more robust: file can be in data or in files field
683 if "package" in request.files:
684 son_file = request.files["package"]
685 is_file_object = True
686 elif len(request.data) > 0:
687 son_file = request.data
688 else:
689 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
690 # generate a uuid to reference this package
691 service_uuid = str(uuid.uuid4())
692 file_hash = hashlib.sha1(str(son_file)).hexdigest()
693 # ensure that upload folder exists
694 ensure_dir(UPLOAD_FOLDER)
695 upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
696 # store *.son file to disk
697 if is_file_object:
698 son_file.save(upload_path)
699 else:
700 with open(upload_path, 'wb') as f:
701 f.write(son_file)
702 size = os.path.getsize(upload_path)
703 # create a service object and register it
704 s = Service(service_uuid, file_hash, upload_path)
705 GK.register_service_package(service_uuid, s)
706 # generate the JSON result
707 return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
708 except Exception as ex:
709 LOG.exception("Service package upload failed:")
710 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
711
712 def get(self):
713 """
714 Return a list of UUID's of uploaded service packages.
715 :return: dict/list
716 """
717 LOG.info("GET /packages")
718 return {"service_uuid_list": list(GK.services.iterkeys())}
719
720
721 class Instantiations(fr.Resource):
722
723 def post(self):
724 """
725 Instantiate a service specified by its UUID.
726 Will return a new UUID to identify the running service instance.
727 :return: UUID
728 """
        LOG.info("POST /instantiations (or /requests) called")
730 # try to extract the service uuid from the request
731 json_data = request.get_json(force=True)
732 service_uuid = json_data.get("service_uuid")
733
734 # lets be a bit fuzzy here to make testing easier
735 if (service_uuid is None or service_uuid=="latest") and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service in the list
737 service_uuid = list(GK.services.iterkeys())[0]
738 if service_uuid in GK.services:
739 # ok, we have a service uuid, lets start the service
740 service_instance_uuid = GK.services.get(service_uuid).start_service()
741 return {"service_instance_uuid": service_instance_uuid}, 201
742 return "Service not found", 404
743
744 def get(self):
745 """
746 Returns a list of UUIDs containing all running services.
747 :return: dict / list
748 """
749 LOG.info("GET /instantiations")
750 return {"service_instantiations_list": [
751 list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
752
753 def delete(self):
754 """
755 Stops a running service specified by its service and instance UUID.
756 """
757 # try to extract the service and instance UUID from the request
758 json_data = request.get_json(force=True)
759 service_uuid = json_data.get("service_uuid")
760 instance_uuid = json_data.get("service_instance_uuid")
761
762 # try to be fuzzy
763 if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply stop the first service in the list
765 service_uuid = list(GK.services.iterkeys())[0]
766 if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
767 instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]
768
769 if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
770 # valid service and instance UUID, stop service
771 GK.services.get(service_uuid).stop_service(instance_uuid)
772 return "service instance with uuid %r stopped." % instance_uuid,200
773 return "Service not found", 404
774
775 class Exit(fr.Resource):
776
777 def put(self):
778 """
        Stop the running Containernet instance, regardless of any data transmitted in the request.
780 """
781 list(GK.dcs.values())[0].net.stop()
782
783
784 def initialize_GK():
785 global GK
786 GK = Gatekeeper()
787
788
789
790 # create a single, global GK object
791 GK = None
792 initialize_GK()
793 # setup Flask
794 app = Flask(__name__)
795 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
796 api = fr.Api(app)
797 # define endpoints
798 api.add_resource(Packages, '/packages', '/api/v2/packages')
799 api.add_resource(Instantiations, '/instantiations', '/api/v2/instantiations', '/api/v2/requests')
800 api.add_resource(Exit, '/emulator/exit')
801
802
807
808 def start_rest_api(host, port, datacenters=dict()):
809 GK.dcs = datacenters
810 GK.net = get_dc_network()
811 # start the Flask server (not the best performance but ok for our use case)
812 app.run(host=host,
813 port=port,
814 debug=True,
815 use_reloader=False # this is needed to run Flask in a non-main thread
816 )
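

# Editorial usage sketch (assumption, not taken from this file): a topology
# script would typically call this from a separate thread, since app.run()
# blocks, and pass its emulator datacenters, e.g.:
#
#   start_rest_api("0.0.0.0", 5000, datacenters={"dc1": dc1, "dc2": dc2})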
817
818
819 def ensure_dir(name):
820 if not os.path.exists(name):
821 os.makedirs(name)
822
823
824 def load_yaml(path):
825 with open(path, "r") as f:
826 try:
827 r = yaml.load(f)
828 except yaml.YAMLError as exc:
829 LOG.exception("YAML parse error")
830 r = dict()
831 return r
832
833
834 def make_relative_path(path):
835 if path.startswith("file://"):
836 path = path.replace("file://", "", 1)
837 if path.startswith("/"):
838 path = path.replace("/", "", 1)
839 return path
840
841
842 def generate_lan_string(prefix, base, subnet_size=24, ip=0):
843 """
844 Helper to generate different network configuration strings.
845 """
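    # example (illustration only): generate_lan_string("10.0", 10, subnet_size=24, ip=1)
    # returns "10.0.10.1/24"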
846 r = "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
847 return r
848
849
850 def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
851 """
852 Helper to generate different network configuration strings.
853 """
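    # example (illustration only): generate_subnet_strings(2, start=200, subnet_size=24, ip=1)
    # returns ["200.0.0.1/24", "201.0.0.1/24"]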
854 r = list()
855 for i in range(start, start + n):
856 r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
857 return r
858
859 def get_dc_network():
860 """
    Retrieve the DCNetwork that this dummy gatekeeper (GK) is connected to.
862 Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
863 :return:
864 """
865 assert (len(GK.dcs) > 0)
866 return GK.dcs.values()[0].net
867
868 if __name__ == '__main__':
869 """
    Allow running the API in standalone mode.
871 """
872 GK_STANDALONE_MODE = True
873 logging.getLogger("werkzeug").setLevel(logging.INFO)
874 start_rest_api("0.0.0.0", 8000)
875