Merge branch 'master' of https://github.com/sonata-nfv/son-emu
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """
34
35 import logging
36 import os
37 import uuid
38 import hashlib
39 import zipfile
40 import yaml
41 import threading
42 from docker import DockerClient, APIClient
43 from flask import Flask, request
44 import flask_restful as fr
45 from collections import defaultdict
46 import pkg_resources
47 from subprocess import Popen
48
logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
# silence Flask's per-request logging; our own DEBUG output stays visible
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# all state of the dummy gatekeeper (uploads, unpacked packages) lives below this path
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if it is already available locally
FORCE_PULL = False

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
DEPLOY_SAP = False

# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
BIDIRECTIONAL_CHAIN = False
73
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps track of on-boarded service packages and the
    data centers they can be deployed to.
    """

    def __init__(self):
        # service_uuid -> Service object
        self.services = dict()
        # dc name -> data center object (filled in by start_rest_api)
        self.dcs = dict()
        self.net = None
        # counter used to generate short names for VNFs (Mininet limitation)
        self.vnf_counter = 0
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid
        :param service object
        """
        # remember the package, then run all on-boarding steps for it
        self.services[service_uuid] = service
        service.onboard()

    def get_next_vnf_name(self):
        """Return the next short unique VNF name, e.g. 'vnf1', 'vnf2', ..."""
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
96
97
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        """
        :param service_uuid: UUID used to reference this service package
        :param package_file_hash: sha1 of the uploaded *.son file
        :param package_file_path: location of the uploaded *.son file on disk
        """
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        self.instances = dict()
        self.vnf_name2docker_name = dict()
        self.sap_identifiers = set()
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        if DEPLOY_SAP:
            self._load_saps()
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("name"))

    def start_service(self):
        """
        This methods creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return: instance_uuid of the new service instance
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build a instances dict (a bit like a NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
        # vnf_id -> vnf_name lookup built from the NSD; unknown ids map to "NotExistingNode"
        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                 self.nsd["network_functions"])))

        # 3. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            #self._calculate_placement(FirstDcPlacement)
            self._calculate_placement(RoundRobinDcPlacement)
        # iterate over all vnfds that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        if "virtual_links" in self.nsd:
            vlinks = self.nsd["virtual_links"]
            # only links that are part of the first forwarding graph are chained
            fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
            eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
            elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

            GK.net.deployed_elines.extend(eline_fwd_links)
            GK.net.deployed_elans.extend(elan_fwd_links)

            # 4a. deploy E-Line links
            # cookie is used as identifier for the flowrules installed by the dummygatekeeper
            # eg. different services get a unique cookie for their flowrules
            # NOTE(review): cookie is never incremented here, so all flowrules of
            # this gatekeeper share cookie 1 — confirm whether that is intended.
            cookie = 1
            for link in eline_fwd_links:
                src_id, src_if_name = link["connection_points_reference"][0].split(":")
                dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

                # check if there is a SAP in the link
                if src_id in self.sap_identifiers:
                    src_docker_name = "{0}_{1}".format(src_id, src_if_name)
                    src_id = src_docker_name
                else:
                    src_docker_name = src_id

                if dst_id in self.sap_identifiers:
                    dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
                    dst_id = dst_docker_name
                else:
                    dst_docker_name = dst_id

                src_name = vnf_id2vnf_name[src_id]
                dst_name = vnf_id2vnf_name[dst_id]

                LOG.debug(
                    "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                        src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

                if (src_name in self.vnfds) and (dst_name in self.vnfds):
                    network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                    LOG.debug(src_docker_name)
                    network.setChain(
                        src_docker_name, dst_docker_name,
                        vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                        bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)

                    # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
                    src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                    if src_vnfi is not None:
                        self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                    dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                    if dst_vnfi is not None:
                        self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

            # 4b. deploy E-LAN links
            base = 10
            for link in elan_fwd_links:

                elan_vnf_list = []

                # generate lan ip address
                ip = 1
                for intf in link["connection_points_reference"]:
                    ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                    vnf_id, intf_name = intf.split(":")
                    if vnf_id in self.sap_identifiers:
                        src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                        vnf_id = src_docker_name
                    else:
                        src_docker_name = vnf_id
                    vnf_name = vnf_id2vnf_name[vnf_id]
                    LOG.debug(
                        "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                            vnf_name, vnf_id, intf_name, ip_address))

                    if vnf_name in self.vnfds:
                        # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
                        # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                        # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
                        vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                        if vnfi is not None:
                            self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                        # increase for the next ip address on this E-LAN
                        ip += 1

                        # add this vnf and interface to the E-LAN for tagging
                        network = self.vnfds[vnf_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                        elan_vnf_list.append({'name': src_docker_name, 'interface': intf_name})

                # install the VLAN tags for this E-LAN
                network.setLAN(elan_vnf_list)
                # increase the base ip address for the next E-LAN
                base += 1

        # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def stop_service(self, instance_uuid):
        """
        This method stops a running service instance.
        It iterates over all VNF instances, stopping them each
        and removing them from their data center.

        :param instance_uuid: the uuid of the service instance to be stopped
        """
        LOG.info("Stopping service %r" % self.uuid)
        # get relevant information
        vnf_instances = self.instances[instance_uuid]["vnf_instances"]

        for v in vnf_instances:
            self._stop_vnfi(v)

        if not GK_STANDALONE_MODE:
            # remove placement?
            # self._remove_placement(RoundRobinPlacement)
            pass  # BUGFIX: was a bare 'None' expression statement

        # last step: remove the instance from the list of all instances
        del self.instances[instance_uuid]

    def _start_vnfd(self, vnfd):
        """
        Start a single VNFD of this service
        :param vnfd: vnfd descriptor dict
        :return: the vnf instance (container) of the first VDU
        """
        # iterate over all deployment units within each VNFDs
        # NOTE: the loop body ends with 'return', so only the FIRST
        # virtual_deployment_unit of a VNFD is actually deployed.
        for u in vnfd.get("virtual_deployment_units"):
            # 1. get the name of the docker image to start and the assigned DC
            vnf_name = vnfd.get("name")
            if vnf_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_name)
            docker_name = self.remote_docker_image_urls.get(vnf_name)
            target_dc = vnfd.get("dc")
            # 2. perform some checks to ensure we can start the container
            assert(docker_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_name):
                raise Exception("Docker image %r not found. Abort." % docker_name)

            # 3. get the resource limits
            res_req = u.get("resource_requirements")
            cpu_list = res_req.get("cpu").get("cores")
            if not cpu_list or len(cpu_list) == 0:
                cpu_list = "1"
            cpu_bw = res_req.get("cpu").get("cpu_bw")
            if not cpu_bw:
                cpu_bw = 1
            # BUGFIX: the original defaulting checks (len(str(None)) == 0 and
            # str(mem_unit) == 0) could never be true, so a missing memory
            # size/unit was never defaulted and crashed float() below.
            mem_num = str(res_req.get("memory").get("size") or 2)
            mem_unit = str(res_req.get("memory").get("size_unit") or "GB")
            mem_limit = float(mem_num)
            if mem_unit == "GB":
                mem_limit = mem_limit * 1024 * 1024 * 1024
            elif mem_unit == "MB":
                mem_limit = mem_limit * 1024 * 1024
            elif mem_unit == "KB":
                mem_limit = mem_limit * 1024
            mem_lim = int(mem_limit)
            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))

            # 4. generate the volume paths for the docker container
            volumes = list()
            # a volume to extract log files
            docker_log_path = "/tmp/results/%s/%s" % (self.uuid, vnf_name)
            LOG.debug("LOG path for vnf %s is %s." % (vnf_name, docker_log_path))
            if not os.path.exists(docker_log_path):
                LOG.debug("Creating folder %s" % docker_log_path)
                os.makedirs(docker_log_path)

            volumes.append(docker_log_path + ":/mnt/share/")

            # 5. do the dc.startCompute(name="foobar") call to run the container
            # TODO consider flavors, and other annotations
            intfs = vnfd.get("connection_points")

            # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
            # use the vnf_id in the nsd as docker name
            # so deployed containers can be easily mapped back to the nsd
            vnf_name2id = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_name"]: d["vnf_id"]},
                                                 self.nsd["network_functions"])))
            self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
            # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()

            LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
            LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
            vnfi = target_dc.startCompute(
                self.vnf_name2docker_name[vnf_name],
                network=intfs,
                image=docker_name,
                flavor_name="small",
                cpu_quota=cpu_quota,
                cpu_period=cpu_period,
                cpuset=cpu_list,
                mem_limit=mem_lim,
                volumes=volumes)
            return vnfi

    def _stop_vnfi(self, vnfi):
        """
        Stop a VNF instance.

        :param vnfi: vnf instance to be stopped
        """
        # Find the correct datacenter
        status = vnfi.getStatus()
        dc = vnfi.datacenter

        # stop the vnfi
        LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
        dc.stopCompute(status["name"])

    def _get_vnf_instance(self, instance_uuid, name):
        """
        Returns the Docker object for the given VNF name (or Docker name).
        :param instance_uuid: UUID of the service instance to search in.
        :param name: VNF name or Docker name. We are fuzzy here.
        :return: vnf instance object or None if not found
        """
        dn = name
        if name in self.vnf_name2docker_name:
            dn = self.vnf_name2docker_name[name]
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnfi.name == dn:
                return vnfi
        # BUGFIX: the original log call was missing the format argument
        LOG.warning("No container with name: %r found." % dn)
        return None

    @staticmethod
    def _vnf_reconfigure_network(vnfi, if_name, net_str):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instance
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :return:
        """
        intf = vnfi.intf(intf=if_name)
        if intf is not None:
            intf.setIP(net_str)
            LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
        else:
            LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))

    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        """
        Run the entrypoint script given in the SON_EMU_CMD environment
        variable (if any) inside each VNF instance.
        :param vnfi_list: list of vnf instances (may contain None entries in standalone mode)
        """
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
                LOG.debug("%r = %r" % (var, cmd))
                if var == "SON_EMU_CMD":
                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                    # execute command in new thread to ensure that GK is not blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)

    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            GK.net.deployed_nsds.append(self.nsd)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))

    def _load_saps(self):
        """
        Create a VNFD for each Service Access Point (connection_point) of the
        NSD and register it so it is deployed as its own container later on.
        """
        # Each Service Access Point (connection_point) in the nsd is getting its own container
        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
        for sap in SAPs:
            # endpoints needed in this service
            sap_vnf_id, sap_vnf_interface = sap.split(':')
            # set of the connection_point ids found in the nsd (in the examples this is 'ns')
            self.sap_identifiers.add(sap_vnf_id)

            sap_docker_name = "%s_%s" % (sap_vnf_id, sap_vnf_interface)

            # add SAP to self.vnfds
            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
            sap_vnfd = load_yaml(sapfile)
            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
            sap_vnfd["name"] = sap_docker_name
            self.vnfds[sap_docker_name] = sap_vnfd
            # add SAP vnf to list in the NSD so it is deployed later on
            # each SAP get a unique VNFD and vnf_id in the NSD
            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from VNFDs and store them in dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-build docker images in some repo.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[k] = url
                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            # NOTE(review): with docker SDK >= 2.0 the build API lives at
            # dc.images.build(...) — confirm dc.build() works with the
            # docker version pinned by this project.
            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-build Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images.list(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            # this seems to fail with latest docker api version 2.0.2
            # dc.images.pull(url,
            #        insecure_registry=True)
            # using docker cli instead
            cmd = ["docker",
                   "pull",
                   url,
                   ]
            Popen(cmd).wait()

    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists
        :param image_name: name of the docker image
        :return: True iff at least one local image matches the name
        """
        return len(DockerClient().images.list(name=image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do placement by adding the a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate algorithm an place
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # lets print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))

    def _calculate_cpu_cfs_values(self, cpu_time_percentage):
        """
        Calculate cpu period and quota for CFS
        :param cpu_time_percentage: percentage of overall CPU to be used
        :return: cpu_period, cpu_quota (both -1 = unlimited when no valid percentage is given)
        """
        if cpu_time_percentage is None:
            return -1, -1
        if cpu_time_percentage < 0:
            return -1, -1
        # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
        # Attention minimum cpu_quota is 1ms (micro)
        cpu_period = 1000000  # lets consider a fixed period of 1000000 microseconds for now
        LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
        cpu_quota = cpu_period * cpu_time_percentage  # calculate the fraction of cpu time for this container
        # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
        if cpu_quota < 1000:
            LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
            cpu_quota = 1000
            LOG.warning("Increased CPU quota to avoid system error.")
        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
        return int(cpu_period), int(cpu_quota)
630
631
632 """
633 Some (simple) placement algorithms
634 """
635
636
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign the first available DC to every VNFD.

        :param nsd: network service descriptor (unused by this strategy)
        :param vnfds: dict: vnf name -> vnfd dict (a "dc" field is added to each)
        :param dcs: dict: dc name -> data center object (must be non-empty)
        """
        # hoist the DC lookup out of the loop (was rebuilt per VNFD) and
        # use items()/values() so the code runs on Python 2 and 3
        first_dc = list(dcs.values())[0]
        for name, vnfd in vnfds.items():
            vnfd["dc"] = first_dc
644
645
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    def place(self, nsd, vnfds, dcs):
        """
        :param nsd: network service descriptor (unused by this strategy)
        :param vnfds: dict: vnf name -> vnfd dict (a "dc" field is added to each)
        :param dcs: dict: dc name -> data center object (must be non-empty)
        """
        # use items()/values() instead of Python-2-only iteritems()/itervalues()
        dcs_list = list(dcs.values())  # fix the DC iteration order once
        for c, (name, vnfd) in enumerate(vnfds.items()):
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
656
657
658
659
660 """
661 Resource definitions and API endpoints
662 """
663
664
class Packages(fr.Resource):
    """REST resource for uploading and listing *.son service packages."""

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: UUID
        """
        try:
            LOG.info("POST /packages called")
            # lets search for the package in the request
            son_file = None
            is_file_object = False  # make API more robust: file can be in data or in files field
            if "package" in request.files:
                son_file = request.files["package"]
                is_file_object = True
            elif len(request.data) > 0:
                son_file = request.data
            if son_file is None:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500

            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # NOTE(review): for file uploads this hashes str(son_file), i.e. the
            # object's repr, not the file content — confirm before relying on sha1
            file_hash = hashlib.sha1(str(son_file)).hexdigest()

            # store the *.son file below UPLOAD_FOLDER
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            if is_file_object:
                son_file.save(upload_path)
            else:
                with open(upload_path, 'wb') as f:
                    f.write(son_file)
            size = os.path.getsize(upload_path)

            # create a service object and register it
            new_service = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, new_service)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
        except Exception:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict/list
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.iterkeys())}
715
716
class Instantiations(fr.Resource):
    """REST resource for starting, listing, and stopping service instances."""

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        LOG.info("POST /instantiations (or /requests) called")
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if (service_uuid is None or service_uuid == "latest") and len(GK.services) > 0:
            # no usable uuid given: fall back to the first on-boarded service
            service_uuid = list(GK.services.iterkeys())[0]
        if service_uuid not in GK.services:
            return "Service not found", 404
        # ok, we have a service uuid, lets start the service
        service_instance_uuid = GK.services.get(service_uuid).start_service()
        return {"service_instance_uuid": service_instance_uuid}, 201

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations")
        all_instance_ids = [list(s.instances.iterkeys())
                            for s in GK.services.itervalues()]
        return {"service_instantiations_list": all_instance_ids}

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        instance_uuid = json_data.get("service_instance_uuid")

        # try to be fuzzy: default to the first known service / instance
        if service_uuid is None and len(GK.services) > 0:
            service_uuid = list(GK.services.iterkeys())[0]
        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
            instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]

        # guard clauses: both UUIDs must reference known objects
        if service_uuid not in GK.services:
            return "Service not found", 404
        if instance_uuid not in GK.services[service_uuid].instances:
            return "Service not found", 404
        # valid service and instance UUID, stop service
        GK.services.get(service_uuid).stop_service(instance_uuid)
        return "service instance with uuid %r stopped." % instance_uuid, 200
770
class Exit(fr.Resource):
    """REST resource that shuts down the whole emulator network."""

    def put(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        # any DC works here since all DCs share the same underlying network
        first_dc = list(GK.dcs.values())[0]
        first_dc.net.stop()
778
779
def initialize_GK():
    """(Re-)create the module-global Gatekeeper singleton."""
    global GK
    GK = Gatekeeper()
783
784
785
# create a single, global GK object
GK = None
initialize_GK()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
api = fr.Api(app)
# define endpoints
# each resource is reachable under both its legacy and its /api/v2/ path
api.add_resource(Packages, '/packages', '/api/v2/packages')
api.add_resource(Instantiations, '/instantiations', '/api/v2/instantiations', '/api/v2/requests')
api.add_resource(Exit, '/emulator/exit')
797
798
799 #def initialize_GK():
800 # global GK
801 # GK = Gatekeeper()
802
803
def start_rest_api(host, port, datacenters=None):
    """
    Attach the given data centers to the gatekeeper and run the REST API.

    :param host: interface to bind the HTTP server to, e.g. "0.0.0.0"
    :param port: TCP port of the HTTP server
    :param datacenters: dict mapping names to data center objects
        (default: empty dict; was a mutable default argument before)
    """
    GK.dcs = datacenters if datacenters is not None else dict()
    # BUGFIX: get_dc_network() asserts on an empty DC dict, which made the
    # standalone __main__ mode crash at startup; skip it when no DCs exist.
    GK.net = get_dc_network() if len(GK.dcs) > 0 else None
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
813
814
def ensure_dir(name):
    """Create directory *name* (including parents) unless it already exists."""
    if os.path.exists(name):
        return
    os.makedirs(name)
818
819
def load_yaml(path):
    """
    Load a YAML file and return its content as dict.

    :param path: path of the YAML file to read
    :return: parsed content as dict, or an empty dict on parse errors
    """
    with open(path, "r") as f:
        try:
            # SECURITY: descriptors come from user-uploaded *.son packages,
            # i.e. untrusted input. safe_load() only constructs plain Python
            # objects, while the previous yaml.load() without a Loader could
            # instantiate arbitrary objects from the YAML stream.
            r = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r
828
829
def make_relative_path(path):
    """Strip a leading 'file://' scheme and/or a leading '/' from *path*."""
    if path.startswith("file://"):
        path = path[len("file://"):]
    if path.startswith("/"):
        path = path[1:]
    return path
836
837
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Returns e.g. '10.0.10.1/24' for prefix='10.0', base=10, ip=1.
    """
    host_part = "%s.%d.%d" % (prefix, base, ip)
    return "%s/%d" % (host_part, subnet_size)
844
845
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Returns n CIDR strings of the form '<i>.0.0.<ip>/<subnet_size>'
    with i running from start to start + n - 1.
    """
    return ["%d.0.0.%d/%d" % (i, ip, subnet_size)
            for i in range(start, start + n)]
854
def get_dc_network():
    """
    retrieve the DCnetwork where this dummygatekeeper (GK) connects to.
    Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
    :return:
    """
    assert (len(GK.dcs) > 0)
    # all DCs share the same network, so any entry will do
    some_dc = list(GK.dcs.values())[0]
    return some_dc.net
863
if __name__ == '__main__':
    """
    Lets allow to run the API in standalone mode.
    """
    # run without the emulator: only the bare API (see flag definition above)
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)
871