Merge branch 'master' of https://github.com/sonata-nfv/son-emu
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """
34
35 import logging
36 import os
37 import uuid
38 import hashlib
39 import zipfile
40 import yaml
41 import threading
42 from docker import Client as DockerClient
43 from flask import Flask, request
44 import flask_restful as fr
45 from collections import defaultdict
46 import pkg_resources
47
# Basic logging setup: a debug-level module logger; silence werkzeug's
# per-request log spam down to warnings.
logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# On-disk locations for uploaded *.son packages and their extracted contents.
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality (otherwise pre-built images are pulled)
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
FORCE_PULL = False

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
DEPLOY_SAP = False

# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
BIDIRECTIONAL_CHAIN = False
72
class Gatekeeper(object):
    """
    Registry of on-boarded service packages and known datacenters.

    A single module-global instance (``GK``) is shared by all REST
    resources defined in this module.
    """

    def __init__(self):
        # service_uuid -> Service object
        self.services = dict()
        # datacenter name -> DC object (filled in by start_rest_api())
        self.dcs = dict()
        # counter used to generate short names for VNFs (Mininet limitation)
        self.vnf_counter = 0
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid
        :param service object
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        service.onboard()

    def get_next_vnf_name(self):
        """Return a fresh short VNF name, e.g. "vnf1", "vnf2", ..."""
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
95
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        """
        :param service_uuid: UUID used to reference this package/service
        :param package_file_hash: sha1 hash of the uploaded *.son file
        :param package_file_path: location of the *.son file on disk
        """
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        self.manifest = None  # parsed META-INF/MANIFEST.MF (dict)
        self.nsd = None  # parsed network service descriptor (dict)
        self.vnfds = dict()  # VNF name -> VNFD dict
        self.local_docker_files = dict()  # VNF name -> path to Dockerfile
        self.remote_docker_image_urls = dict()  # VNF name -> docker image URL
        self.instances = dict()  # instance UUID -> instance record (a bit like a NSR)
        self.vnf_name2docker_name = dict()  # VNF name -> container name
        self.sap_identifiers = set()  # connection point ids of SAPs found in the NSD
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        if DEPLOY_SAP:
            self._load_saps()
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("name"))

    def start_service(self):
        """
        This methods creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return: UUID of the new service instance
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build a instances dict (a bit like a NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
        # map vnf_id -> vnf_name; unknown ids resolve to a sentinel name
        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                 self.nsd["network_functions"])))

        # 3. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            # self._calculate_placement(FirstDcPlacement)
            self._calculate_placement(RoundRobinDcPlacement)
        # iterate over all vnfds that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        # collect the virtual links that are part of the (first) forwarding graph
        vlinks = self.nsd["virtual_links"]
        fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
        eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
        elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

        # 4a. deploy E-Line links
        # cookie is used as identifier for the flowrules installed by the dummygatekeeper
        # eg. different services get a unique cookie for their flowrules
        # NOTE(review): cookie is never incremented here, so all E-Line links of
        # this instance share cookie 1 — confirm whether that is intended.
        cookie = 1
        for link in eline_fwd_links:
            src_id, src_if_name = link["connection_points_reference"][0].split(":")
            dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

            # check if there is a SAP in the link; SAP endpoints were renamed
            # to "<id>_<interface>" when they were loaded (see _load_saps())
            if src_id in self.sap_identifiers:
                src_docker_name = "{0}_{1}".format(src_id, src_if_name)
                src_id = src_docker_name
            else:
                src_docker_name = src_id

            if dst_id in self.sap_identifiers:
                dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
                dst_id = dst_docker_name
            else:
                dst_docker_name = dst_id

            src_name = vnf_id2vnf_name[src_id]
            dst_name = vnf_id2vnf_name[dst_id]

            LOG.debug(
                "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                    src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

            if (src_name in self.vnfds) and (dst_name in self.vnfds):
                network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                LOG.debug(src_docker_name)
                ret = network.setChain(
                    src_docker_name, dst_docker_name,
                    vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                    bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)

                # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
                src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                if src_vnfi is not None:
                    self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                if dst_vnfi is not None:
                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

        # 4b. deploy E-LAN links
        base = 10
        for link in elan_fwd_links:
            # generate lan ip address (one subnet per E-LAN, one host ip per member)
            ip = 1
            for intf in link["connection_points_reference"]:
                ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                vnf_id, intf_name = intf.split(":")
                if vnf_id in self.sap_identifiers:
                    src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                    vnf_id = src_docker_name
                vnf_name = vnf_id2vnf_name[vnf_id]
                LOG.debug(
                    "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                        vnf_name, vnf_id, intf_name, ip_address))

                if vnf_name in self.vnfds:
                    # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
                    # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                    # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
                    vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                    if vnfi is not None:
                        self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                        # increase for the next ip address on this E-LAN
                        ip += 1
            # increase the base ip address for the next E-LAN
            base += 1

        # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def stop_service(self, instance_uuid):
        """
        This method stops a running service instance.
        It iterates over all VNF instances, stopping them each
        and removing them from their data center.

        :param instance_uuid: the uuid of the service instance to be stopped
        """
        LOG.info("Stopping service %r" % self.uuid)
        # get relevant information
        vnf_instances = self.instances[instance_uuid]["vnf_instances"]

        for v in vnf_instances:
            self._stop_vnfi(v)

        if not GK_STANDALONE_MODE:
            # placement removal is not implemented yet
            # self._remove_placement(RoundRobinPlacement)
            pass

        # last step: remove the instance from the list of all instances
        del self.instances[instance_uuid]

    def _start_vnfd(self, vnfd):
        """
        Start a single VNFD of this service as a Docker container.
        :param vnfd: vnfd descriptor dict (must already carry a "dc" field
                     assigned by the placement algorithm)
        :return: the container (VNF instance) object
        """
        # iterate over all deployment units within each VNFDs
        # NOTE: we return after the first unit, i.e. only a single VDU per
        # VNFD is supported by this dummy gatekeeper.
        for u in vnfd.get("virtual_deployment_units"):
            # 1. get the name of the docker image to start and the assigned DC
            vnf_name = vnfd.get("name")
            if vnf_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_name)
            docker_name = self.remote_docker_image_urls.get(vnf_name)
            target_dc = vnfd.get("dc")
            # 2. perform some checks to ensure we can start the container
            assert(docker_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_name):
                raise Exception("Docker image %r not found. Abort." % docker_name)

            # 3. get the resource limits (fall back to defaults on missing fields)
            res_req = u.get("resource_requirements")
            cpu_list = res_req.get("cpu").get("cores")
            if not cpu_list:
                cpu_list = "1"
            cpu_bw = res_req.get("cpu").get("cpu_bw")
            if not cpu_bw:
                cpu_bw = 1
            mem_num = str(res_req.get("memory").get("size"))
            if len(mem_num) == 0:
                mem_num = "2"
            mem_unit = str(res_req.get("memory").get("size_unit"))
            # FIX: the original compared the string to the int 0
            # (str(mem_unit)==0), which is always False, so the "GB" default
            # was never applied for an empty unit.
            if len(mem_unit) == 0:
                mem_unit = "GB"
            # convert the memory limit to bytes
            mem_limit = float(mem_num)
            if mem_unit == "GB":
                mem_limit = mem_limit * 1024 * 1024 * 1024
            elif mem_unit == "MB":
                mem_limit = mem_limit * 1024 * 1024
            elif mem_unit == "KB":
                mem_limit = mem_limit * 1024
            mem_lim = int(mem_limit)
            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))

            # 4. do the dc.startCompute(name="foobar") call to run the container
            # TODO consider flavors, and other annotations
            intfs = vnfd.get("connection_points")

            # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
            # use the vnf_id in the nsd as docker name
            # so deployed containers can be easily mapped back to the nsd
            vnf_name2id = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_name"]: d["vnf_id"]},
                                                 self.nsd["network_functions"])))
            self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
            # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()

            LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
            LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
            vnfi = target_dc.startCompute(
                self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small",
                cpu_quota=cpu_quota, cpu_period=cpu_period, cpuset=cpu_list, mem_limit=mem_lim)
            return vnfi

    def _stop_vnfi(self, vnfi):
        """
        Stop a VNF instance.

        :param vnfi: vnf instance to be stopped
        """
        # Find the correct datacenter
        status = vnfi.getStatus()
        dc = vnfi.datacenter
        # stop the vnfi
        LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
        dc.stopCompute(status["name"])

    def _get_vnf_instance(self, instance_uuid, name):
        """
        Returns the Docker object for the given VNF name (or Docker name).
        :param instance_uuid: UUID of the service instance to search in.
        :param name: VNF name or Docker name. We are fuzzy here.
        :return: the matching container object or None
        """
        dn = name
        if name in self.vnf_name2docker_name:
            dn = self.vnf_name2docker_name[name]
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnfi.name == dn:
                return vnfi
        # FIX: the original log call had a %r placeholder but no argument,
        # which raises at logging time.
        LOG.warning("No container with name: %r found." % dn)
        return None

    @staticmethod
    def _vnf_reconfigure_network(vnfi, if_name, net_str):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instance
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :return:
        """
        intf = vnfi.intf(intf=if_name)
        if intf is not None:
            intf.setIP(net_str)
            LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
        else:
            LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))

    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        """
        Run the entrypoint command given in the SON_EMU_CMD environment
        variable (if present) inside each VNF instance.
        :param vnfi_list: list of VNF instance (container) objects
        """
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                # robustness: skip malformed entries without a '=' separator
                if "=" not in env_var:
                    continue
                var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
                LOG.debug("%r = %r" % (var, cmd))
                if var == "SON_EMU_CMD":
                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                    # execute command in new thread to ensure that GK is not blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)

    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))

    def _load_saps(self):
        """
        Create a VNFD for each Service Access Point (connection_point of type
        "interface") of the NSD, so each SAP is deployed as its own container.
        """
        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
        for sap in SAPs:
            # endpoints needed in this service
            sap_vnf_id, sap_vnf_interface = sap.split(':')
            # set of the connection_point ids found in the nsd (in the examples this is 'ns')
            self.sap_identifiers.add(sap_vnf_id)

            sap_docker_name = "%s_%s" % (sap_vnf_id, sap_vnf_interface)

            # add SAP to self.vnfds (based on the packaged sap_vnfd.yml template)
            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
            sap_vnfd = load_yaml(sapfile)
            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
            sap_vnfd["name"] = sap_docker_name
            self.vnfds[sap_docker_name] = sap_vnfd
            # add SAP vnf to list in the NSD so it is deployed later on
            # each SAP get a unique VNFD and vnf_id in the NSD
            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from VNFDs and store them in dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-build docker images in some repo.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        # strip the scheme; docker pull expects repo/name:tag
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[k] = url
                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-build Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            dc.pull(url,
                    insecure_registry=True)

    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists
        :param image_name: name of the docker image
        :return: True iff at least one matching image is known to Docker
        """
        return len(DockerClient().images(image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do placement by adding the a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        :param algorithm: placement class (e.g. RoundRobinDcPlacement)
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate algorithm an place
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # lets print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))

    def _calculate_cpu_cfs_values(self, cpu_time_percentage):
        """
        Calculate cpu period and quota for CFS
        :param cpu_time_percentage: percentage of overall CPU to be used
        :return: cpu_period, cpu_quota (both -1 to signal "no limit")
        """
        if cpu_time_percentage is None:
            return -1, -1
        if cpu_time_percentage < 0:
            return -1, -1
        # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
        # Attention minimum cpu_quota is 1ms (micro)
        cpu_period = 1000000  # lets consider a fixed period of 1000000 microseconds for now
        LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
        cpu_quota = cpu_period * cpu_time_percentage  # calculate the fraction of cpu time for this container
        # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
        if cpu_quota < 1000:
            LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
            cpu_quota = 1000
            LOG.warning("Increased CPU quota to avoid system error.")
        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
        return int(cpu_period), int(cpu_quota)
579
580
581 """
582 Some (simple) placement algorithms
583 """
584
585
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign the first known DC to every VNFD (adds a "dc" field).
        :param nsd: network service descriptor (unused by this strategy)
        :param vnfds: dict of VNF name -> VNFD dict (mutated in place)
        :param dcs: dict of DC name -> DC object
        """
        # hoist the invariant lookup out of the loop (the original rebuilt
        # the DC list for every VNFD); .values()/.items() also work on both
        # Python 2 and 3, unlike itervalues()/iteritems()
        first_dc = list(dcs.values())[0]
        for name, vnfd in vnfds.items():
            vnfd["dc"] = first_dc
593
594
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign DCs to VNFDs cyclically (adds a "dc" field to each VNFD).
        :param nsd: network service descriptor (unused by this strategy)
        :param vnfds: dict of VNF name -> VNFD dict (mutated in place)
        :param dcs: dict of DC name -> DC object
        """
        # .values()/.items() instead of itervalues()/iteritems() so this
        # works on both Python 2 and 3
        dcs_list = list(dcs.values())
        n_dcs = len(dcs_list)
        for c, (name, vnfd) in enumerate(vnfds.items()):
            vnfd["dc"] = dcs_list[c % n_dcs]
605
606
607
608
609 """
610 Resource definitions and API endpoints
611 """
612
613
class Packages(fr.Resource):

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: dict with service_uuid, size, sha1 and error (201 on success,
                 500 on failure)
        """
        try:
            # get file contents
            print(request.files)
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #     son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # FIX: hash the stored file's content. The original hashed
            # str(son_file), i.e. the repr of the FileStorage object, which
            # is not a content checksum at all.
            sha1 = hashlib.sha1()
            with open(upload_path, "rb") as f:
                for chunk in iter(lambda: f.read(64 * 1024), b""):
                    sha1.update(chunk)
            file_hash = sha1.hexdigest()
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
        except Exception:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict/list
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.iterkeys())}
658
659
class Instantiations(fr.Resource):

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        # pull the requested service UUID out of the JSON body
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # fuzzy fallback to ease testing: without an explicit UUID we simply
        # pick the first known service
        if service_uuid is None and len(GK.services) > 0:
            service_uuid = list(GK.services.iterkeys())[0]

        if service_uuid not in GK.services:
            return "Service not found", 404
        # known service: spin up a new instance and report its UUID
        service_instance_uuid = GK.services.get(service_uuid).start_service()
        return {"service_instance_uuid": service_instance_uuid}, 201

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations")
        instance_lists = [list(s.instances.iterkeys())
                          for s in GK.services.itervalues()]
        return {"service_instantiations_list": instance_lists}

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # pull service and instance UUID out of the JSON body
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        instance_uuid = json_data.get("service_instance_uuid")

        # fuzzy fallbacks: default to the first known service / instance
        if service_uuid is None and len(GK.services) > 0:
            service_uuid = list(GK.services.iterkeys())[0]
        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
            instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]

        if service_uuid not in GK.services or instance_uuid not in GK.services[service_uuid].instances:
            return "Service not found", 404
        # valid service and instance UUID: stop and forget the instance
        GK.services.get(service_uuid).stop_service(instance_uuid)
        del GK.services.get(service_uuid).instances[instance_uuid]
        return
714
class Exit(fr.Resource):

    def put(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        # NOTE(review): `GK.net` is never assigned anywhere in this module;
        # presumably the emulator attaches the network object to GK before
        # this endpoint is used — confirm, otherwise this raises
        # AttributeError.
        GK.net.stop()
722
723
def initialize_GK():
    """(Re-)create the module-global Gatekeeper instance ``GK``."""
    global GK
    GK = Gatekeeper()
727
728
729
730 # create a single, global GK object
731 GK = None
732 initialize_GK()
733 # setup Flask
734 app = Flask(__name__)
735 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
736 api = fr.Api(app)
737 # define endpoints
738 api.add_resource(Packages, '/packages')
739 api.add_resource(Instantiations, '/instantiations')
740 api.add_resource(Exit, '/emulator/exit')
741
742
743 #def initialize_GK():
744 # global GK
745 # GK = Gatekeeper()
746
747
def start_rest_api(host, port, datacenters=None):
    """
    Attach the known datacenters to the gatekeeper and run the REST API.
    Blocks until the Flask server terminates.
    :param host: interface to bind to, e.g. "0.0.0.0"
    :param port: TCP port to listen on
    :param datacenters: dict of DC name -> DC object (default: empty dict)
    """
    # FIX: avoid a mutable default argument; the original `datacenters=dict()`
    # default would be shared across calls and exposed (mutably) via GK.dcs.
    GK.dcs = datacenters if datacenters is not None else dict()
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
756
757
def ensure_dir(name):
    """
    Create directory *name* (including parents) if it does not exist yet.
    :param name: directory path
    """
    # FIX: EAFP instead of exists()+makedirs() — the original had a TOCTOU
    # race when two uploads arrived concurrently (works on Python 2 and 3,
    # where makedirs() has no exist_ok parameter on 2).
    try:
        os.makedirs(name)
    except OSError:
        # re-raise real errors; swallow only "already exists"
        if not os.path.isdir(name):
            raise
761
762
def load_yaml(path):
    """
    Load a YAML file and return its content as a dict.
    Returns an empty dict (and logs) on parse errors.
    :param path: path of the YAML file
    :return: parsed content (dict) or empty dict on error
    """
    with open(path, "r") as f:
        try:
            # FIX: safe_load instead of load — descriptor files come from
            # uploaded packages (untrusted input) and yaml.load allows
            # arbitrary Python object construction.
            r = yaml.safe_load(f)
        except yaml.YAMLError:
            LOG.exception("YAML parse error")
            r = dict()
    return r
771
772
def make_relative_path(path):
    """
    Strip an optional "file://" scheme and a single leading "/" from *path*
    so it can be joined onto the package content directory.
    """
    for marker in ("file://", "/"):
        if path.startswith(marker):
            path = path[len(marker):]
    return path
779
780
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Builds "<prefix>.<base>.<ip>/<subnet_size>", e.g. "10.0.10.1/24".
    """
    return "{0}.{1}.{2}/{3}".format(prefix, base, ip, subnet_size)
787
788
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Returns n strings "<i>.0.0.<ip>/<subnet_size>" for i in [start, start+n).
    """
    return ["%d.0.0.%d/%d" % (i, ip, subnet_size)
            for i in range(start, start + n)]
797
798
if __name__ == '__main__':
    """
    Lets allow to run the API in standalone mode.
    """
    # no emulator available when run directly: only the bare REST API
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)
806