included sonata-stress-service package with shorter vnf_ids, added support for servic...
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """
34
import errno
import hashlib
import logging
import os
import threading
import uuid
import zipfile
from collections import defaultdict

import pkg_resources
import yaml
from docker import Client as DockerClient
from flask import Flask, request
import flask_restful as fr
47
48 logging.basicConfig()
49 LOG = logging.getLogger("sonata-dummy-gatekeeper")
50 LOG.setLevel(logging.DEBUG)
51 logging.getLogger("werkzeug").setLevel(logging.WARNING)
52
53 GK_STORAGE = "/tmp/son-dummy-gk/"
54 UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
55 CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
56
57 # Enable Dockerfile build functionality
58 BUILD_DOCKERFILE = False
59
60 # flag to indicate that we run without the emulator (only the bare API for integration testing)
61 GK_STANDALONE_MODE = False
62
63 # should a new version of an image be pulled even if its available
64 FORCE_PULL = False
65
66 # Automatically deploy SAPs (endpoints) of the service as new containers
67 # Attention: This is not a configuration switch but a global variable! Don't change its default value.
68 DEPLOY_SAP = False
69
70 # flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
71 BIDIRECTIONAL_CHAIN = False
72
class Gatekeeper(object):
    """
    Registry object of the dummy gatekeeper.

    Keeps track of all on-boarded service packages and of the data
    centers known to the emulator.
    """

    def __init__(self):
        self.services = {}  # service_uuid -> Service object
        self.dcs = {}       # DC name -> data center object
        # counter to generate short names for VNFs (Mininet limitation)
        self.vnf_counter = 0
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid
        :param service object
        """
        self.services[service_uuid] = service
        # on-boarding performs unpacking, descriptor loading and image setup
        service.onboard()

    def get_next_vnf_name(self):
        """Return a fresh, short VNF name of the form 'vnf<counter>'."""
        self.vnf_counter += 1
        return "vnf{0}".format(self.vnf_counter)
94
95
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        # identity and on-disk location of the uploaded package
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        # descriptors; filled by the _load_* methods during onboard()
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        # vnf name -> local Dockerfile path (BUILD_DOCKERFILE mode)
        self.local_docker_files = dict()
        # vnf name -> remote Docker image URL (pull mode)
        self.remote_docker_image_urls = dict()
        # instance_uuid -> {"vnf_instances": [container, ...]}
        self.instances = dict()
        # vnf name -> container name used when deploying to a DC
        self.vnf_name2docker_name = dict()
        # connection point ids of the NSD that act as SAPs (see _load_saps)
        self.sap_identifiers = set()
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        if DEPLOY_SAP:
            self._load_saps()
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("name"))

    def start_service(self):
        """
        This methods creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return: UUID of the newly created service instance
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build a instances dict (a bit like a NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
        # Build a vnf_id -> vnf_name map from the NSD; unknown ids resolve
        # to the sentinel "NotExistingNode" instead of raising KeyError.
        # NOTE(review): `reduce` is a builtin in Python 2 only; Python 3
        # would need functools.reduce here.
        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                 self.nsd["network_functions"])))

        # 3. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            #self._calculate_placement(FirstDcPlacement)
            self._calculate_placement(RoundRobinDcPlacement)
        # iterate over all vnfds that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            # in standalone mode a placeholder None entry is appended
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        if "virtual_links" in self.nsd:
            vlinks = self.nsd["virtual_links"]
            # only links that are part of the first forwarding graph are chained
            fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
            eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
            elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

            # 4a. deploy E-Line links
            # cookie is used as identifier for the flowrules installed by the dummygatekeeper
            # eg. different services get a unique cookie for their flowrules
            # NOTE(review): cookie is set to 1 and never incremented, so every
            # link of every service uses cookie 1 -- confirm this is intended.
            cookie = 1
            for link in eline_fwd_links:
                # each connection point reference has the form "<vnf_id>:<if_name>"
                src_id, src_if_name = link["connection_points_reference"][0].split(":")
                dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

                # check if there is a SAP in the link
                if src_id in self.sap_identifiers:
                    src_docker_name = "{0}_{1}".format(src_id, src_if_name)
                    src_id = src_docker_name
                else:
                    src_docker_name = src_id

                if dst_id in self.sap_identifiers:
                    dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
                    dst_id = dst_docker_name
                else:
                    dst_docker_name = dst_id

                src_name = vnf_id2vnf_name[src_id]
                dst_name = vnf_id2vnf_name[dst_id]

                LOG.debug(
                    "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                        src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

                if (src_name in self.vnfds) and (dst_name in self.vnfds):
                    network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                    LOG.debug(src_docker_name)
                    # install the flow rules for this E-Line in the DC network
                    ret = network.setChain(
                        src_docker_name, dst_docker_name,
                        vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                        bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)

                    # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
                    src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                    if src_vnfi is not None:
                        self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                    dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                    if dst_vnfi is not None:
                        self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

            # 4b. deploy E-LAN links
            base = 10
            for link in elan_fwd_links:
                # generate lan ip address
                ip = 1
                for intf in link["connection_points_reference"]:
                    ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                    vnf_id, intf_name = intf.split(":")
                    if vnf_id in self.sap_identifiers:
                        # SAP endpoints were renamed to "<id>_<if>" in _load_saps
                        src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                        vnf_id = src_docker_name
                    vnf_name = vnf_id2vnf_name[vnf_id]
                    LOG.debug(
                        "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                            vnf_name, vnf_id, intf_name, ip_address))

                    if vnf_name in self.vnfds:
                        # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
                        # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                        # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
                        vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                        if vnfi is not None:
                            self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                            # increase for the next ip address on this E-LAN
                            ip += 1
                # increase the base ip address for the next E-LAN
                base += 1

        # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def stop_service(self, instance_uuid):
        """
        This method stops a running service instance.
        It iterates over all VNF instances, stopping them each
        and removing them from their data center.

        :param instance_uuid: the uuid of the service instance to be stopped
        """
        LOG.info("Stopping service %r" % self.uuid)
        # get relevant information
        # instance_uuid = str(self.uuid.uuid4())
        vnf_instances = self.instances[instance_uuid]["vnf_instances"]

        for v in vnf_instances:
            self._stop_vnfi(v)

        if not GK_STANDALONE_MODE:
            # remove placement?
            # self._remove_placement(RoundRobinPlacement)
            # NOTE(review): bare `None` is a no-op placeholder statement
            None

        # last step: remove the instance from the list of all instances
        del self.instances[instance_uuid]

    def _start_vnfd(self, vnfd):
        """
        Start a single VNFD of this service
        :param vnfd: vnfd descriptor dict
        :return: the container object of the started VNF instance
        """
        # iterate over all deployment units within each VNFDs
        # NOTE(review): the `return` at the end of the loop body means only
        # the FIRST deployment unit is ever started -- confirm whether
        # multi-VDU VNFDs are meant to be supported.
        for u in vnfd.get("virtual_deployment_units"):
            # 1. get the name of the docker image to start and the assigned DC
            vnf_name = vnfd.get("name")
            if vnf_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_name)
            docker_name = self.remote_docker_image_urls.get(vnf_name)
            target_dc = vnfd.get("dc")
            # 2. perform some checks to ensure we can start the container
            assert(docker_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_name):
                raise Exception("Docker image %r not found. Abort." % docker_name)

            # 3. get the resource limits
            res_req = u.get("resource_requirements")
            cpu_list = res_req.get("cpu").get("cores")
            if not cpu_list or len(cpu_list)==0:
                cpu_list="1"
            cpu_bw = res_req.get("cpu").get("cpu_bw")
            if not cpu_bw:
                cpu_bw=1
            # NOTE(review): if "size" is missing, str(None) == "None" which
            # has len 4, so the fallback below never triggers.
            mem_num = str(res_req.get("memory").get("size"))
            if len(mem_num)==0:
                mem_num="2"
            mem_unit = str(res_req.get("memory").get("size_unit"))
            # NOTE(review): compares a string to the int 0 -- this is always
            # False; probably meant len(mem_unit) == 0 (fallback dead).
            if str(mem_unit)==0:
                mem_unit="GB"
            # convert the memory limit to bytes for the Docker API
            mem_limit = float(mem_num)
            if mem_unit=="GB":
                mem_limit=mem_limit*1024*1024*1024
            elif mem_unit=="MB":
                mem_limit=mem_limit*1024*1024
            elif mem_unit=="KB":
                mem_limit=mem_limit*1024
            mem_lim = int(mem_limit)
            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))

            # 4. do the dc.startCompute(name="foobar") call to run the container
            # TODO consider flavors, and other annotations
            intfs = vnfd.get("connection_points")

            # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
            # use the vnf_id in the nsd as docker name
            # so deployed containers can be easily mapped back to the nsd
            # NOTE(review): `reduce` is a Python 2 builtin only (see above)
            vnf_name2id = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_name"]: d["vnf_id"]},
                                                 self.nsd["network_functions"])))
            self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
            # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()

            LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
            LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
            vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small",
                                          cpu_quota=cpu_quota, cpu_period=cpu_period, cpuset=cpu_list, mem_limit=mem_lim)
            return vnfi

    def _stop_vnfi(self, vnfi):
        """
        Stop a VNF instance.

        :param vnfi: vnf instance to be stopped
        """
        # Find the correct datacenter
        status = vnfi.getStatus()
        dc = vnfi.datacenter
        # stop the vnfi
        LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
        dc.stopCompute(status["name"])

    def _get_vnf_instance(self, instance_uuid, name):
        """
        Returns the Docker object for the given VNF name (or Docker name).
        :param instance_uuid: UUID of the service instance to search in.
        :param name: VNF name or Docker name. We are fuzzy here.
        :return: the matching container object, or None if not found
        """
        dn = name
        # translate a VNF name to its Docker/container name if we know it
        if name in self.vnf_name2docker_name:
            dn = self.vnf_name2docker_name[name]
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnfi.name == dn:
                return vnfi
        # NOTE(review): the %r placeholder has no argument -- this logs the
        # literal "%r" instead of the missing container name.
        LOG.warning("No container with name: %r found.")
        return None

    @staticmethod
    def _vnf_reconfigure_network(vnfi, if_name, net_str):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instacne
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :return:
        """
        intf = vnfi.intf(intf=if_name)
        if intf is not None:
            intf.setIP(net_str)
            LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
        else:
            LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))


    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        # Scan each container's environment for SON_EMU_CMD and run that
        # command inside the container as the emulator-specific entrypoint.
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                # env entries look like "KEY=VALUE"; split only on the first '='
                var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
                LOG.debug("%r = %r" % (var , cmd))
                if var=="SON_EMU_CMD":
                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                    # execute command in new thread to ensure that GK is not blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)


    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    # keyed by VNF name; a duplicate name would overwrite silently
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))

    def _load_saps(self):
        # Each Service Access Point (connection_point) in the nsd is getting its own container
        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
        for sap in SAPs:
            # endpoints needed in this service
            sap_vnf_id, sap_vnf_interface = sap.split(':')
            # set of the connection_point ids found in the nsd (in the examples this is 'ns')
            self.sap_identifiers.add(sap_vnf_id)

            sap_docker_name = "%s_%s" % (sap_vnf_id, sap_vnf_interface)

            # add SAP to self.vnfds
            # the SAP container is described by a template VNFD shipped with this module
            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
            sap_vnfd = load_yaml(sapfile)
            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
            sap_vnfd["name"] = sap_docker_name
            self.vnfds[sap_docker_name] = sap_vnfd
            # add SAP vnf to list in the NSD so it is deployed later on
            # each SAP get a unique VNFD and vnf_id in the NSD
            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from VNFDs and store them in dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-build docker images in some repo.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    if url is not None:
                        # strip the scheme; Docker registries are addressed without it
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[k] = url
                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            # build context is the directory that contains the Dockerfile
            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-build Docker images, we download them with this method.
        """
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            dc.pull(url,
                    insecure_registry=True)

    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists
        :param image_name: name of the docker image
        :return: True iff at least one local image matches the name
        """
        return len(DockerClient().images(image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do placement by adding the a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate algorithm an place
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # lets print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))

    def _calculate_cpu_cfs_values(self, cpu_time_percentage):
        """
        Calculate cpu period and quota for CFS
        :param cpu_time_percentage: percentage of overall CPU to be used
        :return: cpu_period, cpu_quota  (-1, -1 disables the CFS limit)
        """
        if cpu_time_percentage is None:
            return -1, -1
        if cpu_time_percentage < 0:
            return -1, -1
        # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
        # Attention minimum cpu_quota is 1ms (micro)
        cpu_period = 1000000  # lets consider a fixed period of 1000000 microseconds for now
        LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
        cpu_quota = cpu_period * cpu_time_percentage  # calculate the fraction of cpu time for this container
        # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
        if cpu_quota < 1000:
            LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
            cpu_quota = 1000
            LOG.warning("Increased CPU quota to avoid system error.")
        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
        return int(cpu_period), int(cpu_quota)
580
581
582 """
583 Some (simple) placement algorithms
584 """
585
586
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign the first data center to every VNFD (adds a "dc" field).
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict of VNF name -> VNFD dict (mutated in place)
        :param dcs: dict of DC name -> data center object
        """
        # Fixed py2-only iterators (iteritems/itervalues) and hoisted the
        # first-DC lookup out of the loop instead of rebuilding the list
        # for every VNFD.
        first_dc = list(dcs.values())[0]
        for vnfd in vnfds.values():
            vnfd["dc"] = first_dc
594
595
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign DCs to VNFDs round-robin (adds a "dc" field to each VNFD).
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict of VNF name -> VNFD dict (mutated in place)
        :param dcs: dict of DC name -> data center object
        """
        # Fixed py2-only iterators (iteritems/itervalues); enumerate replaces
        # the manual counter.
        dcs_list = list(dcs.values())
        for c, vnfd in enumerate(vnfds.values()):
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
606
607
608
609
610 """
611 Resource definitions and API endpoints
612 """
613
614
class Packages(fr.Resource):
    """REST resource to upload and list *.son service packages."""

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: dict with service_uuid, size, sha1 and error fields + HTTP code
        """
        try:
            # lets search for the package in the request
            LOG.debug("POST /packages with files: %r" % request.files)
            if "package" in request.files:
                son_file = request.files["package"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # hash the actual package contents; the previous code hashed
            # str(son_file), i.e. the repr of the FileStorage object, which
            # is not a digest of the uploaded data
            with open(upload_path, "rb") as f:
                file_hash = hashlib.sha1(f.read()).hexdigest()
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
        except Exception:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict/list
        """
        LOG.info("GET /packages")
        # list(dict) works on both py2 and py3 (iterkeys() was py2-only)
        return {"service_uuid_list": list(GK.services)}
659
660
class Instantiations(fr.Resource):
    """REST resource to start, list and stop service instances."""

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service in the list
            service_uuid = list(GK.services)[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}, 201
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations")
        # list(dict) / .values() work on both py2 and py3
        return {"service_instantiations_list": [
            list(s.instances) for s in GK.services.values()]}

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        instance_uuid = json_data.get("service_instance_uuid")

        # try to be fuzzy
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply stop the first service in the list
            service_uuid = list(GK.services)[0]
        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
            instance_uuid = list(GK.services[service_uuid].instances)[0]

        if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
            # valid service and instance UUID, stop service
            GK.services.get(service_uuid).stop_service(instance_uuid)
            return "service instance with uuid %r stopped." % instance_uuid, 200
        return "Service not found", 404
714
class Exit(fr.Resource):

    def put(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        # Stop the emulator network reachable via the first registered DC.
        # NOTE(review): assumes at least one DC is registered and that all
        # DCs reference the same underlying net object -- confirm.
        list(GK.dcs.values())[0].net.stop()
722
723
def initialize_GK():
    """
    (Re-)create the module-global Gatekeeper instance.
    Kept as a function so the gatekeeper state can be re-initialized.
    """
    global GK
    GK = Gatekeeper()
727
728
729
# create a single, global GK object (shared by all REST resources above)
GK = None
initialize_GK()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
api.add_resource(Exit, '/emulator/exit')
# (removed a dead, commented-out duplicate of initialize_GK that followed here)
747
def start_rest_api(host, port, datacenters=None):
    """
    Start the dummy gatekeeper REST API (blocking).
    :param host: interface to bind to, e.g. "0.0.0.0"
    :param port: TCP port for the HTTP server
    :param datacenters: dict of DC name -> data center object made known to the GK
    """
    # `datacenters=None` replaces the mutable default argument `dict()`;
    # behavior is unchanged for callers that pass nothing.
    GK.dcs = datacenters if datacenters is not None else dict()
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
756
757
def ensure_dir(name):
    """
    Create directory `name` (including parents) if it does not exist yet.
    EAFP form avoids the check-then-create race of the previous
    `if not exists: makedirs` version when two requests upload concurrently.
    :param name: directory path
    """
    try:
        os.makedirs(name)
    except OSError as e:
        # an already-existing directory is fine; anything else is re-raised
        if e.errno != errno.EEXIST or not os.path.isdir(name):
            raise
761
762
def load_yaml(path):
    """
    Load a YAML file and return its contents as a dict.
    Logs and returns an empty dict when the file cannot be parsed.
    :param path: path to the YAML file
    :return: parsed content (usually dict), or empty dict on parse error
    """
    with open(path, "r") as f:
        try:
            # safe_load instead of yaml.load: package contents are uploaded
            # by users and yaml.load would allow arbitrary Python object
            # construction from untrusted input
            r = yaml.safe_load(f)
        except yaml.YAMLError:
            LOG.exception("YAML parse error")
            r = dict()
    return r
771
772
def make_relative_path(path):
    """
    Normalize a descriptor reference into a path that can be joined onto
    the package content directory: drop an optional "file://" scheme
    prefix and a single leading slash.
    """
    scheme = "file://"
    if path.startswith(scheme):
        path = path[len(scheme):]
    if path.startswith("/"):
        path = path[1:]
    return path
779
780
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Builds "<prefix>.<base>.<ip>/<subnet_size>", e.g. "10.0.10.1/24".
    """
    return "{0}.{1}.{2}/{3}".format(prefix, base, ip, subnet_size)
787
788
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Yields n entries of the form "<i>.0.0.<ip>/<subnet_size>" with i
    counting upwards from `start`, collected into a list.
    """
    return ["%d.0.0.%d/%d" % (i, ip, subnet_size)
            for i in range(start, start + n)]
797
798
if __name__ == '__main__':
    """
    Lets allow to run the API in standalone mode.
    """
    # no emulator attached: REST API only, for integration testing
    GK_STANDALONE_MODE = True
    # be more verbose about HTTP requests when run interactively
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)
806