Merge pull request #214 from mpeuster/master
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """
34
35 import logging
36 import os
37 import uuid
38 import hashlib
39 import zipfile
40 import yaml
41 import threading
42 from docker import DockerClient, APIClient
43 from flask import Flask, request
44 import flask_restful as fr
45 from collections import defaultdict
46 import pkg_resources
47 from subprocess import Popen
48 from random import randint
49 import ipaddress
50
51 logging.basicConfig()
52 LOG = logging.getLogger("sonata-dummy-gatekeeper")
53 LOG.setLevel(logging.DEBUG)
54 logging.getLogger("werkzeug").setLevel(logging.WARNING)
55
56 GK_STORAGE = "/tmp/son-dummy-gk/"
57 UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
58 CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
59
60 # Enable Dockerfile build functionality
61 BUILD_DOCKERFILE = False
62
63 # flag to indicate that we run without the emulator (only the bare API for integration testing)
64 GK_STANDALONE_MODE = False
65
66 # should a new version of an image be pulled even if it's already available locally
67 FORCE_PULL = False
68
69 # Automatically deploy SAPs (endpoints) of the service as new containers
70 # Attention: This is not a configuration switch but a global variable! Don't change its default value.
71 DEPLOY_SAP = False
72
73 # flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
74 BIDIRECTIONAL_CHAIN = False
75
76 # override the management interfaces in the descriptors with default docker0 interfaces in the containers
77 USE_DOCKER_MGMT = True
78
79 def generate_subnets(prefix, base, subnet_size=50, mask=24):
80     # Generate a list of ipaddress.ip_network objects, one per consecutive subnet
81 r = list()
82 for net in range(base, base + subnet_size):
83 subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
84         r.append(ipaddress.ip_network(unicode(subnet)))  # unicode() is required by the Python 2 ipaddress backport
85 return r
86 # private subnet definitions for the generated interfaces
87 # 10.10.xxx.0/24
88 SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=24)
89 # 10.20.xxx.0/24
90 ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
91 # 10.30.xxx.0/30
92 ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
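# Illustrative example of generate_subnets(): the call above for ELINE_SUBNETS yields
# [ip_network(u'10.30.0.0/30'), ip_network(u'10.30.1.0/30'), ...]; every /30 provides two
# usable host addresses (e.g. 10.30.0.1 and 10.30.0.2), one per endpoint of an E-Line link.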
93
94
95 class Gatekeeper(object):
96
97 def __init__(self):
98 self.services = dict()
99 self.dcs = dict()
100 self.net = None
101 self.vnf_counter = 0 # used to generate short names for VNFs (Mininet limitation)
102 LOG.info("Create SONATA dummy gatekeeper.")
103
104 def register_service_package(self, service_uuid, service):
105 """
106         Register a new service package.
107         :param service_uuid: UUID under which the service package is registered
108         :param service: Service object that represents the package
109 """
110 self.services[service_uuid] = service
111 # lets perform all steps needed to onboard the service
112 service.onboard()
113
114 def get_next_vnf_name(self):
115 self.vnf_counter += 1
116 return "vnf%d" % self.vnf_counter
117
118
119 class Service(object):
120 """
121     This class represents a network service (NS) uploaded as a *.son package to the
122     dummy gatekeeper.
123     A service can have multiple running instances.
124 """
125
126 def __init__(self,
127 service_uuid,
128 package_file_hash,
129 package_file_path):
130 self.uuid = service_uuid
131 self.package_file_hash = package_file_hash
132 self.package_file_path = package_file_path
133 self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
134 self.manifest = None
135 self.nsd = None
136 self.vnfds = dict()
137 self.saps = dict()
138 self.saps_ext = list()
139 self.saps_int = list()
140 self.local_docker_files = dict()
141 self.remote_docker_image_urls = dict()
142 self.instances = dict()
143 self.vnf_name2docker_name = dict()
144 self.vnf_id2vnf_name = dict()
145
146 def onboard(self):
147 """
148 Do all steps to prepare this service to be instantiated
149 :return:
150 """
151 # 1. extract the contents of the package and store them in our catalog
152 self._unpack_service_package()
153 # 2. read in all descriptor files
154 self._load_package_descriptor()
155 self._load_nsd()
156 self._load_vnfd()
157 if DEPLOY_SAP:
158 self._load_saps()
159 # create dict to translate vnf names
160 self.vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
161 reduce(lambda x, y: dict(x, **y),
162 map(lambda d: {d["vnf_id"]: d["vnf_name"]},
163 self.nsd["network_functions"])))
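        # Note: the reduce/map chain above merges the per-entry {vnf_id: vnf_name} dicts from
        # nsd["network_functions"] into a single lookup table; ids not defined in the NSD resolve
        # to the defaultdict fallback value "NotExistingNode".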
164 # 3. prepare container images (e.g. download or build Dockerfile)
165 if BUILD_DOCKERFILE:
166 self._load_docker_files()
167 self._build_images_from_dockerfiles()
168 else:
169 self._load_docker_urls()
170 self._pull_predefined_dockerimages()
171 LOG.info("On-boarded service: %r" % self.manifest.get("name"))
172
173 def start_service(self):
174 """
175         This method creates and starts a new service instance.
176 It computes placements, iterates over all VNFDs, and starts
177 each VNFD as a Docker container in the data center selected
178 by the placement algorithm.
179 :return:
180 """
181 LOG.info("Starting service %r" % self.uuid)
182
183 # 1. each service instance gets a new uuid to identify it
184 instance_uuid = str(uuid.uuid4())
185         # build an instances dict (a bit like an NSR :))
186 self.instances[instance_uuid] = dict()
187 self.instances[instance_uuid]["vnf_instances"] = list()
188
189 # 2. compute placement of this service instance (adds DC names to VNFDs)
190 if not GK_STANDALONE_MODE:
191 #self._calculate_placement(FirstDcPlacement)
192 self._calculate_placement(RoundRobinDcPlacementWithSAPs)
193
194 # 3. start all vnfds that we have in the service (except SAPs)
195 for vnfd in self.vnfds.itervalues():
196 vnfi = None
197 if not GK_STANDALONE_MODE:
198 vnfi = self._start_vnfd(vnfd)
199 self.instances[instance_uuid]["vnf_instances"].append(vnfi)
200
201 # 4. start all SAPs in the service
202 for sap in self.saps:
203 self._start_sap(self.saps[sap], instance_uuid)
204
205         # 5. Deploy E-Line and E-LAN links
206 if "virtual_links" in self.nsd:
207 vlinks = self.nsd["virtual_links"]
208 # constituent virtual links are not checked
209 #fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
210 eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
211 elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
212
213 GK.net.deployed_elines.extend(eline_fwd_links)
214 GK.net.deployed_elans.extend(elan_fwd_links)
215
216 # 5a. deploy E-Line links
217 self._connect_elines(eline_fwd_links, instance_uuid)
218
219 # 5b. deploy E-LAN links
220 self._connect_elans(elan_fwd_links, instance_uuid)
221
222 # 6. run the emulator specific entrypoint scripts in the VNFIs of this service instance
223 self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])
224
225 LOG.info("Service started. Instance id: %r" % instance_uuid)
226 return instance_uuid
227
228 def stop_service(self, instance_uuid):
229 """
230 This method stops a running service instance.
231         It iterates over all VNF instances, stopping each of them
232         and removing it from its data center.
233
234 :param instance_uuid: the uuid of the service instance to be stopped
235 """
236 LOG.info("Stopping service %r" % self.uuid)
237 # get relevant information
238 # instance_uuid = str(self.uuid.uuid4())
239 vnf_instances = self.instances[instance_uuid]["vnf_instances"]
240
241 for v in vnf_instances:
242 self._stop_vnfi(v)
243
244 if not GK_STANDALONE_MODE:
245 # remove placement?
246 # self._remove_placement(RoundRobinPlacement)
247             pass  # placement removal is not implemented yet
248
249 # last step: remove the instance from the list of all instances
250 del self.instances[instance_uuid]
251
252 def _start_vnfd(self, vnfd):
253 """
254 Start a single VNFD of this service
255 :param vnfd: vnfd descriptor dict
256 :return:
257 """
258         # iterate over all deployment units within this VNFD
259 for u in vnfd.get("virtual_deployment_units"):
260 # 1. get the name of the docker image to start and the assigned DC
261 vnf_name = vnfd.get("name")
262 if vnf_name not in self.remote_docker_image_urls:
263 raise Exception("No image name for %r found. Abort." % vnf_name)
264 docker_name = self.remote_docker_image_urls.get(vnf_name)
265 target_dc = vnfd.get("dc")
266 # 2. perform some checks to ensure we can start the container
267 assert(docker_name is not None)
268 assert(target_dc is not None)
269 if not self._check_docker_image_exists(docker_name):
270 raise Exception("Docker image %r not found. Abort." % docker_name)
271
272 # 3. get the resource limits
273 res_req = u.get("resource_requirements")
274 cpu_list = res_req.get("cpu").get("cores")
275 if cpu_list is None:
276 cpu_list = res_req.get("cpu").get("vcpus")
277 if cpu_list is None:
278 cpu_list="1"
279 cpu_bw = res_req.get("cpu").get("cpu_bw")
280 if not cpu_bw:
281 cpu_bw=1
282 mem_num = str(res_req.get("memory").get("size"))
283 if len(mem_num)==0:
284 mem_num="2"
285 mem_unit = str(res_req.get("memory").get("size_unit"))
286             if len(mem_unit) == 0:
287 mem_unit="GB"
288 mem_limit = float(mem_num)
289 if mem_unit=="GB":
290 mem_limit=mem_limit*1024*1024*1024
291 elif mem_unit=="MB":
292 mem_limit=mem_limit*1024*1024
293 elif mem_unit=="KB":
294 mem_limit=mem_limit*1024
295 mem_lim = int(mem_limit)
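            # Illustrative example (hypothetical descriptor values): a resource_requirements entry
            # of {"memory": {"size": 512, "size_unit": "MB"}} results in mem_lim = 512 * 1024 * 1024 bytes.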
296 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
297
298 vnf_name2id = defaultdict(lambda: "NotExistingNode",
299 reduce(lambda x, y: dict(x, **y),
300 map(lambda d: {d["vnf_name"]: d["vnf_id"]},
301 self.nsd["network_functions"])))
302
303             # check if we need to deploy the management ports (defined as type:management in both the VNFD and the NSD)
304 intfs = vnfd.get("connection_points", [])
305 mgmt_intf_names = []
306 if USE_DOCKER_MGMT:
307 vnf_id = vnf_name2id[vnf_name]
308 mgmt_intfs = [vnf_id + ':' + intf['id'] for intf in intfs if intf.get('type') == 'management']
309 # check if any of these management interfaces are used in a management-type network in the nsd
310 for nsd_intf_name in mgmt_intfs:
311 vlinks = [ l["connection_points_reference"] for l in self.nsd.get("virtual_links", [])]
312 for link in vlinks:
313 if nsd_intf_name in link and self.check_mgmt_interface(link):
314 # this is indeed a management interface and can be skipped
315 vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(nsd_intf_name)
316 found_interfaces = [intf for intf in intfs if intf.get('id') == vnf_interface]
317 intfs.remove(found_interfaces[0])
318 mgmt_intf_names.append(vnf_interface)
319
320 # 4. generate the volume paths for the docker container
321 volumes=list()
322 # a volume to extract log files
323 docker_log_path = "/tmp/results/%s/%s"%(self.uuid,vnf_name)
324 LOG.debug("LOG path for vnf %s is %s."%(vnf_name,docker_log_path))
325 if not os.path.exists(docker_log_path):
326 LOG.debug("Creating folder %s"%docker_log_path)
327 os.makedirs(docker_log_path)
328
329 volumes.append(docker_log_path+":/mnt/share/")
330
331
332 # 5. do the dc.startCompute(name="foobar") call to run the container
333             # TODO: consider flavors and other annotations
334             # TODO: get all vnf ids from the nsd for this vnfd and use those as docker names
335 # use the vnf_id in the nsd as docker name
336 # so deployed containers can be easily mapped back to the nsd
337
338 self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
339
340 LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
341 LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
342 vnfi = target_dc.startCompute(
343 self.vnf_name2docker_name[vnf_name],
344 network=intfs,
345 image=docker_name,
346 flavor_name="small",
347 cpu_quota=cpu_quota,
348 cpu_period=cpu_period,
349 cpuset=cpu_list,
350 mem_limit=mem_lim,
351 volumes=volumes)
352
353             # rename the docker0 interface (eth0) to the management port name defined in the VNFD
354 if USE_DOCKER_MGMT:
355 for intf_name in mgmt_intf_names:
356 self._vnf_reconfigure_network(vnfi, 'eth0', new_name=intf_name)
357
358 return vnfi
359
360 def _stop_vnfi(self, vnfi):
361 """
362 Stop a VNF instance.
363
364 :param vnfi: vnf instance to be stopped
365 """
366 # Find the correct datacenter
367 status = vnfi.getStatus()
368 dc = vnfi.datacenter
369
370 # stop the vnfi
371         LOG.info("Stopping vnf instance %r in DC %r" % (status["name"], dc))
372 dc.stopCompute(status["name"])
373
374 def _get_vnf_instance(self, instance_uuid, name):
375 """
376 Returns the Docker object for the given VNF name (or Docker name).
377 :param instance_uuid: UUID of the service instance to search in.
378 :param name: VNF name or Docker name. We are fuzzy here.
379 :return:
380 """
381 dn = name
382 if name in self.vnf_name2docker_name:
383 dn = self.vnf_name2docker_name[name]
384 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
385 if vnfi.name == dn:
386 return vnfi
387 LOG.warning("No container with name: {0} found.".format(dn))
388 return None
389
390 @staticmethod
391 def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
392 """
393 Reconfigure the network configuration of a specific interface
394 of a running container.
395 :param vnfi: container instance
396 :param if_name: interface name
397 :param net_str: network configuration string, e.g., 1.2.3.4/24
398 :return:
399 """
400
401 # assign new ip address
402 if net_str is not None:
403 intf = vnfi.intf(intf=if_name)
404 if intf is not None:
405 intf.setIP(net_str)
406 LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
407 else:
408 LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))
409
410 if new_name is not None:
411 vnfi.cmd('ip link set', if_name, 'down')
412 vnfi.cmd('ip link set', if_name, 'name', new_name)
413 vnfi.cmd('ip link set', new_name, 'up')
414 LOG.debug("Reconfigured interface name of %s:%s to %s" % (vnfi.name, if_name, new_name))
415
416
417
418 def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
419 for vnfi in vnfi_list:
420 config = vnfi.dcinfo.get("Config", dict())
421 env = config.get("Env", list())
422 for env_var in env:
423 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
424                 LOG.debug("%r = %r" % (var, cmd))
425 if var=="SON_EMU_CMD":
426 LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
427 # execute command in new thread to ensure that GK is not blocked by VNF
428 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
429 t.daemon = True
430 t.start()
431
432 def _unpack_service_package(self):
433 """
434 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
435 """
436 LOG.info("Unzipping: %r" % self.package_file_path)
437 with zipfile.ZipFile(self.package_file_path, "r") as z:
438 z.extractall(self.package_content_path)
439
440
441 def _load_package_descriptor(self):
442 """
443 Load the main package descriptor YAML and keep it as dict.
444 :return:
445 """
446 self.manifest = load_yaml(
447 os.path.join(
448 self.package_content_path, "META-INF/MANIFEST.MF"))
449
450 def _load_nsd(self):
451 """
452 Load the entry NSD YAML and keep it as dict.
453 :return:
454 """
455 if "entry_service_template" in self.manifest:
456 nsd_path = os.path.join(
457 self.package_content_path,
458 make_relative_path(self.manifest.get("entry_service_template")))
459 self.nsd = load_yaml(nsd_path)
460 GK.net.deployed_nsds.append(self.nsd)
461
462 LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
463
464 def _load_vnfd(self):
465 """
466 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
467 :return:
468 """
469 if "package_content" in self.manifest:
470 for pc in self.manifest.get("package_content"):
471 if pc.get("content-type") == "application/sonata.function_descriptor":
472 vnfd_path = os.path.join(
473 self.package_content_path,
474 make_relative_path(pc.get("name")))
475 vnfd = load_yaml(vnfd_path)
476 self.vnfds[vnfd.get("name")] = vnfd
477 LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))
478
479 def _load_saps(self):
480 # create list of all SAPs
481 # check if we need to deploy management ports
482 if USE_DOCKER_MGMT:
483 LOG.debug("nsd: {0}".format(self.nsd))
484 SAPs = [p for p in self.nsd["connection_points"] if 'management' not in p.get('type')]
485 else:
486 SAPs = [p for p in self.nsd["connection_points"]]
487
488 for sap in SAPs:
489 # endpoint needed in this service
490 sap_id, sap_interface, sap_docker_name = parse_interface(sap['id'])
491 # make sure SAP has type set (default internal)
492 sap["type"] = sap.get("type", 'internal')
493
494 # Each Service Access Point (connection_point) in the nsd is an IP address on the host
495 if sap["type"] == "external":
496 # add to vnfds to calculate placement later on
497 sap_net = SAP_SUBNETS.pop(0)
498 self.saps[sap_docker_name] = {"name": sap_docker_name , "type": "external", "net": sap_net}
499 # add SAP vnf to list in the NSD so it is deployed later on
500                 # each SAP gets a unique VNFD and vnf_id in the NSD and a custom type (only defined in the dummygatekeeper)
501 self.nsd["network_functions"].append(
502 {"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_ext"})
503
504 # Each Service Access Point (connection_point) in the nsd is getting its own container (default)
505 elif sap["type"] == "internal" or sap["type"] == "management":
506 # add SAP to self.vnfds
507 sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
508 sap_vnfd = load_yaml(sapfile)
509 sap_vnfd["connection_points"][0]["id"] = sap_interface
510 sap_vnfd["name"] = sap_docker_name
511 sap_vnfd["type"] = "internal"
512 # add to vnfds to calculate placement later on and deploy
513 self.saps[sap_docker_name] = sap_vnfd
514 # add SAP vnf to list in the NSD so it is deployed later on
515                 # each SAP gets a unique VNFD and vnf_id in the NSD
516 self.nsd["network_functions"].append(
517 {"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_int"})
518
519 LOG.debug("Loaded SAP: name: {0}, type: {1}".format(sap_docker_name, sap['type']))
520
521 # create sap lists
522 self.saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
523 self.saps_int = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "internal"]
524
525 def _start_sap(self, sap, instance_uuid):
526 if not DEPLOY_SAP:
527 return
528
529         LOG.info('Starting SAP: {0}, type: {1}'.format(sap['name'], sap['type']))
530 if sap["type"] == "internal":
531 vnfi = None
532 if not GK_STANDALONE_MODE:
533 vnfi = self._start_vnfd(sap)
534 self.instances[instance_uuid]["vnf_instances"].append(vnfi)
535
536 elif sap["type"] == "external":
537 target_dc = sap.get("dc")
538 # add interface to dc switch
539 target_dc.attachExternalSAP(sap['name'], str(sap['net']))
540
541 def _connect_elines(self, eline_fwd_links, instance_uuid):
542 """
543 Connect all E-LINE links in the NSD
544 :param eline_fwd_links: list of E-LINE links in the NSD
545 :param: instance_uuid of the service
546 :return:
547 """
548         # the cookie is used as an identifier for the flow rules installed by the dummygatekeeper,
549         # e.g. different services get a unique cookie for their flow rules
550 cookie = 1
551 for link in eline_fwd_links:
552             # do not deploy this link if it is a management link:
553 if USE_DOCKER_MGMT:
554 if self.check_mgmt_interface(link["connection_points_reference"]):
555 continue
556
557 src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
558 dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
559
560 setChaining = False
561 # check if there is a SAP in the link and chain everything together
562 if src_sap_id in self.saps and dst_sap_id in self.saps:
563                 LOG.info('Two SAPs cannot be chained together: {0} - {1}'.format(src_sap_id, dst_sap_id))
564 continue
565
566 elif src_sap_id in self.saps_ext:
567 src_id = src_sap_id
568 src_if_name = src_sap_id
569 src_name = self.vnf_id2vnf_name[src_id]
570 dst_name = self.vnf_id2vnf_name[dst_id]
571 dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
572 if dst_vnfi is not None:
573 # choose first ip address in sap subnet
574 sap_net = self.saps[src_sap_id]['net']
575 sap_ip = "{0}/{1}".format(str(sap_net[1]), sap_net.prefixlen)
576 self._vnf_reconfigure_network(dst_vnfi, dst_if_name, sap_ip)
577 setChaining = True
578
579 elif dst_sap_id in self.saps_ext:
580 dst_id = dst_sap_id
581 dst_if_name = dst_sap_id
582 src_name = self.vnf_id2vnf_name[src_id]
583 dst_name = self.vnf_id2vnf_name[dst_id]
584 src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
585 if src_vnfi is not None:
586 sap_net = self.saps[dst_sap_id]['net']
587 sap_ip = "{0}/{1}".format(str(sap_net[1]), sap_net.prefixlen)
588 self._vnf_reconfigure_network(src_vnfi, src_if_name, sap_ip)
589 setChaining = True
590
591 # Link between 2 VNFs
592 else:
593 # make sure we use the correct sap vnf name
594 if src_sap_id in self.saps_int:
595 src_id = src_sap_id
596 if dst_sap_id in self.saps_int:
597 dst_id = dst_sap_id
598 src_name = self.vnf_id2vnf_name[src_id]
599 dst_name = self.vnf_id2vnf_name[dst_id]
600                 # re-configure the VNFs' IP assignment and ensure that a new subnet is used for each E-Line
601 src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
602 dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
603 if src_vnfi is not None and dst_vnfi is not None:
604 eline_net = ELINE_SUBNETS.pop(0)
605 ip1 = "{0}/{1}".format(str(eline_net[1]), eline_net.prefixlen)
606 ip2 = "{0}/{1}".format(str(eline_net[2]), eline_net.prefixlen)
607 self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
608 self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
609 setChaining = True
610
611 # Set the chaining
612 if setChaining:
613 ret = GK.net.setChain(
614 src_id, dst_id,
615 vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
616 bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
617 LOG.debug(
618 "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
619 src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))
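            # Note: the cookie keeps its initial value of 1 here, so all E-Line flow rules installed
            # for this service instance share the same cookie identifier.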
620
621
622 def _connect_elans(self, elan_fwd_links, instance_uuid):
623 """
624 Connect all E-LAN links in the NSD
625 :param elan_fwd_links: list of E-LAN links in the NSD
626 :param: instance_uuid of the service
627 :return:
628 """
629 for link in elan_fwd_links:
630             # do not deploy this link if it is a management link:
631 if USE_DOCKER_MGMT:
632 if self.check_mgmt_interface(link["connection_points_reference"]):
633 continue
634
635 elan_vnf_list = []
636 # check if an external SAP is in the E-LAN (then a subnet is already defined)
637 intfs_elan = [intf for intf in link["connection_points_reference"]]
638 lan_sap = self.check_ext_saps(intfs_elan)
639 if lan_sap:
640 lan_net = self.saps[lan_sap]['net']
641 lan_hosts = list(lan_net.hosts())
642 sap_ip = str(lan_hosts.pop(0))
643 else:
644 lan_net = ELAN_SUBNETS.pop(0)
645 lan_hosts = list(lan_net.hosts())
646
647             # generate a LAN ip address for all interfaces except external SAPs
648 for intf in link["connection_points_reference"]:
649
650 # skip external SAPs, they already have an ip
651 vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf)
652 if vnf_sap_docker_name in self.saps_ext:
653 elan_vnf_list.append({'name': vnf_sap_docker_name, 'interface': vnf_interface})
654 continue
655
656 ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)), lan_net.prefixlen)
657 vnf_id, intf_name, vnf_sap_id = parse_interface(intf)
658
659 # make sure we use the correct sap vnf name
660 src_docker_name = vnf_id
661 if vnf_sap_id in self.saps_int:
662 src_docker_name = vnf_sap_id
663 vnf_id = vnf_sap_id
664
665 vnf_name = self.vnf_id2vnf_name[vnf_id]
666 LOG.debug(
667 "Setting up E-LAN interface. %s(%s:%s) -> %s" % (
668 vnf_name, vnf_id, intf_name, ip_address))
669
670 if vnf_name in self.vnfds:
671                     # re-configure the VNFs' IP assignment and ensure that a new subnet is used for each E-LAN
672 # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
673 # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
674 vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
675 if vnfi is not None:
676 self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
677 # add this vnf and interface to the E-LAN for tagging
678 elan_vnf_list.append({'name': src_docker_name, 'interface': intf_name})
679
680 # install the VLAN tags for this E-LAN
681 GK.net.setLAN(elan_vnf_list)
682
683
684 def _load_docker_files(self):
685 """
686 Get all paths to Dockerfiles from VNFDs and store them in dict.
687 :return:
688 """
689 for k, v in self.vnfds.iteritems():
690 for vu in v.get("virtual_deployment_units"):
691 if vu.get("vm_image_format") == "docker":
692 vm_image = vu.get("vm_image")
693 docker_path = os.path.join(
694 self.package_content_path,
695 make_relative_path(vm_image))
696 self.local_docker_files[k] = docker_path
697 LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))
698
699 def _load_docker_urls(self):
700 """
701         Get all URLs to pre-built docker images in some repository.
702 :return:
703 """
704 # also merge sap dicts, because internal saps also need a docker container
705 all_vnfs = self.vnfds.copy()
706 all_vnfs.update(self.saps)
707
708 for k, v in all_vnfs.iteritems():
709 for vu in v.get("virtual_deployment_units", {}):
710 if vu.get("vm_image_format") == "docker":
711 url = vu.get("vm_image")
712 if url is not None:
713 url = url.replace("http://", "")
714 self.remote_docker_image_urls[k] = url
715 LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))
716
717 def _build_images_from_dockerfiles(self):
718 """
719 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
720 """
721 if GK_STANDALONE_MODE:
722 return # do not build anything in standalone mode
723         dc = APIClient()  # the low-level APIClient provides the streaming build() used below
724 LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
725 for k, v in self.local_docker_files.iteritems():
726 for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
727 LOG.debug("DOCKER BUILD: %s" % line)
728 LOG.info("Docker image created: %s" % k)
729
730 def _pull_predefined_dockerimages(self):
731 """
732         If the package contains URLs to pre-built Docker images, we download them with this method.
733 """
734 dc = DockerClient()
735 for url in self.remote_docker_image_urls.itervalues():
736 if not FORCE_PULL: # only pull if not present (speedup for development)
737 if len(dc.images.list(name=url)) > 0:
738 LOG.debug("Image %r present. Skipping pull." % url)
739 continue
740 LOG.info("Pulling image: %r" % url)
741 # this seems to fail with latest docker api version 2.0.2
742 # dc.images.pull(url,
743 # insecure_registry=True)
744             # use the docker CLI instead
745 cmd = ["docker",
746 "pull",
747 url,
748 ]
749 Popen(cmd).wait()
750
751
752
753
754 def _check_docker_image_exists(self, image_name):
755 """
756 Query the docker service and check if the given image exists
757 :param image_name: name of the docker image
758 :return:
759 """
760 return len(DockerClient().images.list(name=image_name)) > 0
761
762 def _calculate_placement(self, algorithm):
763 """
764         Do placement by adding a field "dc" to
765 each VNFD that points to one of our
766 data center objects known to the gatekeeper.
767 """
768 assert(len(self.vnfds) > 0)
769 assert(len(GK.dcs) > 0)
770         # instantiate the algorithm and do the placement
771 p = algorithm()
772 p.place(self.nsd, self.vnfds, self.saps, GK.dcs)
773 LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
774         # let's print the placement result
775 for name, vnfd in self.vnfds.iteritems():
776 LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
777 for sap in self.saps:
778 sap_dict = self.saps[sap]
779 LOG.info("Placed SAP %r on DC %r" % (sap, str(sap_dict.get("dc"))))
780
781
782 def _calculate_cpu_cfs_values(self, cpu_time_percentage):
783 """
784 Calculate cpu period and quota for CFS
785 :param cpu_time_percentage: percentage of overall CPU to be used
786 :return: cpu_period, cpu_quota
787 """
788 if cpu_time_percentage is None:
789 return -1, -1
790 if cpu_time_percentage < 0:
791 return -1, -1
792 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
793         # Attention: the minimum cpu_quota is 1000 microseconds (1 ms)
794         cpu_period = 1000000  # let's consider a fixed period of 1000000 microseconds for now
795 LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
796 cpu_quota = cpu_period * cpu_time_percentage # calculate the fraction of cpu time for this container
797         # ATTENTION: keep cpu_quota >= 1000 to avoid an 'invalid argument' system error (reason unclear)
798 if cpu_quota < 1000:
799 LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
800 cpu_quota = 1000
801 LOG.warning("Increased CPU quota to avoid system error.")
802 LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
803 return int(cpu_period), int(cpu_quota)
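        # Worked example (illustrative values): cpu_time_percentage = 0.25 yields
        # cpu_period = 1000000 and cpu_quota = 250000, i.e. 25% of one CPU per CFS period.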
804
805 def check_ext_saps(self, intf_list):
806         # check if the list of interfaces contains an external SAP
807 saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
808 for intf_name in intf_list:
809 vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf_name)
810 if vnf_sap_docker_name in saps_ext:
811 return vnf_sap_docker_name
812
813 def check_mgmt_interface(self, intf_list):
814 SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"] if 'management' in p.get('type')]
815 for intf_name in intf_list:
816 if intf_name in SAPs_mgmt:
817 return True
818
819 """
820 Some (simple) placement algorithms
821 """
822
823
824 class FirstDcPlacement(object):
825 """
826 Placement: Always use one and the same data center from the GK.dcs dict.
827 """
828 def place(self, nsd, vnfds, saps, dcs):
829 for name, vnfd in vnfds.iteritems():
830 vnfd["dc"] = list(dcs.itervalues())[0]
831
832
833 class RoundRobinDcPlacement(object):
834 """
835 Placement: Distribute VNFs across all available DCs in a round robin fashion.
836 """
837 def place(self, nsd, vnfds, saps, dcs):
838 c = 0
839 dcs_list = list(dcs.itervalues())
840 for name, vnfd in vnfds.iteritems():
841 vnfd["dc"] = dcs_list[c % len(dcs_list)]
842 c += 1 # inc. c to use next DC
843
844 class RoundRobinDcPlacementWithSAPs(object):
845 """
846 Placement: Distribute VNFs across all available DCs in a round robin fashion,
847 every SAP is instantiated on the same DC as the connected VNF.
848 """
849 def place(self, nsd, vnfds, saps, dcs):
850
851 # place vnfs
852 c = 0
853 dcs_list = list(dcs.itervalues())
854 for name, vnfd in vnfds.iteritems():
855 vnfd["dc"] = dcs_list[c % len(dcs_list)]
856 c += 1 # inc. c to use next DC
857
858 # place SAPs
859 vlinks = nsd.get("virtual_links", [])
860 eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
861 elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
862
863 vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
864 reduce(lambda x, y: dict(x, **y),
865 map(lambda d: {d["vnf_id"]: d["vnf_name"]},
866 nsd["network_functions"])))
867
868 # SAPs on E-Line links are placed on the same DC as the VNF on the E-Line
869 for link in eline_fwd_links:
870 src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
871 dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
872
873 # check if there is a SAP in the link
874 if src_sap_id in saps:
875 dst_vnf_name = vnf_id2vnf_name[dst_id]
876 # get dc where connected vnf is mapped to
877 dc = vnfds[dst_vnf_name]['dc']
878 saps[src_sap_id]['dc'] = dc
879
880 if dst_sap_id in saps:
881 src_vnf_name = vnf_id2vnf_name[src_id]
882 # get dc where connected vnf is mapped to
883 dc = vnfds[src_vnf_name]['dc']
884 saps[dst_sap_id]['dc'] = dc
885
886 # SAPs on E-LANs are placed on a random DC
887 dcs_list = list(dcs.itervalues())
888 dc_len = len(dcs_list)
889 for link in elan_fwd_links:
890 for intf in link["connection_points_reference"]:
891 # find SAP interfaces
892 intf_id, intf_name, intf_sap_id = parse_interface(intf)
893 if intf_sap_id in saps:
894 dc = dcs_list[randint(0, dc_len-1)]
895 saps[intf_sap_id]['dc'] = dc
896
897
898
899 """
900 Resource definitions and API endpoints
901 """
902
903
904 class Packages(fr.Resource):
905
906 def post(self):
907 """
908 Upload a *.son service package to the dummy gatekeeper.
909
910 We expect request with a *.son file and store it in UPLOAD_FOLDER
911 :return: UUID
912 """
913 try:
914 # get file contents
915 LOG.info("POST /packages called")
916 # lets search for the package in the request
917 is_file_object = False # make API more robust: file can be in data or in files field
918 if "package" in request.files:
919 son_file = request.files["package"]
920 is_file_object = True
921 elif len(request.data) > 0:
922 son_file = request.data
923 else:
924 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
925 # generate a uuid to reference this package
926 service_uuid = str(uuid.uuid4())
927 file_hash = hashlib.sha1(str(son_file)).hexdigest()
928 # ensure that upload folder exists
929 ensure_dir(UPLOAD_FOLDER)
930 upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
931 # store *.son file to disk
932 if is_file_object:
933 son_file.save(upload_path)
934 else:
935 with open(upload_path, 'wb') as f:
936 f.write(son_file)
937 size = os.path.getsize(upload_path)
938 # create a service object and register it
939 s = Service(service_uuid, file_hash, upload_path)
940 GK.register_service_package(service_uuid, s)
941 # generate the JSON result
942 return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
943 except Exception as ex:
944 LOG.exception("Service package upload failed:")
945 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
946
947 def get(self):
948 """
949 Return a list of UUID's of uploaded service packages.
950 :return: dict/list
951 """
952 LOG.info("GET /packages")
953 return {"service_uuid_list": list(GK.services.iterkeys())}
954
955
956 class Instantiations(fr.Resource):
957
958 def post(self):
959 """
960 Instantiate a service specified by its UUID.
961 Will return a new UUID to identify the running service instance.
962 :return: UUID
963 """
964 LOG.info("POST /instantiations (or /requests) called")
965 # try to extract the service uuid from the request
966 json_data = request.get_json(force=True)
967 service_uuid = json_data.get("service_uuid")
968
969 # lets be a bit fuzzy here to make testing easier
970 if (service_uuid is None or service_uuid=="latest") and len(GK.services) > 0:
971             # if we don't get a service uuid, we simply start the first service in the list
972 service_uuid = list(GK.services.iterkeys())[0]
973 if service_uuid in GK.services:
974 # ok, we have a service uuid, lets start the service
975 service_instance_uuid = GK.services.get(service_uuid).start_service()
976 return {"service_instance_uuid": service_instance_uuid}, 201
977 return "Service not found", 404
978
979 def get(self):
980 """
981 Returns a list of UUIDs containing all running services.
982 :return: dict / list
983 """
984 LOG.info("GET /instantiations")
985 return {"service_instantiations_list": [
986 list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
987
988 def delete(self):
989 """
990 Stops a running service specified by its service and instance UUID.
991 """
992 # try to extract the service and instance UUID from the request
993 json_data = request.get_json(force=True)
994 service_uuid = json_data.get("service_uuid")
995 instance_uuid = json_data.get("service_instance_uuid")
996
997 # try to be fuzzy
998 if service_uuid is None and len(GK.services) > 0:
999             # if we don't get a service uuid, we simply stop the first service in the list
1000 service_uuid = list(GK.services.iterkeys())[0]
1001 if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
1002 instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]
1003
1004 if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
1005 # valid service and instance UUID, stop service
1006 GK.services.get(service_uuid).stop_service(instance_uuid)
1007             return "service instance with uuid %r stopped." % instance_uuid, 200
1008 return "Service not found", 404
1009
1010 class Exit(fr.Resource):
1011
1012 def put(self):
1013 """
1014 Stop the running Containernet instance regardless of data transmitted
1015 """
1016 list(GK.dcs.values())[0].net.stop()
1017
1018
1019 def initialize_GK():
1020 global GK
1021 GK = Gatekeeper()
1022
1023
1024
1025 # create a single, global GK object
1026 GK = None
1027 initialize_GK()
1028 # setup Flask
1029 app = Flask(__name__)
1030 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
1031 api = fr.Api(app)
1032 # define endpoints
1033 api.add_resource(Packages, '/packages', '/api/v2/packages')
1034 api.add_resource(Instantiations, '/instantiations', '/api/v2/instantiations', '/api/v2/requests')
1035 api.add_resource(Exit, '/emulator/exit')
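# Example interaction with this API (assumptions: the API was started on port 8000 as in the
# __main__ block below; host and package file name are placeholders):
#   curl -X POST -F package=@my_service.son http://127.0.0.1:8000/packages
#   curl -X POST -d '{"service_uuid": "latest"}' http://127.0.0.1:8000/instantiations
# The first call returns the generated service_uuid, the second one the service_instance_uuid
# of the newly started service instance.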
1036
1037
1038
1039 def start_rest_api(host, port, datacenters=dict()):
1040 GK.dcs = datacenters
1041 GK.net = get_dc_network()
1042 # start the Flask server (not the best performance but ok for our use case)
1043 app.run(host=host,
1044 port=port,
1045 debug=True,
1046 use_reloader=False # this is needed to run Flask in a non-main thread
1047 )
1048
1049
1050 def ensure_dir(name):
1051 if not os.path.exists(name):
1052 os.makedirs(name)
1053
1054
1055 def load_yaml(path):
1056 with open(path, "r") as f:
1057 try:
1058             r = yaml.load(f)  # note: yaml.safe_load would be safer for untrusted input
1059 except yaml.YAMLError as exc:
1060 LOG.exception("YAML parse error")
1061 r = dict()
1062 return r
1063
1064
1065 def make_relative_path(path):
1066 if path.startswith("file://"):
1067 path = path.replace("file://", "", 1)
1068 if path.startswith("/"):
1069 path = path.replace("/", "", 1)
1070 return path
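    # Example: make_relative_path("file:///folder/vnfd.yml") returns "folder/vnfd.yml", which the
    # callers above join with the package content path.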
1071
1072
1073 def get_dc_network():
1074 """
1075     Retrieve the DCNetwork that this dummy gatekeeper (GK) connects to.
1076     Assume at least one data center is connected to this GK and that all data centers belong to the same DCNetwork.
1077 :return:
1078 """
1079 assert (len(GK.dcs) > 0)
1080 return GK.dcs.values()[0].net
1081
1082
1083 def parse_interface(interface_name):
1084 """
1085     convert the interface name in the nsd to the corresponding vnf_id and vnf_interface names
1086 :param interface_name:
1087 :return:
1088 """
1089
1090 if ':' in interface_name:
1091 vnf_id, vnf_interface = interface_name.split(':')
1092 vnf_sap_docker_name = interface_name.replace(':', '_')
1093 else:
1094 vnf_id = interface_name
1095 vnf_interface = interface_name
1096 vnf_sap_docker_name = interface_name
1097
1098 return vnf_id, vnf_interface, vnf_sap_docker_name
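    # Example: parse_interface("vnf1:input") returns ("vnf1", "input", "vnf1_input"); a name
    # without a colon, e.g. "mgmt", is returned unchanged for all three values.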
1099
1100 if __name__ == '__main__':
1101 """
1102     Allow running the API in standalone mode.
1103 """
1104 GK_STANDALONE_MODE = True
1105 logging.getLogger("werkzeug").setLevel(logging.INFO)
1106 start_rest_api("0.0.0.0", 8000)
1107