Add flag to the topology file to auto-deploy an uploaded service.
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 """
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
30
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
33 """
34
35 import logging
36 import os
37 import uuid
38 import hashlib
39 import zipfile
40 import yaml
41 import threading
42 from docker import DockerClient, APIClient
43 from flask import Flask, request
44 import flask_restful as fr
45 from collections import defaultdict
46 import pkg_resources
47 from subprocess import Popen
48 from random import randint
49 import ipaddress
50
51 logging.basicConfig()
52 LOG = logging.getLogger("sonata-dummy-gatekeeper")
53 LOG.setLevel(logging.DEBUG)
54 logging.getLogger("werkzeug").setLevel(logging.WARNING)
55
56 GK_STORAGE = "/tmp/son-dummy-gk/"
57 UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
58 CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
59
60 # Enable Dockerfile build functionality
61 BUILD_DOCKERFILE = False
62
63 # flag to indicate that we run without the emulator (only the bare API for integration testing)
64 GK_STANDALONE_MODE = False
65
66 # should a new version of an image be pulled even if it is already available locally
67 FORCE_PULL = False
68
69 # Automatically deploy SAPs (endpoints) of the service as new containers
70 # Attention: This is not a configuration switch but a global variable! Don't change its default value.
71 DEPLOY_SAP = False
72
73 # flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
74 BIDIRECTIONAL_CHAIN = False
75
76 # override the management interfaces in the descriptors with default docker0 interfaces in the containers
77 USE_DOCKER_MGMT = False
78
79 # automatically deploy uploaded packages (no need to execute son-access deploy --latest separately)
80 AUTO_DEPLOY = True
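# (when True, Packages.post below calls start_service() on the new Service right after on-boarding)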
81
82 def generate_subnets(prefix, base, subnet_size=50, mask=24):
83 # Generate a list of ipaddress.ip_network subnets within the given prefix
84 r = list()
85 for net in range(base, base + subnet_size):
86 subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
87 r.append(ipaddress.ip_network(unicode(subnet)))
88 return r
89 # private subnet definitions for the generated interfaces
90 # 10.10.xxx.0/30
91 SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
92 # 10.20.xxx.0/24
93 ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
94 # 10.30.xxx.0/30
95 ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
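# For illustration: generate_subnets('10.10', 0, subnet_size=50, mask=30) yields
# [ip_network(u'10.10.0.0/30'), ip_network(u'10.10.1.0/30'), ..., ip_network(u'10.10.49.0/30')];
# these lists are consumed with pop(0), handing out one fresh subnet per SAP / E-LAN / E-Line.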
96
97
98 class Gatekeeper(object):
99
100 def __init__(self):
101 self.services = dict()
102 self.dcs = dict()
103 self.net = None
104 self.vnf_counter = 0 # used to generate short names for VNFs (Mininet limitation)
105 LOG.info("Create SONATA dummy gatekeeper.")
106
107 def register_service_package(self, service_uuid, service):
108 """
109 register a new service package
110 :param service_uuid: UUID of the uploaded package
111 :param service: Service object created for this package
112 """
113 self.services[service_uuid] = service
114 # lets perform all steps needed to onboard the service
115 service.onboard()
116
117 def get_next_vnf_name(self):
118 self.vnf_counter += 1
119 return "vnf%d" % self.vnf_counter
120
121
122 class Service(object):
123 """
124 This class represents an NS uploaded as a *.son package to the
125 dummy gatekeeper.
126 A service can have multiple running instances.
127 """
128
129 def __init__(self,
130 service_uuid,
131 package_file_hash,
132 package_file_path):
133 self.uuid = service_uuid
134 self.package_file_hash = package_file_hash
135 self.package_file_path = package_file_path
136 self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
137 self.manifest = None
138 self.nsd = None
139 self.vnfds = dict()
140 self.saps = dict()
141 self.saps_ext = list()
142 self.saps_int = list()
143 self.local_docker_files = dict()
144 self.remote_docker_image_urls = dict()
145 self.instances = dict()
146 self.vnf_name2docker_name = dict()
147 self.vnf_id2vnf_name = dict()
148
149 def onboard(self):
150 """
151 Do all steps to prepare this service to be instantiated
152 :return:
153 """
154 # 1. extract the contents of the package and store them in our catalog
155 self._unpack_service_package()
156 # 2. read in all descriptor files
157 self._load_package_descriptor()
158 self._load_nsd()
159 self._load_vnfd()
160 if DEPLOY_SAP:
161 self._load_saps()
162 # create dict to translate vnf names
163 self.vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
164 reduce(lambda x, y: dict(x, **y),
165 map(lambda d: {d["vnf_id"]: d["vnf_name"]},
166 self.nsd["network_functions"])))
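# illustrative example (ids/names are assumptions): network_functions entries such as
# [{"vnf_id": "vnf1", "vnf_name": "firewall-vnf"}, ...] become {"vnf1": "firewall-vnf", ...};
# any unknown vnf_id resolves to the default "NotExistingNode"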
167 # 3. prepare container images (e.g. download or build Dockerfile)
168 if BUILD_DOCKERFILE:
169 self._load_docker_files()
170 self._build_images_from_dockerfiles()
171 else:
172 self._load_docker_urls()
173 self._pull_predefined_dockerimages()
174 LOG.info("On-boarded service: %r" % self.manifest.get("name"))
175
176 def start_service(self):
177 """
178 This method creates and starts a new service instance.
179 It computes placements, iterates over all VNFDs, and starts
180 each VNFD as a Docker container in the data center selected
181 by the placement algorithm.
182 :return:
183 """
184 LOG.info("Starting service %r" % self.uuid)
185
186 # 1. each service instance gets a new uuid to identify it
187 instance_uuid = str(uuid.uuid4())
188 # build an instances dict (a bit like an NSR :))
189 self.instances[instance_uuid] = dict()
190 self.instances[instance_uuid]["vnf_instances"] = list()
191
192 # 2. compute placement of this service instance (adds DC names to VNFDs)
193 if not GK_STANDALONE_MODE:
194 #self._calculate_placement(FirstDcPlacement)
195 self._calculate_placement(RoundRobinDcPlacementWithSAPs)
196
197 # 3. start all vnfds that we have in the service (except SAPs)
198 for vnfd in self.vnfds.itervalues():
199 vnfi = None
200 if not GK_STANDALONE_MODE:
201 vnfi = self._start_vnfd(vnfd)
202 self.instances[instance_uuid]["vnf_instances"].append(vnfi)
203
204 # 4. start all SAPs in the service
205 for sap in self.saps:
206 self._start_sap(self.saps[sap], instance_uuid)
207
208 # 5. Deploy E-Line and E-LAN links
209 if "virtual_links" in self.nsd:
210 vlinks = self.nsd["virtual_links"]
211 # constituent virtual links are not checked
212 #fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
213 eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
214 elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
215
216 GK.net.deployed_elines.extend(eline_fwd_links)
217 GK.net.deployed_elans.extend(elan_fwd_links)
218
219 # 5a. deploy E-Line links
220 self._connect_elines(eline_fwd_links, instance_uuid)
221
222 # 5b. deploy E-LAN links
223 self._connect_elans(elan_fwd_links, instance_uuid)
224
225 # 6. run the emulator specific entrypoint scripts in the VNFIs of this service instance
226 self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])
227
228 LOG.info("Service started. Instance id: %r" % instance_uuid)
229 return instance_uuid
230
231 def stop_service(self, instance_uuid):
232 """
233 This method stops a running service instance.
234 It iterates over all VNF instances, stopping each of them
235 and removing it from its data center.
236
237 :param instance_uuid: the uuid of the service instance to be stopped
238 """
239 LOG.info("Stopping service %r" % self.uuid)
240 # get relevant information
241 # instance_uuid = str(self.uuid.uuid4())
242 vnf_instances = self.instances[instance_uuid]["vnf_instances"]
243
244 for v in vnf_instances:
245 self._stop_vnfi(v)
246
247 if not GK_STANDALONE_MODE:
248 # remove placement?
249 # self._remove_placement(RoundRobinPlacement)
250 pass
251
252 # last step: remove the instance from the list of all instances
253 del self.instances[instance_uuid]
254
255 def _start_vnfd(self, vnfd):
256 """
257 Start a single VNFD of this service
258 :param vnfd: vnfd descriptor dict
259 :return:
260 """
261 # iterate over all deployment units within each VNFD
262 for u in vnfd.get("virtual_deployment_units"):
263 # 1. get the name of the docker image to start and the assigned DC
264 vnf_name = vnfd.get("name")
265 if vnf_name not in self.remote_docker_image_urls:
266 raise Exception("No image name for %r found. Abort." % vnf_name)
267 docker_name = self.remote_docker_image_urls.get(vnf_name)
268 target_dc = vnfd.get("dc")
269 # 2. perform some checks to ensure we can start the container
270 assert(docker_name is not None)
271 assert(target_dc is not None)
272 if not self._check_docker_image_exists(docker_name):
273 raise Exception("Docker image %r not found. Abort." % docker_name)
274
275 # 3. get the resource limits
276 res_req = u.get("resource_requirements")
277 cpu_list = res_req.get("cpu").get("cores")
278 if not cpu_list or len(cpu_list)==0:
279 cpu_list="1"
280 cpu_bw = res_req.get("cpu").get("cpu_bw")
281 if not cpu_bw:
282 cpu_bw=1
283 mem_num = str(res_req.get("memory").get("size"))
284 if len(mem_num)==0:
285 mem_num="2"
286 mem_unit = str(res_req.get("memory").get("size_unit"))
287 if len(mem_unit) == 0:
288 mem_unit="GB"
289 mem_limit = float(mem_num)
290 if mem_unit=="GB":
291 mem_limit=mem_limit*1024*1024*1024
292 elif mem_unit=="MB":
293 mem_limit=mem_limit*1024*1024
294 elif mem_unit=="KB":
295 mem_limit=mem_limit*1024
296 mem_lim = int(mem_limit)
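# e.g. resource_requirements of size: 2, size_unit: "GB" yield mem_lim = 2 * 1024**3 = 2147483648 bytes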
297 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
298
299 vnf_name2id = defaultdict(lambda: "NotExistingNode",
300 reduce(lambda x, y: dict(x, **y),
301 map(lambda d: {d["vnf_name"]: d["vnf_id"]},
302 self.nsd["network_functions"])))
303
304 # check if we need to deploy the management ports (defined as type:management both in the vnfd and nsd)
305 intfs = vnfd.get("connection_points", [])
306 mgmt_intf_names = []
307 if USE_DOCKER_MGMT:
308 vnf_id = vnf_name2id[vnf_name]
309 mgmt_intfs = [vnf_id + ':' + intf['id'] for intf in intfs if intf.get('type') == 'management']
310 # check if any of these management interfaces are used in a management-type network in the nsd
311 for nsd_intf_name in mgmt_intfs:
312 vlinks = [ l["connection_points_reference"] for l in self.nsd.get("virtual_links", [])]
313 for link in vlinks:
314 if nsd_intf_name in link and self.check_mgmt_interface(link):
315 # this is indeed a management interface and can be skipped
316 vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(nsd_intf_name)
317 found_interfaces = [intf for intf in intfs if intf.get('id') == vnf_interface]
318 intfs.remove(found_interfaces[0])
319 mgmt_intf_names.append(vnf_interface)
320
321 # 4. generate the volume paths for the docker container
322 volumes=list()
323 # a volume to extract log files
324 docker_log_path = "/tmp/results/%s/%s"%(self.uuid,vnf_name)
325 LOG.debug("LOG path for vnf %s is %s."%(vnf_name,docker_log_path))
326 if not os.path.exists(docker_log_path):
327 LOG.debug("Creating folder %s"%docker_log_path)
328 os.makedirs(docker_log_path)
329
330 volumes.append(docker_log_path+":/mnt/share/")
331
332
333 # 5. do the dc.startCompute(name="foobar") call to run the container
334 # TODO consider flavors, and other annotations
335 # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
336 # use the vnf_id in the nsd as docker name
337 # so deployed containers can be easily mapped back to the nsd
338
339 self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
340
341 LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
342 LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
343 vnfi = target_dc.startCompute(
344 self.vnf_name2docker_name[vnf_name],
345 network=intfs,
346 image=docker_name,
347 flavor_name="small",
348 cpu_quota=cpu_quota,
349 cpu_period=cpu_period,
350 cpuset=cpu_list,
351 mem_limit=mem_lim,
352 volumes=volumes)
353
354 # rename the docker0 interfaces (eth0) to the management port name defined in the VNFD
355 if USE_DOCKER_MGMT:
356 for intf_name in mgmt_intf_names:
357 self._vnf_reconfigure_network(vnfi, 'eth0', new_name=intf_name)
358
359 return vnfi
360
361 def _stop_vnfi(self, vnfi):
362 """
363 Stop a VNF instance.
364
365 :param vnfi: vnf instance to be stopped
366 """
367 # Find the correct datacenter
368 status = vnfi.getStatus()
369 dc = vnfi.datacenter
370
371 # stop the vnfi
372 LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
373 dc.stopCompute(status["name"])
374
375 def _get_vnf_instance(self, instance_uuid, name):
376 """
377 Returns the Docker object for the given VNF name (or Docker name).
378 :param instance_uuid: UUID of the service instance to search in.
379 :param name: VNF name or Docker name. We are fuzzy here.
380 :return:
381 """
382 dn = name
383 if name in self.vnf_name2docker_name:
384 dn = self.vnf_name2docker_name[name]
385 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
386 if vnfi.name == dn:
387 return vnfi
388 LOG.warning("No container with name: {0} found.".format(dn))
389 return None
390
391 @staticmethod
392 def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
393 """
394 Reconfigure the network configuration of a specific interface
395 of a running container.
396 :param vnfi: container instance
397 :param if_name: interface name
398 :param net_str: network configuration string, e.g., 1.2.3.4/24
399 :return:
400 """
401
402 # assign new ip address
403 if net_str is not None:
404 intf = vnfi.intf(intf=if_name)
405 if intf is not None:
406 intf.setIP(net_str)
407 LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
408 else:
409 LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))
410
411 if new_name is not None:
412 vnfi.cmd('ip link set', if_name, 'down')
413 vnfi.cmd('ip link set', if_name, 'name', new_name)
414 vnfi.cmd('ip link set', new_name, 'up')
415 LOG.debug("Reconfigured interface name of %s:%s to %s" % (vnfi.name, if_name, new_name))
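# e.g. _vnf_reconfigure_network(vnfi, 'eth0', net_str='10.30.0.2/30') re-addresses eth0, while
# new_name='mgmt' renames eth0 to mgmt inside the container (interface name and address are illustrative)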
416
417
418
419 def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
420 for vnfi in vnfi_list:
421 config = vnfi.dcinfo.get("Config", dict())
422 env = config.get("Env", list())
423 for env_var in env:
424 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
425 LOG.debug("%r = %r" % (var , cmd))
426 if var=="SON_EMU_CMD":
427 LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
428 # execute command in new thread to ensure that GK is not blocked by VNF
429 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
430 t.daemon = True
431 t.start()
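# e.g. a container image built with ENV SON_EMU_CMD="./start.sh" (script name is illustrative)
# gets its start script executed in a background thread once the service instance is up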
432
433 def _unpack_service_package(self):
434 """
435 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
436 """
437 LOG.info("Unzipping: %r" % self.package_file_path)
438 with zipfile.ZipFile(self.package_file_path, "r") as z:
439 z.extractall(self.package_content_path)
440
441
442 def _load_package_descriptor(self):
443 """
444 Load the main package descriptor YAML and keep it as dict.
445 :return:
446 """
447 self.manifest = load_yaml(
448 os.path.join(
449 self.package_content_path, "META-INF/MANIFEST.MF"))
450
451 def _load_nsd(self):
452 """
453 Load the entry NSD YAML and keep it as dict.
454 :return:
455 """
456 if "entry_service_template" in self.manifest:
457 nsd_path = os.path.join(
458 self.package_content_path,
459 make_relative_path(self.manifest.get("entry_service_template")))
460 self.nsd = load_yaml(nsd_path)
461 GK.net.deployed_nsds.append(self.nsd)
462
463 LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
464
465 def _load_vnfd(self):
466 """
467 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
468 :return:
469 """
470 if "package_content" in self.manifest:
471 for pc in self.manifest.get("package_content"):
472 if pc.get("content-type") == "application/sonata.function_descriptor":
473 vnfd_path = os.path.join(
474 self.package_content_path,
475 make_relative_path(pc.get("name")))
476 vnfd = load_yaml(vnfd_path)
477 self.vnfds[vnfd.get("name")] = vnfd
478 LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))
479
480 def _load_saps(self):
481 # create list of all SAPs
482 # check if we need to deploy management ports
483 if USE_DOCKER_MGMT:
484 LOG.debug("nsd: {0}".format(self.nsd))
485 SAPs = [p for p in self.nsd["connection_points"] if 'management' not in p.get('type')]
486 else:
487 SAPs = [p for p in self.nsd["connection_points"]]
488
489 for sap in SAPs:
490 # endpoint needed in this service
491 sap_id, sap_interface, sap_docker_name = parse_interface(sap['id'])
492 # make sure SAP has type set (default internal)
493 sap["type"] = sap.get("type", 'internal')
494
495 # Each Service Access Point (connection_point) in the nsd is an IP address on the host
496 if sap["type"] == "external":
497 # add to vnfds to calculate placement later on
498 sap_net = SAP_SUBNETS.pop(0)
499 self.saps[sap_docker_name] = {"name": sap_docker_name , "type": "external", "net": sap_net}
500 # add SAP vnf to list in the NSD so it is deployed later on
501 # each SAP gets a unique VNFD and vnf_id in the NSD and a custom type (only defined in the dummygatekeeper)
502 self.nsd["network_functions"].append(
503 {"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_ext"})
504
505 # Each Service Access Point (connection_point) in the nsd is getting its own container (default)
506 elif sap["type"] == "internal" or sap["type"] == "management":
507 # add SAP to self.vnfds
508 sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
509 sap_vnfd = load_yaml(sapfile)
510 sap_vnfd["connection_points"][0]["id"] = sap_interface
511 sap_vnfd["name"] = sap_docker_name
512 sap_vnfd["type"] = "internal"
513 # add to vnfds to calculate placement later on and deploy
514 self.saps[sap_docker_name] = sap_vnfd
515 # add SAP vnf to list in the NSD so it is deployed later on
516 # each SAP gets a unique VNFD and vnf_id in the NSD
517 self.nsd["network_functions"].append(
518 {"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_int"})
519
520 LOG.debug("Loaded SAP: name: {0}, type: {1}".format(sap_docker_name, sap['type']))
521
522 # create sap lists
523 self.saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
524 self.saps_int = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "internal"]
525
526 def _start_sap(self, sap, instance_uuid):
527 if not DEPLOY_SAP:
528 return
529
530 LOG.info('start SAP: {0}, type: {1}'.format(sap['name'], sap['type']))
531 if sap["type"] == "internal":
532 vnfi = None
533 if not GK_STANDALONE_MODE:
534 vnfi = self._start_vnfd(sap)
535 self.instances[instance_uuid]["vnf_instances"].append(vnfi)
536
537 elif sap["type"] == "external":
538 target_dc = sap.get("dc")
539 # add interface to dc switch
540 target_dc.attachExternalSAP(sap['name'], sap['net'])
541
542 def _connect_elines(self, eline_fwd_links, instance_uuid):
543 """
544 Connect all E-LINE links in the NSD
545 :param eline_fwd_links: list of E-LINE links in the NSD
546 :param instance_uuid: UUID of the service instance
547 :return:
548 """
549 # cookie is used as identifier for the flowrules installed by the dummygatekeeper
550 # eg. different services get a unique cookie for their flowrules
551 cookie = 1
552 for link in eline_fwd_links:
553 # check if we need to deploy this link when it's a management link:
554 if USE_DOCKER_MGMT:
555 if self.check_mgmt_interface(link["connection_points_reference"]):
556 continue
557
558 src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
559 dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
560
561 setChaining = False
562 # check if there is a SAP in the link and chain everything together
563 if src_sap_id in self.saps and dst_sap_id in self.saps:
564 LOG.info('2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id, dst_sap_id))
565 continue
566
567 elif src_sap_id in self.saps_ext:
568 src_id = src_sap_id
569 # set intf name to None so the chaining function will choose the first one
570 src_if_name = None
571 src_name = self.vnf_id2vnf_name[src_id]
572 dst_name = self.vnf_id2vnf_name[dst_id]
573 dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
574 if dst_vnfi is not None:
575 # choose first ip address in sap subnet
576 sap_net = self.saps[src_sap_id]['net']
577 sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
578 self._vnf_reconfigure_network(dst_vnfi, dst_if_name, sap_ip)
579 setChaining = True
580
581 elif dst_sap_id in self.saps_ext:
582 dst_id = dst_sap_id
583 # set intf name to None so the chaining function will choose the first one
584 dst_if_name = None
585 src_name = self.vnf_id2vnf_name[src_id]
586 dst_name = self.vnf_id2vnf_name[dst_id]
587 src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
588 if src_vnfi is not None:
589 sap_net = self.saps[dst_sap_id]['net']
590 sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
591 self._vnf_reconfigure_network(src_vnfi, src_if_name, sap_ip)
592 setChaining = True
593
594 # Link between 2 VNFs
595 else:
596 # make sure we use the correct sap vnf name
597 if src_sap_id in self.saps_int:
598 src_id = src_sap_id
599 if dst_sap_id in self.saps_int:
600 dst_id = dst_sap_id
601 src_name = self.vnf_id2vnf_name[src_id]
602 dst_name = self.vnf_id2vnf_name[dst_id]
603 # re-configure the VNFs' IP assignment and ensure that a new subnet is used for each E-Line
604 src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
605 dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
606 if src_vnfi is not None and dst_vnfi is not None:
607 eline_net = ELINE_SUBNETS.pop(0)
608 ip1 = "{0}/{1}".format(str(eline_net[1]), eline_net.prefixlen)
609 ip2 = "{0}/{1}".format(str(eline_net[2]), eline_net.prefixlen)
610 self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
611 self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
612 setChaining = True
613
614 # Set the chaining
615 if setChaining:
616 ret = GK.net.setChain(
617 src_id, dst_id,
618 vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
619 bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
620 LOG.debug(
621 "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
622 src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))
623
624
625 def _connect_elans(self, elan_fwd_links, instance_uuid):
626 """
627 Connect all E-LAN links in the NSD
628 :param elan_fwd_links: list of E-LAN links in the NSD
629 :param instance_uuid: UUID of the service instance
630 :return:
631 """
632 for link in elan_fwd_links:
633 # check if we need to deploy this link when it's a management link:
634 if USE_DOCKER_MGMT:
635 if self.check_mgmt_interface(link["connection_points_reference"]):
636 continue
637
638 elan_vnf_list = []
639 # check if an external SAP is in the E-LAN (then a subnet is already defined)
640 intfs_elan = [intf for intf in link["connection_points_reference"]]
641 lan_sap = self.check_ext_saps(intfs_elan)
642 if lan_sap:
643 lan_net = self.saps[lan_sap]['net']
644 lan_hosts = list(lan_net.hosts())
645 sap_ip = str(lan_hosts.pop(0))
646 else:
647 lan_net = ELAN_SUBNETS.pop(0)
648 lan_hosts = list(lan_net.hosts())
649
650 # generate lan ip address for all interfaces except external SAPs
651 for intf in link["connection_points_reference"]:
652
653 # skip external SAPs, they already have an ip
654 vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf)
655 if vnf_sap_docker_name in self.saps_ext:
656 elan_vnf_list.append({'name': vnf_sap_docker_name, 'interface': vnf_interface})
657 continue
658
659 ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)), lan_net.prefixlen)
660 vnf_id, intf_name, vnf_sap_id = parse_interface(intf)
661
662 # make sure we use the correct sap vnf name
663 src_docker_name = vnf_id
664 if vnf_sap_id in self.saps_int:
665 src_docker_name = vnf_sap_id
666 vnf_id = vnf_sap_id
667
668 vnf_name = self.vnf_id2vnf_name[vnf_id]
669 LOG.debug(
670 "Setting up E-LAN interface. %s(%s:%s) -> %s" % (
671 vnf_name, vnf_id, intf_name, ip_address))
672
673 if vnf_name in self.vnfds:
674 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
675 # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
676 # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
677 vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
678 if vnfi is not None:
679 self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
680 # add this vnf and interface to the E-LAN for tagging
681 elan_vnf_list.append({'name': src_docker_name, 'interface': intf_name})
682
683 # install the VLAN tags for this E-LAN
684 GK.net.setLAN(elan_vnf_list)
685
686
687 def _load_docker_files(self):
688 """
689 Get all paths to Dockerfiles from VNFDs and store them in a dict.
690 :return:
691 """
692 for k, v in self.vnfds.iteritems():
693 for vu in v.get("virtual_deployment_units"):
694 if vu.get("vm_image_format") == "docker":
695 vm_image = vu.get("vm_image")
696 docker_path = os.path.join(
697 self.package_content_path,
698 make_relative_path(vm_image))
699 self.local_docker_files[k] = docker_path
700 LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))
701
702 def _load_docker_urls(self):
703 """
704 Get all URLs to pre-built docker images in some repo.
705 :return:
706 """
707 # also merge sap dicts, because internal saps also need a docker container
708 all_vnfs = self.vnfds.copy()
709 all_vnfs.update(self.saps)
710
711 for k, v in all_vnfs.iteritems():
712 for vu in v.get("virtual_deployment_units", {}):
713 if vu.get("vm_image_format") == "docker":
714 url = vu.get("vm_image")
715 if url is not None:
716 url = url.replace("http://", "")
717 self.remote_docker_image_urls[k] = url
718 LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))
719
720 def _build_images_from_dockerfiles(self):
721 """
722 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
723 """
724 if GK_STANDALONE_MODE:
725 return # do not build anything in standalone mode
726 dc = DockerClient()
727 LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
728 for k, v in self.local_docker_files.iteritems():
729 for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
730 LOG.debug("DOCKER BUILD: %s" % line)
731 LOG.info("Docker image created: %s" % k)
732
733 def _pull_predefined_dockerimages(self):
734 """
735 If the package contains URLs to pre-built Docker images, we download them with this method.
736 """
737 dc = DockerClient()
738 for url in self.remote_docker_image_urls.itervalues():
739 if not FORCE_PULL: # only pull if not present (speedup for development)
740 if len(dc.images.list(name=url)) > 0:
741 LOG.debug("Image %r present. Skipping pull." % url)
742 continue
743 LOG.info("Pulling image: %r" % url)
744 # this seems to fail with latest docker api version 2.0.2
745 # dc.images.pull(url,
746 # insecure_registry=True)
747 # using the docker CLI instead
748 cmd = ["docker",
749 "pull",
750 url,
751 ]
752 Popen(cmd).wait()
753
754
755
756
757 def _check_docker_image_exists(self, image_name):
758 """
759 Query the docker service and check if the given image exists
760 :param image_name: name of the docker image
761 :return:
762 """
763 return len(DockerClient().images.list(name=image_name)) > 0
764
765 def _calculate_placement(self, algorithm):
766 """
767 Do placement by adding a field "dc" to
768 each VNFD that points to one of our
769 data center objects known to the gatekeeper.
770 """
771 assert(len(self.vnfds) > 0)
772 assert(len(GK.dcs) > 0)
773 # instantiate the algorithm and place
774 p = algorithm()
775 p.place(self.nsd, self.vnfds, self.saps, GK.dcs)
776 LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
777 # lets print the placement result
778 for name, vnfd in self.vnfds.iteritems():
779 LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
780 for sap in self.saps:
781 sap_dict = self.saps[sap]
782 LOG.info("Placed SAP %r on DC %r" % (sap, str(sap_dict.get("dc"))))
783
784
785 def _calculate_cpu_cfs_values(self, cpu_time_percentage):
786 """
787 Calculate cpu period and quota for CFS
788 :param cpu_time_percentage: percentage of overall CPU to be used
789 :return: cpu_period, cpu_quota
790 """
791 if cpu_time_percentage is None:
792 return -1, -1
793 if cpu_time_percentage < 0:
794 return -1, -1
795 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
796 # Attention: the minimum cpu_quota is 1000 microseconds (1ms)
797 cpu_period = 1000000 # lets consider a fixed period of 1000000 microseconds for now
798 LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
799 cpu_quota = cpu_period * cpu_time_percentage # calculate the fraction of cpu time for this container
800 # ATTENTION: >= 1000 to avoid an invalid argument system error ... no idea why
801 if cpu_quota < 1000:
802 LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
803 cpu_quota = 1000
804 LOG.warning("Increased CPU quota to avoid system error.")
805 LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
806 return int(cpu_period), int(cpu_quota)
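# worked example: cpu_time_percentage=0.25 -> cpu_period=1000000, cpu_quota=250000,
# i.e. the container may use at most 25% of one CPU per CFS period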
807
808 def check_ext_saps(self, intf_list):
809 # check if the list of interfaces contains an external SAP
810 saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
811 for intf_name in intf_list:
812 vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf_name)
813 if vnf_sap_docker_name in saps_ext:
814 return vnf_sap_docker_name
815
816 def check_mgmt_interface(self, intf_list):
817 SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"] if 'management' in p.get('type')]
818 for intf_name in intf_list:
819 if intf_name in SAPs_mgmt:
820 return True
821
822 """
823 Some (simple) placement algorithms
824 """
825
826
827 class FirstDcPlacement(object):
828 """
829 Placement: Always use one and the same data center from the GK.dcs dict.
830 """
831 def place(self, nsd, vnfds, saps, dcs):
832 for name, vnfd in vnfds.iteritems():
833 vnfd["dc"] = list(dcs.itervalues())[0]
834
835
836 class RoundRobinDcPlacement(object):
837 """
838 Placement: Distribute VNFs across all available DCs in a round robin fashion.
839 """
840 def place(self, nsd, vnfds, saps, dcs):
841 c = 0
842 dcs_list = list(dcs.itervalues())
843 for name, vnfd in vnfds.iteritems():
844 vnfd["dc"] = dcs_list[c % len(dcs_list)]
845 c += 1 # inc. c to use next DC
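# round-robin example (DC names are illustrative): with dcs = {"dc1": ..., "dc2": ...} and three
# VNFs, the loop above assigns vnf_1 -> dc1, vnf_2 -> dc2, vnf_3 -> dc1 (dict order in Python 2 is arbitrary)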
846
847 class RoundRobinDcPlacementWithSAPs(object):
848 """
849 Placement: Distribute VNFs across all available DCs in a round robin fashion;
850 every SAP is instantiated on the same DC as its connected VNF.
851 """
852 def place(self, nsd, vnfds, saps, dcs):
853
854 # place vnfs
855 c = 0
856 dcs_list = list(dcs.itervalues())
857 for name, vnfd in vnfds.iteritems():
858 vnfd["dc"] = dcs_list[c % len(dcs_list)]
859 c += 1 # inc. c to use next DC
860
861 # place SAPs
862 vlinks = nsd.get("virtual_links", [])
863 eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
864 elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
865
866 vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
867 reduce(lambda x, y: dict(x, **y),
868 map(lambda d: {d["vnf_id"]: d["vnf_name"]},
869 nsd["network_functions"])))
870
871 # SAPs on E-Line links are placed on the same DC as the VNF on the E-Line
872 for link in eline_fwd_links:
873 src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
874 dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
875
876 # check if there is a SAP in the link
877 if src_sap_id in saps:
878 dst_vnf_name = vnf_id2vnf_name[dst_id]
879 # get dc where connected vnf is mapped to
880 dc = vnfds[dst_vnf_name]['dc']
881 saps[src_sap_id]['dc'] = dc
882
883 if dst_sap_id in saps:
884 src_vnf_name = vnf_id2vnf_name[src_id]
885 # get dc where connected vnf is mapped to
886 dc = vnfds[src_vnf_name]['dc']
887 saps[dst_sap_id]['dc'] = dc
888
889 # SAPs on E-LANs are placed on a random DC
890 dcs_list = list(dcs.itervalues())
891 dc_len = len(dcs_list)
892 for link in elan_fwd_links:
893 for intf in link["connection_points_reference"]:
894 # find SAP interfaces
895 intf_id, intf_name, intf_sap_id = parse_interface(intf)
896 if intf_sap_id in saps:
897 dc = dcs_list[randint(0, dc_len-1)]
898 saps[intf_sap_id]['dc'] = dc
899
900
901
902 """
903 Resource definitions and API endpoints
904 """
905
906
907 class Packages(fr.Resource):
908
909 def post(self):
910 """
911 Upload a *.son service package to the dummy gatekeeper.
912
913 We expect a request with a *.son file and store it in UPLOAD_FOLDER.
914 :return: UUID
915 """
916 try:
917 # get file contents
918 LOG.info("POST /packages called")
919 # lets search for the package in the request
920 is_file_object = False # make API more robust: file can be in data or in files field
921 if "package" in request.files:
922 son_file = request.files["package"]
923 is_file_object = True
924 elif len(request.data) > 0:
925 son_file = request.data
926 else:
927 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
928 # generate a uuid to reference this package
929 service_uuid = str(uuid.uuid4())
930 file_hash = hashlib.sha1(str(son_file)).hexdigest()
931 # ensure that upload folder exists
932 ensure_dir(UPLOAD_FOLDER)
933 upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
934 # store *.son file to disk
935 if is_file_object:
936 son_file.save(upload_path)
937 else:
938 with open(upload_path, 'wb') as f:
939 f.write(son_file)
940 size = os.path.getsize(upload_path)
941 # create a service object and register it
942 s = Service(service_uuid, file_hash, upload_path)
943 GK.register_service_package(service_uuid, s)
944
945 # automatically deploy the service
946 if AUTO_DEPLOY:
947 # ok, we have a service uuid, lets start the service
948 service_instance_uuid = GK.services.get(service_uuid).start_service()
949
950 # generate the JSON result
951 return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
952 except Exception as ex:
953 LOG.exception("Service package upload failed:")
954 return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
955
956 def get(self):
957 """
958 Return a list of UUIDs of uploaded service packages.
959 :return: dict/list
960 """
961 LOG.info("GET /packages")
962 return {"service_uuid_list": list(GK.services.iterkeys())}
963
964
965 class Instantiations(fr.Resource):
966
967 def post(self):
968 """
969 Instantiate a service specified by its UUID.
970 Will return a new UUID to identify the running service instance.
971 :return: UUID
972 """
973 LOG.info("POST /instantiations (or /requests) called")
974 # try to extract the service uuid from the request
975 json_data = request.get_json(force=True)
976 service_uuid = json_data.get("service_uuid")
977
978 # lets be a bit fuzzy here to make testing easier
979 if (service_uuid is None or service_uuid=="latest") and len(GK.services) > 0:
980 # if we don't get a service uuid, we simply start the first service in the list
981 service_uuid = list(GK.services.iterkeys())[0]
982 if service_uuid in GK.services:
983 # ok, we have a service uuid, lets start the service
984 service_instance_uuid = GK.services.get(service_uuid).start_service()
985 return {"service_instance_uuid": service_instance_uuid}, 201
986 return "Service not found", 404
987
988 def get(self):
989 """
990 Returns a list of UUIDs containing all running services.
991 :return: dict / list
992 """
993 LOG.info("GET /instantiations")
994 return {"service_instantiations_list": [
995 list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
996
997 def delete(self):
998 """
999 Stops a running service specified by its service and instance UUID.
1000 """
1001 # try to extract the service and instance UUID from the request
1002 json_data = request.get_json(force=True)
1003 service_uuid = json_data.get("service_uuid")
1004 instance_uuid = json_data.get("service_instance_uuid")
1005
1006 # try to be fuzzy
1007 if service_uuid is None and len(GK.services) > 0:
1008 # if we don't get a service uuid, we simply stop the first service in the list
1009 service_uuid = list(GK.services.iterkeys())[0]
1010 if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
1011 instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]
1012
1013 if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
1014 # valid service and instance UUID, stop service
1015 GK.services.get(service_uuid).stop_service(instance_uuid)
1016 return "service instance with uuid %r stopped." % instance_uuid, 200
1017 return "Service not found", 404
1018
1019 class Exit(fr.Resource):
1020
1021 def put(self):
1022 """
1023 Stop the running Containernet instance regardless of data transmitted
1024 """
1025 list(GK.dcs.values())[0].net.stop()
1026
1027
1028 def initialize_GK():
1029 global GK
1030 GK = Gatekeeper()
1031
1032
1033
1034 # create a single, global GK object
1035 GK = None
1036 initialize_GK()
1037 # setup Flask
1038 app = Flask(__name__)
1039 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
1040 api = fr.Api(app)
1041 # define endpoints
1042 api.add_resource(Packages, '/packages', '/api/v2/packages')
1043 api.add_resource(Instantiations, '/instantiations', '/api/v2/instantiations', '/api/v2/requests')
1044 api.add_resource(Exit, '/emulator/exit')
1045
1046
1047
1048 def start_rest_api(host, port, datacenters=dict()):
1049 GK.dcs = datacenters
1050 GK.net = get_dc_network()
1051 # start the Flask server (not the best performance but ok for our use case)
1052 app.run(host=host,
1053 port=port,
1054 debug=True,
1055 use_reloader=False # this is needed to run Flask in a non-main thread
1056 )
1057
1058
1059 def ensure_dir(name):
1060 if not os.path.exists(name):
1061 os.makedirs(name)
1062
1063
1064 def load_yaml(path):
1065 with open(path, "r") as f:
1066 try:
1067 r = yaml.load(f)
1068 except yaml.YAMLError as exc:
1069 LOG.exception("YAML parse error")
1070 r = dict()
1071 return r
1072
1073
1074 def make_relative_path(path):
1075 if path.startswith("file://"):
1076 path = path.replace("file://", "", 1)
1077 if path.startswith("/"):
1078 path = path.replace("/", "", 1)
1079 return path
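# e.g. "file://docker/firewall/Dockerfile" -> "docker/firewall/Dockerfile" (path is illustrative)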
1080
1081
1082 def get_dc_network():
1083 """
1084 retrieve the DCNetwork to which this dummygatekeeper (GK) connects.
1085 Assumes at least one datacenter is connected to this GK and that all datacenters belong to the same DCNetwork.
1086 :return:
1087 """
1088 assert (len(GK.dcs) > 0)
1089 return GK.dcs.values()[0].net
1090
1091
1092 def parse_interface(interface_name):
1093 """
1094 convert the interface name in the nsd to the corresponding vnf_id and vnf_interface names
1095 :param interface_name:
1096 :return:
1097 """
1098
1099 if ':' in interface_name:
1100 vnf_id, vnf_interface = interface_name.split(':')
1101 vnf_sap_docker_name = interface_name.replace(':', '_')
1102 else:
1103 vnf_id = interface_name
1104 vnf_interface = interface_name
1105 vnf_sap_docker_name = interface_name
1106
1107 return vnf_id, vnf_interface, vnf_sap_docker_name
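# e.g. "vnf1:input" -> ("vnf1", "input", "vnf1_input"); a plain "mgmt" -> ("mgmt", "mgmt", "mgmt")
# (interface names are illustrative)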
1108
1109 if __name__ == '__main__':
1110 """
1111 Allow running the API in standalone mode.
1112 """
1113 GK_STANDALONE_MODE = True
1114 logging.getLogger("werkzeug").setLevel(logging.INFO)
1115 start_rest_api("0.0.0.0", 8000)
1116