2 # Copyright (c) 2018 SONATA-NFV, 5GTANGO and Paderborn University
3 # ALL RIGHTS RESERVED.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 # Neither the name of the SONATA-NFV, 5GTANGO, Paderborn University
18 # nor the names of its contributors may be used to endorse or promote
19 # products derived from this software without specific prior written
20 # permission.
21 #
22 # This work has been performed in the framework of the SONATA project,
23 # funded by the European Commission under Grant number 671517 through
24 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 # acknowledge the contributions of their colleagues of the SONATA
26 # partner consortium (www.sonata-nfv.eu).
27 #
28 # This work has also been performed in the framework of the 5GTANGO project,
29 # funded by the European Commission under Grant number 761493 through
30 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
31 # acknowledge the contributions of their colleagues of the 5GTANGO
32 # partner consortium (www.5gtango.eu).
33 import logging
34 import os
35 import uuid
36 import hashlib
37 import zipfile
38 import yaml
39 import threading
40 import datetime
41 from docker import DockerClient
42 from flask import Flask, request
43 import flask_restful as fr
44 from gevent.pywsgi import WSGIServer
45 from subprocess import Popen
46 import ipaddress
47 import copy
48 import time
49
50
51 LOG = logging.getLogger("5gtango.llcm")
52 LOG.setLevel(logging.INFO)
53
54
55 CORS_HEADER = {'Access-Control-Allow-Origin': '*',
56 'Access-Control-Allow-Methods': 'GET,OPTIONS'}
57
58
59 GK_STORAGE = "/tmp/vim-emu-tango-llcm/"
60 UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
61 CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
62
63 # Enable Dockerfile build functionality
64 BUILD_DOCKERFILE = False
65
66 # flag to indicate that we run without the emulator (only the bare API for
67 # integration testing)
68 GK_STANDALONE_MODE = False
69
70 # should a new version of an image be pulled even if it's already available
71 FORCE_PULL = False
72
73 # flag to indicate if we use bidirectional forwarding rules in the
74 # automatic chaining process
75 BIDIRECTIONAL_CHAIN = True
76
77 # override the management interfaces in the descriptors with default
78 # docker0 interfaces in the containers
79 USE_DOCKER_MGMT = False
80
81 # automatically deploy uploaded packages (no need to execute son-access
82 # deploy --latest separately)
83 AUTO_DEPLOY = False
84
85 # and also automatically terminate any other running services
86 AUTO_DELETE = False
87
88 # global subnet definitions (see reset_subnets())
89 ELAN_SUBNETS = None
90 ELINE_SUBNETS = None
91
92 # Time in seconds to wait for vnf stop scripts to execute fully
93 VNF_STOP_WAIT_TIME = 5
94
95 # If services are instantiated multiple times, the public port
96 # mappings need to be adapted to avoid collisions. We use the following
97 # offset for this: NEW_PORT = (SSIID * OFFSET) + ORIGINAL_PORT
98 MULTI_INSTANCE_PORT_OFFSET = 1000
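# Illustrative example (assumed values): with an offset of 1000, original
# port 8080 of service instance SSIID=2 is published as
# (2 * 1000) + 8080 = 10080 (see update_port_mapping_multi_instance()).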
99
100 # Selected Placement Algorithm: Points to the class of the selected
101 # placement algorithm.
102 PLACEMENT_ALGORITHM_OBJ = None
103
104 # Path to folder with <container_name>.env.yml files that contain
105 # environment variables injected into the specific container
106 # when it is started.
107 PER_INSTANCE_ENV_CONFIGURATION_FOLDER = None
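# Illustrative example (assumed folder and file names): if this is set to
# "~/env_conf/", a file "~/env_conf/vnf0.vdu01.0.env.yml" containing plain
# "KEY: value" pairs would be loaded and injected as environment variables
# into the container "vnf0.vdu01.0" (see _load_instance_conf_envs()).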
108
109
110 class OnBoardingException(BaseException):
111 pass
112
113
114 class Gatekeeper(object):
115
116 def __init__(self):
117 self.services = dict()
118 self.dcs = dict()
119 self.net = None
120 # used to generate short names for VNFs (Mininet limitation)
121 self.vnf_counter = 0
122 reset_subnets()
123 LOG.info("Initialized 5GTANGO LLCM module.")
124
125 def register_service_package(self, service_uuid, service):
126 """
127 Register a new service package.
128 :param service_uuid: UUID of the service package
129 :param service: service object to be registered
130 """
131 self.services[service_uuid] = service
132 # let's perform all steps needed to onboard the service
133 service.onboard()
134
135
136 class Service(object):
137 """
138 This class represents an NS uploaded as a *.son package to the
139 dummy gatekeeper.
140 It can have multiple running instances of this service.
141 """
142
143 def __init__(self,
144 service_uuid,
145 package_file_hash,
146 package_file_path):
147 self.uuid = service_uuid
148 self.package_file_hash = package_file_hash
149 self.package_file_path = package_file_path
150 self.package_content_path = os.path.join(
151 CATALOG_FOLDER, "services/%s" % self.uuid)
152 self.manifest = None
153 self.nsd = None
154 self.vnfds = dict()
155 self.local_docker_files = dict()
156 self.remote_docker_image_urls = dict()
157 self.instances = dict()
158 self._instance_counter = 0
159 self.created_at = str(datetime.datetime.now())
160
161 def onboard(self):
162 """
163 Do all steps to prepare this service to be instantiated
164 :return:
165 """
166 # 1. extract the contents of the package and store them in our catalog
167 self._unpack_service_package()
168 # 2. read in all descriptor files
169 self._load_package_descriptor()
170 self._load_nsd()
171 self._load_vnfd()
172 if self.nsd is None:
173 raise OnBoardingException("No NSD found.")
174 if len(self.vnfds) < 1:
175 raise OnBoardingException("No VNFDs found.")
176 # 3. prepare container images (e.g. download or build Dockerfile)
177 if BUILD_DOCKERFILE:
178 self._load_docker_files()
179 self._build_images_from_dockerfiles()
180 else:
181 self._load_docker_urls()
182 self._pull_predefined_dockerimages()
183 # 4. reserve subnets
184 eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
185 self.eline_subnets = [ELINE_SUBNETS.pop(0) for _ in eline_fwd_links]
186 self.elan_subnets = [ELAN_SUBNETS.pop(0) for _ in elan_fwd_links]
187 LOG.debug("Reserved subnets for service '{}': E-Line: {} / E-LAN: {}"
188 .format(self.manifest.get("name"),
189 self.eline_subnets, self.elan_subnets))
190 LOG.info("On-boarded service: {}".format(self.manifest.get("name")))
191
192 def start_service(self):
193 """
194 This method creates and starts a new service instance.
195 It computes placements, iterates over all VNFDs, and starts
196 each VNFD as a Docker container in the data center selected
197 by the placement algorithm.
198 :return:
199 """
200 LOG.info("Starting service {} ({})"
201 .format(get_triple_id(self.nsd), self.uuid))
202
203 # 1. each service instance gets a new uuid to identify it
204 instance_uuid = str(uuid.uuid4())
205 # build an instances dict (a bit like an NSR :))
206 self.instances[instance_uuid] = dict()
207 self.instances[instance_uuid]["uuid"] = self.uuid
208 # SSIID = short service instance ID (used as a suffix for container names)
209 self.instances[instance_uuid]["ssiid"] = self._instance_counter
210 self.instances[instance_uuid]["name"] = get_triple_id(self.nsd)
211 self.instances[instance_uuid]["vnf_instances"] = list()
212 self.instances[instance_uuid]["created_at"] = str(datetime.datetime.now())
213 # increase for next instance
214 self._instance_counter += 1
215
216 # 3. start all vnfds that we have in the service
217 for vnf_id in self.vnfds:
218 vnfd = self.vnfds[vnf_id]
219 # attention: returns a list of started deployment units
220 vnfis = self._start_vnfd(
221 vnfd, vnf_id, self.instances[instance_uuid]["ssiid"])
222 # add list of VNFIs to total VNFI list
223 self.instances[instance_uuid]["vnf_instances"].extend(vnfis)
224
225 # 4. Deploy E-Line, E-Tree and E-LAN links
226 # Attention: Only done if the "forwarding_graphs" section exists in the NSD,
227 # even if "forwarding_graphs" are not used directly.
228 # Attention 2: Passing a copy of *_subnets created with list() is important here!
229 eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
230 # 5a. deploy E-Line links
231 GK.net.deployed_elines.extend(eline_fwd_links) # bookkeeping
232 self._connect_elines(eline_fwd_links, instance_uuid, list(self.eline_subnets))
233 # 5b. deploy E-Tree/E-LAN links
234 GK.net.deployed_elans.extend(elan_fwd_links) # bookkeeping
235 self._connect_elans(elan_fwd_links, instance_uuid, list(self.elan_subnets))
236
237 # 6. run the emulator specific entrypoint scripts in the VNFIs of this
238 # service instance
239 self._trigger_emulator_start_scripts_in_vnfis(
240 self.instances[instance_uuid]["vnf_instances"])
241 # done
242 LOG.info("Service '{}' started. Instance id: {} SSIID: {}"
243 .format(self.instances[instance_uuid]["name"],
244 instance_uuid,
245 self.instances[instance_uuid]["ssiid"]))
246 return instance_uuid
247
248 def stop_service(self, instance_uuid):
249 """
250 This method stops a running service instance.
251 It iterates over all VNF instances, stopping each of them
252 and removing it from its data center.
253 :param instance_uuid: the uuid of the service instance to be stopped
254 """
255 LOG.info("Stopping service %r" % self.uuid)
256 # get relevant information
257 # instance_uuid = str(self.uuid.uuid4())
258 vnf_instances = self.instances[instance_uuid]["vnf_instances"]
259 # trigger stop scripts in vnf instances and wait a few seconds for
260 # completion
261 self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
262 time.sleep(VNF_STOP_WAIT_TIME)
263 # stop all vnfs
264 for v in vnf_instances:
265 self._stop_vnfi(v)
266 # last step: remove the instance from the list of all instances
267 del self.instances[instance_uuid]
268
269 def _get_elines_and_elans(self):
270 """
271 Get the E-Line, E-LAN, E-Tree links from the NSD.
272 """
273 # Attention: Only done if the "forwarding_graphs" section exists in the NSD,
274 # even if "forwarding_graphs" are not used directly.
275 eline_fwd_links = list()
276 elan_fwd_links = list()
277 if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
278 vlinks = self.nsd["virtual_links"]
279 # constituent virtual links are not checked
280 eline_fwd_links = [l for l in vlinks if (
281 l["connectivity_type"] == "E-Line")]
282 elan_fwd_links = [l for l in vlinks if (
283 l["connectivity_type"] == "E-LAN" or
284 l["connectivity_type"] == "E-Tree")] # Treat E-Tree as E-LAN
285 return eline_fwd_links, elan_fwd_links
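# Illustrative example (assumed NSD snippet): a virtual_links entry such as
# {"connectivity_type": "E-Line",
#  "connection_points_reference": ["vnf0:output", "vnf1:input"]}
# would end up in eline_fwd_links, while "E-LAN" and "E-Tree" entries are
# collected in elan_fwd_links.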
286
287 def _get_resource_limits(self, deployment_unit):
288 """
289 Extract resource limits from deployment units.
290 """
291 # defaults
292 cpu_list = None
293 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(1.0))
294 mem_limit = None
295 # update from descriptor
296 if "resource_requirements" in deployment_unit:
297 res_req = deployment_unit.get("resource_requirements")
298 cpu_list = res_req.get("cpu").get("cpuset")
299 if cpu_list is None:
300 cpu_list = res_req.get("cpu").get("vcpus")
301 if cpu_list is not None:
302 # attention: docker expects list as string w/o spaces:
303 cpu_list = str(cpu_list).replace(" ", "").strip()
304 cpu_bw = res_req.get("cpu").get("cpu_bw")
305 if cpu_bw is None:
306 cpu_bw = 1.0
307 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
308 mem_limit = res_req.get("memory").get("size")
309 mem_unit = str(res_req.get("memory").get("size_unit", "GB"))
310 if mem_limit is not None:
311 mem_limit = int(mem_limit)
312 # to bytes
313 if "G" in mem_unit:
314 mem_limit = mem_limit * 1024 * 1024 * 1024
315 elif "M" in mem_unit:
316 mem_limit = mem_limit * 1024 * 1024
317 elif "K" in mem_unit:
318 mem_limit = mem_limit * 1024
319 return cpu_list, cpu_period, cpu_quota, mem_limit
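# Illustrative example (assumed descriptor values): a deployment unit with
# resource_requirements = {"cpu": {"vcpus": 2, "cpu_bw": 0.5},
#                          "memory": {"size": 1, "size_unit": "GB"}}
# would yield cpu_list="2", cpu_quota=500000 (50% of the 1000000 us period)
# and mem_limit=1073741824 bytes.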
320
321 def _start_vnfd(self, vnfd, vnf_id, ssiid, **kwargs):
322 """
323 Start a single VNFD of this service
324 :param vnfd: vnfd descriptor dict
325 :param vnf_id: unique id of this vnf in the nsd
326 :return:
327 """
328 vnfis = list()
329 # the vnf_name refers to the container image to be deployed
330 vnf_name = vnfd.get("name")
331 # combine VDUs and CDUs
332 deployment_units = (vnfd.get("virtual_deployment_units", []) +
333 vnfd.get("cloudnative_deployment_units", []))
334 # iterate over all deployment units within each VNFDs
335 for u in deployment_units:
336 # 0. vnf_container_name = vnf_id.vdu_id
337 vnf_container_name = get_container_name(vnf_id, u.get("id"))
338 vnf_container_instance_name = get_container_name(vnf_id, u.get("id"), ssiid)
339 # 1. get the name of the docker image to start
340 if vnf_container_name not in self.remote_docker_image_urls:
341 raise Exception("No image name for %r found. Abort." % vnf_container_name)
342 docker_image_name = self.remote_docker_image_urls.get(vnf_container_name)
343 # 2. select datacenter to start the VNF in
344 target_dc = self._place(vnfd, vnf_id, u, ssiid)
345 # 3. perform some checks to ensure we can start the container
346 assert(docker_image_name is not None)
347 assert(target_dc is not None)
348 if not self._check_docker_image_exists(docker_image_name):
349 raise Exception("Docker image {} not found. Abort."
350 .format(docker_image_name))
351
352 # 4. get the resource limits
353 cpu_list, cpu_period, cpu_quota, mem_limit = self._get_resource_limits(u)
354
355 # get connection points defined for the DU
356 intfs = u.get("connection_points", [])
357 # do some re-naming of fields to be compatible to containernet
358 for i in intfs:
359 if i.get("address"):
360 LOG.info("Found static address for {}: {}"
361 .format(i.get("id"), i.get("address")))
362 i["ip"] = i.get("address")
363
364 # get ports and port_bindings from the port and publish fields of CNFD
365 # see: https://github.com/containernet/containernet/wiki/Exposing-and-mapping-network-ports
366 ports = list() # Containernet naming
367 port_bindings = dict()
368 for i in intfs:
369 if i.get("port"): # field with a single port
370 if not isinstance(i.get("port"), int):
371 LOG.info("Field 'port' is no int CP: {}".format(i))
372 else:
373 ports.append(i.get("port")) # collect all ports
374 if i.get("ports"): # list with multiple ports
375 if not isinstance(i.get("ports"), list):
376 LOG.info("Field 'port' is no list CP: {}".format(i))
377 else:
378 for p in i.get("ports"):
379 if not isinstance(p, int):
380 # do some parsing
381 try:
382 if "/udp" in p:
383 p = tuple(p.split("/"))
384 else:
385 p = int(p)
386 ports.append(p) # collect all ports
387 except BaseException as ex:
388 LOG.error(
389 "Could not parse ports list: {}".format(p))
390 LOG.error(ex)
391 else:
392 ports.append(p) # collect all ports
393 if i.get("publish"):
394 if not isinstance(i.get("publish"), dict):
395 LOG.info("Field 'publish' is no dict CP: {}".format(i))
396 else:
397 port_bindings.update(i.get("publish"))
398 # update port mapping for cases where the service is deployed more than once
399 port_bindings = update_port_mapping_multi_instance(ssiid, port_bindings)
400 if len(ports) > 0:
401 LOG.info("{} exposes ports: {}".format(vnf_container_instance_name, ports))
402 if len(port_bindings) > 0:
403 LOG.info("{} publishes ports: {}".format(vnf_container_instance_name, port_bindings))
404
405 # 5. collect additional information to start container
406 volumes = list()
407 cenv = dict()
408 # 5.1 inject descriptor based start/stop commands into env (overwrite)
409 VNFD_CMD_START = u.get("vm_cmd_start")
410 VNFD_CMD_STOP = u.get("vm_cmd_stop")
411 if VNFD_CMD_START and not VNFD_CMD_START == "None":
412 LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_START) +
413 " Overwriting SON_EMU_CMD.")
414 cenv["SON_EMU_CMD"] = VNFD_CMD_START
415 if VNFD_CMD_STOP and not VNFD_CMD_STOP == "None":
416 LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_STOP) +
417 " Overwriting SON_EMU_CMD_STOP.")
418 cenv["SON_EMU_CMD_STOP"] = VNFD_CMD_STOP
419
420 # 5.2 inject per instance configurations based on envs
421 conf_envs = self._load_instance_conf_envs(vnf_container_instance_name)
422 cenv.update(conf_envs)
423
424 # 6. Start the container
425 LOG.info("Starting %r as %r in DC %r" %
426 (vnf_name, vnf_container_instance_name, target_dc))
427 LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
428 # start the container
429 vnfi = target_dc.startCompute(
430 vnf_container_instance_name,
431 network=intfs,
432 image=docker_image_name,
433 cpu_quota=cpu_quota,
434 cpu_period=cpu_period,
435 cpuset_cpus=cpu_list,
436 mem_limit=mem_limit,
437 volumes=volumes,
438 properties=cenv, # environment
439 ports=ports,
440 port_bindings=port_bindings,
441 # only publish if explicitly stated in descriptor
442 publish_all_ports=False,
443 type=kwargs.get('type', 'docker'))
444 # add vnfd reference to vnfi
445 vnfi.vnfd = vnfd
446 # add container name
447 vnfi.vnf_container_name = vnf_container_name
448 vnfi.vnf_container_instance_name = vnf_container_instance_name
449 vnfi.ssiid = ssiid
450 # store vnfi
451 vnfis.append(vnfi)
452 return vnfis
453
454 def _stop_vnfi(self, vnfi):
455 """
456 Stop a VNF instance.
457 :param vnfi: vnf instance to be stopped
458 """
459 # Find the correct datacenter
460 status = vnfi.getStatus()
461 dc = vnfi.datacenter
462 # stop the vnfi
463 LOG.info("Stopping the vnf instance contained in %r in DC %r" %
464 (status["name"], dc))
465 dc.stopCompute(status["name"])
466
467 def _get_vnf_instance(self, instance_uuid, vnf_id):
468 """
469 Returns VNFI object for a given "vnf_id" or "vnf_container_name" taken from an NSD.
470 :return: single object
471 """
472 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
473 if str(vnfi.name) == str(vnf_id):
474 return vnfi
475 LOG.warning("No container with name: {0} found.".format(vnf_id))
476 return None
477
478 def _get_vnf_instance_units(self, instance_uuid, vnf_id):
479 """
480 Returns a list of VNFI objects (all deployment units) for a given
481 "vnf_id" taken from an NSD.
482 :return: list
483 """
484 if vnf_id is None:
485 return None
486 r = list()
487 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
488 if vnf_id in vnfi.name:
489 r.append(vnfi)
490 if len(r) > 0:
491 LOG.debug("Found units: {} for vnf_id: {}"
492 .format([i.name for i in r], vnf_id))
493 return r
494 LOG.warning("No container(s) with name: {0} found.".format(vnf_id))
495 return None
496
497 @staticmethod
498 def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
499 """
500 Reconfigure the network configuration of a specific interface
501 of a running container.
502 :param vnfi: container instance
503 :param if_name: interface name
504 :param net_str: network configuration string, e.g., 1.2.3.4/24
505 :return:
506 """
507 # assign new ip address
508 if net_str is not None:
509 intf = vnfi.intf(intf=if_name)
510 if intf is not None:
511 intf.setIP(net_str)
512 LOG.debug("Reconfigured network of %s:%s to %r" %
513 (vnfi.name, if_name, net_str))
514 else:
515 LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
516 vnfi.name, if_name))
517
518 if new_name is not None:
519 vnfi.cmd('ip link set', if_name, 'down')
520 vnfi.cmd('ip link set', if_name, 'name', new_name)
521 vnfi.cmd('ip link set', new_name, 'up')
522 LOG.debug("Reconfigured interface name of %s:%s to %s" %
523 (vnfi.name, if_name, new_name))
524
525 def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
526 for vnfi in vnfi_list:
527 config = vnfi.dcinfo.get("Config", dict())
528 env = config.get("Env", list())
529 for env_var in env:
530 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
531 if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
532 LOG.info("Executing script in '{}': {}={}"
533 .format(vnfi.name, var, cmd))
534 # execute command in new thread to ensure that GK is not
535 # blocked by VNF
536 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
537 t.daemon = True
538 t.start()
539 break # only execute one command
540
541 def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
542 for vnfi in vnfi_list:
543 config = vnfi.dcinfo.get("Config", dict())
544 env = config.get("Env", list())
545 for env_var in env:
546 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
547 if var == "SON_EMU_CMD_STOP" or var == "VIM_EMU_CMD_STOP":
548 LOG.info("Executing script in '{}': {}={}"
549 .format(vnfi.name, var, cmd))
550 # execute command in new thread to ensure that GK is not
551 # blocked by VNF
552 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
553 t.daemon = True
554 t.start()
555 break # only execute one command
556
557 def _load_instance_conf_envs(self, cname):
558 """
559 Try to load an instance-specific env file. If not found,
560 just return an empty dict.
561 """
562 if PER_INSTANCE_ENV_CONFIGURATION_FOLDER is None:
563 return dict()
564 try:
565 path = os.path.expanduser(PER_INSTANCE_ENV_CONFIGURATION_FOLDER)
566 path = os.path.join(path, "{}.env.yml".format(cname))
567 res = load_yaml(path)
568 LOG.info("Loaded instance-specific env file for '{}': {}"
569 .format(cname, res))
570 return res
571 except BaseException as ex:
572 LOG.info("No instance-specific env file found for: {}"
573 .format(cname))
574 del ex
575 return dict()
576
577 def _unpack_service_package(self):
578 """
579 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
580 """
581 LOG.info("Unzipping: %r" % self.package_file_path)
582 with zipfile.ZipFile(self.package_file_path, "r") as z:
583 z.extractall(self.package_content_path)
584
585 def _load_package_descriptor(self):
586 """
587 Load the main package descriptor YAML and keep it as dict.
588 :return:
589 """
590 self.manifest = load_yaml(
591 os.path.join(
592 self.package_content_path, "TOSCA-Metadata/NAPD.yaml"))
593
594 def _load_nsd(self):
595 """
596 Load the entry NSD YAML and keep it as dict.
597 :return:
598 """
599 if "package_content" in self.manifest:
600 nsd_path = None
601 for f in self.manifest.get("package_content"):
602 if f.get("content-type") == "application/vnd.5gtango.nsd":
603 nsd_path = os.path.join(
604 self.package_content_path,
605 make_relative_path(f.get("source")))
606 break # always use the first NSD for now
607 if nsd_path is None:
608 raise OnBoardingException("No NSD with type 'application/vnd.5gtango.nsd' found.")
609 self.nsd = load_yaml(nsd_path)
610 GK.net.deployed_nsds.append(self.nsd) # TODO this seems strange (remove?)
611 LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
612 else:
613 raise OnBoardingException(
614 "No 'package_content' section in package manifest:\n{}"
615 .format(self.manifest))
616
617 def _load_vnfd(self):
618 """
619 Load all VNFD YAML files referenced in the package manifest and keep them in a dict.
620 :return:
621 """
622 # first make a list of all the vnfds in the package
623 vnfd_set = dict()
624 if "package_content" in self.manifest:
625 for pc in self.manifest.get("package_content"):
626 if pc.get(
627 "content-type") == "application/vnd.5gtango.vnfd":
628 vnfd_path = os.path.join(
629 self.package_content_path,
630 make_relative_path(pc.get("source")))
631 vnfd = load_yaml(vnfd_path)
632 vnfd_set[vnfd.get("name")] = vnfd
633 if len(vnfd_set) < 1:
634 raise OnBoardingException("No VNFDs found.")
635 # then link each vnf_id in the nsd to its vnfd
636 for v in self.nsd.get("network_functions"):
637 if v.get("vnf_name") in vnfd_set:
638 self.vnfds[v.get("vnf_id")] = vnfd_set[v.get("vnf_name")]
639 LOG.debug("Loaded VNFD: {0} id: {1}"
640 .format(v.get("vnf_name"), v.get("vnf_id")))
641
642 def _connect_elines(self, eline_fwd_links, instance_uuid, subnets):
643 """
644 Connect all E-LINE links in the NSD
645 Attention: This method DOES NOT support multi V/CDU VNFs!
646 :param eline_fwd_links: list of E-LINE links in the NSD
647 :param: instance_uuid of the service
648 :param: subnets list of subnets to be used
649 :return:
650 """
651 # the cookie is used as an identifier for the flow rules installed by the dummy gatekeeper,
652 # e.g., different services get a unique cookie for their flow rules
653 cookie = 1
654 for link in eline_fwd_links:
655 LOG.info("Found E-Line: {}".format(link))
656 src_id, src_if_name = parse_interface(
657 link["connection_points_reference"][0])
658 dst_id, dst_if_name = parse_interface(
659 link["connection_points_reference"][1])
660 LOG.info("Searching C/VDU for E-Line: src={}, src_if={}, dst={}, dst_if={}"
661 .format(src_id, src_if_name, dst_id, dst_if_name))
662 # handle C/VDUs (ugly hack, only one V/CDU per VNF for now)
663 src_units = self._get_vnf_instance_units(instance_uuid, src_id)
664 dst_units = self._get_vnf_instance_units(instance_uuid, dst_id)
665 if src_units is None or dst_units is None:
666 LOG.info("No VNF-VNF link. Skipping: src={}, src_if={}, dst={}, dst_if={}"
667 .format(src_id, src_if_name, dst_id, dst_if_name))
668 return
669 # we only support VNFs with one V/CDU right now
670 if len(src_units) != 1 or len(dst_units) != 1:
671 raise BaseException("LLCM does not support E-LINES for multi V/CDU VNFs.")
672 # get the full name from that C/VDU and use it as src_id and dst_id
673 src_id = src_units[0].name
674 dst_id = dst_units[0].name
675 # from here we have all info we need
676 LOG.info("Creating E-Line for C/VDU: src={}, src_if={}, dst={}, dst_if={}"
677 .format(src_id, src_if_name, dst_id, dst_if_name))
678 # get involved vnfis
679 src_vnfi = src_units[0]
680 dst_vnfi = dst_units[0]
681 # proceed with chaining setup
682 setChaining = False
683 if src_vnfi is not None and dst_vnfi is not None:
684 setChaining = True
685 # re-configure the VNFs IP assignment and ensure that a new
686 # subnet is used for each E-Link
687 eline_net = subnets.pop(0)
688 ip1 = "{0}/{1}".format(str(eline_net[1]),
689 eline_net.prefixlen)
690 ip2 = "{0}/{1}".format(str(eline_net[2]),
691 eline_net.prefixlen)
692 # check if VNFs have fixed IPs (ip/address field in VNFDs)
693 if (self._get_vnfd_cp_from_vnfi(
694 src_vnfi, src_if_name).get("ip") is None and
695 self._get_vnfd_cp_from_vnfi(
696 src_vnfi, src_if_name).get("address") is None):
697 self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
698 # check if VNFs have fixed IPs (ip/address field in VNFDs)
699 if (self._get_vnfd_cp_from_vnfi(
700 dst_vnfi, dst_if_name).get("ip") is None and
701 self._get_vnfd_cp_from_vnfi(
702 dst_vnfi, dst_if_name).get("address") is None):
703 self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
704 # set the chaining
705 if setChaining:
706 GK.net.setChain(
707 src_id, dst_id,
708 vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
709 bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
710
711 def _get_vnfd_cp_from_vnfi(self, vnfi, ifname):
712 """
713 Gets the connection point data structure from the VNFD
714 of the given VNFI using ifname.
715 """
716 if vnfi.vnfd is None:
717 return {}
718 cps = vnfi.vnfd.get("connection_points")
719 for cp in cps:
720 if cp.get("id") == ifname:
721 return cp
722
723 def _connect_elans(self, elan_fwd_links, instance_uuid, subnets):
724 """
725 Connect all E-LAN/E-Tree links in the NSD
726 This method supports multi-V/CDU VNFs if the connection
727 point names of the DUs are the same as the ones in the NSD.
728 :param elan_fwd_links: list of E-LAN links in the NSD
729 :param: instance_uuid of the service
730 :param: subnets list of subnets to be used
731 :return:
732 """
733 for link in elan_fwd_links:
734 # a new E-LAN/E-Tree
735 elan_vnf_list = []
736 lan_net = subnets.pop(0)
737 lan_hosts = list(lan_net.hosts())
738
739 # generate LAN IP addresses for all interfaces (of all involved V/CDUs)
740 for intf_ref in link["connection_points_reference"]:
741 vnf_id, intf_name = parse_interface(intf_ref)
742 if vnf_id is None:
743 continue # skip references to NS connection points
744 units = self._get_vnf_instance_units(instance_uuid, vnf_id)
745 if units is None:
746 continue # skip if no deployment unit is present
747 # iterate over all involved deployment units
748 for uvnfi in units:
749 # Attention: we apply a simplification for multi DU VNFs here:
750 # the connection points of all involved DUs have to have the same
751 # name as the connection points of the surrounding VNF to be mapped.
752 # This is because we do not consider links specified in the VNFDs
753 container_name = uvnfi.name
754
755 ip_address = None
756 # get the interface of the unit
757 intf = self._get_vnfd_cp_from_vnfi(uvnfi, intf_name)
758 # check if there is a manually assigned address
759 if intf is not None:
760 if intf.get("address"):
761 ip_address = intf.get("address")
762 if ip_address is None:
763 # automatically assign an IP from our pool
764 ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
765 lan_net.prefixlen)
766 LOG.debug(
767 "Setting up E-LAN/E-Tree interface. (%s:%s) -> %s" % (
768 container_name, intf_name, ip_address))
769 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
770 # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
771 # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is
772 # necessary.
773 vnfi = self._get_vnf_instance(instance_uuid, container_name)
774 if vnfi is not None:
775 self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
776 # add this vnf and interface to the E-LAN for tagging
777 elan_vnf_list.append(
778 {'name': container_name, 'interface': intf_name})
779 # install the VLAN tags for this E-LAN
780 GK.net.setLAN(elan_vnf_list)
781
782 def _load_docker_files(self):
783 """
784 Get all paths to Dockerfiles from VNFDs and store them in dict.
785 :return:
786 """
787 for vnf_id, v in self.vnfds.iteritems():
788 for vu in v.get("virtual_deployment_units", []):
789 vnf_container_name = get_container_name(vnf_id, vu.get("id"))
790 if vu.get("vm_image_format") == "docker":
791 vm_image = vu.get("vm_image")
792 docker_path = os.path.join(
793 self.package_content_path,
794 make_relative_path(vm_image))
795 self.local_docker_files[vnf_container_name] = docker_path
796 LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
797 for cu in v.get("cloudnative_deployment_units", []):
798 vnf_container_name = get_container_name(vnf_id, cu.get("id"))
799 image = cu.get("image")
800 docker_path = os.path.join(
801 self.package_content_path,
802 make_relative_path(image))
803 self.local_docker_files[vnf_container_name] = docker_path
804 LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
805
806 def _load_docker_urls(self):
807 """
808 Get all URLs to pre-built Docker images in some repository.
809 :return:
810 """
811 for vnf_id, v in self.vnfds.iteritems():
812 for vu in v.get("virtual_deployment_units", []):
813 vnf_container_name = get_container_name(vnf_id, vu.get("id"))
814 if vu.get("vm_image_format") == "docker":
815 url = vu.get("vm_image")
816 if url is not None:
817 url = url.replace("http://", "")
818 self.remote_docker_image_urls[vnf_container_name] = url
819 LOG.debug("Found Docker image URL (%r): %r" %
820 (vnf_container_name,
821 self.remote_docker_image_urls[vnf_container_name]))
822 for cu in v.get("cloudnative_deployment_units", []):
823 vnf_container_name = get_container_name(vnf_id, cu.get("id"))
824 url = cu.get("image")
825 if url is not None:
826 url = url.replace("http://", "")
827 self.remote_docker_image_urls[vnf_container_name] = url
828 LOG.debug("Found Docker image URL (%r): %r" %
829 (vnf_container_name,
830 self.remote_docker_image_urls[vnf_container_name]))
831
832 def _build_images_from_dockerfiles(self):
833 """
834 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
835 """
836 if GK_STANDALONE_MODE:
837 return # do not build anything in standalone mode
838 dc = DockerClient()
839 LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
840 self.local_docker_files))
841 for k, v in self.local_docker_files.iteritems():
842 for line in dc.build(path=v.replace(
843 "Dockerfile", ""), tag=k, rm=False, nocache=False):
844 LOG.debug("DOCKER BUILD: %s" % line)
845 LOG.info("Docker image created: %s" % k)
846
847 def _pull_predefined_dockerimages(self):
848 """
849 If the package contains URLs to pre-built Docker images, we download them with this method.
850 """
851 dc = DockerClient()
852 for url in self.remote_docker_image_urls.itervalues():
853 # only pull if not present (speedup for development)
854 if not FORCE_PULL:
855 if len(dc.images.list(name=url)) > 0:
856 LOG.debug("Image %r present. Skipping pull." % url)
857 continue
858 LOG.info("Pulling image: %r" % url)
859 # this seems to fail with latest docker api version 2.0.2
860 # dc.images.pull(url,
861 # insecure_registry=True)
862 # using docker cli instead
863 cmd = ["docker",
864 "pull",
865 url,
866 ]
867 Popen(cmd).wait()
868
869 def _check_docker_image_exists(self, image_name):
870 """
871 Query the docker service and check if the given image exists
872 :param image_name: name of the docker image
873 :return:
874 """
875 return len(DockerClient().images.list(name=image_name)) > 0
876
877 def _place(self, vnfd, vnfid, vdu, ssiid):
878 """
879 Do placement. Return the DC object in which the
880 given VDU should be placed.
881 """
882 assert(len(self.vnfds) > 0)
883 assert(len(GK.dcs) > 0)
884 if PLACEMENT_ALGORITHM_OBJ is None:
885 LOG.error("No placement algorithm given. Using FirstDcPlacement!")
886 p = FirstDcPlacement()
887 else:
888 p = PLACEMENT_ALGORITHM_OBJ
889 cname = get_container_name(vnfid, vdu.get("id"), ssiid)
890 rdc = p.place(GK.dcs, vnfd, vnfid, vdu, ssiid, cname)
891 LOG.info("Placement: '{}' --> '{}'".format(cname, rdc))
892 return rdc
893
894 def _calculate_cpu_cfs_values(self, cpu_time_percentage):
895 """
896 Calculate cpu period and quota for CFS
897 :param cpu_time_percentage: percentage of overall CPU to be used
898 :return: cpu_period, cpu_quota
899 """
900 if cpu_time_percentage is None:
901 return -1, -1
902 if cpu_time_percentage < 0:
903 return -1, -1
904 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
905 # Attention: minimum cpu_quota is 1000 microseconds (1 ms)
906 cpu_period = 1000000 # let's consider a fixed period of 1000000 microseconds for now
907 LOG.debug("cpu_period is %r, cpu_percentage is %r" %
908 (cpu_period, cpu_time_percentage))
909 # calculate the fraction of cpu time for this container
910 cpu_quota = cpu_period * cpu_time_percentage
911 # ATTENTION >= 1000 to avoid a invalid argument system error ... no
912 # idea why
913 if cpu_quota < 1000:
914 LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
915 cpu_quota = 1000
916 LOG.warning("Increased CPU quota to avoid system error.")
917 LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
918 (cpu_period, cpu_quota))
919 return int(cpu_period), int(cpu_quota)
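# Illustrative example: _calculate_cpu_cfs_values(0.25) returns
# (1000000, 250000), i.e., the container may use 25% of one CPU;
# quotas below 1000 are raised to 1000 to avoid the system error above.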
920
921
922 """
923 Some (simple) placement algorithms
924 """
925
926
927 class FirstDcPlacement(object):
928 """
929 Placement: Always use the same (first) data center from the GK.dcs dict.
930 """
931
932 def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
933 return list(dcs.itervalues())[0]
934
935
936 class RoundRobinDcPlacement(object):
937 """
938 Placement: Distribute VNFs across all available DCs in a round robin fashion.
939 """
940
941 def __init__(self):
942 self.count = 0
943
944 def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
945 dcs_list = list(dcs.itervalues())
946 rdc = dcs_list[self.count % len(dcs_list)]
947 self.count += 1 # inc. count to use next DC
948 return rdc
949
950
951 class StaticConfigPlacement(object):
952 """
953 Placement: Fixed assignment based on config file.
954 """
955
956 def __init__(self, path=None):
957 if path is None:
958 path = "static_placement.yml"
959 path = os.path.expanduser(path)
960 self.static_placement = dict()
961 try:
962 self.static_placement = load_yaml(path)
963 except BaseException as ex:
964 LOG.error(ex)
965 LOG.error("Couldn't load placement from {}"
966 .format(path))
967 LOG.info("Loaded static placement: {}"
968 .format(self.static_placement))
969
970 def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
971 # check for container name entry
972 if cname not in self.static_placement:
973 LOG.error("Coudn't find {} in placement".format(cname))
974 LOG.error("Using first DC as fallback!")
975 return list(dcs.itervalues())[0]
976 # lookup
977 candidate_dc = self.static_placement.get(cname)
978 # check if DC exists
979 if candidate_dc not in dcs:
980 LOG.error("Coudn't find DC {}".format(candidate_dc))
981 LOG.error("Using first DC as fallback!")
982 return list(dcs.itervalues())[0]
983 # return correct DC
984 return dcs.get(candidate_dc)
985
986
987 """
988 Resource definitions and API endpoints
989 """
990
991
992 class Packages(fr.Resource):
993
994 def post(self):
995 """
996 Upload a *.son service package to the dummy gatekeeper.
997
998 We expect a request with a *.son file and store it in UPLOAD_FOLDER.
999 :return: UUID
1000 """
1001 try:
1002 # get file contents
1003 LOG.info("POST /packages called")
1004 # let's search for the package in the request
1005 is_file_object = False # make API more robust: file can be in data or in files field
1006 if "package" in request.files:
1007 son_file = request.files["package"]
1008 is_file_object = True
1009 elif len(request.data) > 0:
1010 son_file = request.data
1011 else:
1012 return {"service_uuid": None, "size": 0, "sha1": None,
1013 "error": "upload failed. file not found."}, 500
1014 # generate a uuid to reference this package
1015 service_uuid = str(uuid.uuid4())
1016 file_hash = hashlib.sha1(str(son_file)).hexdigest()
1017 # ensure that upload folder exists
1018 ensure_dir(UPLOAD_FOLDER)
1019 upload_path = os.path.join(UPLOAD_FOLDER, "%s.tgo" % service_uuid)
1020 # store *.son file to disk
1021 if is_file_object:
1022 son_file.save(upload_path)
1023 else:
1024 with open(upload_path, 'wb') as f:
1025 f.write(son_file)
1026 size = os.path.getsize(upload_path)
1027
1028 # first stop and delete any other running services
1029 if AUTO_DELETE:
1030 service_list = copy.copy(GK.services)
1031 for old_service_uuid in service_list:  # avoid shadowing the new service_uuid
1032 instances_list = copy.copy(
1033 GK.services[old_service_uuid].instances)
1034 for instance_uuid in instances_list:
1035 # valid service and instance UUID, stop service
1036 GK.services.get(old_service_uuid).stop_service(
1037 instance_uuid)
1038 LOG.info("service instance with uuid %r stopped." %
1039 instance_uuid)
1040
1041 # create a service object and register it
1042 s = Service(service_uuid, file_hash, upload_path)
1043 GK.register_service_package(service_uuid, s)
1044
1045 # automatically deploy the service
1046 if AUTO_DEPLOY:
1047 # ok, we have a service uuid, let's start the service
1048 reset_subnets()
1049 GK.services.get(service_uuid).start_service()
1050
1051 # generate the JSON result
1052 return {"service_uuid": service_uuid, "size": size,
1053 "sha1": file_hash, "error": None}, 201
1054 except BaseException:
1055 LOG.exception("Service package upload failed:")
1056 return {"service_uuid": None, "size": 0,
1057 "sha1": None, "error": "upload failed"}, 500
1058
1059 def get(self):
1060 """
1061 Return a list of package descriptor headers.
1062 Fakes the behavior of 5GTANGO's GK API to be
1063 compatible with tng-cli.
1064 :return: list
1065 """
1066 LOG.info("GET /packages")
1067 result = list()
1068 for suuid, sobj in GK.services.iteritems():
1069 pkg = dict()
1070 pkg["pd"] = dict()
1071 pkg["uuid"] = suuid
1072 pkg["pd"]["name"] = sobj.manifest.get("name")
1073 pkg["pd"]["version"] = sobj.manifest.get("version")
1074 pkg["created_at"] = sobj.created_at
1075 result.append(pkg)
1076 return result, 200, CORS_HEADER
1077
1078
1079 class Services(fr.Resource):
1080
1081 def get(self):
1082 """
1083 Return a list of services.
1084 Fakes the behavior of 5GTANGO's GK API to be
1085 compatible with tng-cli.
1086 :return: list
1087 """
1088 LOG.info("GET /services")
1089 result = list()
1090 for suuid, sobj in GK.services.iteritems():
1091 service = dict()
1092 service["nsd"] = dict()
1093 service["uuid"] = suuid
1094 service["nsd"]["name"] = sobj.nsd.get("name")
1095 service["nsd"]["version"] = sobj.nsd.get("version")
1096 service["created_at"] = sobj.created_at
1097 result.append(service)
1098 return result, 200, CORS_HEADER
1099
1100
1101 class Instantiations(fr.Resource):
1102
1103 def post(self):
1104 """
1105 Instantiate a service specified by its UUID.
1106 Will return a new UUID to identify the running service instance.
1107 :return: UUID
1108 """
1109 LOG.info("POST /instantiations (or /requests) called")
1110 # try to extract the service uuid from the request
1111 json_data = request.get_json(force=True)
1112 service_uuid = json_data.get("service_uuid")
1113 service_name = json_data.get("service_name")
1114 if service_name is None:
1115 # let's be fuzzy
1116 service_name = service_uuid
1117 # first try to find by service_name
1118 if service_name is not None:
1119 for s_uuid, s in GK.services.iteritems():
1120 if s.manifest.get("name") == service_name:
1121 LOG.info("Searched for: {}. Found service w. UUID: {}"
1122 .format(service_name, s_uuid))
1123 service_uuid = s_uuid
1124 # let's be a bit fuzzy here to make testing easier
1125 if (service_uuid is None or service_uuid ==
1126 "latest") and len(GK.services) > 0:
1127 # if we don't get a service uuid, we simply start the first service
1128 # in the list
1129 service_uuid = list(GK.services.iterkeys())[0]
1130 if service_uuid in GK.services:
1131 # ok, we have a service uuid, let's start the service
1132 service_instance_uuid = GK.services.get(
1133 service_uuid).start_service()
1134 # multiple ID fields to be compatible with tng-bench and tng-cli
1135 return ({"service_instance_uuid": service_instance_uuid,
1136 "id": service_instance_uuid}, 201)
1137 LOG.error("Service not found: {}/{}".format(service_uuid, service_name))
1138 return "Service not found", 404
1139
1140 def get(self):
1141 """
1142 Returns a list of all running service instances.
1143 :return: dict / list
1144 """
1145 LOG.debug("GET /instantiations or /api/v3/records/services")
1146 # return {"service_instantiations_list": [
1147 # list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
1148 result = list()
1149 for suuid, sobj in GK.services.iteritems():
1150 for iuuid, iobj in sobj.instances.iteritems():
1151 inst = dict()
1152 inst["uuid"] = iobj.get("uuid")
1153 inst["instance_name"] = "{}-inst.{}".format(
1154 iobj.get("name"), iobj.get("ssiid"))
1155 inst["status"] = "running"
1156 inst["created_at"] = iobj.get("created_at")
1157 result.append(inst)
1158 return result, 200, CORS_HEADER
1159
1160 def delete(self):
1161 """
1162 Stops a running service specified by its service and instance UUID.
1163 """
1164 # try to extract the service and instance UUID from the request
1165 json_data = request.get_json(force=True)
1166 service_uuid_input = json_data.get("service_uuid")
1167 instance_uuid_input = json_data.get("service_instance_uuid")
1168 if len(GK.services) < 1:
1169 return "No service on-boarded.", 404
1170 # try to be fuzzy
1171 if service_uuid_input is None:
1172 # if we don't get a service uuid we stop all services
1173 service_uuid_list = list(GK.services.iterkeys())
1174 LOG.info("No service_uuid given, stopping all.")
1175 else:
1176 service_uuid_list = [service_uuid_input]
1177 # for each service
1178 for service_uuid in service_uuid_list:
1179 if instance_uuid_input is None:
1180 instance_uuid_list = list(
1181 GK.services[service_uuid].instances.iterkeys())
1182 else:
1183 instance_uuid_list = [instance_uuid_input]
1184 # for all service instances
1185 for instance_uuid in instance_uuid_list:
1186 if (service_uuid in GK.services and
1187 instance_uuid in GK.services[service_uuid].instances):
1188 # valid service and instance UUID, stop service
1189 GK.services.get(service_uuid).stop_service(instance_uuid)
1190 LOG.info("Service instance with uuid %r stopped." % instance_uuid)
1191 return "Service(s) stopped.", 200
1192
1193
1194 class Exit(fr.Resource):
1195
1196 def put(self):
1197 """
1198 Stop the running Containernet instance regardless of data transmitted
1199 """
1200 list(GK.dcs.values())[0].net.stop()
1201
1202
1203 def generate_subnets(prefix, base, subnet_size=50, mask=24):
1204 # Generate a list of ipaddress in subnets
1205 r = list()
1206 for net in range(base, base + subnet_size):
1207 subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
1208 r.append(ipaddress.ip_network(unicode(subnet)))
1209 return r
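# Illustrative example: generate_subnets('30.0', 0, subnet_size=50, mask=24)
# returns [30.0.0.0/24, 30.0.1.0/24, ..., 30.0.49.0/24] as ipaddress objects.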
1210
1211
1212 def reset_subnets():
1213 global ELINE_SUBNETS
1214 global ELAN_SUBNETS
1215 # private subnet definitions for the generated interfaces
1216 # 30.0.xxx.0/24
1217 ELAN_SUBNETS = generate_subnets('30.0', 0, subnet_size=50, mask=24)
1218 # 20.0.xxx.0/24
1219 ELINE_SUBNETS = generate_subnets('20.0', 0, subnet_size=50, mask=24)
1220
1221
1222 def initialize_GK():
1223 global GK
1224 GK = Gatekeeper()
1225
1226
1227 # create a single, global GK object
1228 GK = None
1229 initialize_GK()
1230 # setup Flask
1231 http_server = None
1232 app = Flask(__name__)
1233 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
1234 api = fr.Api(app)
1235 # define endpoints
1236 api.add_resource(Packages, '/packages', '/api/v2/packages', '/api/v3/packages')
1237 api.add_resource(Services, '/services', '/api/v2/services', '/api/v3/services')
1238 api.add_resource(Instantiations, '/instantiations',
1239 '/api/v2/instantiations', '/api/v2/requests', '/api/v3/requests',
1240 '/api/v3/records/services')
1241 api.add_resource(Exit, '/emulator/exit')
1242
1243
1244 def start_rest_api(host, port, datacenters=dict()):
1245 global http_server
1246 GK.dcs = datacenters
1247 GK.net = get_dc_network()
1248 # start the Flask server (not the best performance but ok for our use case)
1249 # app.run(host=host,
1250 # port=port,
1251 # debug=True,
1252 # use_reloader=False # this is needed to run Flask in a non-main thread
1253 # )
1254 http_server = WSGIServer((host, port), app, log=open("/dev/null", "w"))
1255 http_server.serve_forever()
1256
1257
1258 def stop_rest_api():
1259 if http_server:
1260 http_server.close()
1261
1262
1263 def ensure_dir(name):
1264 if not os.path.exists(name):
1265 os.makedirs(name)
1266
1267
1268 def load_yaml(path):
1269 with open(path, "r") as f:
1270 try:
1271 r = yaml.load(f)
1272 except yaml.YAMLError as exc:
1273 LOG.exception("YAML parse error: %r" % str(exc))
1274 r = dict()
1275 return r
1276
1277
1278 def make_relative_path(path):
1279 if path.startswith("file://"):
1280 path = path.replace("file://", "", 1)
1281 if path.startswith("/"):
1282 path = path.replace("/", "", 1)
1283 return path
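# Illustrative example: make_relative_path("file:///docker/Dockerfile")
# returns "docker/Dockerfile".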
1284
1285
1286 def get_dc_network():
1287 """
1288 Retrieve the DCNetwork that this dummy gatekeeper (GK) is connected to.
1289 Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
1290 :return:
1291 """
1292 assert (len(GK.dcs) > 0)
1293 return GK.dcs.values()[0].net
1294
1295
1296 def parse_interface(interface_name):
1297 """
1298 Convert the interface name in the NSD to the corresponding vnf_id and vnf_interface names.
1299 :param interface_name:
1300 :return:
1301 """
1302 if ':' in interface_name:
1303 vnf_id, vnf_interface = interface_name.split(':')
1304 else:
1305 vnf_id = None
1306 vnf_interface = interface_name
1307 return vnf_id, vnf_interface
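# Illustrative example: parse_interface("vnf0:output") returns ("vnf0", "output"),
# while parse_interface("mgmt") returns (None, "mgmt").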
1308
1309
1310 def get_container_name(vnf_id, vdu_id, ssiid=None):
1311 if ssiid is not None:
1312 return "{}.{}.{}".format(vnf_id, vdu_id, ssiid)
1313 return "{}.{}".format(vnf_id, vdu_id)
1314
1315
1316 def get_triple_id(descr):
1317 return "{}.{}.{}".format(
1318 descr.get("vendor"), descr.get("name"), descr.get("version"))
1319
1320
1321 def update_port_mapping_multi_instance(ssiid, port_bindings):
1322 """
1323 Port_bindings are used to expose ports of the deployed containers.
1324 They would collide if we deploy multiple service instances.
1325 This function adds an offset to them which is based on the
1326 short service instance id (SSIID) and the global
1327 MULTI_INSTANCE_PORT_OFFSET.
1328 """
1329 def _offset(p):
1330 return p + MULTI_INSTANCE_PORT_OFFSET * ssiid
1331
1332 port_bindings = {k: _offset(v) for k, v in port_bindings.iteritems()}
1333 return port_bindings
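# Illustrative example: update_port_mapping_multi_instance(2, {8080: 8080})
# returns {8080: 10080} with the default MULTI_INSTANCE_PORT_OFFSET of 1000.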
1334
1335
1336 if __name__ == '__main__':
1337 """
1338 Let's allow running the API in standalone mode.
1339 """
1340 GK_STANDALONE_MODE = True
1341 logging.getLogger("werkzeug").setLevel(logging.INFO)
1342 start_rest_api("0.0.0.0", 8000)