5GTANGO LLCM: New placement algo. integration.
[osm/vim-emu.git] / src / emuvim / api / tango / llcm.py
1
2 # Copyright (c) 2018 SONATA-NFV, 5GTANGO and Paderborn University
3 # ALL RIGHTS RESERVED.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 # Neither the name of the SONATA-NFV, 5GTANGO, Paderborn University
18 # nor the names of its contributors may be used to endorse or promote
19 # products derived from this software without specific prior written
20 # permission.
21 #
22 # This work has been performed in the framework of the SONATA project,
23 # funded by the European Commission under Grant number 671517 through
24 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 # acknowledge the contributions of their colleagues of the SONATA
26 # partner consortium (www.sonata-nfv.eu).
27 #
28 # This work has also been performed in the framework of the 5GTANGO project,
29 # funded by the European Commission under Grant number 761493 through
30 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
31 # acknowledge the contributions of their colleagues of the 5GTANGO
32 # partner consortium (www.5gtango.eu).
33 import logging
34 import os
35 import uuid
36 import hashlib
37 import zipfile
38 import yaml
39 import threading
40 import datetime
41 from docker import DockerClient
42 from flask import Flask, request
43 import flask_restful as fr
44 from gevent.pywsgi import WSGIServer
45 from subprocess import Popen
46 import ipaddress
47 import copy
48 import time
49
50
51 LOG = logging.getLogger("5gtango.llcm")
52 LOG.setLevel(logging.INFO)
53
54
55 GK_STORAGE = "/tmp/vim-emu-tango-llcm/"
56 UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
57 CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
58
59 # Enable Dockerfile build functionality
60 BUILD_DOCKERFILE = False
61
62 # flag to indicate that we run without the emulator (only the bare API for
63 # integration testing)
64 GK_STANDALONE_MODE = False
65
66 # should a new version of an image be pulled even if it is already available
67 FORCE_PULL = False
68
69 # flag to indicate if we use bidirectional forwarding rules in the
70 # automatic chaining process
71 BIDIRECTIONAL_CHAIN = True
72
73 # override the management interfaces in the descriptors with default
74 # docker0 interfaces in the containers
75 USE_DOCKER_MGMT = False
76
77 # automatically deploy uploaded packages (no need to execute son-access
78 # deploy --latest separately)
79 AUTO_DEPLOY = False
80
81 # and also automatically terminate any other running services
82 AUTO_DELETE = False
83
84 # global subnet definitions (see reset_subnets())
85 ELAN_SUBNETS = None
86 ELINE_SUBNETS = None
87
88 # Time in seconds to wait for vnf stop scripts to execute fully
89 VNF_STOP_WAIT_TIME = 5
90
91 # If services are instantiated multiple times, the public port
92 mappings need to be adapted to avoid collisions. We use this
93 offset for this: NEW_PORT = (SSIID * OFFSET) + ORIGINAL_PORT
94 MULTI_INSTANCE_PORT_OFFSET = 1000
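# Illustrative example (not executed): with MULTI_INSTANCE_PORT_OFFSET = 1000,
# the second instance (SSIID = 1) of a service that publishes port 8080 would
# expose it as (1 * 1000) + 8080 = 9080 on the host.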
95
96
97 # Selected Placement Algorithm: Points to the class of the selected
98 # placement algorithm.
99 PLACEMENT_ALGORITHM_OBJ = None
100
101
102 class OnBoardingException(BaseException):
103 pass
104
105
106 class Gatekeeper(object):
107
108 def __init__(self):
109 self.services = dict()
110 self.dcs = dict()
111 self.net = None
112 # used to generate short names for VNFs (Mininet limitation)
113 self.vnf_counter = 0
114 reset_subnets()
115 LOG.info("Initialized 5GTANGO LLCM module.")
116
117 def register_service_package(self, service_uuid, service):
118 """
119 Register a new service package.
120 :param service_uuid: UUID of the new service package
121 :param service: the Service object to be registered
122 """
123 self.services[service_uuid] = service
124 # let's perform all steps needed to onboard the service
125 service.onboard()
126
127
128 class Service(object):
129 """
130 This class represents an NS uploaded as a *.son package to the
131 dummy gatekeeper.
132 It can have multiple running instances of this service.
133 """
134
135 def __init__(self,
136 service_uuid,
137 package_file_hash,
138 package_file_path):
139 self.uuid = service_uuid
140 self.package_file_hash = package_file_hash
141 self.package_file_path = package_file_path
142 self.package_content_path = os.path.join(
143 CATALOG_FOLDER, "services/%s" % self.uuid)
144 self.manifest = None
145 self.nsd = None
146 self.vnfds = dict()
147 self.local_docker_files = dict()
148 self.remote_docker_image_urls = dict()
149 self.instances = dict()
150 self._instance_counter = 0
151 self.created_at = str(datetime.datetime.now())
152
153 def onboard(self):
154 """
155 Do all steps to prepare this service to be instantiated
156 :return:
157 """
158 # 1. extract the contents of the package and store them in our catalog
159 self._unpack_service_package()
160 # 2. read in all descriptor files
161 self._load_package_descriptor()
162 self._load_nsd()
163 self._load_vnfd()
164 if self.nsd is None:
165 raise OnBoardingException("No NSD found.")
166 if len(self.vnfds) < 1:
167 raise OnBoardingException("No VNFDs found.")
168 # 3. prepare container images (e.g. download or build Dockerfile)
169 if BUILD_DOCKERFILE:
170 self._load_docker_files()
171 self._build_images_from_dockerfiles()
172 else:
173 self._load_docker_urls()
174 self._pull_predefined_dockerimages()
175 # 4. reserve subnets
176 eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
177 self.eline_subnets = [ELINE_SUBNETS.pop(0) for _ in eline_fwd_links]
178 self.elan_subnets = [ELAN_SUBNETS.pop(0) for _ in elan_fwd_links]
179 LOG.debug("Reserved subnets for service '{}': E-Line: {} / E-LAN: {}"
180 .format(self.manifest.get("name"),
181 self.eline_subnets, self.elan_subnets))
182 LOG.info("On-boarded service: {}".format(self.manifest.get("name")))
183
184 def start_service(self):
185 """
186 This method creates and starts a new service instance.
187 It computes placements, iterates over all VNFDs, and starts
188 each VNFD as a Docker container in the data center selected
189 by the placement algorithm.
190 :return:
191 """
192 LOG.info("Starting service {} ({})"
193 .format(get_triple_id(self.nsd), self.uuid))
194
195 # 1. each service instance gets a new uuid to identify it
196 instance_uuid = str(uuid.uuid4())
197 # build an instance dict (a bit like an NSR :))
198 self.instances[instance_uuid] = dict()
199 self.instances[instance_uuid]["uuid"] = self.uuid
200 # SSIID = short service instance ID (to postfix Container names)
201 self.instances[instance_uuid]["ssiid"] = self._instance_counter
202 self.instances[instance_uuid]["name"] = get_triple_id(self.nsd)
203 self.instances[instance_uuid]["vnf_instances"] = list()
204 self.instances[instance_uuid]["created_at"] = str(datetime.datetime.now())
205 # increase for next instance
206 self._instance_counter += 1
207
208 # 3. start all vnfds that we have in the service
209 for vnf_id in self.vnfds:
210 vnfd = self.vnfds[vnf_id]
211 # attention: returns a list of started deployment units
212 vnfis = self._start_vnfd(
213 vnfd, vnf_id, self.instances[instance_uuid]["ssiid"])
214 # add list of VNFIs to total VNFI list
215 self.instances[instance_uuid]["vnf_instances"].extend(vnfis)
216
217 # 4. Deploy E-Line, E-Tree and E-LAN links
218 # Attention: Only done if a "forwarding_graphs" section exists in the NSD,
219 # even if "forwarding_graphs" are not used directly.
220 # Attention 2: Making a copy of *_subnets with list() is important here!
221 eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
222 # 5a. deploy E-Line links
223 GK.net.deployed_elines.extend(eline_fwd_links) # bookkeeping
224 self._connect_elines(eline_fwd_links, instance_uuid, list(self.eline_subnets))
225 # 5b. deploy E-Tree/E-LAN links
226 GK.net.deployed_elans.extend(elan_fwd_links) # bookkeeping
227 self._connect_elans(elan_fwd_links, instance_uuid, list(self.elan_subnets))
228
229 # 6. run the emulator specific entrypoint scripts in the VNFIs of this
230 # service instance
231 self._trigger_emulator_start_scripts_in_vnfis(
232 self.instances[instance_uuid]["vnf_instances"])
233 # done
234 LOG.info("Service '{}' started. Instance id: {} SSIID: {}"
235 .format(self.instances[instance_uuid]["name"],
236 instance_uuid,
237 self.instances[instance_uuid]["ssiid"]))
238 return instance_uuid
239
240 def stop_service(self, instance_uuid):
241 """
242 This method stops a running service instance.
243 It iterates over all VNF instances, stopping them each
244 and removing them from their data center.
245 :param instance_uuid: the uuid of the service instance to be stopped
246 """
247 LOG.info("Stopping service %r" % self.uuid)
248 # get relevant information
249 # instance_uuid = str(self.uuid.uuid4())
250 vnf_instances = self.instances[instance_uuid]["vnf_instances"]
251 # trigger stop scripts in vnf instances and wait a few seconds for
252 # completion
253 self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
254 time.sleep(VNF_STOP_WAIT_TIME)
255 # stop all vnfs
256 for v in vnf_instances:
257 self._stop_vnfi(v)
258 # last step: remove the instance from the list of all instances
259 del self.instances[instance_uuid]
260
261 def _get_elines_and_elans(self):
262 """
263 Get the E-Line, E-LAN, E-Tree links from the NSD.
264 """
265 # Attention: Only done if a "forwarding_graphs" section exists in the NSD,
266 # even if "forwarding_graphs" are not used directly.
267 eline_fwd_links = list()
268 elan_fwd_links = list()
269 if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
270 vlinks = self.nsd["virtual_links"]
271 # constituent virtual links are not checked
272 eline_fwd_links = [l for l in vlinks if (
273 l["connectivity_type"] == "E-Line")]
274 elan_fwd_links = [l for l in vlinks if (
275 l["connectivity_type"] == "E-LAN" or
276 l["connectivity_type"] == "E-Tree")] # Treat E-Tree as E-LAN
277 return eline_fwd_links, elan_fwd_links
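# Illustrative example (assumed NSD snippet, not taken from this repo): a list like
#   virtual_links:
#     - connectivity_type: "E-Line"
#       connection_points_reference: ["vnf0:output", "vnf1:input"]
#     - connectivity_type: "E-LAN"
#       connection_points_reference: ["vnf0:mgmt", "vnf1:mgmt"]
# is split into one E-Line and one E-LAN forwarding link, provided the NSD
# also contains a "forwarding_graphs" section.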
278
279 def _get_resource_limits(self, deployment_unit):
280 """
281 Extract resource limits from deployment units.
282 """
283 # defaults
284 cpu_list = None
285 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(1.0))
286 mem_limit = None
287 # update from descriptor
288 if "resource_requirements" in deployment_unit:
289 res_req = deployment_unit.get("resource_requirements")
290 cpu_list = res_req.get("cpu").get("cpuset")
291 if cpu_list is None:
292 cpu_list = res_req.get("cpu").get("vcpus")
293 if cpu_list is not None:
294 # attention: docker expects list as string w/o spaces:
295 cpu_list = str(cpu_list).replace(" ", "").strip()
296 cpu_bw = res_req.get("cpu").get("cpu_bw")
297 if cpu_bw is None:
298 cpu_bw = 1.0
299 cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
300 mem_limit = res_req.get("memory").get("size")
301 mem_unit = str(res_req.get("memory").get("size_unit", "GB"))
302 if mem_limit is not None:
303 mem_limit = int(mem_limit)
304 # to bytes
305 if "G" in mem_unit:
306 mem_limit = mem_limit * 1024 * 1024 * 1024
307 elif "M" in mem_unit:
308 mem_limit = mem_limit * 1024 * 1024
309 elif "K" in mem_unit:
310 mem_limit = mem_limit * 1024
311 return cpu_list, cpu_period, cpu_quota, mem_limit
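# Illustrative example (not executed): a deployment unit that specifies
#   resource_requirements:
#     cpu: {cpu_bw: 0.5}
#     memory: {size: 2, size_unit: "GB"}
# yields mem_limit = 2 * 1024 * 1024 * 1024 = 2147483648 bytes and CFS values
# computed by _calculate_cpu_cfs_values(0.5), i.e. cpu_period=1000000 and
# cpu_quota=500000.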
312
313 def _start_vnfd(self, vnfd, vnf_id, ssiid, **kwargs):
314 """
315 Start a single VNFD of this service
316 :param vnfd: vnfd descriptor dict
317 :param vnf_id: unique id of this vnf in the nsd
318 :return:
319 """
320 vnfis = list()
321 # the vnf_name refers to the container image to be deployed
322 vnf_name = vnfd.get("name")
323 # combine VDUs and CDUs
324 deployment_units = (vnfd.get("virtual_deployment_units", []) +
325 vnfd.get("cloudnative_deployment_units", []))
326 # iterate over all deployment units within each VNFDs
327 for u in deployment_units:
328 # 0. vnf_container_name = vnf_id.vdu_id
329 vnf_container_name = get_container_name(vnf_id, u.get("id"))
330 vnf_container_instance_name = get_container_name(vnf_id, u.get("id"), ssiid)
331 # 1. get the name of the docker image to start
332 if vnf_container_name not in self.remote_docker_image_urls:
333 raise Exception("No image name for %r found. Abort." % vnf_container_name)
334 docker_image_name = self.remote_docker_image_urls.get(vnf_container_name)
335 # 2. select datacenter to start the VNF in
336 target_dc = self._place(vnfd, vnf_id, u, ssiid)
337 # 3. perform some checks to ensure we can start the container
338 assert(docker_image_name is not None)
339 assert(target_dc is not None)
340 if not self._check_docker_image_exists(docker_image_name):
341 raise Exception("Docker image {} not found. Abort."
342 .format(docker_image_name))
343
344 # 4. get the resource limits
345 cpu_list, cpu_period, cpu_quota, mem_limit = self._get_resource_limits(u)
346
347 # get connection points defined for the DU
348 intfs = u.get("connection_points", [])
349 # do some re-naming of fields to be compatible to containernet
350 for i in intfs:
351 if i.get("address"):
352 i["ip"] = i.get("address")
353
354 # get ports and port_bindings from the port and publish fields of CNFD
355 # see: https://github.com/containernet/containernet/wiki/Exposing-and-mapping-network-ports
356 ports = list() # Containernet naming
357 port_bindings = dict()
358 for i in intfs:
359 if i.get("port"):
360 if not isinstance(i.get("port"), int):
361 LOG.info("Field 'port' is no int CP: {}".format(i))
362 else:
363 ports.append(i.get("port"))
364 if i.get("publish"):
365 if not isinstance(i.get("publish"), dict):
366 LOG.info("Field 'publish' is no dict CP: {}".format(i))
367 else:
368 port_bindings.update(i.get("publish"))
369 # update port mapping for cases where service is deployed > 1 times
370 port_bindings = update_port_mapping_multi_instance(ssiid, port_bindings)
371 if len(ports) > 0:
372 LOG.info("{} exposes ports: {}".format(vnf_container_instance_name, ports))
373 if len(port_bindings) > 0:
374 LOG.info("{} publishes ports: {}".format(vnf_container_instance_name, port_bindings))
375
376 # 5. collect additional information to start container
377 volumes = list()
378 cenv = dict()
379 # 5.1 inject descriptor based start/stop commands into env (overwrite)
380 VNFD_CMD_START = u.get("vm_cmd_start")
381 VNFD_CMD_STOP = u.get("vm_cmd_stop")
382 if VNFD_CMD_START and not VNFD_CMD_START == "None":
383 LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_START) +
384 " Overwriting SON_EMU_CMD.")
385 cenv["SON_EMU_CMD"] = VNFD_CMD_START
386 if VNFD_CMD_STOP and not VNFD_CMD_STOP == "None":
387 LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_STOP) +
388 " Overwriting SON_EMU_CMD_STOP.")
389 cenv["SON_EMU_CMD_STOP"] = VNFD_CMD_STOP
390
391 # 6. Start the container
392 LOG.info("Starting %r as %r in DC %r" %
393 (vnf_name, vnf_container_instance_name, target_dc))
394 LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
395 # start the container
396 vnfi = target_dc.startCompute(
397 vnf_container_instance_name,
398 network=intfs,
399 image=docker_image_name,
400 cpu_quota=cpu_quota,
401 cpu_period=cpu_period,
402 cpuset_cpus=cpu_list,
403 mem_limit=mem_limit,
404 volumes=volumes,
405 properties=cenv, # environment
406 ports=ports,
407 port_bindings=port_bindings,
408 # only publish if explicitly stated in descriptor
409 publish_all_ports=False,
410 type=kwargs.get('type', 'docker'))
411 # add vnfd reference to vnfi
412 vnfi.vnfd = vnfd
413 # add container name
414 vnfi.vnf_container_name = vnf_container_name
415 vnfi.vnf_container_instance_name = vnf_container_instance_name
416 vnfi.ssiid = ssiid
417 # store vnfi
418 vnfis.append(vnfi)
419 return vnfis
420
421 def _stop_vnfi(self, vnfi):
422 """
423 Stop a VNF instance.
424 :param vnfi: vnf instance to be stopped
425 """
426 # Find the correct datacenter
427 status = vnfi.getStatus()
428 dc = vnfi.datacenter
429 # stop the vnfi
430 LOG.info("Stopping the vnf instance contained in %r in DC %r" %
431 (status["name"], dc))
432 dc.stopCompute(status["name"])
433
434 def _get_vnf_instance(self, instance_uuid, vnf_id):
435 """
436 Returns VNFI object for a given "vnf_id" or "vnf_container_name" taken from an NSD.
437 :return: single object
438 """
439 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
440 if str(vnfi.name) == str(vnf_id):
441 return vnfi
442 LOG.warning("No container with name: {0} found.".format(vnf_id))
443 return None
444
445 def _get_vnf_instance_units(self, instance_uuid, vnf_id):
446 """
447 Returns a list of VNFI objects (all deployment units) for a given
448 "vnf_id" taken from an NSD.
449 :return: list
450 """
451 if vnf_id is None:
452 return None
453 r = list()
454 for vnfi in self.instances[instance_uuid]["vnf_instances"]:
455 if vnf_id in vnfi.name:
456 r.append(vnfi)
457 if len(r) > 0:
458 LOG.debug("Found units: {} for vnf_id: {}"
459 .format([i.name for i in r], vnf_id))
460 return r
461 LOG.warning("No container(s) with name: {0} found.".format(vnf_id))
462 return None
463
464 @staticmethod
465 def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
466 """
467 Reconfigure the network configuration of a specific interface
468 of a running container.
469 :param vnfi: container instance
470 :param if_name: interface name
471 :param net_str: network configuration string, e.g., 1.2.3.4/24
472 :return:
473 """
474 # assign new ip address
475 if net_str is not None:
476 intf = vnfi.intf(intf=if_name)
477 if intf is not None:
478 intf.setIP(net_str)
479 LOG.debug("Reconfigured network of %s:%s to %r" %
480 (vnfi.name, if_name, net_str))
481 else:
482 LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
483 vnfi.name, if_name))
484
485 if new_name is not None:
486 vnfi.cmd('ip link set', if_name, 'down')
487 vnfi.cmd('ip link set', if_name, 'name', new_name)
488 vnfi.cmd('ip link set', new_name, 'up')
489 LOG.debug("Reconfigured interface name of %s:%s to %s" %
490 (vnfi.name, if_name, new_name))
491
492 def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
493 for vnfi in vnfi_list:
494 config = vnfi.dcinfo.get("Config", dict())
495 env = config.get("Env", list())
496 for env_var in env:
497 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
498 if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
499 LOG.info("Executing script in '{}': {}={}"
500 .format(vnfi.name, var, cmd))
501 # execute command in new thread to ensure that GK is not
502 # blocked by VNF
503 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
504 t.daemon = True
505 t.start()
506 break # only execute one command
507
508 def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
509 for vnfi in vnfi_list:
510 config = vnfi.dcinfo.get("Config", dict())
511 env = config.get("Env", list())
512 for env_var in env:
513 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
514 if var == "SON_EMU_CMD_STOP" or var == "VIM_EMU_CMD_STOP":
515 LOG.info("Executing script in '{}': {}={}"
516 .format(vnfi.name, var, cmd))
517 # execute command in new thread to ensure that GK is not
518 # blocked by VNF
519 t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
520 t.daemon = True
521 t.start()
522 break # only execute one command
523
524 def _unpack_service_package(self):
525 """
526 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
527 """
528 LOG.info("Unzipping: %r" % self.package_file_path)
529 with zipfile.ZipFile(self.package_file_path, "r") as z:
530 z.extractall(self.package_content_path)
531
532 def _load_package_descriptor(self):
533 """
534 Load the main package descriptor YAML and keep it as dict.
535 :return:
536 """
537 self.manifest = load_yaml(
538 os.path.join(
539 self.package_content_path, "TOSCA-Metadata/NAPD.yaml"))
540
541 def _load_nsd(self):
542 """
543 Load the entry NSD YAML and keep it as dict.
544 :return:
545 """
546 if "package_content" in self.manifest:
547 nsd_path = None
548 for f in self.manifest.get("package_content"):
549 if f.get("content-type") == "application/vnd.5gtango.nsd":
550 nsd_path = os.path.join(
551 self.package_content_path,
552 make_relative_path(f.get("source")))
553 break # always use the first NSD for now
554 if nsd_path is None:
555 raise OnBoardingException("No NSD with type 'application/vnd.5gtango.nsd' found.")
556 self.nsd = load_yaml(nsd_path)
557 GK.net.deployed_nsds.append(self.nsd) # TODO this seems strange (remove?)
558 LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
559 else:
560 raise OnBoardingException(
561 "No 'package_content' section in package manifest:\n{}"
562 .format(self.manifest))
563
564 def _load_vnfd(self):
565 """
566 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
567 :return:
568 """
569 # first make a list of all the vnfds in the package
570 vnfd_set = dict()
571 if "package_content" in self.manifest:
572 for pc in self.manifest.get("package_content"):
573 if pc.get(
574 "content-type") == "application/vnd.5gtango.vnfd":
575 vnfd_path = os.path.join(
576 self.package_content_path,
577 make_relative_path(pc.get("source")))
578 vnfd = load_yaml(vnfd_path)
579 vnfd_set[vnfd.get("name")] = vnfd
580 if len(vnfd_set) < 1:
581 raise OnBoardingException("No VNFDs found.")
582 # then link each vnf_id in the nsd to its vnfd
583 for v in self.nsd.get("network_functions"):
584 if v.get("vnf_name") in vnfd_set:
585 self.vnfds[v.get("vnf_id")] = vnfd_set[v.get("vnf_name")]
586 LOG.debug("Loaded VNFD: {0} id: {1}"
587 .format(v.get("vnf_name"), v.get("vnf_id")))
588
589 def _connect_elines(self, eline_fwd_links, instance_uuid, subnets):
590 """
591 Connect all E-LINE links in the NSD
592 Attention: This method DOES NOT support multi V/CDU VNFs!
593 :param eline_fwd_links: list of E-LINE links in the NSD
594 :param: instance_uuid of the service
595 :param: subnets list of subnets to be used
596 :return:
597 """
598 # cookie is used as identifier for the flow rules installed by the dummy gatekeeper,
599 # e.g., different services get a unique cookie for their flow rules
600 cookie = 1
601 for link in eline_fwd_links:
602 LOG.info("Found E-Line: {}".format(link))
603 src_id, src_if_name = parse_interface(
604 link["connection_points_reference"][0])
605 dst_id, dst_if_name = parse_interface(
606 link["connection_points_reference"][1])
607 LOG.info("Searching C/VDU for E-Line: src={}, src_if={}, dst={}, dst_if={}"
608 .format(src_id, src_if_name, dst_id, dst_if_name))
609 # handle C/VDUs (ugly hack, only one V/CDU per VNF for now)
610 src_units = self._get_vnf_instance_units(instance_uuid, src_id)
611 dst_units = self._get_vnf_instance_units(instance_uuid, dst_id)
612 if src_units is None or dst_units is None:
613 LOG.info("No VNF-VNF link. Skipping: src={}, src_if={}, dst={}, dst_if={}"
614 .format(src_id, src_if_name, dst_id, dst_if_name))
615 return
616 # we only support VNFs with one V/CDU right now
617 if len(src_units) != 1 or len(dst_units) != 1:
618 raise BaseException("LLCM does not support E-LINES for multi V/CDU VNFs.")
619 # get the full name from that C/VDU and use it as src_id and dst_id
620 src_id = src_units[0].name
621 dst_id = dst_units[0].name
622 # from here we have all info we need
623 LOG.info("Creating E-Line for C/VDU: src={}, src_if={}, dst={}, dst_if={}"
624 .format(src_id, src_if_name, dst_id, dst_if_name))
625 # get involved vnfis
626 src_vnfi = src_units[0]
627 dst_vnfi = dst_units[0]
628 # proceed with chaining setup
629 setChaining = False
630 if src_vnfi is not None and dst_vnfi is not None:
631 setChaining = True
632 # re-configure the VNFs IP assignment and ensure that a new
633 # subnet is used for each E-Link
634 eline_net = subnets.pop(0)
635 ip1 = "{0}/{1}".format(str(eline_net[1]),
636 eline_net.prefixlen)
637 ip2 = "{0}/{1}".format(str(eline_net[2]),
638 eline_net.prefixlen)
639 # check if VNFs have fixed IPs (ip/address field in VNFDs)
640 if (self._get_vnfd_cp_from_vnfi(
641 src_vnfi, src_if_name).get("ip") is None and
642 self._get_vnfd_cp_from_vnfi(
643 src_vnfi, src_if_name).get("address") is None):
644 self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
645 # check if VNFs have fixed IPs (ip field in VNFDs)
646 if (self._get_vnfd_cp_from_vnfi(
647 dst_vnfi, dst_if_name).get("ip") is None and
648 self._get_vnfd_cp_from_vnfi(
649 dst_vnfi, dst_if_name).get("address") is None):
650 self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
651 # set the chaining
652 if setChaining:
653 GK.net.setChain(
654 src_id, dst_id,
655 vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
656 bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
657
658 def _get_vnfd_cp_from_vnfi(self, vnfi, ifname):
659 """
660 Gets the connection point data structure from the VNFD
661 of the given VNFI using ifname.
662 """
663 if vnfi.vnfd is None:
664 return {}
665 cps = vnfi.vnfd.get("connection_points")
666 for cp in cps:
667 if cp.get("id") == ifname:
668 return cp
669
670 def _connect_elans(self, elan_fwd_links, instance_uuid, subnets):
671 """
672 Connect all E-LAN/E-Tree links in the NSD
673 This method supports multi-V/CDU VNFs if the connection
674 point names of the DUs are the same as the ones in the NSD.
675 :param elan_fwd_links: list of E-LAN links in the NSD
676 :param: instance_uuid of the service
677 :param: subnets list of subnets to be used
678 :return:
679 """
680 for link in elan_fwd_links:
681 # a new E-LAN/E-Tree
682 elan_vnf_list = []
683 lan_net = subnets.pop(0)
684 lan_hosts = list(lan_net.hosts())
685
686 # generate lan ip address for all interfaces (of all involved (V/CDUs))
687 for intf in link["connection_points_reference"]:
688 vnf_id, intf_name = parse_interface(intf)
689 if vnf_id is None:
690 continue # skip references to NS connection points
691 units = self._get_vnf_instance_units(instance_uuid, vnf_id)
692 if units is None:
693 continue # skip if no deployment unit is present
694 # iterate over all involved deployment units
695 for uvnfi in units:
696 # Attention: we apply a simplification for multi DU VNFs here:
697 # the connection points of all involved DUs have to have the same
698 # name as the connection points of the surrounding VNF to be mapped.
699 # This is because we do not consider links specified in the VNFDs
700 container_name = uvnfi.name
701 ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
702 lan_net.prefixlen)
703 LOG.debug(
704 "Setting up E-LAN/E-Tree interface. (%s:%s) -> %s" % (
705 container_name, intf_name, ip_address))
706 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
707 # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
708 # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is
709 # necessary.
710 vnfi = self._get_vnf_instance(instance_uuid, container_name)
711 if vnfi is not None:
712 self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
713 # add this vnf and interface to the E-LAN for tagging
714 elan_vnf_list.append(
715 {'name': container_name, 'interface': intf_name})
716 # install the VLAN tags for this E-LAN
717 GK.net.setLAN(elan_vnf_list)
718
719 def _load_docker_files(self):
720 """
721 Get all paths to Dockerfiles from VNFDs and store them in dict.
722 :return:
723 """
724 for vnf_id, v in self.vnfds.iteritems():
725 for vu in v.get("virtual_deployment_units", []):
726 vnf_container_name = get_container_name(vnf_id, vu.get("id"))
727 if vu.get("vm_image_format") == "docker":
728 vm_image = vu.get("vm_image")
729 docker_path = os.path.join(
730 self.package_content_path,
731 make_relative_path(vm_image))
732 self.local_docker_files[vnf_container_name] = docker_path
733 LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
734 for cu in v.get("cloudnative_deployment_units", []):
735 vnf_container_name = get_container_name(vnf_id, cu.get("id"))
736 image = cu.get("image")
737 docker_path = os.path.join(
738 self.package_content_path,
739 make_relative_path(image))
740 self.local_docker_files[vnf_container_name] = docker_path
741 LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
742
743 def _load_docker_urls(self):
744 """
745 Get all URLs to pre-built Docker images in some repo.
746 :return:
747 """
748 for vnf_id, v in self.vnfds.iteritems():
749 for vu in v.get("virtual_deployment_units", []):
750 vnf_container_name = get_container_name(vnf_id, vu.get("id"))
751 if vu.get("vm_image_format") == "docker":
752 url = vu.get("vm_image")
753 if url is not None:
754 url = url.replace("http://", "")
755 self.remote_docker_image_urls[vnf_container_name] = url
756 LOG.debug("Found Docker image URL (%r): %r" %
757 (vnf_container_name,
758 self.remote_docker_image_urls[vnf_container_name]))
759 for cu in v.get("cloudnative_deployment_units", []):
760 vnf_container_name = get_container_name(vnf_id, cu.get("id"))
761 url = cu.get("image")
762 if url is not None:
763 url = url.replace("http://", "")
764 self.remote_docker_image_urls[vnf_container_name] = url
765 LOG.debug("Found Docker image URL (%r): %r" %
766 (vnf_container_name,
767 self.remote_docker_image_urls[vnf_container_name]))
768
769 def _build_images_from_dockerfiles(self):
770 """
771 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
772 """
773 if GK_STANDALONE_MODE:
774 return # do not build anything in standalone mode
775 dc = DockerClient()
776 LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
777 self.local_docker_files))
778 for k, v in self.local_docker_files.iteritems():
779 for line in dc.build(path=v.replace(
780 "Dockerfile", ""), tag=k, rm=False, nocache=False):
781 LOG.debug("DOCKER BUILD: %s" % line)
782 LOG.info("Docker image created: %s" % k)
783
784 def _pull_predefined_dockerimages(self):
785 """
786 If the package contains URLs to pre-built Docker images, we download them with this method.
787 """
788 dc = DockerClient()
789 for url in self.remote_docker_image_urls.itervalues():
790 # only pull if not present (speedup for development)
791 if not FORCE_PULL:
792 if len(dc.images.list(name=url)) > 0:
793 LOG.debug("Image %r present. Skipping pull." % url)
794 continue
795 LOG.info("Pulling image: %r" % url)
796 # this seems to fail with latest docker api version 2.0.2
797 # dc.images.pull(url,
798 # insecure_registry=True)
799 # using docker cli instead
800 cmd = ["docker",
801 "pull",
802 url,
803 ]
804 Popen(cmd).wait()
805
806 def _check_docker_image_exists(self, image_name):
807 """
808 Query the docker service and check if the given image exists
809 :param image_name: name of the docker image
810 :return:
811 """
812 return len(DockerClient().images.list(name=image_name)) > 0
813
814 def _place(self, vnfd, vnfid, vdu, ssiid):
815 """
816 Do placement. Return the name of the DC to place
817 the given VDU.
818 """
819 assert(len(self.vnfds) > 0)
820 assert(len(GK.dcs) > 0)
821 if PLACEMENT_ALGORITHM_OBJ is None:
822 LOG.error("No placement algorithm given. Using FirstDcPlacement!")
823 p = FirstDcPlacement()
824 else:
825 p = PLACEMENT_ALGORITHM_OBJ
826 cname = get_container_name(vnfid, vdu.get("id"), ssiid)
827 rdc = p.place(GK.dcs, vnfd, vnfid, vdu, ssiid, cname)
828 LOG.info("Placement: '{}' --> '{}'".format(cname, rdc))
829 return rdc
830
831 def _calculate_cpu_cfs_values(self, cpu_time_percentage):
832 """
833 Calculate cpu period and quota for CFS
834 :param cpu_time_percentage: percentage of overall CPU to be used
835 :return: cpu_period, cpu_quota
836 """
837 if cpu_time_percentage is None:
838 return -1, -1
839 if cpu_time_percentage < 0:
840 return -1, -1
841 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
842 # Attention: minimum cpu_quota is 1 ms (1000 microseconds)
843 cpu_period = 1000000 # let's consider a fixed period of 1000000 microseconds for now
844 LOG.debug("cpu_period is %r, cpu_percentage is %r" %
845 (cpu_period, cpu_time_percentage))
846 # calculate the fraction of cpu time for this container
847 cpu_quota = cpu_period * cpu_time_percentage
848 # ATTENTION >= 1000 to avoid an invalid argument system error ... no
849 # idea why
850 if cpu_quota < 1000:
851 LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
852 cpu_quota = 1000
853 LOG.warning("Increased CPU quota to avoid system error.")
854 LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
855 (cpu_period, cpu_quota))
856 return int(cpu_period), int(cpu_quota)
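# Illustrative example (not executed): _calculate_cpu_cfs_values(0.1) returns
# (1000000, 100000), i.e. the container gets at most 10% of one CPU;
# quotas that would fall below 1000 microseconds are raised to 1000.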
857
858
859 """
860 Some (simple) placement algorithms
861 """
862
863
864 class FirstDcPlacement(object):
865 """
866 Placement: Always use the first (and thus always the same) data center from the GK.dcs dict.
867 """
868
869 def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
870 return list(dcs.itervalues())[0]
871
872
873 class RoundRobinDcPlacement(object):
874 """
875 Placement: Distribute VNFs across all available DCs in a round robin fashion.
876 """
877
878 def __init__(self):
879 self.count = 0
880
881 def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
882 dcs_list = list(dcs.itervalues())
883 rdc = dcs_list[self.count % len(dcs_list)]
884 self.count += 1 # inc. count to use next DC
885 return rdc
886
887
888 class StaticConfigPlacement(object):
889 """
890 Placement: Fixed assignment based on config file.
891 """
892
893 def __init__(self, path=None):
894 if path is None:
895 path = "static_placement.yml"
896 path = os.path.expanduser(path)
897 self.static_placement = dict()
898 try:
899 self.static_placement = load_yaml(path)
900 except BaseException as ex:
901 LOG.error(ex)
902 LOG.error("Couldn't load placement from {}"
903 .format(path))
904 LOG.info("Loaded static placement: {}"
905 .format(self.static_placement))
906
907 def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
908 # check for container name entry
909 if cname not in self.static_placement:
910 LOG.error("Coudn't find {} in placement".format(cname))
911 LOG.error("Using first DC as fallback!")
912 return list(dcs.itervalues())[0]
913 # lookup
914 candidate_dc = self.static_placement.get(cname)
915 # check if DC exists
916 if candidate_dc not in dcs:
917 LOG.error("Couldn't find DC {}".format(candidate_dc))
918 LOG.error("Using first DC as fallback!")
919 return list(dcs.itervalues())[0]
920 # return correct DC
921 return dcs.get(candidate_dc)
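# Illustrative sketch (assumption, not part of this module): a new placement
# algorithm only has to implement the same place() interface and be assigned
# to PLACEMENT_ALGORITHM_OBJ before services are instantiated, e.g.:
#
#     class HashDcPlacement(object):
#         """Deterministically map each container name to one of the DCs."""
#         def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
#             dcs_list = list(dcs.itervalues())
#             return dcs_list[hash(cname) % len(dcs_list)]
#
#     PLACEMENT_ALGORITHM_OBJ = HashDcPlacement()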
922
923
924 """
925 Resource definitions and API endpoints
926 """
927
928
929 class Packages(fr.Resource):
930
931 def post(self):
932 """
933 Upload a *.son service package to the dummy gatekeeper.
934
935 We expect a request with a *.son file and store it in UPLOAD_FOLDER
936 :return: UUID
937 """
938 try:
939 # get file contents
940 LOG.info("POST /packages called")
941 # let's search for the package in the request
942 is_file_object = False # make API more robust: file can be in data or in files field
943 if "package" in request.files:
944 son_file = request.files["package"]
945 is_file_object = True
946 elif len(request.data) > 0:
947 son_file = request.data
948 else:
949 return {"service_uuid": None, "size": 0, "sha1": None,
950 "error": "upload failed. file not found."}, 500
951 # generate a uuid to reference this package
952 service_uuid = str(uuid.uuid4())
953 file_hash = hashlib.sha1(str(son_file)).hexdigest()
954 # ensure that upload folder exists
955 ensure_dir(UPLOAD_FOLDER)
956 upload_path = os.path.join(UPLOAD_FOLDER, "%s.tgo" % service_uuid)
957 # store *.son file to disk
958 if is_file_object:
959 son_file.save(upload_path)
960 else:
961 with open(upload_path, 'wb') as f:
962 f.write(son_file)
963 size = os.path.getsize(upload_path)
964
965 # first stop and delete any other running services
966 if AUTO_DELETE:
967 service_list = copy.copy(GK.services)
968 for old_service_uuid in service_list:
969 instances_list = copy.copy(
970 GK.services[old_service_uuid].instances)
971 for instance_uuid in instances_list:
972 # stop the old instance (separate loop variable avoids shadowing the new service_uuid)
973 GK.services.get(old_service_uuid).stop_service(
974 instance_uuid)
975 LOG.info("service instance with uuid %r stopped." %
976 instance_uuid)
977
978 # create a service object and register it
979 s = Service(service_uuid, file_hash, upload_path)
980 GK.register_service_package(service_uuid, s)
981
982 # automatically deploy the service
983 if AUTO_DEPLOY:
984 # ok, we have a service uuid, let's start the service
985 reset_subnets()
986 GK.services.get(service_uuid).start_service()
987
988 # generate the JSON result
989 return {"service_uuid": service_uuid, "size": size,
990 "sha1": file_hash, "error": None}, 201
991 except BaseException:
992 LOG.exception("Service package upload failed:")
993 return {"service_uuid": None, "size": 0,
994 "sha1": None, "error": "upload failed"}, 500
995
996 def get(self):
997 """
998 Return a list of package descriptor headers.
999 Fakes the behavior of 5GTANGO's GK API to be
1000 compatible with tng-cli.
1001 :return: list
1002 """
1003 LOG.info("GET /packages")
1004 result = list()
1005 for suuid, sobj in GK.services.iteritems():
1006 pkg = dict()
1007 pkg["pd"] = dict()
1008 pkg["uuid"] = suuid
1009 pkg["pd"]["name"] = sobj.manifest.get("name")
1010 pkg["pd"]["version"] = sobj.manifest.get("version")
1011 pkg["created_at"] = sobj.created_at
1012 result.append(pkg)
1013 return result, 200
1014
1015
1016 class Services(fr.Resource):
1017
1018 def get(self):
1019 """
1020 Return a list of services.
1021 Fakes the behavior of 5GTANGO's GK API to be
1022 compatible with tng-cli.
1023 :return: list
1024 """
1025 LOG.info("GET /services")
1026 result = list()
1027 for suuid, sobj in GK.services.iteritems():
1028 service = dict()
1029 service["nsd"] = dict()
1030 service["uuid"] = suuid
1031 service["nsd"]["name"] = sobj.nsd.get("name")
1032 service["nsd"]["version"] = sobj.nsd.get("version")
1033 service["created_at"] = sobj.created_at
1034 result.append(service)
1035 return result, 200
1036
1037
1038 class Instantiations(fr.Resource):
1039
1040 def post(self):
1041 """
1042 Instantiate a service specified by its UUID.
1043 Will return a new UUID to identify the running service instance.
1044 :return: UUID
1045 """
1046 LOG.info("POST /instantiations (or /requests) called")
1047 # try to extract the service uuid from the request
1048 json_data = request.get_json(force=True)
1049 service_uuid = json_data.get("service_uuid")
1050 service_name = json_data.get("service_name")
1051 if service_name is None:
1052 # let's be fuzzy
1053 service_name = service_uuid
1054 # first try to find by service_name
1055 if service_name is not None:
1056 for s_uuid, s in GK.services.iteritems():
1057 if s.manifest.get("name") == service_name:
1058 LOG.info("Searched for: {}. Found service w. UUID: {}"
1059 .format(service_name, s_uuid))
1060 service_uuid = s_uuid
1061 # let's be a bit fuzzy here to make testing easier
1062 if (service_uuid is None or service_uuid ==
1063 "latest") and len(GK.services) > 0:
1064 # if we don't get a service uuid, we simply start the first service
1065 # in the list
1066 service_uuid = list(GK.services.iterkeys())[0]
1067 if service_uuid in GK.services:
1068 # ok, we have a service uuid, let's start the service
1069 service_instance_uuid = GK.services.get(
1070 service_uuid).start_service()
1071 # multiple ID fields to be compatible with tng-bench and tng-cli
1072 return ({"service_instance_uuid": service_instance_uuid,
1073 "id": service_instance_uuid}, 201)
1074 LOG.error("Service not found: {}/{}".format(service_uuid, service_name))
1075 return "Service not found", 404
1076
1077 def get(self):
1078 """
1079 Returns a list of UUIDs containing all running services.
1080 :return: dict / list
1081 """
1082 LOG.info("GET /instantiations or /api/v3/records/services")
1083 # return {"service_instantiations_list": [
1084 # list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
1085 result = list()
1086 for suuid, sobj in GK.services.iteritems():
1087 for iuuid, iobj in sobj.instances.iteritems():
1088 inst = dict()
1089 inst["uuid"] = iobj.get("uuid")
1090 inst["instance_name"] = "{}-inst.{}".format(
1091 iobj.get("name"), iobj.get("ssiid"))
1092 inst["status"] = "running"
1093 inst["created_at"] = iobj.get("created_at")
1094 result.append(inst)
1095 return result, 200
1096
1097 def delete(self):
1098 """
1099 Stops a running service specified by its service and instance UUID.
1100 """
1101 # try to extract the service and instance UUID from the request
1102 json_data = request.get_json(force=True)
1103 service_uuid_input = json_data.get("service_uuid")
1104 instance_uuid_input = json_data.get("service_instance_uuid")
1105 if len(GK.services) < 1:
1106 return "No service on-boarded.", 404
1107 # try to be fuzzy
1108 if service_uuid_input is None:
1109 # if we don't get a service uuid we stop all services
1110 service_uuid_list = list(GK.services.iterkeys())
1111 LOG.info("No service_uuid given, stopping all.")
1112 else:
1113 service_uuid_list = [service_uuid_input]
1114 # for each service
1115 for service_uuid in service_uuid_list:
1116 if instance_uuid_input is None:
1117 instance_uuid_list = list(
1118 GK.services[service_uuid].instances.iterkeys())
1119 else:
1120 instance_uuid_list = [instance_uuid_input]
1121 # for all service instances
1122 for instance_uuid in instance_uuid_list:
1123 if (service_uuid in GK.services and
1124 instance_uuid in GK.services[service_uuid].instances):
1125 # valid service and instance UUID, stop service
1126 GK.services.get(service_uuid).stop_service(instance_uuid)
1127 LOG.info("Service instance with uuid %r stopped." % instance_uuid)
1128 return "Service(s) stopped.", 200
1129
1130
1131 class Exit(fr.Resource):
1132
1133 def put(self):
1134 """
1135 Stop the running Containernet instance regardless of data transmitted
1136 """
1137 list(GK.dcs.values())[0].net.stop()
1138
1139
1140 def generate_subnets(prefix, base, subnet_size=50, mask=24):
1141 # Generate a list of ipaddress network objects (one per subnet)
1142 r = list()
1143 for net in range(base, base + subnet_size):
1144 subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
1145 r.append(ipaddress.ip_network(unicode(subnet)))
1146 return r
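# Illustrative example (not executed):
#   generate_subnets('20.0', 0, subnet_size=2, mask=24)
# returns [ip_network(u'20.0.0.0/24'), ip_network(u'20.0.1.0/24')].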
1147
1148
1149 def reset_subnets():
1150 global ELINE_SUBNETS
1151 global ELAN_SUBNETS
1152 # private subnet definitions for the generated interfaces
1153 # 30.0.xxx.0/24
1154 ELAN_SUBNETS = generate_subnets('30.0', 0, subnet_size=50, mask=24)
1155 # 20.0.xxx.0/24
1156 ELINE_SUBNETS = generate_subnets('20.0', 0, subnet_size=50, mask=24)
1157
1158
1159 def initialize_GK():
1160 global GK
1161 GK = Gatekeeper()
1162
1163
1164 # create a single, global GK object
1165 GK = None
1166 initialize_GK()
1167 # setup Flask
1168 http_server = None
1169 app = Flask(__name__)
1170 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
1171 api = fr.Api(app)
1172 # define endpoints
1173 api.add_resource(Packages, '/packages', '/api/v2/packages', '/api/v3/packages')
1174 api.add_resource(Services, '/services', '/api/v2/services', '/api/v3/services')
1175 api.add_resource(Instantiations, '/instantiations',
1176 '/api/v2/instantiations', '/api/v2/requests', '/api/v3/requests',
1177 '/api/v3/records/services')
1178 api.add_resource(Exit, '/emulator/exit')
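# Example requests against the API (illustrative; assumes the default
# standalone port 8000 used in __main__, file and service names are placeholders):
#   curl -X POST -F package=@example_service.tgo http://127.0.0.1:8000/packages
#   curl -X POST -d '{"service_name": "example_service"}' http://127.0.0.1:8000/instantiations
#   curl -X DELETE -d '{}' http://127.0.0.1:8000/instantiations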
1179
1180
1181 def start_rest_api(host, port, datacenters=dict()):
1182 global http_server
1183 GK.dcs = datacenters
1184 GK.net = get_dc_network()
1185 # start the Flask server (not the best performance but ok for our use case)
1186 # app.run(host=host,
1187 # port=port,
1188 # debug=True,
1189 # use_reloader=False # this is needed to run Flask in a non-main thread
1190 # )
1191 http_server = WSGIServer((host, port), app, log=open("/dev/null", "w"))
1192 http_server.serve_forever()
1193
1194
1195 def stop_rest_api():
1196 if http_server:
1197 http_server.close()
1198
1199
1200 def ensure_dir(name):
1201 if not os.path.exists(name):
1202 os.makedirs(name)
1203
1204
1205 def load_yaml(path):
1206 with open(path, "r") as f:
1207 try:
1208 r = yaml.load(f)
1209 except yaml.YAMLError as exc:
1210 LOG.exception("YAML parse error: %r" % str(exc))
1211 r = dict()
1212 return r
1213
1214
1215 def make_relative_path(path):
1216 if path.startswith("file://"):
1217 path = path.replace("file://", "", 1)
1218 if path.startswith("/"):
1219 path = path.replace("/", "", 1)
1220 return path
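# Illustrative example (not executed):
#   make_relative_path("file:///docker/vnf1.Dockerfile") == "docker/vnf1.Dockerfile"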
1221
1222
1223 def get_dc_network():
1224 """
1225 Retrieve the DCNetwork that this dummy gatekeeper (GK) is connected to.
1226 Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
1227 :return:
1228 """
1229 assert (len(GK.dcs) > 0)
1230 return GK.dcs.values()[0].net
1231
1232
1233 def parse_interface(interface_name):
1234 """
1235 convert the interface name in the NSD to the corresponding vnf_id and vnf_interface names
1236 :param interface_name:
1237 :return:
1238 """
1239 if ':' in interface_name:
1240 vnf_id, vnf_interface = interface_name.split(':')
1241 else:
1242 vnf_id = None
1243 vnf_interface = interface_name
1244 return vnf_id, vnf_interface
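# Illustrative examples (not executed):
#   parse_interface("vnf0:input") == ("vnf0", "input")
#   parse_interface("mgmt") == (None, "mgmt")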
1245
1246
1247 def get_container_name(vnf_id, vdu_id, ssiid=None):
1248 if ssiid is not None:
1249 return "{}.{}.{}".format(vnf_id, vdu_id, ssiid)
1250 return "{}.{}".format(vnf_id, vdu_id)
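# Illustrative examples (not executed):
#   get_container_name("vnf0", "vdu01") == "vnf0.vdu01"
#   get_container_name("vnf0", "vdu01", 0) == "vnf0.vdu01.0"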
1251
1252
1253 def get_triple_id(descr):
1254 return "{}.{}.{}".format(
1255 descr.get("vendor"), descr.get("name"), descr.get("version"))
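# Illustrative example (not executed): for an NSD with vendor "eu.5gtango",
# name "ns-example" and version "0.1", get_triple_id() returns
# "eu.5gtango.ns-example.0.1".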
1256
1257
1258 def update_port_mapping_multi_instance(ssiid, port_bindings):
1259 """
1260 Port_bindings are used to expose ports of the deployed containers.
1261 They would collide if we deploy multiple service instances.
1262 This function adds an offset to them, based on the
1263 short service instance ID (SSIID) and
1264 MULTI_INSTANCE_PORT_OFFSET.
1265 """
1266 def _offset(p):
1267 return p + MULTI_INSTANCE_PORT_OFFSET * ssiid
1268
1269 port_bindings = {k: _offset(v) for k, v in port_bindings.iteritems()}
1270 return port_bindings
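# Illustrative example (not executed): with MULTI_INSTANCE_PORT_OFFSET = 1000,
#   update_port_mapping_multi_instance(2, {5000: 5000})
# returns {5000: 7000}, so the third service instance (SSIID 2) does not
# collide with the first two.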
1271
1272
1273 if __name__ == '__main__':
1274 """
1275 Let's allow running the API in standalone mode.
1276 """
1277 GK_STANDALONE_MODE = True
1278 logging.getLogger("werkzeug").setLevel(logging.INFO)
1279 start_rest_api("0.0.0.0", 8000)