+
# Copyright (c) 2018 SONATA-NFV, 5GTANGO and Paderborn University
# ALL RIGHTS RESERVED.
#
import zipfile
import yaml
import threading
+import datetime
from docker import DockerClient
from flask import Flask, request
import flask_restful as fr
-from collections import defaultdict
-import pkg_resources
+from gevent.pywsgi import WSGIServer
from subprocess import Popen
-from random import randint
import ipaddress
import copy
import time
-from functools import reduce
LOG = logging.getLogger("5gtango.llcm")
LOG.setLevel(logging.INFO)
+CORS_HEADER = {'Access-Control-Allow-Origin': '*',
+ 'Access-Control-Allow-Methods': 'GET,OPTIONS'}
+
+
GK_STORAGE = "/tmp/vim-emu-tango-llcm/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
# should a new version of an image be pulled even if it's available
FORCE_PULL = False
-# Automatically deploy SAPs (endpoints) of the service as new containers
-# Attention: This is not a configuration switch but a global variable!
-# Don't change its default value.
-DEPLOY_SAP = False
-
# flag to indicate if we use bidirectional forwarding rules in the
# automatic chaining process
BIDIRECTIONAL_CHAIN = True
# and also automatically terminate any other running services
AUTO_DELETE = False
+# global subnet definitions (see reset_subnets())
+ELAN_SUBNETS = None
+ELINE_SUBNETS = None
-def generate_subnets(prefix, base, subnet_size=50, mask=24):
- # Generate a list of ipaddress in subnets
- r = list()
- for net in range(base, base + subnet_size):
- subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
- r.append(ipaddress.ip_network(unicode(subnet)))
- return r
-
+# Time in seconds to wait for vnf stop scripts to execute fully
+VNF_STOP_WAIT_TIME = 5
-# private subnet definitions for the generated interfaces
-# 10.10.xxx.0/24
-SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
-# 10.20.xxx.0/30
-ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
-# 10.30.xxx.0/30
-ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
+# If services are instantiated multiple times, the public port
+# mappings need to be adapted to avoid collisions. We use this
+# offset for this: NEW_PORT = (SSIID * OFFSET) + ORIGINAL_PORT
+MULTI_INSTANCE_PORT_OFFSET = 1000
-# path to the VNFD for the SAP VNF that is deployed as internal SAP point
-SAP_VNFD = None
+# Selected Placement Algorithm: Points to the class of the selected
+# placement algorithm.
+PLACEMENT_ALGORITHM_OBJ = None
-# Time in seconds to wait for vnf stop scripts to execute fully
-VNF_STOP_WAIT_TIME = 5
+# Path to folder with <container_name>.env.yml files that contain
+# environment variables injected into the specific container
+# when it is started.
+PER_INSTANCE_ENV_CONFIGURATION_FOLDER = None
class OnBoardingException(BaseException):
self.net = None
# used to generate short names for VNFs (Mininet limitation)
self.vnf_counter = 0
+ reset_subnets()
LOG.info("Initialized 5GTANGO LLCM module.")
def register_service_package(self, service_uuid, service):
# lets perform all steps needed to onboard the service
service.onboard()
- def get_next_vnf_name(self):
- self.vnf_counter += 1
- return "vnf%d" % self.vnf_counter
-
class Service(object):
"""
self.manifest = None
self.nsd = None
self.vnfds = dict()
- self.saps = dict()
- self.saps_ext = list()
- self.saps_int = list()
self.local_docker_files = dict()
self.remote_docker_image_urls = dict()
self.instances = dict()
- # dict to find the vnf_name for any vnf id
- self.vnf_id2vnf_name = dict()
+ self._instance_counter = 0
+ self.created_at = str(datetime.datetime.now())
def onboard(self):
"""
raise OnBoardingException("No NSD found.")
if len(self.vnfds) < 1:
raise OnBoardingException("No VNFDs found.")
- if DEPLOY_SAP:
- self._load_saps()
# 3. prepare container images (e.g. download or build Dockerfile)
if BUILD_DOCKERFILE:
self._load_docker_files()
else:
self._load_docker_urls()
self._pull_predefined_dockerimages()
- LOG.info("On-boarded service: %r" % self.manifest.get("name"))
+ # 4. reserve subnets
+ eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
+ self.eline_subnets = [ELINE_SUBNETS.pop(0) for _ in eline_fwd_links]
+ self.elan_subnets = [ELAN_SUBNETS.pop(0) for _ in elan_fwd_links]
+ LOG.debug("Reserved subnets for service '{}': E-Line: {} / E-LAN: {}"
+ .format(self.manifest.get("name"),
+ self.eline_subnets, self.elan_subnets))
+ LOG.info("On-boarded service: {}".format(self.manifest.get("name")))
def start_service(self):
"""
by the placement algorithm.
:return:
"""
- LOG.info("Starting service %r" % self.uuid)
+ LOG.info("Starting service {} ({})"
+ .format(get_triple_id(self.nsd), self.uuid))
# 1. each service instance gets a new uuid to identify it
instance_uuid = str(uuid.uuid4())
# build a instances dict (a bit like a NSR :))
self.instances[instance_uuid] = dict()
+ self.instances[instance_uuid]["uuid"] = self.uuid
+ # SSIID = short service instance ID (to postfix Container names)
+ self.instances[instance_uuid]["ssiid"] = self._instance_counter
+ self.instances[instance_uuid]["name"] = get_triple_id(self.nsd)
self.instances[instance_uuid]["vnf_instances"] = list()
+ self.instances[instance_uuid]["created_at"] = str(datetime.datetime.now())
+ # increase for next instance
+ self._instance_counter += 1
- # 2. compute placement of this service instance (adds DC names to
- # VNFDs)
- if not GK_STANDALONE_MODE:
- # self._calculate_placement(FirstDcPlacement)
- self._calculate_placement(RoundRobinDcPlacementWithSAPs)
- # 3. start all vnfds that we have in the service (except SAPs)
+ # 3. start all vnfds that we have in the service
for vnf_id in self.vnfds:
vnfd = self.vnfds[vnf_id]
- vnfi = None
- if not GK_STANDALONE_MODE:
- vnfi = self._start_vnfd(vnfd, vnf_id)
- self.instances[instance_uuid]["vnf_instances"].append(vnfi)
-
- # 4. start all SAPs in the service
- for sap in self.saps:
- self._start_sap(self.saps[sap], instance_uuid)
+ # attention: returns a list of started deployment units
+ vnfis = self._start_vnfd(
+ vnfd, vnf_id, self.instances[instance_uuid]["ssiid"])
+ # add list of VNFIs to total VNFI list
+ self.instances[instance_uuid]["vnf_instances"].extend(vnfis)
- # 5. Deploy E-Line and E_LAN links
+ # 4. Deploy E-Line, E-Tree and E-LAN links
        # Attention: Only done if "forwarding_graphs" section in NSD exists,
# even if "forwarding_graphs" are not used directly.
- if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
- vlinks = self.nsd["virtual_links"]
- # constituent virtual links are not checked
- # fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
- eline_fwd_links = [l for l in vlinks if (
- l["connectivity_type"] == "E-Line")]
- elan_fwd_links = [l for l in vlinks if (
- l["connectivity_type"] == "E-LAN")]
-
- GK.net.deployed_elines.extend(eline_fwd_links)
- GK.net.deployed_elans.extend(elan_fwd_links)
-
- # 5a. deploy E-Line links
- self._connect_elines(eline_fwd_links, instance_uuid)
-
- # 5b. deploy E-LAN links
- self._connect_elans(elan_fwd_links, instance_uuid)
+        # Attention2: Making a copy of *_subnets with list() is important here!
+ eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
+ # 5a. deploy E-Line links
+ GK.net.deployed_elines.extend(eline_fwd_links) # bookkeeping
+ self._connect_elines(eline_fwd_links, instance_uuid, list(self.eline_subnets))
+ # 5b. deploy E-Tree/E-LAN links
+ GK.net.deployed_elans.extend(elan_fwd_links) # bookkeeping
+ self._connect_elans(elan_fwd_links, instance_uuid, list(self.elan_subnets))
# 6. run the emulator specific entrypoint scripts in the VNFIs of this
# service instance
self._trigger_emulator_start_scripts_in_vnfis(
self.instances[instance_uuid]["vnf_instances"])
-
- LOG.info("Service started. Instance id: %r" % instance_uuid)
+ # done
+ LOG.info("Service '{}' started. Instance id: {} SSIID: {}"
+ .format(self.instances[instance_uuid]["name"],
+ instance_uuid,
+ self.instances[instance_uuid]["ssiid"]))
return instance_uuid
def stop_service(self, instance_uuid):
This method stops a running service instance.
It iterates over all VNF instances, stopping them each
and removing them from their data center.
-
:param instance_uuid: the uuid of the service instance to be stopped
"""
LOG.info("Stopping service %r" % self.uuid)
# get relevant information
# instance_uuid = str(self.uuid.uuid4())
vnf_instances = self.instances[instance_uuid]["vnf_instances"]
-
        # trigger stop scripts in vnf instances and wait a few seconds for
# completion
self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
time.sleep(VNF_STOP_WAIT_TIME)
-
+ # stop all vnfs
for v in vnf_instances:
self._stop_vnfi(v)
-
- for sap_name in self.saps_ext:
- ext_sap = self.saps[sap_name]
- target_dc = ext_sap.get("dc")
- target_dc.removeExternalSAP(sap_name)
- LOG.info("Stopping the SAP instance: %r in DC %r" %
- (sap_name, target_dc))
-
- if not GK_STANDALONE_MODE:
- # remove placement?
- # self._remove_placement(RoundRobinPlacement)
- None
# last step: remove the instance from the list of all instances
del self.instances[instance_uuid]
- def _start_vnfd(self, vnfd, vnf_id, **kwargs):
+    def _get_elines_and_elans(self):
+        """
+        Get the E-Line, E-LAN, E-Tree links from the NSD.
+        E-Tree links are treated like (and returned with) the E-LAN links.
+        :return: tuple (eline_fwd_links, elan_fwd_links) of lists of
+            virtual link dicts taken from self.nsd["virtual_links"]
+        """
+        # Attention: Only done if "forwarding_graphs" section in NSD exists,
+        # even if "forwarding_graphs" are not used directly.
+        eline_fwd_links = list()
+        elan_fwd_links = list()
+        if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
+            vlinks = self.nsd["virtual_links"]
+            # constituent virtual links are not checked
+            eline_fwd_links = [l for l in vlinks if (
+                l["connectivity_type"] == "E-Line")]
+            elan_fwd_links = [l for l in vlinks if (
+                l["connectivity_type"] == "E-LAN" or
+                l["connectivity_type"] == "E-Tree")]  # Treat E-Tree as E-LAN
+        return eline_fwd_links, elan_fwd_links
+
+    def _get_resource_limits(self, deployment_unit):
+        """
+        Extract resource limits from a deployment unit descriptor.
+        Falls back to defaults (no cpuset, full CPU bandwidth, no memory
+        limit) when no "resource_requirements" section is present.
+        :param deployment_unit: V/CDU descriptor dict
+        :return: tuple (cpu_list, cpu_period, cpu_quota, mem_limit);
+            cpu_list is a comma-separated cpuset string or None,
+            mem_limit is an int in bytes or None
+        """
+        # defaults
+        cpu_list = None
+        cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(1.0))
+        mem_limit = None
+        # update from descriptor
+        if "resource_requirements" in deployment_unit:
+            res_req = deployment_unit.get("resource_requirements")
+            # "cpuset" wins over "vcpus" if both are present
+            cpu_list = res_req.get("cpu").get("cpuset")
+            if cpu_list is None:
+                cpu_list = res_req.get("cpu").get("vcpus")
+            if cpu_list is not None:
+                # attention: docker expects list as string w/o spaces:
+                cpu_list = str(cpu_list).replace(" ", "").strip()
+            cpu_bw = res_req.get("cpu").get("cpu_bw")
+            if cpu_bw is None:
+                cpu_bw = 1.0
+            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
+            mem_limit = res_req.get("memory").get("size")
+            # unit defaults to GB when not given in the descriptor
+            mem_unit = str(res_req.get("memory").get("size_unit", "GB"))
+            if mem_limit is not None:
+                mem_limit = int(mem_limit)
+                # to bytes
+                if "G" in mem_unit:
+                    mem_limit = mem_limit * 1024 * 1024 * 1024
+                elif "M" in mem_unit:
+                    mem_limit = mem_limit * 1024 * 1024
+                elif "K" in mem_unit:
+                    mem_limit = mem_limit * 1024
+        return cpu_list, cpu_period, cpu_quota, mem_limit
+
+ def _start_vnfd(self, vnfd, vnf_id, ssiid, **kwargs):
"""
Start a single VNFD of this service
:param vnfd: vnfd descriptor dict
:param vnf_id: unique id of this vnf in the nsd
:return:
"""
+ vnfis = list()
# the vnf_name refers to the container image to be deployed
vnf_name = vnfd.get("name")
-
+ # combine VDUs and CDUs
+ deployment_units = (vnfd.get("virtual_deployment_units", []) +
+ vnfd.get("cloudnative_deployment_units", []))
# iterate over all deployment units within each VNFDs
- for u in vnfd.get("virtual_deployment_units"):
- # 1. get the name of the docker image to start and the assigned DC
- if vnf_id not in self.remote_docker_image_urls:
- raise Exception("No image name for %r found. Abort." % vnf_id)
- docker_name = self.remote_docker_image_urls.get(vnf_id)
- target_dc = vnfd.get("dc")
- # 2. perform some checks to ensure we can start the container
- assert(docker_name is not None)
+ for u in deployment_units:
+ # 0. vnf_container_name = vnf_id.vdu_id
+ vnf_container_name = get_container_name(vnf_id, u.get("id"))
+ vnf_container_instance_name = get_container_name(vnf_id, u.get("id"), ssiid)
+            # 1. get the name of the docker image to start
+ if vnf_container_name not in self.remote_docker_image_urls:
+ raise Exception("No image name for %r found. Abort." % vnf_container_name)
+ docker_image_name = self.remote_docker_image_urls.get(vnf_container_name)
+ # 2. select datacenter to start the VNF in
+ target_dc = self._place(vnfd, vnf_id, u, ssiid)
+ # 3. perform some checks to ensure we can start the container
+ assert(docker_image_name is not None)
assert(target_dc is not None)
- if not self._check_docker_image_exists(docker_name):
- raise Exception(
- "Docker image %r not found. Abort." % docker_name)
+ if not self._check_docker_image_exists(docker_image_name):
+ raise Exception("Docker image {} not found. Abort."
+ .format(docker_image_name))
- # 3. get the resource limits
- res_req = u.get("resource_requirements")
- cpu_list = res_req.get("cpu").get("cores")
- if cpu_list is None:
- cpu_list = res_req.get("cpu").get("vcpus")
- if cpu_list is None:
- cpu_list = "1"
- cpu_bw = res_req.get("cpu").get("cpu_bw")
- if not cpu_bw:
- cpu_bw = 1
- mem_num = str(res_req.get("memory").get("size"))
- if len(mem_num) == 0:
- mem_num = "2"
- mem_unit = str(res_req.get("memory").get("size_unit"))
- if str(mem_unit) == 0:
- mem_unit = "GB"
- mem_limit = float(mem_num)
- if mem_unit == "GB":
- mem_limit = mem_limit * 1024 * 1024 * 1024
- elif mem_unit == "MB":
- mem_limit = mem_limit * 1024 * 1024
- elif mem_unit == "KB":
- mem_limit = mem_limit * 1024
- mem_lim = int(mem_limit)
- cpu_period, cpu_quota = self._calculate_cpu_cfs_values(
- float(cpu_bw))
-
- # check if we need to deploy the management ports (defined as
- # type:management both on in the vnfd and nsd)
- intfs = vnfd.get("connection_points", [])
+ # 4. get the resource limits
+ cpu_list, cpu_period, cpu_quota, mem_limit = self._get_resource_limits(u)
+
+ # get connection points defined for the DU
+ intfs = u.get("connection_points", [])
# do some re-naming of fields to be compatible to containernet
for i in intfs:
if i.get("address"):
+ LOG.info("Found static address for {}: {}"
+ .format(i.get("id"), i.get("address")))
i["ip"] = i.get("address")
- mgmt_intf_names = []
- if USE_DOCKER_MGMT:
- mgmt_intfs = [vnf_id + ':' + intf['id']
- for intf in intfs if intf.get('type') == 'management']
- # check if any of these management interfaces are used in a
- # management-type network in the nsd
- for nsd_intf_name in mgmt_intfs:
- vlinks = [l["connection_points_reference"]
- for l in self.nsd.get("virtual_links", [])]
- for link in vlinks:
- if nsd_intf_name in link and self.check_mgmt_interface(
- link):
- # this is indeed a management interface and can be
- # skipped
- vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
- nsd_intf_name)
- found_interfaces = [
- intf for intf in intfs if intf.get('id') == vnf_interface]
- intfs.remove(found_interfaces[0])
- mgmt_intf_names.append(vnf_interface)
-
- # 4. generate the volume paths for the docker container
+ # get ports and port_bindings from the port and publish fields of CNFD
+ # see: https://github.com/containernet/containernet/wiki/Exposing-and-mapping-network-ports
+ ports = list() # Containernet naming
+ port_bindings = dict()
+ for i in intfs:
+ if i.get("port"): # field with a single port
+ if not isinstance(i.get("port"), int):
+ LOG.info("Field 'port' is no int CP: {}".format(i))
+ else:
+ ports.append(i.get("port")) # collect all ports
+ if i.get("ports"): # list with multiple ports
+ if not isinstance(i.get("ports"), list):
+ LOG.info("Field 'port' is no list CP: {}".format(i))
+ else:
+ for p in i.get("ports"):
+ if not isinstance(p, int):
+ # do some parsing
+ try:
+ if "/udp" in p:
+ p = tuple(p.split("/"))
+ else:
+ p = int(p)
+ ports.append(p) # collect all ports
+ except BaseException as ex:
+ LOG.error(
+ "Could not parse ports list: {}".format(p))
+ LOG.error(ex)
+ else:
+ ports.append(p) # collect all ports
+ if i.get("publish"):
+ if not isinstance(i.get("publish"), dict):
+ LOG.info("Field 'publish' is no dict CP: {}".format(i))
+ else:
+ port_bindings.update(i.get("publish"))
+ # update port mapping for cases where service is deployed > 1 times
+ port_bindings = update_port_mapping_multi_instance(ssiid, port_bindings)
+ if len(ports) > 0:
+ LOG.info("{} exposes ports: {}".format(vnf_container_instance_name, ports))
+ if len(port_bindings) > 0:
+ LOG.info("{} publishes ports: {}".format(vnf_container_instance_name, port_bindings))
+
+ # 5. collect additional information to start container
volumes = list()
- # a volume to extract log files
- docker_log_path = "/tmp/results/%s/%s" % (self.uuid, vnf_id)
- LOG.debug("LOG path for vnf %s is %s." % (vnf_id, docker_log_path))
- if not os.path.exists(docker_log_path):
- LOG.debug("Creating folder %s" % docker_log_path)
- os.makedirs(docker_log_path)
-
- volumes.append(docker_log_path + ":/mnt/share/")
-
- # 5. do the dc.startCompute(name="foobar") call to run the container
- # TODO consider flavors, and other annotations
- # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
- # use the vnf_id in the nsd as docker name
- # so deployed containers can be easily mapped back to the nsd
+ cenv = dict()
+ # 5.1 inject descriptor based start/stop commands into env (overwrite)
+ VNFD_CMD_START = u.get("vm_cmd_start")
+ VNFD_CMD_STOP = u.get("vm_cmd_stop")
+ if VNFD_CMD_START and not VNFD_CMD_START == "None":
+ LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_START) +
+ " Overwriting SON_EMU_CMD.")
+ cenv["SON_EMU_CMD"] = VNFD_CMD_START
+ if VNFD_CMD_STOP and not VNFD_CMD_STOP == "None":
+ LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_STOP) +
+ " Overwriting SON_EMU_CMD_STOP.")
+ cenv["SON_EMU_CMD_STOP"] = VNFD_CMD_STOP
+
+ # 5.2 inject per instance configurations based on envs
+ conf_envs = self._load_instance_conf_envs(vnf_container_instance_name)
+ cenv.update(conf_envs)
+
+ # 5.3 handle optional ipc_mode setting
+ ipc_mode = u.get("ipc_mode", None)
+ # 5.4 handle optional devices setting
+ devices = u.get("devices", [])
+ # 5.5 handle optional cap_add setting
+ cap_add = u.get("cap_add", [])
+
+ # 6. Start the container
LOG.info("Starting %r as %r in DC %r" %
- (vnf_name, vnf_id, vnfd.get("dc")))
+ (vnf_name, vnf_container_instance_name, target_dc))
LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
+ # start the container
vnfi = target_dc.startCompute(
- vnf_id,
+ vnf_container_instance_name,
network=intfs,
- image=docker_name,
- flavor_name="small",
+ image=docker_image_name,
cpu_quota=cpu_quota,
cpu_period=cpu_period,
- cpuset=cpu_list,
- mem_limit=mem_lim,
+ cpuset_cpus=cpu_list,
+ mem_limit=mem_limit,
volumes=volumes,
+ properties=cenv, # environment
+ ports=ports,
+ port_bindings=port_bindings,
+ # only publish if explicitly stated in descriptor
+ publish_all_ports=False,
+ ipc_mode=ipc_mode,
+ devices=devices,
+ cap_add=cap_add,
type=kwargs.get('type', 'docker'))
-
# add vnfd reference to vnfi
vnfi.vnfd = vnfd
-
- # rename the docker0 interfaces (eth0) to the management port name
- # defined in the VNFD
- if USE_DOCKER_MGMT:
- for intf_name in mgmt_intf_names:
- self._vnf_reconfigure_network(
- vnfi, 'eth0', new_name=intf_name)
-
- return vnfi
+ # add container name
+ vnfi.vnf_container_name = vnf_container_name
+ vnfi.vnf_container_instance_name = vnf_container_instance_name
+ vnfi.ssiid = ssiid
+ # store vnfi
+ vnfis.append(vnfi)
+ return vnfis
def _stop_vnfi(self, vnfi):
"""
Stop a VNF instance.
-
:param vnfi: vnf instance to be stopped
"""
# Find the correct datacenter
status = vnfi.getStatus()
dc = vnfi.datacenter
-
# stop the vnfi
LOG.info("Stopping the vnf instance contained in %r in DC %r" %
(status["name"], dc))
def _get_vnf_instance(self, instance_uuid, vnf_id):
"""
- Returns the Docker object for the given VNF id (or Docker name).
- :param instance_uuid: UUID of the service instance to search in.
- :param name: VNF name or Docker name. We are fuzzy here.
- :return:
+ Returns VNFI object for a given "vnf_id" or "vnf_container_name" taken from an NSD.
+ :return: single object
"""
- dn = vnf_id
for vnfi in self.instances[instance_uuid]["vnf_instances"]:
- if vnfi.name == dn:
+ if str(vnfi.name) == str(vnf_id):
return vnfi
- LOG.warning("No container with name: {0} found.".format(dn))
+ LOG.warning("No container with name: {0} found.".format(vnf_id))
+ return None
+
+    def _get_vnf_instance_units(self, instance_uuid, vnf_id):
+        """
+        Returns a list of VNFI objects (all deployment units) for a given
+        "vnf_id" taken from an NSD.
+        Matching is done by substring: a unit belongs to the VNF if
+        vnf_id is contained in the unit's container name.
+        :param instance_uuid: service instance to search in
+        :param vnf_id: VNF id from the NSD (may be None)
+        :return: list of VNFI objects, or None if vnf_id is None or
+            no matching unit was found
+        """
+        if vnf_id is None:
+            return None
+        r = list()
+        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
+            if vnf_id in vnfi.name:
+                r.append(vnfi)
+        if len(r) > 0:
+            LOG.debug("Found units: {} for vnf_id: {}"
+                      .format([i.name for i in r], vnf_id))
+            return r
+        LOG.warning("No container(s) with name: {0} found.".format(vnf_id))
+        return None
@staticmethod
:param net_str: network configuration string, e.g., 1.2.3.4/24
:return:
"""
-
# assign new ip address
if net_str is not None:
intf = vnfi.intf(intf=if_name)
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
- LOG.debug("%r = %r" % (var, cmd))
- if var == "SON_EMU_CMD":
- LOG.info("Executing entry point script in %r: %r" %
- (vnfi.name, cmd))
+ if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
+ LOG.info("Executing script in '{}': {}={}"
+ .format(vnfi.name, var, cmd))
# execute command in new thread to ensure that GK is not
# blocked by VNF
t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
+ break # only execute one command
def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
for vnfi in vnfi_list:
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
- if var == "SON_EMU_CMD_STOP":
- LOG.info("Executing stop script in %r: %r" %
- (vnfi.name, cmd))
+ if var == "SON_EMU_CMD_STOP" or var == "VIM_EMU_CMD_STOP":
+ LOG.info("Executing script in '{}': {}={}"
+ .format(vnfi.name, var, cmd))
# execute command in new thread to ensure that GK is not
# blocked by VNF
t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
+ break # only execute one command
+
+    def _load_instance_conf_envs(self, cname):
+        """
+        Try to load an instance-specific env file. If not found,
+        just return an empty dict.
+        Looks for "<cname>.env.yml" inside
+        PER_INSTANCE_ENV_CONFIGURATION_FOLDER (no-op if that is None).
+        :param cname: container instance name used as file prefix
+        :return: dict of environment variables (possibly empty)
+        """
+        if PER_INSTANCE_ENV_CONFIGURATION_FOLDER is None:
+            return dict()
+        try:
+            path = os.path.expanduser(PER_INSTANCE_ENV_CONFIGURATION_FOLDER)
+            path = os.path.join(path, "{}.env.yml".format(cname))
+            res = load_yaml(path)
+            LOG.info("Loaded instance-specific env file for '{}': {}"
+                     .format(cname, res))
+            return res
+        except BaseException as ex:
+            # a missing env file is expected and not an error
+            LOG.info("No instance-specific env file found for: {}"
+                     .format(cname))
+            del ex
+        return dict()
def _unpack_service_package(self):
"""
raise OnBoardingException("No NSD with type 'application/vnd.5gtango.nsd' found.")
self.nsd = load_yaml(nsd_path)
GK.net.deployed_nsds.append(self.nsd) # TODO this seems strange (remove?)
- # create dict to find the vnf_name for any vnf id
- self.vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
- reduce(lambda x, y: dict(x, **y),
- map(lambda d: {d["vnf_id"]: d["vnf_name"]},
- self.nsd["network_functions"])))
LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
else:
raise OnBoardingException(
Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
:return:
"""
-
# first make a list of all the vnfds in the package
vnfd_set = dict()
if "package_content" in self.manifest:
if len(vnfd_set) < 1:
raise OnBoardingException("No VNFDs found.")
# then link each vnf_id in the nsd to its vnfd
- for vnf_id in self.vnf_id2vnf_name:
- vnf_name = self.vnf_id2vnf_name[vnf_id]
- self.vnfds[vnf_id] = vnfd_set[vnf_name]
- LOG.debug("Loaded VNFD: {0} id: {1}".format(vnf_name, vnf_id))
-
- def _load_saps(self):
- # create list of all SAPs
- # check if we need to deploy management ports
- if USE_DOCKER_MGMT:
- SAPs = [p for p in self.nsd["connection_points"]
- if 'management' not in p.get('type')]
- else:
- SAPs = [p for p in self.nsd["connection_points"]]
-
- for sap in SAPs:
- # endpoint needed in this service
- sap_id, sap_interface, sap_docker_name = parse_interface(sap['id'])
- # make sure SAP has type set (default internal)
- sap["type"] = sap.get("type", 'internal')
-
- # Each Service Access Point (connection_point) in the nsd is an IP
- # address on the host
- if sap["type"] == "external":
- # add to vnfds to calculate placement later on
- sap_net = SAP_SUBNETS.pop(0)
- self.saps[sap_docker_name] = {
- "name": sap_docker_name, "type": "external", "net": sap_net}
- # add SAP vnf to list in the NSD so it is deployed later on
- # each SAP gets a unique VNFD and vnf_id in the NSD and custom
- # type (only defined in the dummygatekeeper)
- self.nsd["network_functions"].append(
- {"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_ext"})
-
- # Each Service Access Point (connection_point) in the nsd is
- # getting its own container (default)
- elif sap["type"] == "internal" or sap["type"] == "management":
- # add SAP to self.vnfds
- if SAP_VNFD is None:
- sapfile = pkg_resources.resource_filename(
- __name__, "sap_vnfd.yml")
- else:
- sapfile = SAP_VNFD
- sap_vnfd = load_yaml(sapfile)
- sap_vnfd["connection_points"][0]["id"] = sap_interface
- sap_vnfd["name"] = sap_docker_name
- sap_vnfd["type"] = "internal"
- # add to vnfds to calculate placement later on and deploy
- self.saps[sap_docker_name] = sap_vnfd
- # add SAP vnf to list in the NSD so it is deployed later on
- # each SAP get a unique VNFD and vnf_id in the NSD
- self.nsd["network_functions"].append(
- {"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_int"})
-
- LOG.debug("Loaded SAP: name: {0}, type: {1}".format(
- sap_docker_name, sap['type']))
-
- # create sap lists
- self.saps_ext = [self.saps[sap]['name']
- for sap in self.saps if self.saps[sap]["type"] == "external"]
- self.saps_int = [self.saps[sap]['name']
- for sap in self.saps if self.saps[sap]["type"] == "internal"]
-
- def _start_sap(self, sap, instance_uuid):
- if not DEPLOY_SAP:
- return
-
- LOG.info('start SAP: {0} ,type: {1}'.format(sap['name'], sap['type']))
- if sap["type"] == "internal":
- vnfi = None
- if not GK_STANDALONE_MODE:
- vnfi = self._start_vnfd(sap, sap['name'], type='sap_int')
- self.instances[instance_uuid]["vnf_instances"].append(vnfi)
-
- elif sap["type"] == "external":
- target_dc = sap.get("dc")
- # add interface to dc switch
- target_dc.attachExternalSAP(sap['name'], sap['net'])
-
- def _connect_elines(self, eline_fwd_links, instance_uuid):
+ for v in self.nsd.get("network_functions"):
+ if v.get("vnf_name") in vnfd_set:
+ self.vnfds[v.get("vnf_id")] = vnfd_set[v.get("vnf_name")]
+ LOG.debug("Loaded VNFD: {0} id: {1}"
+ .format(v.get("vnf_name"), v.get("vnf_id")))
+
+ def _connect_elines(self, eline_fwd_links, instance_uuid, subnets):
"""
Connect all E-LINE links in the NSD
+ Attention: This method DOES NOT support multi V/CDU VNFs!
:param eline_fwd_links: list of E-LINE links in the NSD
:param: instance_uuid of the service
+ :param: subnets list of subnets to be used
:return:
"""
# cookie is used as identifier for the flowrules installed by the dummygatekeeper
cookie = 1
for link in eline_fwd_links:
LOG.info("Found E-Line: {}".format(link))
- # check if we need to deploy this link when its a management link:
- if USE_DOCKER_MGMT:
- if self.check_mgmt_interface(
- link["connection_points_reference"]):
- continue
-
- src_id, src_if_name, src_sap_id = parse_interface(
+ src_id, src_if_name = parse_interface(
link["connection_points_reference"][0])
- dst_id, dst_if_name, dst_sap_id = parse_interface(
+ dst_id, dst_if_name = parse_interface(
link["connection_points_reference"][1])
-
+ LOG.info("Searching C/VDU for E-Line: src={}, src_if={}, dst={}, dst_if={}"
+ .format(src_id, src_if_name, dst_id, dst_if_name))
+ # handle C/VDUs (ugly hack, only one V/CDU per VNF for now)
+ src_units = self._get_vnf_instance_units(instance_uuid, src_id)
+ dst_units = self._get_vnf_instance_units(instance_uuid, dst_id)
+ if src_units is None or dst_units is None:
+ LOG.info("No VNF-VNF link. Skipping: src={}, src_if={}, dst={}, dst_if={}"
+ .format(src_id, src_if_name, dst_id, dst_if_name))
+ return
+ # we only support VNFs with one V/CDU right now
+ if len(src_units) != 1 or len(dst_units) != 1:
+ raise BaseException("LLCM does not support E-LINES for multi V/CDU VNFs.")
+ # get the full name from that C/VDU and use it as src_id and dst_id
+ src_id = src_units[0].name
+ dst_id = dst_units[0].name
+ # from here we have all info we need
+ LOG.info("Creating E-Line for C/VDU: src={}, src_if={}, dst={}, dst_if={}"
+ .format(src_id, src_if_name, dst_id, dst_if_name))
+ # get involved vnfis
+ src_vnfi = src_units[0]
+ dst_vnfi = dst_units[0]
+ # proceed with chaining setup
setChaining = False
- # check if there is a SAP in the link and chain everything together
- if src_sap_id in self.saps and dst_sap_id in self.saps:
- LOG.info(
- '2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id, dst_sap_id))
- continue
-
- elif src_sap_id in self.saps_ext:
- src_id = src_sap_id
- # set intf name to None so the chaining function will choose
- # the first one
- src_if_name = None
- dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
- if dst_vnfi is not None:
- # choose first ip address in sap subnet
- sap_net = self.saps[src_sap_id]['net']
- sap_ip = "{0}/{1}".format(str(sap_net[2]),
- sap_net.prefixlen)
- self._vnf_reconfigure_network(
- dst_vnfi, dst_if_name, sap_ip)
- setChaining = True
-
- elif dst_sap_id in self.saps_ext:
- dst_id = dst_sap_id
- # set intf name to None so the chaining function will choose
- # the first one
- dst_if_name = None
- src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
- if src_vnfi is not None:
- sap_net = self.saps[dst_sap_id]['net']
- sap_ip = "{0}/{1}".format(str(sap_net[2]),
- sap_net.prefixlen)
- self._vnf_reconfigure_network(
- src_vnfi, src_if_name, sap_ip)
- setChaining = True
-
- # Link between 2 VNFs
- else:
- LOG.info("Creating E-Line: src={}, dst={}"
- .format(src_id, dst_id))
- # make sure we use the correct sap vnf name
- if src_sap_id in self.saps_int:
- src_id = src_sap_id
- if dst_sap_id in self.saps_int:
- dst_id = dst_sap_id
- # get involved vnfis
- src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
- dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
-
- if src_vnfi is not None and dst_vnfi is not None:
- setChaining = True
- # re-configure the VNFs IP assignment and ensure that a new
- # subnet is used for each E-Link
- eline_net = ELINE_SUBNETS.pop(0)
- ip1 = "{0}/{1}".format(str(eline_net[1]),
- eline_net.prefixlen)
- ip2 = "{0}/{1}".format(str(eline_net[2]),
- eline_net.prefixlen)
- # check if VNFs have fixed IPs (address field in VNFDs)
- if (self._get_vnfd_cp_from_vnfi(src_vnfi, src_if_name)
- .get("address") is None):
- self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
- # check if VNFs have fixed IPs (address field in VNFDs)
- if (self._get_vnfd_cp_from_vnfi(dst_vnfi, dst_if_name)
- .get("address") is None):
- self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
-
- # Set the chaining
+ if src_vnfi is not None and dst_vnfi is not None:
+ setChaining = True
+ # re-configure the VNFs IP assignment and ensure that a new
+ # subnet is used for each E-Link
+ eline_net = subnets.pop(0)
+ ip1 = "{0}/{1}".format(str(eline_net[1]),
+ eline_net.prefixlen)
+ ip2 = "{0}/{1}".format(str(eline_net[2]),
+ eline_net.prefixlen)
+ # check if VNFs have fixed IPs (ip/address field in VNFDs)
+ if (self._get_vnfd_cp_from_vnfi(
+ src_vnfi, src_if_name).get("ip") is None and
+ self._get_vnfd_cp_from_vnfi(
+ src_vnfi, src_if_name).get("address") is None):
+ self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
+ # check if VNFs have fixed IPs (ip field in VNFDs)
+ if (self._get_vnfd_cp_from_vnfi(
+ dst_vnfi, dst_if_name).get("ip") is None and
+ self._get_vnfd_cp_from_vnfi(
+ dst_vnfi, dst_if_name).get("address") is None):
+ self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
+ # set the chaining
if setChaining:
GK.net.setChain(
src_id, dst_id,
if cp.get("id") == ifname:
return cp
- def _connect_elans(self, elan_fwd_links, instance_uuid):
+ def _connect_elans(self, elan_fwd_links, instance_uuid, subnets):
"""
- Connect all E-LAN links in the NSD
+ Connect all E-LAN/E-Tree links in the NSD
+ This method supports multi-V/CDU VNFs if the connection
+ point names of the DUs are the same as the ones in the NSD.
:param elan_fwd_links: list of E-LAN links in the NSD
:param: instance_uuid of the service
+ :param: subnets list of subnets to be used
:return:
"""
for link in elan_fwd_links:
- # check if we need to deploy this link when its a management link:
- if USE_DOCKER_MGMT:
- if self.check_mgmt_interface(
- link["connection_points_reference"]):
- continue
-
+ # a new E-LAN/E-Tree
elan_vnf_list = []
- # check if an external SAP is in the E-LAN (then a subnet is
- # already defined)
- intfs_elan = [intf for intf in link["connection_points_reference"]]
- lan_sap = self.check_ext_saps(intfs_elan)
- if lan_sap:
- lan_net = self.saps[lan_sap]['net']
- lan_hosts = list(lan_net.hosts())
- else:
- lan_net = ELAN_SUBNETS.pop(0)
- lan_hosts = list(lan_net.hosts())
-
- # generate lan ip address for all interfaces except external SAPs
- for intf in link["connection_points_reference"]:
-
- # skip external SAPs, they already have an ip
- vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
- intf)
- if vnf_sap_docker_name in self.saps_ext:
- elan_vnf_list.append(
- {'name': vnf_sap_docker_name, 'interface': vnf_interface})
- continue
-
- ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
- lan_net.prefixlen)
- vnf_id, intf_name, vnf_sap_id = parse_interface(intf)
-
- # make sure we use the correct sap vnf name
- src_docker_name = vnf_id
- if vnf_sap_id in self.saps_int:
- src_docker_name = vnf_sap_id
- vnf_id = vnf_sap_id
-
- LOG.debug(
- "Setting up E-LAN interface. (%s:%s) -> %s" % (
- vnf_id, intf_name, ip_address))
-
- # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
- # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
- # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
- vnfi = self._get_vnf_instance(instance_uuid, vnf_id)
- if vnfi is not None:
- self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
- # add this vnf and interface to the E-LAN for tagging
- elan_vnf_list.append(
- {'name': src_docker_name, 'interface': intf_name})
-
+ lan_net = subnets.pop(0)
+ lan_hosts = list(lan_net.hosts())
+
+ # generate lan ip address for all interfaces (of all involved (V/CDUs))
+ for intf_ref in link["connection_points_reference"]:
+ vnf_id, intf_name = parse_interface(intf_ref)
+ if vnf_id is None:
+ continue # skip references to NS connection points
+ units = self._get_vnf_instance_units(instance_uuid, vnf_id)
+ if units is None:
+ continue # skip if no deployment unit is present
+ # iterate over all involved deployment units
+ for uvnfi in units:
+ # Attention: we apply a simplification for multi DU VNFs here:
+ # the connection points of all involved DUs have to have the same
+ # name as the connection points of the surrounding VNF to be mapped.
+ # This is because we do not consider links specified in the VNFDs
+ container_name = uvnfi.name
+
+ ip_address = None
+ # get the interface of the unit
+ intf = self._get_vnfd_cp_from_vnfi(uvnfi, intf_name)
+ # check if there is a manually assigned address
+ if intf is not None:
+ if intf.get("address"):
+ ip_address = intf.get("address")
+ if ip_address is None:
+                        # automatically assign an IP from our pool
+ ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
+ lan_net.prefixlen)
+ LOG.debug(
+ "Setting up E-LAN/E-Tree interface. (%s:%s) -> %s" % (
+ container_name, intf_name, ip_address))
+ # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
+ # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
+ # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is
+ # necessary.
+ vnfi = self._get_vnf_instance(instance_uuid, container_name)
+ if vnfi is not None:
+ self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
+ # add this vnf and interface to the E-LAN for tagging
+ elan_vnf_list.append(
+ {'name': container_name, 'interface': intf_name})
# install the VLAN tags for this E-LAN
GK.net.setLAN(elan_vnf_list)
Get all paths to Dockerfiles from VNFDs and store them in dict.
:return:
"""
- for k, v in self.vnfds.iteritems():
- for vu in v.get("virtual_deployment_units"):
+ for vnf_id, v in self.vnfds.iteritems():
+ for vu in v.get("virtual_deployment_units", []):
+ vnf_container_name = get_container_name(vnf_id, vu.get("id"))
if vu.get("vm_image_format") == "docker":
vm_image = vu.get("vm_image")
docker_path = os.path.join(
self.package_content_path,
make_relative_path(vm_image))
- self.local_docker_files[k] = docker_path
- LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))
+ self.local_docker_files[vnf_container_name] = docker_path
+ LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
+ for cu in v.get("cloudnative_deployment_units", []):
+ vnf_container_name = get_container_name(vnf_id, cu.get("id"))
+ image = cu.get("image")
+ docker_path = os.path.join(
+ self.package_content_path,
+ make_relative_path(image))
+ self.local_docker_files[vnf_container_name] = docker_path
+ LOG.debug("Found Dockerfile (%r): %r" % (vnf_container_name, docker_path))
def _load_docker_urls(self):
"""
Get all URLs to pre-build docker images in some repo.
:return:
"""
- # also merge sap dicts, because internal saps also need a docker
- # container
- all_vnfs = self.vnfds.copy()
- all_vnfs.update(self.saps)
-
- for k, v in all_vnfs.iteritems():
- for vu in v.get("virtual_deployment_units", {}):
+ for vnf_id, v in self.vnfds.iteritems():
+ for vu in v.get("virtual_deployment_units", []):
+ vnf_container_name = get_container_name(vnf_id, vu.get("id"))
if vu.get("vm_image_format") == "docker":
url = vu.get("vm_image")
if url is not None:
url = url.replace("http://", "")
- self.remote_docker_image_urls[k] = url
+ self.remote_docker_image_urls[vnf_container_name] = url
LOG.debug("Found Docker image URL (%r): %r" %
- (k, self.remote_docker_image_urls[k]))
+ (vnf_container_name,
+ self.remote_docker_image_urls[vnf_container_name]))
+ for cu in v.get("cloudnative_deployment_units", []):
+ vnf_container_name = get_container_name(vnf_id, cu.get("id"))
+ url = cu.get("image")
+ if url is not None:
+ url = url.replace("http://", "")
+ self.remote_docker_image_urls[vnf_container_name] = url
+ LOG.debug("Found Docker image URL (%r): %r" %
+ (vnf_container_name,
+ self.remote_docker_image_urls[vnf_container_name]))
def _build_images_from_dockerfiles(self):
"""
"""
return len(DockerClient().images.list(name=image_name)) > 0
- def _calculate_placement(self, algorithm):
+ def _place(self, vnfd, vnfid, vdu, ssiid):
"""
- Do placement by adding the a field "dc" to
- each VNFD that points to one of our
- data center objects known to the gatekeeper.
+        Do placement. Return the DC object on which
+        the given VDU should be placed.
"""
assert(len(self.vnfds) > 0)
assert(len(GK.dcs) > 0)
- # instantiate algorithm an place
- p = algorithm()
- p.place(self.nsd, self.vnfds, self.saps, GK.dcs)
- LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
- # lets print the placement result
- for name, vnfd in self.vnfds.iteritems():
- LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
- for sap in self.saps:
- sap_dict = self.saps[sap]
- LOG.info("Placed SAP %r on DC %r" % (sap, str(sap_dict.get("dc"))))
+ if PLACEMENT_ALGORITHM_OBJ is None:
+ LOG.error("No placement algorithm given. Using FirstDcPlacement!")
+ p = FirstDcPlacement()
+ else:
+ p = PLACEMENT_ALGORITHM_OBJ
+ cname = get_container_name(vnfid, vdu.get("id"), ssiid)
+ rdc = p.place(GK.dcs, vnfd, vnfid, vdu, ssiid, cname)
+ LOG.info("Placement: '{}' --> '{}'".format(cname, rdc))
+ return rdc
def _calculate_cpu_cfs_values(self, cpu_time_percentage):
"""
(cpu_period, cpu_quota))
return int(cpu_period), int(cpu_quota)
- def check_ext_saps(self, intf_list):
- # check if the list of interfacs contains an external SAP
- saps_ext = [self.saps[sap]['name']
- for sap in self.saps if self.saps[sap]["type"] == "external"]
- for intf_name in intf_list:
- vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
- intf_name)
- if vnf_sap_docker_name in saps_ext:
- return vnf_sap_docker_name
-
- def check_mgmt_interface(self, intf_list):
- SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"]
- if 'management' in p.get('type')]
- for intf_name in intf_list:
- if intf_name in SAPs_mgmt:
- return True
-
"""
Some (simple) placement algorithms
Placement: Always use one and the same data center from the GK.dcs dict.
"""
- def place(self, nsd, vnfds, saps, dcs):
- for id, vnfd in vnfds.iteritems():
- vnfd["dc"] = list(dcs.itervalues())[0]
+ def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
+ return list(dcs.itervalues())[0]
class RoundRobinDcPlacement(object):
Placement: Distribute VNFs across all available DCs in a round robin fashion.
"""
- def place(self, nsd, vnfds, saps, dcs):
- c = 0
+ def __init__(self):
+ self.count = 0
+
+ def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
dcs_list = list(dcs.itervalues())
- for id, vnfd in vnfds.iteritems():
- vnfd["dc"] = dcs_list[c % len(dcs_list)]
- c += 1 # inc. c to use next DC
+ rdc = dcs_list[self.count % len(dcs_list)]
+ self.count += 1 # inc. count to use next DC
+ return rdc
-class RoundRobinDcPlacementWithSAPs(object):
+class StaticConfigPlacement(object):
"""
- Placement: Distribute VNFs across all available DCs in a round robin fashion,
- every SAP is instantiated on the same DC as the connected VNF.
+ Placement: Fixed assignment based on config file.
"""
- def place(self, nsd, vnfds, saps, dcs):
-
- # place vnfs
- c = 0
- dcs_list = list(dcs.itervalues())
- for id, vnfd in vnfds.iteritems():
- vnfd["dc"] = dcs_list[c % len(dcs_list)]
- c += 1 # inc. c to use next DC
-
- # place SAPs
- vlinks = nsd.get("virtual_links", [])
- eline_fwd_links = [l for l in vlinks if (
- l["connectivity_type"] == "E-Line")]
- elan_fwd_links = [l for l in vlinks if (
- l["connectivity_type"] == "E-LAN")]
-
- # SAPs on E-Line links are placed on the same DC as the VNF on the
- # E-Line
- for link in eline_fwd_links:
- src_id, src_if_name, src_sap_id = parse_interface(
- link["connection_points_reference"][0])
- dst_id, dst_if_name, dst_sap_id = parse_interface(
- link["connection_points_reference"][1])
-
- # check if there is a SAP in the link
- if src_sap_id in saps:
- # get dc where connected vnf is mapped to
- dc = vnfds[dst_id]['dc']
- saps[src_sap_id]['dc'] = dc
-
- if dst_sap_id in saps:
- # get dc where connected vnf is mapped to
- dc = vnfds[src_id]['dc']
- saps[dst_sap_id]['dc'] = dc
-
- # SAPs on E-LANs are placed on a random DC
- dcs_list = list(dcs.itervalues())
- dc_len = len(dcs_list)
- for link in elan_fwd_links:
- for intf in link["connection_points_reference"]:
- # find SAP interfaces
- intf_id, intf_name, intf_sap_id = parse_interface(intf)
- if intf_sap_id in saps:
- dc = dcs_list[randint(0, dc_len - 1)]
- saps[intf_sap_id]['dc'] = dc
+ def __init__(self, path=None):
+ if path is None:
+ path = "static_placement.yml"
+ path = os.path.expanduser(path)
+ self.static_placement = dict()
+ try:
+ self.static_placement = load_yaml(path)
+ except BaseException as ex:
+ LOG.error(ex)
+ LOG.error("Couldn't load placement from {}"
+ .format(path))
+ LOG.info("Loaded static placement: {}"
+ .format(self.static_placement))
+
+ def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
+ # check for container name entry
+ if cname not in self.static_placement:
+ LOG.error("Coudn't find {} in placement".format(cname))
+ LOG.error("Using first DC as fallback!")
+ return list(dcs.itervalues())[0]
+ # lookup
+ candidate_dc = self.static_placement.get(cname)
+        # check if the DC exists
+ if candidate_dc not in dcs:
+ LOG.error("Coudn't find DC {}".format(candidate_dc))
+ LOG.error("Using first DC as fallback!")
+ return list(dcs.itervalues())[0]
+ # return correct DC
+ return dcs.get(candidate_dc)
"""
def get(self):
"""
- Return a list of UUID's of uploaded service packages.
- :return: dict/list
+ Return a list of package descriptor headers.
+ Fakes the behavior of 5GTANGO's GK API to be
+ compatible with tng-cli.
+ :return: list
"""
LOG.info("GET /packages")
- return {"service_uuid_list": list(GK.services.iterkeys())}
+ result = list()
+ for suuid, sobj in GK.services.iteritems():
+ pkg = dict()
+ pkg["pd"] = dict()
+ pkg["uuid"] = suuid
+ pkg["pd"]["name"] = sobj.manifest.get("name")
+ pkg["pd"]["version"] = sobj.manifest.get("version")
+ pkg["created_at"] = sobj.created_at
+ result.append(pkg)
+ return result, 200, CORS_HEADER
+
+
+class Services(fr.Resource):
+
+ def get(self):
+ """
+ Return a list of services.
+ Fakes the behavior of 5GTANGO's GK API to be
+ compatible with tng-cli.
+ :return: list
+ """
+ LOG.info("GET /services")
+ result = list()
+ for suuid, sobj in GK.services.iteritems():
+ service = dict()
+ service["nsd"] = dict()
+ service["uuid"] = suuid
+ service["nsd"]["name"] = sobj.nsd.get("name")
+ service["nsd"]["version"] = sobj.nsd.get("version")
+ service["created_at"] = sobj.created_at
+ result.append(service)
+ return result, 200, CORS_HEADER
class Instantiations(fr.Resource):
# try to extract the service uuid from the request
json_data = request.get_json(force=True)
service_uuid = json_data.get("service_uuid")
-
+ service_name = json_data.get("service_name")
+ if service_name is None:
+ # lets be fuzzy
+ service_name = service_uuid
+ # first try to find by service_name
+ if service_name is not None:
+ for s_uuid, s in GK.services.iteritems():
+ if s.manifest.get("name") == service_name:
+ LOG.info("Searched for: {}. Found service w. UUID: {}"
+ .format(service_name, s_uuid))
+ service_uuid = s_uuid
# lets be a bit fuzzy here to make testing easier
if (service_uuid is None or service_uuid ==
"latest") and len(GK.services) > 0:
# ok, we have a service uuid, lets start the service
service_instance_uuid = GK.services.get(
service_uuid).start_service()
- return {"service_instance_uuid": service_instance_uuid}, 201
+ # multiple ID fields to be compatible with tng-bench and tng-cli
+ return ({"service_instance_uuid": service_instance_uuid,
+ "id": service_instance_uuid}, 201)
+ LOG.error("Service not found: {}/{}".format(service_uuid, service_name))
return "Service not found", 404
def get(self):
Returns a list of UUIDs containing all running services.
:return: dict / list
"""
- LOG.info("GET /instantiations")
- return {"service_instantiations_list": [
- list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
+ LOG.debug("GET /instantiations or /api/v3/records/services")
+ # return {"service_instantiations_list": [
+ # list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
+ result = list()
+ for suuid, sobj in GK.services.iteritems():
+ for iuuid, iobj in sobj.instances.iteritems():
+ inst = dict()
+ inst["uuid"] = iobj.get("uuid")
+ inst["instance_name"] = "{}-inst.{}".format(
+ iobj.get("name"), iobj.get("ssiid"))
+ inst["status"] = "running"
+ inst["created_at"] = iobj.get("created_at")
+ result.append(inst)
+ return result, 200, CORS_HEADER
def delete(self):
"""
"""
# try to extract the service and instance UUID from the request
json_data = request.get_json(force=True)
- service_uuid = json_data.get("service_uuid")
- instance_uuid = json_data.get("service_instance_uuid")
-
+ service_uuid_input = json_data.get("service_uuid")
+ instance_uuid_input = json_data.get("service_instance_uuid")
+ if len(GK.services) < 1:
+ return "No service on-boarded.", 404
# try to be fuzzy
- if service_uuid is None and len(GK.services) > 0:
- # if we don't get a service uuid, we simply stop the last service
- # in the list
- service_uuid = list(GK.services.iterkeys())[0]
- if instance_uuid is None and len(
- GK.services[service_uuid].instances) > 0:
- instance_uuid = list(
- GK.services[service_uuid].instances.iterkeys())[0]
-
- if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
- # valid service and instance UUID, stop service
- GK.services.get(service_uuid).stop_service(instance_uuid)
- return "service instance with uuid %r stopped." % instance_uuid, 200
- return "Service not found", 404
+ if service_uuid_input is None:
+ # if we don't get a service uuid we stop all services
+ service_uuid_list = list(GK.services.iterkeys())
+ LOG.info("No service_uuid given, stopping all.")
+ else:
+ service_uuid_list = [service_uuid_input]
+ # for each service
+ for service_uuid in service_uuid_list:
+ if instance_uuid_input is None:
+ instance_uuid_list = list(
+ GK.services[service_uuid].instances.iterkeys())
+ else:
+ instance_uuid_list = [instance_uuid_input]
+ # for all service instances
+ for instance_uuid in instance_uuid_list:
+ if (service_uuid in GK.services and
+ instance_uuid in GK.services[service_uuid].instances):
+ # valid service and instance UUID, stop service
+ GK.services.get(service_uuid).stop_service(instance_uuid)
+ LOG.info("Service instance with uuid %r stopped." % instance_uuid)
+ return "Service(s) stopped.", 200
class Exit(fr.Resource):
list(GK.dcs.values())[0].net.stop()
+def generate_subnets(prefix, base, subnet_size=50, mask=24):
+ # Generate a list of ipaddress in subnets
+ r = list()
+ for net in range(base, base + subnet_size):
+ subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
+ r.append(ipaddress.ip_network(unicode(subnet)))
+ return r
+
+
+def reset_subnets():
+ global ELINE_SUBNETS
+ global ELAN_SUBNETS
+ # private subnet definitions for the generated interfaces
+ # 30.0.xxx.0/24
+ ELAN_SUBNETS = generate_subnets('30.0', 0, subnet_size=50, mask=24)
+ # 20.0.xxx.0/24
+ ELINE_SUBNETS = generate_subnets('20.0', 0, subnet_size=50, mask=24)
+
+
def initialize_GK():
global GK
GK = Gatekeeper()
GK = None
initialize_GK()
# setup Flask
+http_server = None
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
api = fr.Api(app)
# define endpoints
-api.add_resource(Packages, '/packages', '/api/v2/packages')
+api.add_resource(Packages, '/packages', '/api/v2/packages', '/api/v3/packages')
+api.add_resource(Services, '/services', '/api/v2/services', '/api/v3/services')
api.add_resource(Instantiations, '/instantiations',
- '/api/v2/instantiations', '/api/v2/requests')
+ '/api/v2/instantiations', '/api/v2/requests', '/api/v3/requests',
+ '/api/v3/records/services')
api.add_resource(Exit, '/emulator/exit')
def start_rest_api(host, port, datacenters=dict()):
+ global http_server
GK.dcs = datacenters
GK.net = get_dc_network()
# start the Flask server (not the best performance but ok for our use case)
- app.run(host=host,
- port=port,
- debug=True,
- use_reloader=False # this is needed to run Flask in a non-main thread
- )
+ # app.run(host=host,
+ # port=port,
+ # debug=True,
+ # use_reloader=False # this is needed to run Flask in a non-main thread
+ # )
+ http_server = WSGIServer((host, port), app, log=open("/dev/null", "w"))
+ http_server.serve_forever()
+
+
+def stop_rest_api():
+ if http_server:
+ http_server.close()
def ensure_dir(name):
:param interface_name:
:return:
"""
-
if ':' in interface_name:
vnf_id, vnf_interface = interface_name.split(':')
- vnf_sap_docker_name = interface_name.replace(':', '_')
else:
- vnf_id = interface_name
+ vnf_id = None
vnf_interface = interface_name
- vnf_sap_docker_name = interface_name
+ return vnf_id, vnf_interface
- return vnf_id, vnf_interface, vnf_sap_docker_name
+def get_container_name(vnf_id, vdu_id, ssiid=None):
+ if ssiid is not None:
+ return "{}.{}.{}".format(vnf_id, vdu_id, ssiid)
+ return "{}.{}".format(vnf_id, vdu_id)
-def reset_subnets():
- # private subnet definitions for the generated interfaces
- # 10.10.xxx.0/24
- global SAP_SUBNETS
- SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
- # 10.20.xxx.0/30
- global ELAN_SUBNETS
- ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
- # 10.30.xxx.0/30
- global ELINE_SUBNETS
- ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
+
+def get_triple_id(descr):
+ return "{}.{}.{}".format(
+ descr.get("vendor"), descr.get("name"), descr.get("version"))
+
+
+def update_port_mapping_multi_instance(ssiid, port_bindings):
+ """
+ Port_bindings are used to expose ports of the deployed containers.
+ They would collide if we deploy multiple service instances.
+    This function adds an offset to them which is based on the
+    short service instance id (SSIID) and MULTI_INSTANCE_PORT_OFFSET.
+ """
+ def _offset(p):
+ return p + MULTI_INSTANCE_PORT_OFFSET * ssiid
+
+ port_bindings = {k: _offset(v) for k, v in port_bindings.iteritems()}
+ return port_bindings
if __name__ == '__main__':