LOG.setLevel(logging.INFO)
+CORS_HEADER = {'Access-Control-Allow-Origin': '*',
+ 'Access-Control-Allow-Methods': 'GET,OPTIONS'}
+
+
GK_STORAGE = "/tmp/vim-emu-tango-llcm/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
# offset for this: NEW_PORT (SSIID * OFFSET) + ORIGINAL_PORT
MULTI_INSTANCE_PORT_OFFSET = 1000
+# Selected Placement Algorithm: Points to the class of the selected
+# placement algorithm.
+PLACEMENT_ALGORITHM_OBJ = None
+
+# Path to folder with <container_name>.env.yml files that contain
+# environment variables injected into the specific container
+# when it is started.
+PER_INSTANCE_ENV_CONFIGURATION_FOLDER = None
+
class OnBoardingException(Exception):
    """
    Raised when on-boarding of a service package fails.

    Derives from Exception (not BaseException) per PEP 8, so that
    generic `except Exception` boundaries can handle it; existing
    `except OnBoardingException` handlers are unaffected.
    """
    pass
# increase for next instance
self._instance_counter += 1
- # 2. compute placement of this service instance (adds DC names to
- # VNFDs)
- # self._calculate_placement(FirstDcPlacement)
- self._calculate_placement(RoundRobinDcPlacement)
# 3. start all vnfds that we have in the service
for vnf_id in self.vnfds:
vnfd = self.vnfds[vnf_id]
raise Exception("No image name for %r found. Abort." % vnf_container_name)
docker_image_name = self.remote_docker_image_urls.get(vnf_container_name)
# 2. select datacenter to start the VNF in
- target_dc = vnfd.get("dc")
+ target_dc = self._place(vnfd, vnf_id, u, ssiid)
# 3. perform some checks to ensure we can start the container
assert(docker_image_name is not None)
assert(target_dc is not None)
# do some re-naming of fields to be compatible to containernet
for i in intfs:
if i.get("address"):
+ LOG.info("Found static address for {}: {}"
+ .format(i.get("id"), i.get("address")))
i["ip"] = i.get("address")
# get ports and port_bindings from the port and publish fields of CNFD
ports = list() # Containernet naming
port_bindings = dict()
for i in intfs:
- if i.get("port"):
+ if i.get("port"): # field with a single port
if not isinstance(i.get("port"), int):
LOG.info("Field 'port' is no int CP: {}".format(i))
else:
- ports.append(i.get("port"))
+ ports.append(i.get("port")) # collect all ports
+ if i.get("ports"): # list with multiple ports
+ if not isinstance(i.get("ports"), list):
+ LOG.info("Field 'port' is no list CP: {}".format(i))
+ else:
+ for p in i.get("ports"):
+ if not isinstance(p, int):
+ # do some parsing
+ try:
+ if "/udp" in p:
+ p = tuple(p.split("/"))
+ else:
+ p = int(p)
+ ports.append(p) # collect all ports
+ except BaseException as ex:
+ LOG.error(
+ "Could not parse ports list: {}".format(p))
+ LOG.error(ex)
+ else:
+ ports.append(p) # collect all ports
if i.get("publish"):
if not isinstance(i.get("publish"), dict):
LOG.info("Field 'publish' is no dict CP: {}".format(i))
" Overwriting SON_EMU_CMD_STOP.")
cenv["SON_EMU_CMD_STOP"] = VNFD_CMD_STOP
+ # 5.2 inject per instance configurations based on envs
+ conf_envs = self._load_instance_conf_envs(vnf_container_instance_name)
+ cenv.update(conf_envs)
+
+ # 5.3 handle optional ipc_mode setting
+ ipc_mode = u.get("ipc_mode", None)
+ # 5.4 handle optional devices setting
+ devices = u.get("devices", [])
+ # 5.5 handle optional cap_add setting
+ cap_add = u.get("cap_add", [])
+
# 6. Start the container
LOG.info("Starting %r as %r in DC %r" %
- (vnf_name, vnf_container_instance_name, vnfd.get("dc")))
+ (vnf_name, vnf_container_instance_name, target_dc))
LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
# start the container
vnfi = target_dc.startCompute(
port_bindings=port_bindings,
# only publish if explicitly stated in descriptor
publish_all_ports=False,
+ ipc_mode=ipc_mode,
+ devices=devices,
+ cap_add=cap_add,
type=kwargs.get('type', 'docker'))
# add vnfd reference to vnfi
vnfi.vnfd = vnfd
t.start()
break # only execute one command
+ def _load_instance_conf_envs(self, cname):
+ """
+ Try to load an instance-specific env file. If not found,
+ just return an empty dict.
+ """
+ if PER_INSTANCE_ENV_CONFIGURATION_FOLDER is None:
+ return dict()
+ try:
+ path = os.path.expanduser(PER_INSTANCE_ENV_CONFIGURATION_FOLDER)
+ path = os.path.join(path, "{}.env.yml".format(cname))
+ res = load_yaml(path)
+ LOG.info("Loaded instance-specific env file for '{}': {}"
+ .format(cname, res))
+ return res
+ except BaseException as ex:
+ LOG.info("No instance-specific env file found for: {}"
+ .format(cname))
+ del ex
+ return dict()
+
def _unpack_service_package(self):
"""
unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
lan_hosts = list(lan_net.hosts())
# generate lan ip address for all interfaces (of all involved (V/CDUs))
- for intf in link["connection_points_reference"]:
- vnf_id, intf_name = parse_interface(intf)
+ for intf_ref in link["connection_points_reference"]:
+ vnf_id, intf_name = parse_interface(intf_ref)
if vnf_id is None:
continue # skip references to NS connection points
units = self._get_vnf_instance_units(instance_uuid, vnf_id)
# Attention: we apply a simplification for multi DU VNFs here:
# the connection points of all involved DUs have to have the same
# name as the connection points of the surrounding VNF to be mapped.
- # This is because we do not consider links specified in the VNFds
+ # This is because we do not consider links specified in the VNFDs
container_name = uvnfi.name
- ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
- lan_net.prefixlen)
+
+ ip_address = None
+ # get the interface of the unit
+ intf = self._get_vnfd_cp_from_vnfi(uvnfi, intf_name)
+ # check if there is a manually assigned address
+ if intf is not None:
+ if intf.get("address"):
+ ip_address = intf.get("address")
+ if ip_address is None:
+ # automatically asign an IP from our pool
+ ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
+ lan_net.prefixlen)
LOG.debug(
"Setting up E-LAN/E-Tree interface. (%s:%s) -> %s" % (
container_name, intf_name, ip_address))
Get all URLs to pre-build docker images in some repo.
:return:
"""
- for vnf_id, v in self.vnfds.iteritems():
+ for vnf_id, v in list(self.vnfds.items()):
for vu in v.get("virtual_deployment_units", []):
vnf_container_name = get_container_name(vnf_id, vu.get("id"))
if vu.get("vm_image_format") == "docker":
dc = DockerClient()
LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
self.local_docker_files))
- for k, v in self.local_docker_files.iteritems():
+ for k, v in list(self.local_docker_files.items()):
for line in dc.build(path=v.replace(
"Dockerfile", ""), tag=k, rm=False, nocache=False):
LOG.debug("DOCKER BUILD: %s" % line)
If the package contains URLs to pre-build Docker images, we download them with this method.
"""
dc = DockerClient()
- for url in self.remote_docker_image_urls.itervalues():
+ for url in list(self.remote_docker_image_urls.values()):
# only pull if not present (speedup for development)
if not FORCE_PULL:
if len(dc.images.list(name=url)) > 0:
"""
return len(DockerClient().images.list(name=image_name)) > 0
- def _calculate_placement(self, algorithm):
+ def _place(self, vnfd, vnfid, vdu, ssiid):
"""
- Do placement by adding the a field "dc" to
- each VNFD that points to one of our
- data center objects known to the gatekeeper.
+ Do placement. Return the name of the DC to place
+ the given VDU.
"""
assert(len(self.vnfds) > 0)
assert(len(GK.dcs) > 0)
- # instantiate algorithm an place
- p = algorithm()
- p.place(self.nsd, self.vnfds, GK.dcs)
- LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
- # lets print the placement result
- for name, vnfd in self.vnfds.iteritems():
- LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
+ if PLACEMENT_ALGORITHM_OBJ is None:
+ LOG.error("No placement algorithm given. Using FirstDcPlacement!")
+ p = FirstDcPlacement()
+ else:
+ p = PLACEMENT_ALGORITHM_OBJ
+ cname = get_container_name(vnfid, vdu.get("id"), ssiid)
+ rdc = p.place(GK.dcs, vnfd, vnfid, vdu, ssiid, cname)
+ LOG.info("Placement: '{}' --> '{}'".format(cname, rdc))
+ return rdc
def _calculate_cpu_cfs_values(self, cpu_time_percentage):
"""
Placement: Always use one and the same data center from the GK.dcs dict.
"""
- def place(self, nsd, vnfds, dcs):
- for id, vnfd in vnfds.iteritems():
- vnfd["dc"] = list(dcs.itervalues())[0]
+ def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
+ return list(dcs.values())[0]
class RoundRobinDcPlacement(object):
Placement: Distribute VNFs across all available DCs in a round robin fashion.
"""
- def place(self, nsd, vnfds, dcs):
- c = 0
- dcs_list = list(dcs.itervalues())
- for id, vnfd in vnfds.iteritems():
- vnfd["dc"] = dcs_list[c % len(dcs_list)]
- c += 1 # inc. c to use next DC
+ def __init__(self):
+ self.count = 0
+
+ def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
+ dcs_list = list(dcs.values())
+ rdc = dcs_list[self.count % len(dcs_list)]
+ self.count += 1 # inc. count to use next DC
+ return rdc
+
+
class StaticConfigPlacement(object):
    """
    Placement: Fixed assignment based on config file.

    The config file is a YAML mapping from container instance name
    to data center name.
    """

    def __init__(self, path=None):
        """
        :param path: path to the YAML placement file
            (default: static_placement.yml)
        """
        if path is None:
            path = "static_placement.yml"
        path = os.path.expanduser(path)
        self.static_placement = dict()
        try:
            self.static_placement = load_yaml(path)
        except BaseException as ex:
            LOG.error(ex)
            LOG.error("Couldn't load placement from {}"
                      .format(path))
        # an empty YAML file parses to None; normalize so that the
        # membership tests in place() always work on a dict
        if not isinstance(self.static_placement, dict):
            self.static_placement = dict()
        LOG.info("Loaded static placement: {}"
                 .format(self.static_placement))

    def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
        """
        Look up the statically configured DC for the given container
        name; fall back to the first DC if the name or DC is unknown.
        """
        # check for container name entry
        if cname not in self.static_placement:
            LOG.error("Couldn't find {} in placement".format(cname))
            LOG.error("Using first DC as fallback!")
            return list(dcs.values())[0]
        # lookup
        candidate_dc = self.static_placement.get(cname)
        # check if DC exists
        if candidate_dc not in dcs:
            LOG.error("Couldn't find DC {}".format(candidate_dc))
            LOG.error("Using first DC as fallback!")
            return list(dcs.values())[0]
        # return correct DC
        return dcs.get(candidate_dc)
"""
"error": "upload failed. file not found."}, 500
# generate a uuid to reference this package
service_uuid = str(uuid.uuid4())
- file_hash = hashlib.sha1(str(son_file)).hexdigest()
+ file_hash = str(son_file)
+ file_hash = hashlib.sha1(file_hash.encode())
+ file_hash = file_hash.hexdigest()
# ensure that upload folder exists
ensure_dir(UPLOAD_FOLDER)
- upload_path = os.path.join(UPLOAD_FOLDER, "%s.tgo" % service_uuid)
+ upload_path = os.path.\
+ join(UPLOAD_FOLDER, "%s.tgo" % service_uuid)
# store *.son file to disk
if is_file_object:
son_file.save(upload_path)
"""
LOG.info("GET /packages")
result = list()
- for suuid, sobj in GK.services.iteritems():
+ for suuid, sobj in GK.services.items():
pkg = dict()
pkg["pd"] = dict()
pkg["uuid"] = suuid
pkg["pd"]["version"] = sobj.manifest.get("version")
pkg["created_at"] = sobj.created_at
result.append(pkg)
- return result, 200
+ return result, 200, CORS_HEADER
class Services(fr.Resource):
"""
LOG.info("GET /services")
result = list()
- for suuid, sobj in GK.services.iteritems():
+ for suuid, sobj in GK.services.items():
service = dict()
service["nsd"] = dict()
service["uuid"] = suuid
service["nsd"]["version"] = sobj.nsd.get("version")
service["created_at"] = sobj.created_at
result.append(service)
- return result, 200
+ return result, 200, CORS_HEADER
class Instantiations(fr.Resource):
service_name = service_uuid
# first try to find by service_name
if service_name is not None:
- for s_uuid, s in GK.services.iteritems():
+ for s_uuid, s in GK.services.items():
if s.manifest.get("name") == service_name:
LOG.info("Searched for: {}. Found service w. UUID: {}"
.format(service_name, s_uuid))
"latest") and len(GK.services) > 0:
# if we don't get a service uuid, we simple start the first service
# in the list
- service_uuid = list(GK.services.iterkeys())[0]
+ service_uuid = list(GK.services.keys())[0]
if service_uuid in GK.services:
# ok, we have a service uuid, lets start the service
service_instance_uuid = GK.services.get(
Returns a list of UUIDs containing all running services.
:return: dict / list
"""
- LOG.info("GET /instantiations or /api/v3/records/services")
+ LOG.debug("GET /instantiations or /api/v3/records/services")
# return {"service_instantiations_list": [
- # list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
+ # list(s.instances.keys()) for s in GK.services.values()]}
result = list()
- for suuid, sobj in GK.services.iteritems():
- for iuuid, iobj in sobj.instances.iteritems():
+ for suuid, sobj in GK.services.items():
+ for iuuid, iobj in sobj.instances.items():
inst = dict()
inst["uuid"] = iobj.get("uuid")
inst["instance_name"] = "{}-inst.{}".format(
inst["status"] = "running"
inst["created_at"] = iobj.get("created_at")
result.append(inst)
- return result, 200
+ return result, 200, CORS_HEADER
def delete(self):
"""
# try to be fuzzy
if service_uuid_input is None:
# if we don't get a service uuid we stop all services
- service_uuid_list = list(GK.services.iterkeys())
+ service_uuid_list = list(GK.services.keys())
LOG.info("No service_uuid given, stopping all.")
else:
service_uuid_list = [service_uuid_input]
for service_uuid in service_uuid_list:
if instance_uuid_input is None:
instance_uuid_list = list(
- GK.services[service_uuid].instances.iterkeys())
+ GK.services[service_uuid].instances.keys())
else:
instance_uuid_list = [instance_uuid_input]
# for all service instances
r = list()
for net in range(base, base + subnet_size):
subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
- r.append(ipaddress.ip_network(unicode(subnet)))
+ r.append(ipaddress.ip_network(subnet))
return r
:return:
"""
assert (len(GK.dcs) > 0)
- return GK.dcs.values()[0].net
+ return list(GK.dcs.values())[0].net
def parse_interface(interface_name):
def _offset(p):
return p + MULTI_INSTANCE_PORT_OFFSET * ssiid
- port_bindings = {k: _offset(v) for k, v in port_bindings.iteritems()}
+ port_bindings = {k: _offset(v) for k, v in port_bindings.items()}
return port_bindings