See the License for the specific language governing permissions and
limitations under the License.
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
+Neither the name of the SONATA-NFV, Paderborn University
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
from subprocess import Popen
from random import randint
import ipaddress
+import copy
+import time
logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
BIDIRECTIONAL_CHAIN = False
# override the management interfaces in the descriptors with default docker0 interfaces in the containers
-USE_DOCKER_MGMT = True
+USE_DOCKER_MGMT = False
+
+# automatically deploy uploaded packages (no need to execute son-access deploy --latest separately)
+AUTO_DEPLOY = False
+
+# and also automatically terminate any other running services
+AUTO_DELETE = False
def generate_subnets(prefix, base, subnet_size=50, mask=24):
# Generate a list of ipaddress in subnets
return r
# private subnet definitions for the generated interfaces
# 10.10.xxx.0/24
-SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=24)
-# 10.20.xxx.0/24
+SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
+# 10.20.xxx.0/24
ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
# 10.30.xxx.0/30
ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
+# path to the VNFD for the SAP VNF that is deployed as internal SAP point
+SAP_VNFD=None
+
+# Time in seconds to wait for vnf stop scripts to execute fully
+VNF_STOP_WAIT_TIME = 5
class Gatekeeper(object):
self.local_docker_files = dict()
self.remote_docker_image_urls = dict()
self.instances = dict()
- self.vnf_name2docker_name = dict()
+ # dict to find the vnf_name for any vnf id
self.vnf_id2vnf_name = dict()
def onboard(self):
self._load_vnfd()
if DEPLOY_SAP:
self._load_saps()
- # create dict to translate vnf names
- self.vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
- reduce(lambda x, y: dict(x, **y),
- map(lambda d: {d["vnf_id"]: d["vnf_name"]},
- self.nsd["network_functions"])))
# 3. prepare container images (e.g. download or build Dockerfile)
if BUILD_DOCKERFILE:
self._load_docker_files()
if not GK_STANDALONE_MODE:
#self._calculate_placement(FirstDcPlacement)
self._calculate_placement(RoundRobinDcPlacementWithSAPs)
-
# 3. start all vnfds that we have in the service (except SAPs)
- for vnfd in self.vnfds.itervalues():
+ for vnf_id in self.vnfds:
+ vnfd = self.vnfds[vnf_id]
vnfi = None
if not GK_STANDALONE_MODE:
- vnfi = self._start_vnfd(vnfd)
+ vnfi = self._start_vnfd(vnfd, vnf_id)
self.instances[instance_uuid]["vnf_instances"].append(vnfi)
# 4. start all SAPs in the service
self._start_sap(self.saps[sap], instance_uuid)
# 5. Deploy E-Line and E_LAN links
- if "virtual_links" in self.nsd:
+        # Attention: Only done if "forwarding_graphs" section in NSD exists,
+ # even if "forwarding_graphs" are not used directly.
+ if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
vlinks = self.nsd["virtual_links"]
# constituent virtual links are not checked
#fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
# instance_uuid = str(self.uuid.uuid4())
vnf_instances = self.instances[instance_uuid]["vnf_instances"]
+        # trigger stop scripts in vnf instances and wait a few seconds for completion
+ self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
+ time.sleep(VNF_STOP_WAIT_TIME)
+
for v in vnf_instances:
self._stop_vnfi(v)
+ for sap_name in self.saps_ext:
+ ext_sap = self.saps[sap_name]
+ target_dc = ext_sap.get("dc")
+ target_dc.removeExternalSAP(sap_name)
+ LOG.info("Stopping the SAP instance: %r in DC %r" % (sap_name, target_dc))
+
if not GK_STANDALONE_MODE:
# remove placement?
# self._remove_placement(RoundRobinPlacement)
# last step: remove the instance from the list of all instances
del self.instances[instance_uuid]
- def _start_vnfd(self, vnfd):
+ def _start_vnfd(self, vnfd, vnf_id, **kwargs):
"""
Start a single VNFD of this service
:param vnfd: vnfd descriptor dict
+ :param vnf_id: unique id of this vnf in the nsd
:return:
"""
+ # the vnf_name refers to the container image to be deployed
+ vnf_name = vnfd.get("name")
+
# iterate over all deployment units within each VNFDs
for u in vnfd.get("virtual_deployment_units"):
# 1. get the name of the docker image to start and the assigned DC
- vnf_name = vnfd.get("name")
- if vnf_name not in self.remote_docker_image_urls:
- raise Exception("No image name for %r found. Abort." % vnf_name)
- docker_name = self.remote_docker_image_urls.get(vnf_name)
+ if vnf_id not in self.remote_docker_image_urls:
+ raise Exception("No image name for %r found. Abort." % vnf_id)
+ docker_name = self.remote_docker_image_urls.get(vnf_id)
target_dc = vnfd.get("dc")
# 2. perform some checks to ensure we can start the container
assert(docker_name is not None)
# 3. get the resource limits
res_req = u.get("resource_requirements")
cpu_list = res_req.get("cpu").get("cores")
- if not cpu_list or len(cpu_list)==0:
+ if cpu_list is None:
+ cpu_list = res_req.get("cpu").get("vcpus")
+ if cpu_list is None:
cpu_list="1"
cpu_bw = res_req.get("cpu").get("cpu_bw")
if not cpu_bw:
mem_lim = int(mem_limit)
cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
- vnf_name2id = defaultdict(lambda: "NotExistingNode",
- reduce(lambda x, y: dict(x, **y),
- map(lambda d: {d["vnf_name"]: d["vnf_id"]},
- self.nsd["network_functions"])))
-
# check if we need to deploy the management ports (defined as type:management both on in the vnfd and nsd)
intfs = vnfd.get("connection_points", [])
mgmt_intf_names = []
if USE_DOCKER_MGMT:
- vnf_id = vnf_name2id[vnf_name]
mgmt_intfs = [vnf_id + ':' + intf['id'] for intf in intfs if intf.get('type') == 'management']
# check if any of these management interfaces are used in a management-type network in the nsd
for nsd_intf_name in mgmt_intfs:
intfs.remove(found_interfaces[0])
mgmt_intf_names.append(vnf_interface)
- # 4. do the dc.startCompute(name="foobar") call to run the container
+ # 4. generate the volume paths for the docker container
+ volumes=list()
+ # a volume to extract log files
+ docker_log_path = "/tmp/results/%s/%s"%(self.uuid,vnf_id)
+ LOG.debug("LOG path for vnf %s is %s."%(vnf_id,docker_log_path))
+ if not os.path.exists(docker_log_path):
+ LOG.debug("Creating folder %s"%docker_log_path)
+ os.makedirs(docker_log_path)
+
+ volumes.append(docker_log_path+":/mnt/share/")
+
+
+ # 5. do the dc.startCompute(name="foobar") call to run the container
# TODO consider flavors, and other annotations
# TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
# use the vnf_id in the nsd as docker name
# so deployed containers can be easily mapped back to the nsd
-
- self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
-
- LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
- LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
- vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small",
- cpu_quota=cpu_quota, cpu_period=cpu_period, cpuset=cpu_list, mem_limit=mem_lim)
+ LOG.info("Starting %r as %r in DC %r" % (vnf_name, vnf_id, vnfd.get("dc")))
+ LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
+ vnfi = target_dc.startCompute(
+ vnf_id,
+ network=intfs,
+ image=docker_name,
+ flavor_name="small",
+ cpu_quota=cpu_quota,
+ cpu_period=cpu_period,
+ cpuset=cpu_list,
+ mem_limit=mem_lim,
+ volumes=volumes,
+ type=kwargs.get('type','docker'))
# rename the docker0 interfaces (eth0) to the management port name defined in the VNFD
if USE_DOCKER_MGMT:
# Find the correct datacenter
status = vnfi.getStatus()
dc = vnfi.datacenter
+
# stop the vnfi
LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
dc.stopCompute(status["name"])
- def _get_vnf_instance(self, instance_uuid, name):
+ def _get_vnf_instance(self, instance_uuid, vnf_id):
"""
- Returns the Docker object for the given VNF name (or Docker name).
+ Returns the Docker object for the given VNF id (or Docker name).
:param instance_uuid: UUID of the service instance to search in.
:param name: VNF name or Docker name. We are fuzzy here.
:return:
"""
- dn = name
- if name in self.vnf_name2docker_name:
- dn = self.vnf_name2docker_name[name]
+ dn = vnf_id
for vnfi in self.instances[instance_uuid]["vnf_instances"]:
if vnfi.name == dn:
return vnfi
t.daemon = True
t.start()
+ def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
+ for vnfi in vnfi_list:
+ config = vnfi.dcinfo.get("Config", dict())
+ env = config.get("Env", list())
+ for env_var in env:
+ var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
+ if var=="SON_EMU_CMD_STOP":
+ LOG.info("Executing stop script in %r: %r" % (vnfi.name, cmd))
+ # execute command in new thread to ensure that GK is not blocked by VNF
+ t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
+ t.daemon = True
+ t.start()
+
+
+
def _unpack_service_package(self):
"""
unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
make_relative_path(self.manifest.get("entry_service_template")))
self.nsd = load_yaml(nsd_path)
GK.net.deployed_nsds.append(self.nsd)
+ # create dict to find the vnf_name for any vnf id
+ self.vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
+ reduce(lambda x, y: dict(x, **y),
+ map(lambda d: {d["vnf_id"]: d["vnf_name"]},
+ self.nsd["network_functions"])))
LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
:return:
"""
+
+ # first make a list of all the vnfds in the package
+ vnfd_set = dict()
if "package_content" in self.manifest:
for pc in self.manifest.get("package_content"):
if pc.get("content-type") == "application/sonata.function_descriptor":
self.package_content_path,
make_relative_path(pc.get("name")))
vnfd = load_yaml(vnfd_path)
- self.vnfds[vnfd.get("name")] = vnfd
- LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))
+ vnfd_set[vnfd.get("name")] = vnfd
+ # then link each vnf_id in the nsd to its vnfd
+ for vnf_id in self.vnf_id2vnf_name:
+ vnf_name = self.vnf_id2vnf_name[vnf_id]
+ self.vnfds[vnf_id] = vnfd_set[vnf_name]
+ LOG.debug("Loaded VNFD: {0} id: {1}".format(vnf_name, vnf_id))
def _load_saps(self):
# create list of all SAPs
# check if we need to deploy management ports
if USE_DOCKER_MGMT:
- LOG.debug("nsd: {0}".format(self.nsd))
SAPs = [p for p in self.nsd["connection_points"] if 'management' not in p.get('type')]
else:
SAPs = [p for p in self.nsd["connection_points"]]
sap_net = SAP_SUBNETS.pop(0)
self.saps[sap_docker_name] = {"name": sap_docker_name , "type": "external", "net": sap_net}
# add SAP vnf to list in the NSD so it is deployed later on
- # each SAP get a unique VNFD and vnf_id in the NSD and custom type (only defined in the dummygatekeeper)
+ # each SAP gets a unique VNFD and vnf_id in the NSD and custom type (only defined in the dummygatekeeper)
self.nsd["network_functions"].append(
{"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_ext"})
# Each Service Access Point (connection_point) in the nsd is getting its own container (default)
elif sap["type"] == "internal" or sap["type"] == "management":
# add SAP to self.vnfds
- sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
+ if SAP_VNFD is None:
+ sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
+ else:
+ sapfile = SAP_VNFD
sap_vnfd = load_yaml(sapfile)
sap_vnfd["connection_points"][0]["id"] = sap_interface
sap_vnfd["name"] = sap_docker_name
if sap["type"] == "internal":
vnfi = None
if not GK_STANDALONE_MODE:
- vnfi = self._start_vnfd(sap)
+ vnfi = self._start_vnfd(sap, sap['name'], type='sap_int')
self.instances[instance_uuid]["vnf_instances"].append(vnfi)
elif sap["type"] == "external":
target_dc = sap.get("dc")
# add interface to dc switch
- target_dc.attachExternalSAP(sap['name'], str(sap['net']))
+ target_dc.attachExternalSAP(sap['name'], sap['net'])
def _connect_elines(self, eline_fwd_links, instance_uuid):
"""
elif src_sap_id in self.saps_ext:
src_id = src_sap_id
- src_if_name = src_sap_id
- src_name = self.vnf_id2vnf_name[src_id]
- dst_name = self.vnf_id2vnf_name[dst_id]
- dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
+ # set intf name to None so the chaining function will choose the first one
+ src_if_name = None
+ dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
if dst_vnfi is not None:
# choose first ip address in sap subnet
sap_net = self.saps[src_sap_id]['net']
- sap_ip = "{0}/{1}".format(str(sap_net[1]), sap_net.prefixlen)
+ sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
self._vnf_reconfigure_network(dst_vnfi, dst_if_name, sap_ip)
setChaining = True
elif dst_sap_id in self.saps_ext:
dst_id = dst_sap_id
- dst_if_name = dst_sap_id
- src_name = self.vnf_id2vnf_name[src_id]
- dst_name = self.vnf_id2vnf_name[dst_id]
- src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
+ # set intf name to None so the chaining function will choose the first one
+ dst_if_name = None
+ src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
if src_vnfi is not None:
sap_net = self.saps[dst_sap_id]['net']
- sap_ip = "{0}/{1}".format(str(sap_net[1]), sap_net.prefixlen)
+ sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
self._vnf_reconfigure_network(src_vnfi, src_if_name, sap_ip)
setChaining = True
src_id = src_sap_id
if dst_sap_id in self.saps_int:
dst_id = dst_sap_id
- src_name = self.vnf_id2vnf_name[src_id]
- dst_name = self.vnf_id2vnf_name[dst_id]
# re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
- src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
- dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
+ src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
+ dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
if src_vnfi is not None and dst_vnfi is not None:
eline_net = ELINE_SUBNETS.pop(0)
ip1 = "{0}/{1}".format(str(eline_net[1]), eline_net.prefixlen)
vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
LOG.debug(
- "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
- src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))
+ "Setting up E-Line link. (%s:%s) -> (%s:%s)" % (
+ src_id, src_if_name, dst_id, dst_if_name))
def _connect_elans(self, elan_fwd_links, instance_uuid):
src_docker_name = vnf_sap_id
vnf_id = vnf_sap_id
- vnf_name = self.vnf_id2vnf_name[vnf_id]
LOG.debug(
- "Setting up E-LAN interface. %s(%s:%s) -> %s" % (
- vnf_name, vnf_id, intf_name, ip_address))
-
- if vnf_name in self.vnfds:
- # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
- # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
- # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
- vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
- if vnfi is not None:
- self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
- # add this vnf and interface to the E-LAN for tagging
- elan_vnf_list.append({'name': src_docker_name, 'interface': intf_name})
+ "Setting up E-LAN interface. (%s:%s) -> %s" % (
+ vnf_id, intf_name, ip_address))
+
+ # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
+ # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
+ # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
+ vnfi = self._get_vnf_instance(instance_uuid, vnf_id)
+ if vnfi is not None:
+ self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
+ # add this vnf and interface to the E-LAN for tagging
+ elan_vnf_list.append({'name': src_docker_name, 'interface': intf_name})
# install the VLAN tags for this E-LAN
GK.net.setLAN(elan_vnf_list)
return int(cpu_period), int(cpu_quota)
def check_ext_saps(self, intf_list):
- # check if the list of interfacs contains an externl SAP
+        # check if the list of interfaces contains an external SAP
saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
for intf_name in intf_list:
vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf_name)
Placement: Always use one and the same data center from the GK.dcs dict.
"""
def place(self, nsd, vnfds, saps, dcs):
- for name, vnfd in vnfds.iteritems():
+ for id, vnfd in vnfds.iteritems():
vnfd["dc"] = list(dcs.itervalues())[0]
def place(self, nsd, vnfds, saps, dcs):
c = 0
dcs_list = list(dcs.itervalues())
- for name, vnfd in vnfds.iteritems():
+ for id, vnfd in vnfds.iteritems():
vnfd["dc"] = dcs_list[c % len(dcs_list)]
c += 1 # inc. c to use next DC
# place vnfs
c = 0
dcs_list = list(dcs.itervalues())
- for name, vnfd in vnfds.iteritems():
+ for id, vnfd in vnfds.iteritems():
vnfd["dc"] = dcs_list[c % len(dcs_list)]
c += 1 # inc. c to use next DC
eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
- vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
- reduce(lambda x, y: dict(x, **y),
- map(lambda d: {d["vnf_id"]: d["vnf_name"]},
- nsd["network_functions"])))
-
# SAPs on E-Line links are placed on the same DC as the VNF on the E-Line
for link in eline_fwd_links:
src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
# check if there is a SAP in the link
if src_sap_id in saps:
- dst_vnf_name = vnf_id2vnf_name[dst_id]
# get dc where connected vnf is mapped to
- dc = vnfds[dst_vnf_name]['dc']
+ dc = vnfds[dst_id]['dc']
saps[src_sap_id]['dc'] = dc
if dst_sap_id in saps:
- src_vnf_name = vnf_id2vnf_name[src_id]
# get dc where connected vnf is mapped to
- dc = vnfds[src_vnf_name]['dc']
+ dc = vnfds[src_id]['dc']
saps[dst_sap_id]['dc'] = dc
# SAPs on E-LANs are placed on a random DC
with open(upload_path, 'wb') as f:
f.write(son_file)
size = os.path.getsize(upload_path)
+
+ # first stop and delete any other running services
+ if AUTO_DELETE:
+ service_list = copy.copy(GK.services)
+ for service_uuid in service_list:
+ instances_list = copy.copy(GK.services[service_uuid].instances)
+ for instance_uuid in instances_list:
+ # valid service and instance UUID, stop service
+ GK.services.get(service_uuid).stop_service(instance_uuid)
+ LOG.info("service instance with uuid %r stopped." % instance_uuid)
+
# create a service object and register it
s = Service(service_uuid, file_hash, upload_path)
GK.register_service_package(service_uuid, s)
+
+ # automatically deploy the service
+ if AUTO_DEPLOY:
+ # ok, we have a service uuid, lets start the service
+ reset_subnets()
+ service_instance_uuid = GK.services.get(service_uuid).start_service()
+
# generate the JSON result
return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
except Exception as ex:
Will return a new UUID to identify the running service instance.
:return: UUID
"""
- LOG.info("POST /instantiations (or /reqeusts) called")
+ LOG.info("POST /instantiations (or /requests) called")
# try to extract the service uuid from the request
json_data = request.get_json(force=True)
service_uuid = json_data.get("service_uuid")
api.add_resource(Exit, '/emulator/exit')
-#def initialize_GK():
-# global GK
-# GK = Gatekeeper()
-
def start_rest_api(host, port, datacenters=dict()):
GK.dcs = datacenters
return vnf_id, vnf_interface, vnf_sap_docker_name
+def reset_subnets():
+ # private subnet definitions for the generated interfaces
+    # 10.10.xxx.0/30
+ global SAP_SUBNETS
+ SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
+    # 10.20.xxx.0/24
+ global ELAN_SUBNETS
+ ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
+ # 10.30.xxx.0/30
+ global ELINE_SUBNETS
+ ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
+
if __name__ == '__main__':
"""
Lets allow to run the API in standalone mode.