# SONATA dummy gatekeeper API:
+## Run emulator example with active SONATA dummy gatekeeper:
+The example starts a small network with two data centers.
+
+* `sudo python src/emuvim/examples/sonata_y1_demo_topology_1.py`
+
## Upload a package (*.son) file:
To upload the file `sonata-demo.son` (from son-schema repo) do:
To instantiate (start) a service do:
-* `curl -X POST http://127.0.0.1:8000/api/instantiations -d "{\"service_uuid\":\"59446b64-f941-40a8-b511-effb0512c21b\"}"`
+* Specific service: `curl -X POST http://127.0.0.1:8000/api/instantiations -d "{\"service_uuid\":\"59446b64-f941-40a8-b511-effb0512c21b\"}"`
+* Last uploaded service (makes manual tests easier): `curl -X POST http://127.0.0.1:8000/api/instantiations -d "{}"`
To list all running services do:
<td>GET</td>
<td>-</td>
<td></td>
-<td>{service_instance_uuid_list: ["de4567-f3b9-43ac-ac6b-3d27b461123", "de4567-f3b9-43ac-ac6b-3d27b461124", "de4567-f3b9-43ac-ac6b-3d27b461125"]}</td>
+<td>
+{
+ "service_instance_list": [
+ [
+ "9da044b3-1f7a-40e6-a9b3-9e83a9834249",
+ "9371df14-a595-436a-92b5-fc243b74a9d7"
+ ]
+ ]
+}
+</td>
</tr>
</table>
-## Run REST API as part of the emulator:
-
-* `sudo python src/emuvim/examples/sonata_y1_demo_topology_1.py`
## Run REST API in standalone mode (without emulator):
-
+**Note:** Standalone mode is not working yet.
* `sudo python src/emuvim/api/sonata/dummygatekeeper.py`
\ No newline at end of file
def __init__(self):
self.services = dict()
+ self.dcs = dict()
+ self.vnf_counter = 0 # used to generate short names for VNFs (Mininet limitation)
LOG.info("Create SONATA dummy gatekeeper.")
def register_service_package(self, service_uuid, service):
# lets perform all steps needed to onboard the service
service.onboard()
+ def get_next_vnf_name(self):
+ self.vnf_counter += 1
+ return "sonvnf%d" % self.vnf_counter
+
class Service(object):
"""
self.local_docker_files = dict()
self.instances = dict()
- def start_service(self, service_uuid):
- # TODO implement method
- # 1. parse descriptors
- # 2. do the corresponding dc.startCompute(name="foobar") calls
- # 3. store references to the compute objects in self.instantiations
- pass
-
def onboard(self):
"""
Do all steps to prepare this service to be instantiated
LOG.info("On-boarded service: %r" % self.manifest.get("package_name"))
+ def start_service(self):
+ """
+ This method creates and starts a new service instance.
+ It computes placements, iterates over all VNFDs, and starts
+ each deployment unit as a Docker container in the data center
+ selected by the placement algorithm.
+ :return:
+ """
+ LOG.info("Starting service %r" % self.uuid)
+ # 1. each service instance gets a new uuid to identify it
+ instance_uuid = str(uuid.uuid4())
+ # build an instance dict (a bit like an NSR :))
+ self.instances[instance_uuid] = dict()
+ self.instances[instance_uuid]["vnf_instances"] = list()
+ # 2. compute placement of this service instance (adds DC names to VNFDs)
+ self._calculate_placement(FirstDcPlacement)
+ # iterate over all vnfds that we have to start
+ for vnfd in self.vnfds.itervalues():
+ # iterate over all deployment units within each VNFD
+ for u in vnfd.get("virtual_deployment_units"):
+ # 3. get the name of the docker image to start and the assigned DC
+ docker_name = u.get("vm_image")
+ target_dc = vnfd.get("dc")
+ # 4. perform some checks to ensure we can start the container
+ assert(docker_name is not None)
+ assert(target_dc is not None)
+ if not self._check_docker_image_exists(docker_name):
+ raise Exception("Docker image %r not found. Abort." % docker_name)
+ # 5. do the dc.startCompute(name="foobar") call to run the container
+ # TODO consider flavors, and other annotations
+ vnfi = target_dc.startCompute(GK.get_next_vnf_name(), image=docker_name, flavor_name="small")
+ # 6. store references to the compute objects in self.instances
+ self.instances[instance_uuid]["vnf_instances"].append(vnfi)
+ LOG.info("Service started. Instance id: %r" % instance_uuid)
+ return instance_uuid
+
def _unpack_service_package(self):
"""
unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
"""
If the package contains URLs to pre-build Docker images, we download them with this method.
"""
- # TODO implement
+ # TODO implement this if we want to be able to download docker images instead of building them
pass
+ def _check_docker_image_exists(self, image_name):
+ """
+ Query the docker service and check if the given image exists
+ :param image_name: name of the docker image
+ :return:
+ """
+ return len(DockerClient().images(image_name)) > 0
+
+ def _calculate_placement(self, algorithm):
+ """
+ Do placement by adding a field "dc" to
+ each VNFD that points to one of our
+ data center objects known to the gatekeeper.
+ """
+ assert(len(self.vnfds) > 0)
+ assert(len(GK.dcs) > 0)
+ # instantiate algorithm and place
+ p = algorithm()
+ p.place(self.nsd, self.vnfds, GK.dcs)
+ LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
+ # lets print the placement result
+ for name, vnfd in self.vnfds.iteritems():
+ LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
+
+
+"""
+Some (simple) placement algorithms
+"""
+
+
+class FirstDcPlacement(object):
+ """
+ Placement: Always use one and the same data center from the GK.dcs dict.
+ """
+ def place(self, nsd, vnfds, dcs):
+ for name, vnfd in vnfds.iteritems():
+ vnfd["dc"] = list(dcs.itervalues())[0]
+
"""
Resource definitions and API endpoints
Will return a new UUID to identify the running service instance.
:return: UUID
"""
- # TODO implement method (start real service)
+ # try to extract the service uuid from the request
json_data = request.get_json(force=True)
service_uuid = json_data.get("service_uuid")
- if service_uuid is not None:
- service_instance_uuid = str(uuid.uuid4())
- LOG.info("Starting service %r" % service_uuid)
+
+ # lets be a bit fuzzy here to make testing easier
+ if service_uuid is None and len(GK.services) > 0:
+ # if we don't get a service uuid, we simply start the first service in the list
+ service_uuid = list(GK.services.iterkeys())[0]
+
+ if service_uuid in GK.services:
+ # ok, we have a service uuid, let's start the service
+ service_instance_uuid = GK.services.get(service_uuid).start_service()
return {"service_instance_uuid": service_instance_uuid}
- return None
+ return "Service not found", 404
def get(self):
"""
Returns a list of UUIDs containing all running services.
:return: dict / list
"""
- # TODO implement method
- return {"service_instance_uuid_list": list()}
+ return {"service_instance_list": [
+ list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
# create a single, global GK object
api.add_resource(Instantiations, '/api/instantiations')
-def start_rest_api(host, port):
+def start_rest_api(host, port, datacenters=dict()):
+ GK.dcs = datacenters
# start the Flask server (not the best performance but ok for our use case)
app.run(host=host,
port=port,
"""
Quick hack to fix missing dependency in example package.
"""
- # TODO remove this when package description is fixed
+ # FIXME remove this when package description is fixed
mapping = {
"/docker_files/iperf/Dockerfile": "iperf_docker",
"/docker_files/firewall/Dockerfile": "fw_docker",
Dockernet.__init__(
self, switch=OVSKernelSwitch, **kwargs)
- # start Ryu controller
- self.startRyu()
+ # Ryu management
+ self.ryu_process = None
+ if controller == RemoteController:
+ # start Ryu controller
+ self.startRyu()
- # add a remote controller to be able to use Ryu
+ # add the specified controller
self.addController('c0', controller=controller)
# graph of the complete DC network
# start Ryu controller with rest-API
python_install_path = site.getsitepackages()[0]
ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
- ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
+ ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
# change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
# Ryu still uses 6633 as default
ryu_option = '--ofp-tcp-listen-port'
ryu_of_port = '6653'
- ryu_cmd = 'ryu-manager'
+ ryu_cmd = 'ryu-manager'
FNULL = open("/tmp/ryu.log", 'w')
self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
time.sleep(1)
def stopRyu(self):
- if self.ryu_process:
+ if self.ryu_process is not None:
self.ryu_process.terminate()
self.ryu_process.kill()
from mininet.link import Link
import logging
+LOG = logging.getLogger("dcemulator")
+LOG.setLevel(logging.DEBUG)
+
DCDPID_BASE = 1000 # start of switch dpid's used for data center switches
def __init__(
self, name, dimage, **kwargs):
- logging.debug("Create EmulatorCompute instance: %s" % name)
self.datacenter = kwargs.get("datacenter") # pointer to current DC
self.flavor_name = kwargs.get("flavor_name")
-
+ LOG.debug("Starting compute instance %r in data center %r" % (name, str(self.datacenter)))
# call original Docker.__init__
Docker.__init__(self, name, dimage, **kwargs)
"""
self.switch = self.net.addSwitch(
"%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:])
- logging.debug("created data center switch: %s" % str(self.switch))
+ LOG.debug("created data center switch: %s" % str(self.switch))
def start(self):
pass
if self._resource_model is not None:
# TODO pass resource limits to new container (cf. Dockernet API) Issue #47
(cpu_limit, mem_limit, disk_limit) = alloc = self._resource_model.allocate(name, flavor_name)
- logging.info("Allocation result: %r" % str(alloc))
+ LOG.debug("Allocation result: %r" % str(alloc))
# create the container
d = self.net.addDocker(
"%s" % (name),
raise Exception("There is already an resource model assigned to this DC.")
self._resource_model = rm
self.net.rm_registrar.register(self, rm)
- logging.info("Assigned RM: %r to DC: %r" % (rm, self))
+ LOG.info("Assigned RM: %r to DC: %r" % (rm, self))