* flask_restful (BSD)
* requests (Apache 2.0)
* docker-py (Apache 2.0)
+* paramiko (LGPL)
### 3rd-party code used
* (none)
* **src/emuvim/** all emulator code
* **api/** Data center API endpoint implementations (zerorpc, OpenStack REST, ...)
* **cli/** CLI client to interact with a running emulator
- * **dcemulator/** Dockernet wrapper that introduces the notion of data centers and API endpoints
+ * **dcemulator/** Containernet wrapper that introduces the notion of data centers and API endpoints
* **examples/** Example topology scripts
* **test/** Unit tests
* **ansible/** Ansible install scripts
* `sudo vim /etc/ansible/hosts`
* Add: `localhost ansible_connection=local`
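After this step, `/etc/ansible/hosts` should contain at least the following line (a minimal sketch; any existing inventory entries can stay):

    localhost ansible_connection=local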
-#### 1. Dockernet
+#### 1. Containernet
* `cd`
-* `git clone -b dockernet-sonata https://github.com/mpeuster/dockernet.git`
-* `cd ~/dockernet/ansible`
+* `git clone https://github.com/mpeuster/containernet.git`
+* `cd ~/containernet/ansible`
* `sudo ansible-playbook install.yml`
* Wait (and have a coffee) ...
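* Optional sanity check that the new code base is importable (the class name matches the imports changed further down in this PR): `python -c "from mininet.net import Containernet"` should exit without an ImportError.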
* `son-emu-cli compute start -d datacenter1 -n vnf2`
* `son-emu-cli compute list`
* First terminal:
- * `dockernet> vnf1 ping -c 2 vnf2`
+ * `containernet> vnf1 ping -c 2 vnf2`
* Second terminal:
* `son-emu-cli monitor get_rate -vnf vnf1`
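Note that the `son-emu-cli` and `containernet>` commands above assume a running emulator topology. A minimal way to start one (the exact script name under `src/emuvim/examples/` is an assumption, not taken from this diff):

* `cd ~/son-emu`
* `sudo python src/emuvim/examples/simple_topology.py`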
- name: install libzmq-dev
apt: pkg=libzmq-dev state=installed
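+ # assumption: libffi-dev is needed to build the crypto bindings (cffi) used by the paramiko dependency added below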
+ - name: install libffi-dev
+ apt: pkg=libffi-dev state=installed
+
- name: install pip
apt: pkg=python-pip state=installed
- name: install prometheus_client
pip: name=prometheus_client state=latest
+ - name: install paramiko
+ pip: name=paramiko state=latest
+
setup(name='emuvim',
version='0.0.1',
- license='TODO',
+ license='Apache 2.0',
description='emuvim is a VIM for the SONATA platform',
url='http://github.com/sonata-emu',
author_email='sonata-dev@sonata-nfv.eu',
'flask_restful',
'docker-py',
'requests',
- 'prometheus_client'
+ 'prometheus_client',
+ 'paramiko'
],
zip_safe=False,
entry_points={
import urllib2
from functools import partial
-from mininet.net import Dockernet
+from mininet.net import Containernet
from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
from mininet.cli import CLI
from mininet.link import TCLink
from emuvim.dcemulator.node import Datacenter, EmulatorCompute
from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
-class DCNetwork(Dockernet):
+class DCNetwork(Containernet):
"""
- Wraps the original Mininet/Dockernet class and provides
+ Wraps the original Mininet/Containernet class and provides
methods to add data centers, switches, etc.
This class is used by topology definition scripts.
dc_emulation_max_mem=512, # emulation max mem in MB
**kwargs):
"""
- Create an extended version of a Dockernet network
+ Create an extended version of a Containernet network
:param dc_emulation_max_cpu: max. CPU time used by containers in data centers
:param kwargs: pass-through for Mininet parameters
:return:
self.dcs = {}
# call original Containernet.__init__ and set up the default controller
- Dockernet.__init__(
+ Containernet.__init__(
self, switch=OVSKernelSwitch, **kwargs)
# Ryu management
params["params2"]["ip"] = self.getNextIp()
# ensure that we allow TCLinks between data centers
# TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
- # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
+ # see Containernet issue: https://github.com/mpeuster/containernet/issues/3
if "cls" not in params:
params["cls"] = TCLink
- link = Dockernet.addLink(self, node1, node2, **params)
+ link = Containernet.addLink(self, node1, node2, **params)
# try to give container interfaces a default id
node1_port_id = node1.ports[link.intf1]
# add edge and assigned port number to graph in both directions between node1 and node2
# port_id: id given in descriptor (if available, otherwise same as port)
- # port: portnumber assigned by Dockernet
+ # port: port number assigned by Containernet
attr_dict = {}
# possible weight metrics allowed by TCLink class:
Wrapper for addDocker method to use custom container class.
"""
self.DCNetwork_graph.add_node(label)
- return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)
+ return Containernet.addDocker(self, label, cls=EmulatorCompute, **params)
def removeDocker( self, label, **params ):
"""
Wrapper for removeDocker method to update graph.
"""
self.DCNetwork_graph.remove_node(label)
- return Dockernet.removeDocker(self, label, **params)
+ return Containernet.removeDocker(self, label, **params)
def addSwitch( self, name, add_to_graph=True, **params ):
"""
"""
if add_to_graph:
self.DCNetwork_graph.add_node(name)
- return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
+ return Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
def getAllContainers(self):
"""
# start
for dc in self.dcs.itervalues():
dc.start()
- Dockernet.start(self)
+ Containernet.start(self)
def stop(self):
self.monitor_agent.stop()
# stop emulator net
- Dockernet.stop(self)
+ Containernet.stop(self)
# stop Ryu controller
self.stopRyu()
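For orientation, here is a minimal topology script sketch built on the wrapper above, modeled on the scripts in `src/emuvim/examples/`. `addDatacenter`, the `CLI()` helper, and the `delay` link parameter are assumptions based on those examples, not on this diff:

```python
from mininet.log import setLogLevel
from emuvim.dcemulator.net import DCNetwork

def create_topology():
    setLogLevel('info')  # standard Mininet logging
    net = DCNetwork()
    # two logical data centers, connected via a TCLink
    # (cls=TCLink is the default applied in DCNetwork.addLink above)
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    net.addLink(dc1, dc2, delay="10ms")
    net.start()  # starts each data center, then calls Containernet.start()
    net.CLI()    # drops into the containernet> prompt used in the README
    net.stop()   # calls Containernet.stop() and stops the Ryu controller

if __name__ == '__main__':
    create_topology()
```

For the `son-emu-cli` commands shown earlier to work against such a topology, one of the API endpoints under **api/** (e.g. the zerorpc one) additionally has to be connected to the data centers and started.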
class EmulatorCompute(Docker):
"""
Emulator specific compute node class.
- Inherits from Dockernet's Docker host class.
+ Inherits from Containernet's Docker host class.
Represents a single container connected to a (logical)
data center.
We can add emulator specific helper functions to it.
# if no --net option is given, network = [{}], so 1 empty dict in the list
# this results in 1 default interface with a default ip address
for nw in network:
- # TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
+ # TODO we cannot use TCLink here (see: https://github.com/mpeuster/containernet/issues/3)
self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
# do bookkeeping
self.containers[name] = d
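A usage sketch for the interface handling described in the comments above, given a `Datacenter` instance `dc` (e.g. from `addDatacenter`). The `id` key matches the `nw.get('id')` lookup in the code; the `ip` key is an assumption consistent with the default-address comment:

```python
# each dict in the network list becomes one interface and one link
# between the container and the data center switch
vnf1 = dc.startCompute("vnf1", network=[
    {"id": "intf1", "ip": "10.0.0.1/24"},
    {"id": "intf2", "ip": "10.0.1.1/24"},
])
# omitting the network argument yields one default interface
# with a default IP address
```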
base_url='unix://var/run/docker.sock')
return self.docker_cli
- def getDockernetContainers(self):
+ def getContainernetContainers(self):
"""
- List the containers managed by dockernet
+ List the containers managed by containernet
"""
- return self.getDockerCli().containers(filters={"label": "com.dockernet"})
+ return self.getDockerCli().containers(filters={"label": "com.containernet"})
@staticmethod
def setUp():
# make sure that all pending docker containers are killed
with open(os.devnull, 'w') as devnull:
subprocess.call(
- "sudo docker rm -f $(sudo docker ps --filter 'label=com.dockernet' -a -q)",
+ "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
stdout=devnull,
stderr=devnull,
shell=True)
\ No newline at end of file
# start Mininet network
self.startNet()
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.getContainernetContainers()) == 0)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check connectivity by using ping
# start Mininet network
self.startNet()
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.getContainernetContainers()) == 0)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 2)
# check connectivity by using ping
# start Mininet network
self.startNet()
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.getContainernetContainers()) == 0)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 5)
# check connectivity by using ping
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 1)
+ self.assertTrue(len(self.getContainernetContainers()) == 1)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check compute list result
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 1)
+ self.assertTrue(len(self.getContainernetContainers()) == 1)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check compute list result
# remove compute resources
self.dc[0].stopCompute("vnf1")
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.getContainernetContainers()) == 0)
self.assertTrue(len(self.net.hosts) == 1)
self.assertTrue(len(self.net.switches) == 1)
# check compute list result
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 1)
+ self.assertTrue(len(self.getContainernetContainers()) == 1)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check compute list result
vnf1 = self.dc[0].startCompute("vnf1")
vnf2 = self.dc[1].startCompute("vnf2")
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 2)
+ self.assertTrue(len(self.getContainernetContainers()) == 2)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 5)
# check compute list result
vnf1 = self.dc[0].startCompute("vnf1")
vnf2 = self.dc[1].startCompute("vnf2")
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 2)
+ self.assertTrue(len(self.getContainernetContainers()) == 2)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 5)
# check compute list result
# remove compute resources
self.dc[0].stopCompute("vnf1")
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 1)
+ self.assertTrue(len(self.getContainernetContainers()) == 1)
self.assertTrue(len(self.net.hosts) == 1)
self.assertTrue(len(self.net.switches) == 5)
# check compute list result
# start Mininet network
self.startNet()
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.getContainernetContainers()) == 0)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check resource model and resource model registrar
def testInRealTopo(self):
"""
- Start a real container and check if limitations are really passed down to Dockernet.
+ Start a real container and check if limitations are really passed down to Containernet.
:return:
"""
# ATTENTION: This test should only be executed if emu runs not inside a Docker container,
# start Mininet network
self.startNet()
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.getContainernetContainers()) == 0)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check resource model and resource model registrar
self.assertEqual(len(r4.json().get("service_instance_list")), 1)
# check number of running nodes
- self.assertTrue(len(self.getDockernetContainers()) == 3)
+ self.assertTrue(len(self.getContainernetContainers()) == 3)
self.assertTrue(len(self.net.hosts) == 5)
self.assertTrue(len(self.net.switches) == 2)
# check compute list result
-FROM cgeoffroy/dockernet
+FROM mpeuster/containernet
+MAINTAINER manuel@peuster.de
ENV SON_EMU_IN_DOCKER 1
-# ensure that we have the latest dockernet code base!
-WORKDIR /
-RUN rm -rf dockernet
-RUN git clone -b dockernet-sonata https://github.com/mpeuster/dockernet.git
-WORKDIR /dockernet
-RUN python setup.py develop
-
WORKDIR /son-emu
COPY . /son-emu/
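A hedged sketch of building and running this image. The run flags follow the usual pattern for Containernet-in-Docker setups (privileged mode plus the host's Docker socket, so nested containers can be spawned); they are an assumption, not taken from this repository:

```bash
docker build -t son-emu .
docker run -it --rm --privileged --pid='host' \
    -v /var/run/docker.sock:/var/run/docker.sock son-emu
```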
#! /bin/bash -e
set -x
-#cp /dockernet/util/docker/entrypoint.sh /tmp/x.sh
-#cat /tmp/x.sh | awk 'NR==1{print; print "set -x"} NR!=1' > /dockernet/util/docker/entrypoint.sh
+#cp /containernet/util/docker/entrypoint.sh /tmp/x.sh
+#cat /tmp/x.sh | awk 'NR==1{print; print "set -x"} NR!=1' > /containernet/util/docker/entrypoint.sh
-exec /dockernet/util/docker/entrypoint.sh $*
+exec /containernet/util/docker/entrypoint.sh "$@"