--- /dev/null
+"""
+Copyright (c) 2015 SONATA-NFV
+ALL RIGHTS RESERVED.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
+nor the names of its contributors may be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+This work has been performed in the framework of the SONATA project,
+funded by the European Commission under Grant number 671517 through
+the Horizon 2020 and 5G-PPP programmes. The authors would like to
+acknowledge the contributions of their colleagues of the SONATA
+partner consortium (www.sonata-nfv.eu).
+"""
+"""
+A simple topology with two PoPs for the y1 demo story board.
+
+ (dc1) <<-->> s1 <<-->> (dc2)
+"""
+
+import logging
+from mininet.log import setLogLevel
+from emuvim.dcemulator.net import DCNetwork
+from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
+from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
+from mininet.node import RemoteController
+import signal
+import sys
+
+
+logging.basicConfig(level=logging.INFO)
+
+net = None
+cli = None
+
+
+def create_topology1():
+    """
+    Build the two-PoP y1 demo topology and start the emulation.
+
+        (dc1) <<-->> s1 <<-->> (dc2)
+
+    Also starts the REST API endpoint (port 5001) and the SONATA dummy
+    gatekeeper (port 5000); both run in background threads and do not block.
+    """
+    # Bind to the module-level variable so that exit_gracefully() can stop
+    # the network on SIGTERM. Without this 'global', net would be a local
+    # and the signal handler would see net == None at shutdown.
+    global net
+
+    # create topology: two datacenters connected through a single switch
+    net = DCNetwork(controller=RemoteController, monitor=False,
+                    enable_learning=False)
+    dc1 = net.addDatacenter("dc1")
+    dc2 = net.addDatacenter("dc2")
+    s1 = net.addSwitch("s1")
+    net.addLink(dc1, s1, delay="10ms")
+    net.addLink(dc2, s1, delay="20ms")
+
+    # add the command line interface endpoint to each DC (REST API)
+    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
+    rapi1.connectDatacenter(dc1)
+    rapi1.connectDatacenter(dc2)
+    # run API endpoint server (in another thread, don't block)
+    rapi1.start()
+
+    # add the SONATA dummy gatekeeper to each DC
+    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000)
+    sdkg1.connectDatacenter(dc1)
+    sdkg1.connectDatacenter(dc2)
+    # run the dummy gatekeeper (in another thread, don't block)
+    sdkg1.start()
+
+    # start the emulation platform
+    net.start()
+
+def exit_gracefully(signum, frame):
+    """
+    SIGTERM handler: stop the emulation network and exit.
+
+    Received e.g. when the son-emu docker container is stopped (step 7 of
+    the shutdown story board).
+    """
+    # TODO: investigate why this is not called by the sigterm handler
+
+    global net
+
+    logging.info('Signal handler called with signal {0}'.format(signum))
+    # net is only set once create_topology1() has run; guard against a
+    # signal arriving before the topology exists
+    if net is not None:
+        net.stop()
+
+    sys.exit()
+
+def main():
+    # set Mininet's own log verbosity
+    setLogLevel('info')
+    # install the SIGTERM handler so we shut down cleanly when the
+    # son-emu docker container is stopped
+    signal.signal(signal.SIGTERM, exit_gracefully)
+    # build and start the two-PoP demo topology
+    create_topology1()
+
+
+if __name__ == '__main__':
+    main()
RUN apt-get clean
-RUN cd /son-emu/ansible \
- && ansible-playbook install.yml \
- && cd /son-emu \
- # we need to reset the __pycache__ for correct test discovery
- && rm -rf src/emuvim/test/__pycache__ \
- && rm -rf src/emuvim/test/unittests/__pycache__ \
- && rm -rf src/emuvim/test/integrationtests/__pycache__ \
- && python setup.py install \
- && echo 'Done'
+WORKDIR /son-emu/ansible
+RUN ansible-playbook install.yml
+
+WORKDIR /son-emu
+# we need to reset the __pycache__ for correct test discovery;
+# do the deletes and the install in a single RUN so the removed files
+# do not remain baked into earlier image layers
+RUN rm -rf src/emuvim/test/__pycache__ \
+           src/emuvim/test/unittests/__pycache__ \
+           src/emuvim/test/integrationtests/__pycache__ \
+    && python setup.py install \
+    && echo 'Done'
+
ENTRYPOINT ["/son-emu/utils/docker/entrypoint.sh"]
-# dummy GK, zerorpc, DCNetwork zerorpc, cAdvisor
-EXPOSE 5000 4242 5151 8090
+# dummy GK, zerorpc, DCNetwork zerorpc, cAdvisor, restAPI
+EXPOSE 5000 4242 5151 8090 5001
--- /dev/null
+#! /bin/bash -e
+
+# docker stop trap signals
+# https://medium.com/@gchudnov/trapping-signals-in-docker-containers-7a57fdda7d86#.5d6q01x7q
+
+pid=0
+command=""
+term_recvd=0
+
+# send SIGTERM also to the executed command in the docker container (the containernet topology)
+# SIGTERM-handler
+function term_handler() {
+    echo "$command"
+    # look up the pid of the command launched at the bottom of this
+    # script; take only the first match
+    pid=$(pgrep -f "$command" | sed -n 1p)
+
+    # forward SIGTERM exactly once, and only if the process was found;
+    # -n guards against an empty pgrep result (the original numeric test
+    # would error out on an empty string)
+    if [ -n "$pid" ] && [ "$term_recvd" -eq 0 ]; then
+        echo "sigterm received"
+        echo "$pid"
+        term_recvd=1
+        kill -SIGTERM "$pid"
+        # wait only when we actually have a pid to wait for
+        wait "$pid"
+    fi
+
+    # do some manual cleanup
+    # remove all containers started by son-emu
+    docker ps -a -q --filter="name=mn.*" | xargs -r docker rm -f
+    # cleanup remaining mininet state
+    mn -c
+
+    sleep 5
+    exit 143; # 128 + 15 -- SIGTERM
+}
+
+# setup handlers
+# on callback, kill the last background process, which is `tail -f /dev/null` and execute the specified handler
+trap 'term_handler' SIGTERM
+
+
+service openvswitch-switch start
+
+if [ ! -S /var/run/docker.sock ]; then
+ echo 'Error: the Docker socket file "/var/run/docker.sock" was not found. It should be mounted as a volume.'
+ exit 1
+fi
+
+# this cannot be done from the Dockerfile since we have the socket not mounted during build
+echo 'Pulling the "ubuntu:trusty" image ... please wait'
+docker pull 'ubuntu:trusty'
+
+echo "Welcome to Containernet running within a Docker container ..."
+
+if [[ $# -eq 0 ]]; then
+    # no command given: drop into an interactive shell
+    exec /bin/bash
+else
+    # remember the command so the SIGTERM handler can signal it too
+    command=$*
+    echo "$command"
+    # run the command in the background; "$@" preserves arguments that
+    # contain whitespace (unquoted $* would word-split and glob them)
+    "$@" &
+    # keep this process (PID 1) alive and interruptible so the trap on
+    # SIGTERM can fire; sleeping in short intervals keeps signal
+    # delivery prompt
+    while true
+    do
+        sleep 1
+    done
+fi