From: peusterm
Date: Mon, 7 Mar 2016 20:19:54 +0000 (+0100)
Subject: Merge pull request #39 from mpeuster/master
X-Git-Tag: v3.1~166
X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2Fvim-emu.git;a=commitdiff_plain;h=eb477af01a8f2f164cfc756a6a9759f3dd7e9f3a;hp=233754a507b3ae63700afb34d6da1abbc17cdda9

Merge pull request #39 from mpeuster/master

some cleanups e.g. examples folder for topologies
---

diff --git a/README.md b/README.md
index fa45bbf..eefb97c 100755
--- a/README.md
+++ b/README.md
@@ -52,7 +52,8 @@ In the `~/son-emu` directory:
 * Otherwise, for a classic installation:
  * `python setup.py install`
 * First terminal:
- * `sudo python src/emuvim/example_topology.py`
+ * `sudo python src/emuvim/examples/simple_topology.py
+`
 * Second terminal:
  * `son-emu-cli compute start -d datacenter1 -n vnf1`
  * `son-emu-cli compute start -d datacenter1 -n vnf2`
diff --git a/setup.py b/setup.py
index f134ffe..f2a6ce9 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,6 @@ setup(name='emuvim',
       packages=find_packages('src'),
       install_requires=[
           'pyaml',
-          'six',
           'zerorpc',
           'tabulate',
           'argparse',
diff --git a/src/emuvim/example_topology.py b/src/emuvim/example_topology.py
deleted file mode 100755
index a63bd7f..0000000
--- a/src/emuvim/example_topology.py
+++ /dev/null
@@ -1,121 +0,0 @@
-"""
-This is an example topology for the distributed cloud emulator (dcemulator).
-(c) 2015 by Manuel Peuster
-
-
-This is an example that shows how a user of the emulation tool can
-define network topologies with multiple emulated cloud data centers.
-
-The definition is done with a Python API which looks very similar to the
-Mininet API (in fact it is a wrapper for it).
-
-We only specify the topology *between* data centers not within a single
-data center (data center internal setups or placements are not of interest,
-we want to experiment with VNF chains deployed across multiple PoPs).
-
-The original Mininet API has to be completely hidden and not be used by this
-script.
-"""
-import logging
-from mininet.log import setLogLevel
-from emuvim.dcemulator.net import DCNetwork
-from api.zerorpcapi import ZeroRpcApiEndpoint
-from api.zerorpcapi_DCNetwork import ZeroRpcApiEndpointDCNetwork
-
-logging.basicConfig(level=logging.INFO)
-
-
-def create_topology1():
-    """
-    1. Create a data center network object (DCNetwork)
-    """
-    net = DCNetwork()
-
-    """
-    1b. add a monitoring agent to the DCNetwork
-    """
-    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
-    mon_api.connectDCNetwork(net)
-    mon_api.start()
-    """
-    2. Add (logical) data centers to the topology
-       (each data center is one "bigswitch" in our simplified
-        first prototype)
-    """
-    dc1 = net.addDatacenter("datacenter1")
-    dc2 = net.addDatacenter("datacenter2")
-    dc3 = net.addDatacenter("long_data_center_name3")
-    dc4 = net.addDatacenter(
-        "datacenter4",
-        metadata={"mydata": "we can also add arbitrary metadata to each DC"})
-
-    """
-    3. You can add additional SDN switches for data center
-       interconnections to the network.
-    """
-    s1 = net.addSwitch("s1")
-
-    """
-    4. Add links between your data centers and additional switches
-       to define you topology.
-       These links can use Mininet's features to limit bw, add delay or jitter.
-    """
-    net.addLink(dc1, dc2)
-    net.addLink("datacenter1", s1)
-    net.addLink(s1, dc3)
-    net.addLink(s1, "datacenter4")
-
-    """
-    5. We want to access and control our data centers from the outside,
-       e.g., we want to connect an orchestrator to start/stop compute
-       resources aka. VNFs (represented by Docker containers in the emulated)
-
-       So we need to instantiate API endpoints (e.g. a zerorpc or REST
-       interface). Depending on the endpoint implementations, we can connect
-       one or more data centers to it, which can then be controlled through
-       this API, e.g., start/stop/list compute instances.
-    """
-    # create a new instance of a endpoint implementation
-    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
-    # connect data centers to this endpoint
-    zapi1.connectDatacenter(dc1)
-    zapi1.connectDatacenter(dc2)
-    zapi1.connectDatacenter(dc3)
-    zapi1.connectDatacenter(dc4)
-    # run API endpoint server (in another thread, don't block)
-    zapi1.start()
-
-    """
-    5.1. For our example, we create a second endpoint to illustrate that
-         this is supported by our design. This feature allows us to have
-         one API endpoint for each data center. This makes the emulation
-         environment more realistic because you can easily create one
-         OpenStack-like REST API endpoint for *each* data center.
-         This will look like a real-world multi PoP/data center deployment
-         from the perspective of an orchestrator.
-    """
-    zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
-    zapi2.connectDatacenter(dc3)
-    zapi2.connectDatacenter(dc4)
-    zapi2.start()
-
-    """
-    6. Finally we are done and can start our network (the emulator).
-       We can also enter the Mininet CLI to interactively interact
-       with our compute resources (just like in default Mininet).
-       But we can also implement fully automated experiments that
-       can be executed again and again.
-    """
-    net.start()
-    net.CLI()
-    # when the user types exit in the CLI, we stop the emulator
-    net.stop()
-
-
-def main():
-    setLogLevel('info')  # set Mininet loglevel
-    create_topology1()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/src/emuvim/examples/simple_topology.py b/src/emuvim/examples/simple_topology.py
new file mode 100755
index 0000000..8f14b69
--- /dev/null
+++ b/src/emuvim/examples/simple_topology.py
@@ -0,0 +1,121 @@
+"""
+This is an example topology for the distributed cloud emulator (dcemulator).
+(c) 2015 by Manuel Peuster
+
+
+This is an example that shows how a user of the emulation tool can
+define network topologies with multiple emulated cloud data centers.
+
+The definition is done with a Python API which looks very similar to the
+Mininet API (in fact it is a wrapper for it).
+
+We only specify the topology *between* data centers not within a single
+data center (data center internal setups or placements are not of interest,
+we want to experiment with VNF chains deployed across multiple PoPs).
+
+The original Mininet API has to be completely hidden and not be used by this
+script.
+"""
+import logging
+from mininet.log import setLogLevel
+from emuvim.dcemulator.net import DCNetwork
+from emuvim.api.zerorpcapi import ZeroRpcApiEndpoint
+from emuvim.api.zerorpcapi_DCNetwork import ZeroRpcApiEndpointDCNetwork
+
+logging.basicConfig(level=logging.INFO)
+
+
+def create_topology1():
+    """
+    1. Create a data center network object (DCNetwork)
+    """
+    net = DCNetwork()
+
+    """
+    1b. add a monitoring agent to the DCNetwork
+    """
+    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
+    mon_api.connectDCNetwork(net)
+    mon_api.start()
+    """
+    2. Add (logical) data centers to the topology
+       (each data center is one "bigswitch" in our simplified
+        first prototype)
+    """
+    dc1 = net.addDatacenter("datacenter1")
+    dc2 = net.addDatacenter("datacenter2")
+    dc3 = net.addDatacenter("long_data_center_name3")
+    dc4 = net.addDatacenter(
+        "datacenter4",
+        metadata={"mydata": "we can also add arbitrary metadata to each DC"})
+
+    """
+    3. You can add additional SDN switches for data center
+       interconnections to the network.
+    """
+    s1 = net.addSwitch("s1")
+
+    """
+    4. Add links between your data centers and additional switches
+       to define you topology.
+       These links can use Mininet's features to limit bw, add delay or jitter.
+    """
+    net.addLink(dc1, dc2)
+    net.addLink("datacenter1", s1)
+    net.addLink(s1, dc3)
+    net.addLink(s1, "datacenter4")
+
+    """
+    5. We want to access and control our data centers from the outside,
+       e.g., we want to connect an orchestrator to start/stop compute
+       resources aka. VNFs (represented by Docker containers in the emulated)
+
+       So we need to instantiate API endpoints (e.g. a zerorpc or REST
+       interface). Depending on the endpoint implementations, we can connect
+       one or more data centers to it, which can then be controlled through
+       this API, e.g., start/stop/list compute instances.
+    """
+    # create a new instance of a endpoint implementation
+    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
+    # connect data centers to this endpoint
+    zapi1.connectDatacenter(dc1)
+    zapi1.connectDatacenter(dc2)
+    zapi1.connectDatacenter(dc3)
+    zapi1.connectDatacenter(dc4)
+    # run API endpoint server (in another thread, don't block)
+    zapi1.start()
+
+    """
+    5.1. For our example, we create a second endpoint to illustrate that
+         this is supported by our design. This feature allows us to have
+         one API endpoint for each data center. This makes the emulation
+         environment more realistic because you can easily create one
+         OpenStack-like REST API endpoint for *each* data center.
+         This will look like a real-world multi PoP/data center deployment
+         from the perspective of an orchestrator.
+    """
+    zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
+    zapi2.connectDatacenter(dc3)
+    zapi2.connectDatacenter(dc4)
+    zapi2.start()
+
+    """
+    6. Finally we are done and can start our network (the emulator).
+       We can also enter the Mininet CLI to interactively interact
+       with our compute resources (just like in default Mininet).
+       But we can also implement fully automated experiments that
+       can be executed again and again.
+    """
+    net.start()
+    net.CLI()
+    # when the user types exit in the CLI, we stop the emulator
+    net.stop()
+
+
+def main():
+    setLogLevel('info')  # set Mininet loglevel
+    create_topology1()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/emuvim/examples/sonata_y1_demo_topology_1.py b/src/emuvim/examples/sonata_y1_demo_topology_1.py
new file mode 100644
index 0000000..1588190
--- /dev/null
+++ b/src/emuvim/examples/sonata_y1_demo_topology_1.py
@@ -0,0 +1,46 @@
+"""
+A simple topology with two PoPs for the y1 demo story board.
+
+    (dc1) <<-->> s1 <<-->> (dc2)
+"""
+
+import logging
+from mininet.log import setLogLevel
+from emuvim.dcemulator.net import DCNetwork
+from emuvim.api.zerorpcapi import ZeroRpcApiEndpoint
+
+logging.basicConfig(level=logging.INFO)
+
+
+def create_topology1():
+    # create topology
+    net = DCNetwork()
+    dc1 = net.addDatacenter("dc1")
+    dc2 = net.addDatacenter("dc2")
+    s1 = net.addSwitch("s1")
+    net.addLink(dc1, s1)
+    net.addLink(dc2, s1)
+
+    # create a new instance of a endpoint implementation
+    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
+    # connect data centers to this endpoint
+    zapi1.connectDatacenter(dc1)
+    zapi1.connectDatacenter(dc2)
+    # run API endpoint server (in another thread, don't block)
+    zapi1.start()
+
+    # TODO add "fake gatekeeper" api endpoint and connect it to both dcs
+
+    # start the emulation platform
+    net.start()
+    net.CLI()
+    net.stop()
+
+
+def main():
+    setLogLevel('info')  # set Mininet loglevel
+    create_topology1()
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/start_dcnetwork b/start_dcnetwork
index 6deb2c9..7280054 100755
--- a/start_dcnetwork
+++ b/start_dcnetwork
@@ -1,6 +1,7 @@
 #!/bin/bash
 
 # start DC Network
-python src/emuvim/example_topology.py
+python src/emuvim/examples/simple_topology.py
+
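For reference, the two example scripts added above follow the same basic pattern: build a DCNetwork, add emulated data centers (and optionally switches), wire them together with addLink, expose the data centers through a ZeroRpcApiEndpoint so that son-emu-cli can reach them, and finally start the emulation. A minimal sketch of that pattern is shown below; the file name my_minimal_topology.py and the two-PoP layout without an extra switch are illustrative only and not part of this commit.

# my_minimal_topology.py -- illustrative sketch, not part of this commit
import logging

from mininet.log import setLogLevel
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.zerorpcapi import ZeroRpcApiEndpoint

logging.basicConfig(level=logging.INFO)


def create_topology():
    # two emulated PoPs connected by a direct link
    net = DCNetwork()
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    net.addLink(dc1, dc2)

    # expose both PoPs through one zerorpc API endpoint (the one son-emu-cli talks to)
    api = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    api.connectDatacenter(dc1)
    api.connectDatacenter(dc2)
    api.start()  # runs in a separate thread, does not block

    # start the emulator, drop into the Mininet CLI, clean up on exit
    net.start()
    net.CLI()
    net.stop()


if __name__ == '__main__':
    setLogLevel('info')  # set Mininet loglevel
    create_topology()

As with the shipped examples, such a script would be started with sudo (Mininet needs root privileges); the PoPs can then be exercised from a second terminal, e.g. with `son-emu-cli compute start -d dc1 -n vnf1`, adapting the commands from the README hunk above to the dc1/dc2 names used in this sketch.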