From 80f18b0d8e87b29cd06e6553ca0b5d111bac1ec3 Mon Sep 17 00:00:00 2001
From: stevenvanrossem
Date: Mon, 18 Apr 2016 10:56:15 +0200
Subject: [PATCH] new example file for monitoring demo

---
 src/emuvim/dcemulator/prometheus.yml          |   1 +
 .../examples/monitoring_demo_topology.py      | 121 ++++++++++++++++++
 2 files changed, 122 insertions(+)
 create mode 100755 src/emuvim/examples/monitoring_demo_topology.py

diff --git a/src/emuvim/dcemulator/prometheus.yml b/src/emuvim/dcemulator/prometheus.yml
index 2915578..b063d56 100644
--- a/src/emuvim/dcemulator/prometheus.yml
+++ b/src/emuvim/dcemulator/prometheus.yml
@@ -34,3 +34,4 @@ scrape_configs:
     target_groups:
       - targets: ['172.17.0.1:8090']
 
+
diff --git a/src/emuvim/examples/monitoring_demo_topology.py b/src/emuvim/examples/monitoring_demo_topology.py
new file mode 100755
index 0000000..16c2b50
--- /dev/null
+++ b/src/emuvim/examples/monitoring_demo_topology.py
@@ -0,0 +1,121 @@
+"""
+This is an example topology for the distributed cloud emulator (dcemulator).
+(c) 2015 by Manuel Peuster
+
+
+This is an example that shows how a user of the emulation tool can
+define network topologies with multiple emulated cloud data centers.
+
+The definition is done with a Python API which looks very similar to the
+Mininet API (in fact it is a wrapper for it).
+
+We only specify the topology *between* data centers, not within a single
+data center (data center internal setups or placements are not of interest;
+we want to experiment with VNF chains deployed across multiple PoPs).
+
+The original Mininet API is completely hidden and should not be used by
+this script.
+"""
+import logging
+from mininet.log import setLogLevel
+from emuvim.dcemulator.net import DCNetwork
+from emuvim.api.zerorpc.compute import ZeroRpcApiEndpoint
+from emuvim.api.zerorpc.network import ZeroRpcApiEndpointDCNetwork
+
+logging.basicConfig(level=logging.INFO)
+
+
+def create_topology1():
+    """
+    1. Create a data center network object (DCNetwork) with monitoring enabled
+    """
+    net = DCNetwork(monitor=True)
+
+    """
+    1b. Add a monitoring agent to the DCNetwork
+    """
+    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
+    mon_api.connectDCNetwork(net)
+    mon_api.start()
+    """
+    2. Add (logical) data centers to the topology
+       (each data center is one "bigswitch" in our simplified
+        first prototype)
+    """
+    dc1 = net.addDatacenter("datacenter1")
+    dc2 = net.addDatacenter("datacenter2")
+    dc3 = net.addDatacenter("long_data_center_name3")
+    dc4 = net.addDatacenter(
+        "datacenter4",
+        metadata={"mydata": "we can also add arbitrary metadata to each DC"})
+
+    """
+    3. You can add additional SDN switches for data center
+       interconnections to the network.
+    """
+    s1 = net.addSwitch("s1")
+
+    """
+    4. Add links between your data centers and additional switches
+       to define your topology.
+       These links can use Mininet's features to limit bw, add delay or jitter.
+    """
+    net.addLink(dc1, dc2)
+    net.addLink("datacenter1", s1)
+    net.addLink(s1, dc3)
+    net.addLink(s1, "datacenter4")
+
+    """
+    5. We want to access and control our data centers from the outside,
+       e.g., we want to connect an orchestrator to start/stop compute
+       resources, a.k.a. VNFs (represented by Docker containers in the emulator).
+
+       So we need to instantiate API endpoints (e.g. a zerorpc or REST
+       interface). Depending on the endpoint implementations, we can connect
+       one or more data centers to it, which can then be controlled through
+       this API, e.g., start/stop/list compute instances.
+    """
+    # create a new instance of an endpoint implementation
+    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
+    # connect data centers to this endpoint
+    zapi1.connectDatacenter(dc1)
+    zapi1.connectDatacenter(dc2)
+    zapi1.connectDatacenter(dc3)
+    zapi1.connectDatacenter(dc4)
+    # run API endpoint server (in another thread, don't block)
+    zapi1.start()
+
+    """
+    5.1. For our example, we create a second endpoint to illustrate that
+         this is supported by our design. This feature allows us to have
+         one API endpoint for each data center. This makes the emulation
+         environment more realistic because you can easily create one
+         OpenStack-like REST API endpoint for *each* data center.
+         This will look like a real-world multi-PoP/data center deployment
+         from the perspective of an orchestrator.
+    """
+    zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
+    zapi2.connectDatacenter(dc3)
+    zapi2.connectDatacenter(dc4)
+    zapi2.start()
+
+    """
+    6. Finally we are done and can start our network (the emulator).
+       We can also enter the Mininet CLI to interactively work
+       with our compute resources (just like in default Mininet).
+       But we can also implement fully automated experiments that
+       can be executed again and again.
+    """
+    net.start()
+    net.CLI()
+    # when the user types exit in the CLI, we stop the emulator
+    net.stop()
+
+
+def main():
+    setLogLevel('info')  # set Mininet loglevel
+    create_topology1()
+
+
+if __name__ == '__main__':
+    main()
-- 
2.25.1
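
Note (not part of the patch): a minimal sketch of how an orchestrator-side
client could talk to the zerorpc compute endpoint (zapi1, port 4242) started
by the example topology above. It assumes the topology script is already
running and that the zerorpc Python package is installed; the remote method
name compute_list is only a hypothetical placeholder, so check the
ZeroRpcApiEndpoint implementation for the methods it actually exposes.

    import zerorpc

    # connect to the compute API endpoint (zapi1) of the running emulator
    client = zerorpc.Client()
    client.connect("tcp://127.0.0.1:4242")

    # hypothetical call; replace with a method actually exposed by
    # ZeroRpcApiEndpoint, e.g. to list or start compute instances
    # vnfs = client.compute_list("datacenter1")

    client.close()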