Merge pull request #54 from mpeuster/master
[osm/vim-emu.git] / src / emuvim / dcemulator / net.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 import time
9 from subprocess import Popen
10 import os
11
12 from mininet.net import Dockernet
13 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
14 from mininet.cli import CLI
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
19 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
20
21
22 class DCNetwork(Dockernet):
23 """
24 Wraps the original Mininet/Dockernet class and provides
25 methods to add data centers, switches, etc.
26
27 This class is used by topology definition scripts.
28 """
29
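    # Typical usage from a topology definition script (illustrative sketch;
    # the labels "dc1"/"dc2" are placeholders):
    #
    #   net = DCNetwork()
    #   dc1 = net.addDatacenter("dc1")
    #   dc2 = net.addDatacenter("dc2")
    #   net.addLink(dc1, dc2)
    #   net.start()
    #   net.CLI()
    #   net.stop()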
    def __init__(self, controller=RemoteController, dc_emulation_max_cpu=1.0, **kwargs):
        """
        Create an extended version of a Dockernet network
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param kwargs: passed through to the underlying Mininet/Dockernet constructor
        :return:
        """
        self.dcs = {}

        # call original Dockernet.__init__ and set the default switch type
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # start Ryu controller
        self.startRyu()

        # add a remote controller to be able to use Ryu
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        self.DCNetwork_graph = nx.DiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(dc_emulation_max_cpu)

    def addDatacenter(self, label, metadata={}):
        """
        Create and add a logical cloud data center to the network.
        """
        if label in self.dcs:
            raise Exception("Data center label already exists: %s" % label)
        dc = Datacenter(label, metadata=metadata)
        dc.net = self  # set reference to network
        self.dcs[label] = dc
        dc.create()  # finally create the data center in our Mininet instance
        logging.info("added data center: %s" % label)
        return dc

    def addLink(self, node1, node2, **params):
        """
        Wrapper for Dockernet.addLink that can handle Datacenter objects
        as link end points (see the illustrative note below this method).
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1
        if isinstance( node1, basestring ):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance( node1, Datacenter ):
            node1 = node1.switch
        # ensure type of node2
        if isinstance( node2, basestring ):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance( node2, Datacenter ):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance( node1, Docker ):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance( node2, Docker ):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # add edge and assigned port number to graph in both directions between node1 and node2
        self.DCNetwork_graph.add_edge(node1.name, node2.name,
            {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(node2.name, node1.name,
            {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]})

        return link

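    # Illustrative note: links default to TCLink, so topology scripts may pass
    # Mininet traffic-shaping parameters when connecting nodes or data centers,
    # e.g. net.addLink("dc1", "dc2", delay="10ms", bw=100).
    # The labels and parameter values here are placeholder examples.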
    def addDocker( self, label, **params ):
        """
        Wrapper for addDocker method to use our custom container class.
        """
        self.DCNetwork_graph.add_node(label)
        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)

    def removeDocker( self, label, **params ):
        """
        Wrapper for removeDocker method to keep the network graph up to date.
        """
        self.DCNetwork_graph.remove_node(label)
        return Dockernet.removeDocker(self, label, **params)

    def addSwitch( self, name, add_to_graph=True, **params ):
        """
        Wrapper for addSwitch method to also store the switch in the network graph.
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)

    def getAllContainers(self):
        """
        Returns a list with all containers within all data centers.
        """
        all_containers = []
        for dc in self.dcs.itervalues():
            all_containers += dc.listCompute()
        return all_containers

    def start(self):
        # start the data centers, then the underlying Dockernet network
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)

    def stop(self):
        # stop the network first, then the Ryu controller
        Dockernet.stop(self)
        self.stopRyu()

    def CLI(self):
        CLI(self)

    # to remove a chain, call setChain(src, dst, cmd='del-flows')
    def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'):
        # get the shortest path between source and destination VNF
        path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        # walk the path hop by hop and program each intermediate switch
        current_hop = vnf_src_name
        for i in range(0, len(path)):
            next_hop = path[path.index(current_hop)+1]
            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
            elif not isinstance( next_node, OVSSwitch ):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)

            switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port']
            next2_hop = path[path.index(current_hop)+2]
            switch_outport = self.DCNetwork_graph[next_hop][next2_hop]['src_port']

            logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport))
            # set flow entry via ovs-ofctl
            # TODO use the REST API of Ryu to set flow entries on the correct switch dpid
            if isinstance( next_node, OVSSwitch ):
                match = 'in_port=%s' % switch_inport

                if cmd == 'add-flow':
                    action = 'action=%s' % switch_outport
                    s = ','
                    ofcmd = s.join([match, action])
                elif cmd == 'del-flows':
                    ofcmd = match
                else:
                    ofcmd = ''

                next_node.dpctl(cmd, ofcmd)

            current_hop = next_hop

        return "destination node: {0} not reached".format(vnf_dst_name)

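    # Illustrative example: net.setChain("vnf1", "vnf2") installs flow entries
    # along the shortest path between the two named containers, and
    # net.setChain("vnf1", "vnf2", cmd='del-flows') removes them again.
    # The names "vnf1"/"vnf2" are placeholders for container labels.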
    # start Ryu OpenFlow controller as Remote Controller for the DCNetwork
    def startRyu(self):
        # start Ryu controller with REST API
        python_install_path = site.getsitepackages()[0]
        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
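        # simple_switch_13.py provides basic L2 learning switch behavior;
        # ofctl_rest.py exposes Ryu's REST interface for reading and installing
        # flow entries (the REST API mentioned in the TODO in setChain)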
        # change the default OpenFlow controller port to 6653 (the official IANA-assigned port number), as used by Mininet
        # Ryu still uses 6633 as its default
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        FNULL = open("/tmp/ryu.log", 'w')
        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
        time.sleep(1)

    def stopRyu(self):
        if self.ryu_process:
            self.ryu_process.terminate()
            self.ryu_process.kill()