# [osm/vim-emu.git] src/emuvim/dcemulator/net.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 import time
9 from subprocess import Popen
10 import os
11
12 from mininet.net import Dockernet
13 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
14 from mininet.cli import CLI
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
19 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
20
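# NOTE (added editorial remark): this module targets Python 2 (it uses basestring and
# dict.itervalues) and requires the Dockernet fork of Mininet, which adds Docker
# containers as Mininet hosts.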

class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """

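    # Usage sketch (illustrative topology script, not part of the original file;
    # assumes the emulator runs as root and that Datacenter exposes startCompute()
    # as in the shipped example topologies):
    #
    #   net = DCNetwork()
    #   dc1 = net.addDatacenter("dc1")
    #   dc2 = net.addDatacenter("dc2")
    #   net.addLink(dc1, dc2)
    #   net.start()
    #   dc1.startCompute("vnf1")
    #   dc2.startCompute("vnf2")
    #   net.setChain("vnf1", "vnf2")
    #   net.CLI()
    #   net.stop()
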
    def __init__(self, controller=RemoteController, dc_emulation_max_cpu=1.0, **kwargs):
        """
        Create an extended version of a Dockernet network
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param kwargs: pass-through of Mininet parameters
        :return:
        """
        self.dcs = {}

        # call original Dockernet.__init__ and set up the default switch type
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # Ryu management
        self.ryu_process = None
        if controller == RemoteController:
            # start Ryu controller
            self.startRyu()

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        self.DCNetwork_graph = nx.DiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(dc_emulation_max_cpu)
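
        # NOTE (added usage hint, not in the original code): passing another controller
        # class, e.g. DCNetwork(controller=Controller) with Mininet's default controller,
        # skips the Ryu startup above; useful for lightweight unit-test topologies.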

    def addDatacenter(self, label, metadata={}):
        """
        Create and add a logical cloud data center to the network.
        """
        if label in self.dcs:
            raise Exception("Data center label already exists: %s" % label)
        dc = Datacenter(label, metadata=metadata)
        dc.net = self  # set reference to network
        self.dcs[label] = dc
        dc.create()  # finally create the data center in our Mininet instance
        logging.info("added data center: %s" % label)
        return dc

    def addLink(self, node1, node2, **params):
        """
        Able to handle Datacenter objects (or their labels) as link
        end points.
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1
        if isinstance( node1, basestring ):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance( node1, Datacenter ):
            node1 = node1.switch
        # ensure type of node2
        if isinstance( node2, basestring ):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance( node2, Datacenter ):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance( node1, Docker ):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance( node2, Docker ):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # add edge and assigned port number to graph in both directions between node1 and node2
        self.DCNetwork_graph.add_edge(node1.name, node2.name,
            {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(node2.name, node1.name,
            {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]})
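        # the stored port numbers can later be looked up per direction, e.g.
        # self.DCNetwork_graph[node1.name][node2.name]['src_port'] (as done in setChain)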

        return link

    def addDocker( self, label, **params ):
        """
        Wrapper for addDocker method to use custom container class.
        """
        self.DCNetwork_graph.add_node(label)
        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)

    def removeDocker( self, label, **params ):
        """
        Wrapper for removeDocker method to update graph.
        """
        self.DCNetwork_graph.remove_node(label)
        return Dockernet.removeDocker(self, label, **params)

    def addSwitch( self, name, add_to_graph=True, **params ):
        """
        Wrapper for addSwitch method to also store the switch in the graph.
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)

    def getAllContainers(self):
        """
        Returns a list with all containers within all data centers.
        """
        all_containers = []
        for dc in self.dcs.itervalues():
            all_containers += dc.listCompute()
        return all_containers

    def start(self):
        # start all data centers, then the underlying Dockernet
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)

    def stop(self):
        # stop the network first, then the Ryu controller
        Dockernet.stop(self)
        self.stopRyu()

    def CLI(self):
        CLI(self)

    # to remove a chain do setChain( src, dst, cmd='del-flows')
    def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'):
        # get shortest path
        path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        current_hop = vnf_src_name
        for i in range(0, len(path)):
            next_hop = path[path.index(current_hop)+1]
            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
            elif not isinstance( next_node, OVSSwitch ):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)

            switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port']
            next2_hop = path[path.index(current_hop)+2]
            switch_outport = self.DCNetwork_graph[next_hop][next2_hop]['src_port']

            logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport))
            # set flow entry via ovs-ofctl
            # TODO use the REST API of Ryu to set flow entries on the correct switch dpid
            if isinstance( next_node, OVSSwitch ):
                match = 'in_port=%s' % switch_inport

                if cmd == 'add-flow':
                    action = 'action=%s' % switch_outport
                    s = ','
                    ofcmd = s.join([match, action])
                elif cmd == 'del-flows':
                    ofcmd = match
                else:
                    ofcmd = ''

                next_node.dpctl(cmd, ofcmd)

            current_hop = next_hop

        return "destination node: {0} not reached".format(vnf_dst_name)

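    # Chain setup sketch (hypothetical VNF names, not part of the original file):
    #   net.setChain('vnf1', 'vnf2') walks the shortest path in DCNetwork_graph and,
    #   for every OVS switch on the path, calls dpctl('add-flow', 'in_port=<in>,action=<out>');
    #   net.setChain('vnf1', 'vnf2', cmd='del-flows') matches only on in_port and removes the entries.
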
    # start the Ryu OpenFlow controller as remote controller for the DCNetwork
    def startRyu(self):
        # start Ryu controller with REST API
        python_install_path = site.getsitepackages()[0]
        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
        # change the default OpenFlow controller port to 6653 (official IANA-assigned port number), as used by Mininet
        # Ryu still uses 6633 as default
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        FNULL = open("/tmp/ryu.log", 'w')
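        # resulting command is roughly:
        #   ryu-manager <site-packages>/ryu/app/simple_switch_13.py \
        #               <site-packages>/ryu/app/ofctl_rest.py --ofp-tcp-listen-port 6653
        # with stdout/stderr redirected to /tmp/ryu.log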
        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
        time.sleep(1)

    def stopRyu(self):
        if self.ryu_process is not None:
            self.ryu_process.terminate()
            self.ryu_process.kill()
