Merge pull request #41 from mpeuster/master
[osm/vim-emu.git] / src / emuvim / dcemulator / net.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 from subprocess import Popen
9 import os
10
11 from mininet.net import Dockernet
12 from mininet.node import Controller, OVSSwitch, OVSKernelSwitch, Switch, Docker, Host, RemoteController
13 from mininet.cli import CLI
14 from mininet.log import setLogLevel, info, debug
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18
19 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
20
21
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """

    def __init__(self, **kwargs):
        """
        Create the emulation network.

        Sets up the underlying Dockernet with a remote controller,
        builds an (initially empty) directed topology graph, attaches
        the monitoring agent, and starts a local Ryu controller.
        """
        self.dcs = {}  # maps data center label -> Datacenter object
        # create a Mininet/Dockernet network:
        # call original Dockernet.__init__ and setup default controller
        Dockernet.__init__(
            self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
        self.addController('c0', controller=RemoteController)

        # directed graph mirroring the complete DC network topology
        self.DCNetwork_graph = nx.DiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)

        # start Ryu controller (subprocess; terminated again in stop())
        self.startRyu()

    def addDatacenter(self, label, metadata=None):
        """
        Create and add a logical cloud data center to the network.

        :param label: unique name for the data center
        :param metadata: optional dict of arbitrary metadata (default: {})
        :return: the created Datacenter object
        :raises Exception: if a data center with this label already exists
        """
        # NOTE: metadata defaults to None instead of a mutable {} default;
        # a shared dict literal would be reused across calls and leak
        # state between data centers.
        if metadata is None:
            metadata = {}
        if label in self.dcs:
            raise Exception("Data center label already exists: %s" % label)
        dc = Datacenter(label, metadata=metadata)
        dc.net = self  # set reference to network
        self.dcs[label] = dc
        dc.create()  # finally create the data center in our Mininet instance
        logging.info("added data center: %s" % label)
        return dc

    def addLink(self, node1, node2, **params):
        """
        Add a link between two nodes. Able to handle Datacenter objects
        (or their labels) as link end points; in that case the link is
        attached to the data center's switch.

        Also records the link (with the assigned port numbers) in
        DCNetwork_graph, in both directions.
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # resolve DC labels / Datacenter objects to their switches
        node1 = self._resolve_node(node1)
        node2 = self._resolve_node(node2)
        # try to give containers a default IP
        self._assign_default_ip(node1, "params1", params)
        self._assign_default_ip(node2, "params2", params)
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # add edge and assigned port number to graph in both directions between node1 and node2
        # (networkx 1.x style: attribute dict passed positionally)
        self.DCNetwork_graph.add_edge(node1.name, node2.name, \
            {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(node2.name, node1.name, \
            {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]})

        return link

    def _resolve_node(self, node):
        """
        Map a data center label or Datacenter object to its switch;
        any other node type is passed through unchanged.
        """
        if isinstance(node, basestring) and node in self.dcs:
            node = self.dcs[node].switch
        if isinstance(node, Datacenter):
            node = node.switch
        return node

    def _assign_default_ip(self, node, params_key, params):
        """
        Assign the next free default IP to a Docker container end point,
        mutating the given link params dict in place. No-op for non-Docker
        nodes or if an IP was already specified by the caller.
        """
        if isinstance(node, Docker):
            if params_key not in params:
                params[params_key] = {}
            if "ip" not in params[params_key]:
                params[params_key]["ip"] = self.getNextIp()

    def addDocker(self, label, **params):
        """
        Wrapper for addDocker method to use custom container class
        and register the container in the topology graph.
        """
        self.DCNetwork_graph.add_node(label)
        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)

    def removeDocker(self, label, **params):
        """
        Wrapper for removeDocker method to update the topology graph.
        """
        self.DCNetwork_graph.remove_node(label)
        return Dockernet.removeDocker(self, label, **params)

    def addSwitch(self, name, add_to_graph=True, **params):
        """
        Wrapper for addSwitch method to store switch also in graph.

        :param add_to_graph: set False to keep the switch out of
            DCNetwork_graph (e.g. for internal infrastructure switches)
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)

    def getAllContainers(self):
        """
        Returns a list with all containers within all data centers.
        """
        all_containers = []
        for dc in self.dcs.itervalues():
            all_containers += dc.listCompute()
        return all_containers

    def start(self):
        """
        Start all data centers, then the underlying Dockernet.
        """
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)

    def stop(self):
        """
        Stop the Ryu controller subprocess, then the underlying Dockernet.
        """
        self.ryu_process.terminate()
        # self.ryu_process.kill()
        # close the devnull handle opened for the Ryu subprocess
        # (kept on self by startRyu; fixes a file-handle leak)
        devnull = getattr(self, "_ryu_devnull", None)
        if devnull is not None:
            devnull.close()
        Dockernet.stop(self)

    def CLI(self):
        """Open the interactive Mininet CLI on this network."""
        CLI(self)

    def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'):
        """
        Install flow entries along the shortest path between two VNFs
        so traffic is chained through the intermediate switches.
        To remove a chain call setChain(src, dst, cmd='del-flows').

        :param vnf_src_name: name of the source VNF node
        :param vnf_dst_name: name of the destination VNF node
        :param cmd: ovs-ofctl command, 'add-flow' or 'del-flows'
        :return: human-readable status string
        """
        # get shortest path
        path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        # walk the path hop by hop; use the loop index directly instead of
        # re-deriving the position via path.index() each iteration (which
        # is O(n) per hop and wrong if a name appears twice in the path)
        for i in range(len(path) - 1):
            current_hop = path[i]
            next_hop = path[i + 1]
            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
            elif not isinstance(next_node, OVSSwitch):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)

            # next_hop is an intermediate switch: forward traffic from the
            # port facing current_hop to the port facing the hop after it
            switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port']
            switch_outport = self.DCNetwork_graph[next_hop][path[i + 2]]['src_port']

            logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport))
            # set flow entry via ovs-ofctl
            # TODO use rest API of ryu to set flow entries to correct switch dpid
            match = 'in_port=%s' % switch_inport
            if cmd == 'add-flow':
                action = 'action=%s' % switch_outport
                ofcmd = ','.join([match, action])
            elif cmd == 'del-flows':
                ofcmd = match
            else:
                ofcmd = ''
            next_node.dpctl(cmd, ofcmd)

        return "destination node: {0} not reached".format(vnf_dst_name)

    # start Ryu Openflow controller as Remote Controller for the DCNetwork
    def startRyu(self):
        """
        Start a Ryu OpenFlow controller (simple_switch_13 + REST API)
        as a subprocess; its handle is kept in self.ryu_process so it
        can be terminated in stop().
        """
        python_install_path = site.getsitepackages()[0]
        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
        # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
        # Ryu still uses 6633 as default
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        # keep the devnull handle on self so stop() can close it
        self._ryu_devnull = open(os.devnull, 'w')
        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port],
                                 stdout=self._ryu_devnull, stderr=self._ryu_devnull)