Merge pull request #48 from mpeuster/master
[osm/vim-emu.git] / src / emuvim / dcemulator / net.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 from subprocess import Popen
9 import os
10
11 from mininet.net import Dockernet
12 from mininet.node import Controller, OVSSwitch, OVSKernelSwitch, Switch, Docker, Host, RemoteController
13 from mininet.cli import CLI
14 from mininet.log import setLogLevel, info, debug
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
19 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
20
21
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """

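    # Usage sketch (hypothetical topology script; the labels "dc1"/"dc2"
    # are illustrative and not part of this module):
    #
    #   net = DCNetwork()
    #   dc1 = net.addDatacenter("dc1")
    #   dc2 = net.addDatacenter("dc2")
    #   net.addLink(dc1, dc2)
    #   net.start()
    #   net.CLI()
    #   net.stop()
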
    def __init__(self, dc_emulation_max_cpu=1.0, **kwargs):
        """
        Create an extended version of a Dockernet network.
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param kwargs: passed through to the underlying Mininet/Dockernet constructor
        :return:
        """
        self.dcs = {}

        # call original Dockernet.__init__ and set up default controller and switch types
        Dockernet.__init__(
            self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)

        # add a remote controller to be able to use Ryu
        self.addController('c0', controller=RemoteController)

        # graph of the complete DC network
        self.DCNetwork_graph = nx.DiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)

        # start Ryu controller
        self.startRyu()

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(dc_emulation_max_cpu)
    def addDatacenter(self, label, metadata=None):
        """
        Create and add a logical cloud data center to the network.
        """
        if label in self.dcs:
            raise Exception("Data center label already exists: %s" % label)
        # avoid a mutable default argument for the metadata dict
        dc = Datacenter(label, metadata=metadata if metadata is not None else {})
        dc.net = self  # set reference to network
        self.dcs[label] = dc
        dc.create()  # finally create the data center in our Mininet instance
        logging.info("added data center: %s" % label)
        return dc

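    # Example (hypothetical label and metadata, for illustration only):
    #   dc = net.addDatacenter("dc1", metadata={"location": "paderborn"})
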
    def addLink(self, node1, node2, **params):
        """
        Wrapper for addLink that can handle Datacenter objects
        as link end points.
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1
        if isinstance(node1, basestring):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance(node1, Datacenter):
            node1 = node1.switch
        # ensure type of node2
        if isinstance(node2, basestring):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance(node2, Datacenter):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance(node1, Docker):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance(node2, Docker):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # add edge and assigned port number to graph in both directions between node1 and node2
        self.DCNetwork_graph.add_edge(
            node1.name, node2.name,
            {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(
            node2.name, node1.name,
            {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]})

        return link

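    # Example (hypothetical values; bw/delay are standard Mininet TCLink
    # parameters passed through via **params, not defaults of this wrapper):
    #   net.addLink("dc1", "dc2", bw=10, delay="20ms")
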
    def addDocker(self, label, **params):
        """
        Wrapper for addDocker method to use custom container class.
        """
        self.DCNetwork_graph.add_node(label)
        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)

    def removeDocker(self, label, **params):
        """
        Wrapper for removeDocker method to update graph.
        """
        self.DCNetwork_graph.remove_node(label)
        return Dockernet.removeDocker(self, label, **params)

    def addSwitch(self, name, add_to_graph=True, **params):
        """
        Wrapper for addSwitch method to store switch also in graph.
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)

    def getAllContainers(self):
        """
        Returns a list with all containers within all data centers.
        """
        all_containers = []
        for dc in self.dcs.itervalues():
            all_containers += dc.listCompute()
        return all_containers

    def start(self):
        # start all data centers, then the underlying Dockernet
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)

    def stop(self):
        # stop the Ryu controller before stopping the network
        self.ryu_process.terminate()
        #self.ryu_process.kill()
        Dockernet.stop(self)

    def CLI(self):
        CLI(self)

    # to remove a chain, call setChain(src, dst, cmd='del-flows')
    def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'):
        # get shortest path
        path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        current_hop = vnf_src_name
        for i in range(0, len(path)):
            next_hop = path[path.index(current_hop) + 1]
            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
            elif not isinstance(next_node, OVSSwitch):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)

            switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port']
            next2_hop = path[path.index(current_hop) + 2]
            switch_outport = self.DCNetwork_graph[next_hop][next2_hop]['src_port']

            logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport))
            # set flow entry via ovs-ofctl
            # TODO use Ryu's REST API to set flow entries on the correct switch dpid
            if isinstance(next_node, OVSSwitch):
                match = 'in_port=%s' % switch_inport

                if cmd == 'add-flow':
                    action = 'action=%s' % switch_outport
                    s = ','
                    ofcmd = s.join([match, action])
                elif cmd == 'del-flows':
                    ofcmd = match
                else:
                    ofcmd = ''

                next_node.dpctl(cmd, ofcmd)

            current_hop = next_hop

        return "destination node: {0} not reached".format(vnf_dst_name)

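    # Usage sketch (hypothetical VNF names, for illustration only):
    #   net.setChain("vnf1", "vnf2")                   # install chain
    #   net.setChain("vnf1", "vnf2", cmd='del-flows')  # remove chain
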
    # start Ryu OpenFlow controller as a remote controller for the DCNetwork
    def startRyu(self):
        # start Ryu controller with REST API
        python_install_path = site.getsitepackages()[0]
        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
        # change the default OpenFlow controller port to 6653 (official IANA-assigned port number), as used by Mininet
        # Ryu still uses 6633 as its default
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        FNULL = open(os.devnull, 'w')
        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
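
    # Note: ofctl_rest exposes Ryu's REST API (by default on TCP port 8080),
    # which is what the TODO in setChain refers to: it would allow installing
    # flow entries per switch dpid instead of calling dpctl locally.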