[osm/vim-emu.git] / emuvim / dcemulator / net.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 from mininet.net import Dockernet
8 from mininet.node import Controller, OVSSwitch, OVSKernelSwitch, Switch, Docker, Host, RemoteController
9 from mininet.cli import CLI
10 from mininet.log import setLogLevel, info, debug
11 from mininet.link import TCLink, Link
12 import networkx as nx
13 from monitoring import DCNetworkMonitor
14
15 from node import Datacenter, EmulatorCompute
16
17
18 class DCNetwork(Dockernet):
19 """
20 Wraps the original Mininet/Dockernet class and provides
21 methods to add data centers, switches, etc.
22
23 This class is used by topology definition scripts.
24 """
25
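    # Example (sketch, not part of the original file): a topology definition
    # script is expected to use this class roughly as follows; the data center
    # labels are made up for illustration:
    #
    #   net = DCNetwork()
    #   dc1 = net.addDatacenter("dc1")
    #   dc2 = net.addDatacenter("dc2")
    #   net.addLink(dc1, dc2)
    #   net.start()
    #   net.CLI()
    #   net.stop()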
    def __init__(self, **kwargs):
        self.dcs = {}
        # create a Mininet/Dockernet network
        # call original Dockernet.__init__ and set up the default controller
        Dockernet.__init__(
            self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
        self.addController('c0')

        # graph of the complete DC network
        self.DCNetwork_graph = nx.DiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)
    def addDatacenter(self, label, metadata={}):
        """
        Create and add a logical cloud data center to the network.
        """
        if label in self.dcs:
            raise Exception("Data center label already exists: %s" % label)
        dc = Datacenter(label, metadata=metadata)
        dc.net = self  # set reference to network
        self.dcs[label] = dc
        dc.create()  # finally create the data center in our Mininet instance
        logging.info("added data center: %s" % label)
        return dc
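    # Example (sketch, not part of the original file): the metadata dict is
    # simply handed to the created Datacenter object, so arbitrary user-defined
    # keys can be attached; the key/value below is hypothetical:
    #
    #   dc = net.addDatacenter("dc1", metadata={"location": "paderborn"})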
    def addLink(self, node1, node2, **params):
        """
        Wrapper for addLink that can also handle Datacenter objects
        (or their labels) as link end points.
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1
        if isinstance(node1, basestring):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance(node1, Datacenter):
            node1 = node1.switch
        # ensure type of node2
        if isinstance(node2, basestring):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance(node2, Datacenter):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance(node1, Docker):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance(node2, Docker):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()

        link = Dockernet.addLink(self, node1, node2, **params)  # TODO we need TCLinks with user defined performance here

        # add an edge with the assigned port numbers to the graph,
        # in both directions between node1 and node2
        self.DCNetwork_graph.add_edge(node1.name, node2.name,
            {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(node2.name, node1.name,
            {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]})

        return link
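    # Example (sketch, not part of the original file): both calls below are
    # expected to be equivalent, since Datacenter objects and known data center
    # labels are resolved to the data center's switch before linking:
    #
    #   net.addLink(dc1, dc2)
    #   net.addLink("dc1", "dc2")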
    def addDocker(self, label, **params):
        """
        Wrapper for addDocker method to use custom container class.
        """
        self.DCNetwork_graph.add_node(label)
        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)

    def removeDocker(self, label, **params):
        """
        Wrapper for removeDocker method to update graph.
        """
        self.DCNetwork_graph.remove_node(label)
        return Dockernet.removeDocker(self, label, **params)
    def addSwitch(self, name, add_to_graph=True, **params):
        """
        Wrapper for addSwitch method that also stores the switch in the graph.
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
    def getAllContainers(self):
        """
        Returns a list with all containers within all data centers.
        """
        all_containers = []
        for dc in self.dcs.itervalues():
            all_containers += dc.listCompute()
        return all_containers
    def start(self):
        # start all data centers before the underlying Dockernet network
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)

    def stop(self):
        Dockernet.stop(self)

    def CLI(self):
        CLI(self)
    # to remove a chain, call setChain(src, dst, cmd='del-flows')
    def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'):
        """
        Install (or remove) flow entries along the shortest path between
        two VNFs so that traffic is chained from vnf_src_name to vnf_dst_name.
        """
        # get shortest path
        path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        current_hop = vnf_src_name
        for i in range(0, len(path)):
            next_hop = path[path.index(current_hop) + 1]
            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
            elif not isinstance(next_node, OVSSwitch):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)

            switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port']
            next2_hop = path[path.index(current_hop) + 2]
            switch_outport = self.DCNetwork_graph[next_hop][next2_hop]['src_port']

            logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport))
            # set the flow entry via ovs-ofctl (dpctl)
            # TODO use the REST API of Ryu to set flow entries on the correct switch dpid
            if isinstance(next_node, OVSSwitch):
                match = 'in_port=%s' % switch_inport

                if cmd == 'add-flow':
                    action = 'action=%s' % switch_outport
                    s = ','
                    ofcmd = s.join([match, action])
                elif cmd == 'del-flows':
                    ofcmd = match
                else:
                    ofcmd = ''

                next_node.dpctl(cmd, ofcmd)

            current_hop = next_hop

        return "destination node: {0} not reached".format(vnf_dst_name)
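    # Example (sketch, not part of the original file): chain two deployed VNF
    # containers and remove the chain again later; the VNF names are made up:
    #
    #   net.setChain("vnf1", "vnf2")                   # install flow entries
    #   net.setChain("vnf1", "vnf2", cmd="del-flows")  # remove them again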