set chaining via VLANs
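This patch realizes a service chain as VLAN-tagged flow entries along the shortest path between the switches that the two VNF interfaces attach to, with optional link weights (bw, delay, jitter, loss) taken from the topology. A rough usage sketch of the extended API follows; the topology, the VNF names and the startCompute() calls are illustrative assumptions based on the surrounding emulator project, not part of this diff:

    from mininet.link import TCLink
    from mininet.node import RemoteController
    from emuvim.dcemulator.net import DCNetwork

    # new constructor options: pluggable controller, optional monitoring,
    # CPU and memory limits for the emulation (values here are examples)
    net = DCNetwork(controller=RemoteController, monitor=False,
                    dc_emulation_max_cpu=1.0, dc_emulation_max_mem=512,
                    link=TCLink)

    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2", resource_log_path="/tmp/dc2.log")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")   # delay/bw/jitter/loss also become graph weights
    net.addLink(s1, dc2, delay="10ms")

    net.start()
    vnf1 = dc1.startCompute("vnf1")      # assumed Datacenter helper, not shown in this diff
    vnf2 = dc2.startCompute("vnf2")

    # chain the two VNFs; each multi-switch path gets its own VLAN tag
    net.setChain("vnf1", "vnf2", cmd="add-flow", bidirectional=True,
                 weight="delay", cookie=1)

    net.CLI()
    net.stop()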
[osm/vim-emu.git] / src/emuvim/dcemulator/net.py
index da546ab..f7fafdf 100755 (executable)
@@ -5,20 +5,22 @@ Distributed Cloud Emulator (dcemulator)
 import logging
 
 import site
+import time
 from subprocess import Popen
 import os
+import re
+
+
 
 from mininet.net import Dockernet
-from mininet.node import Controller, OVSSwitch, OVSKernelSwitch, Switch, Docker, Host, RemoteController
+from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
 from mininet.cli import CLI
-from mininet.log import setLogLevel, info, debug
 from mininet.link import TCLink
 import networkx as nx
 from emuvim.dcemulator.monitoring import DCNetworkMonitor
 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
 
-
 class DCNetwork(Dockernet):
     """
     Wraps the original Mininet/Dockernet class and provides
@@ -27,7 +29,10 @@ class DCNetwork(Dockernet):
     This class is used by topology definition scripts.
     """
 
-    def __init__(self, dc_emulation_max_cpu=1.0, **kwargs):
+    def __init__(self, controller=RemoteController, monitor=False,
+                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
+                 dc_emulation_max_mem=512,  # emulation max mem in MB
+                 **kwargs):
         """
         Create an extended version of a Dockernet network
         :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
         """
         Create an extended version of a Dockernet network
         :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
@@ -38,30 +43,40 @@ class DCNetwork(Dockernet):
 
         # call original Docker.__init__ and setup default controller
         Dockernet.__init__(
-            self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
+            self, switch=OVSKernelSwitch, **kwargs)
+
+        # Ryu management
+        self.ryu_process = None
+        if controller == RemoteController:
+            # start Ryu controller
+            self.startRyu()

-        # ass a remote controller to be able to use Ryu
-        self.addController('c0', controller=RemoteController)
+        # add the specified controller
+        self.addController('c0', controller=controller)
 
         # graph of the complete DC network
-        self.DCNetwork_graph=nx.DiGraph()
+        self.DCNetwork_graph = nx.MultiDiGraph()

-        # monitoring agent
-        self.monitor_agent = DCNetworkMonitor(self)
+        # initialize pool of vlan tags to setup the SDN paths
+        self.vlans = range(4096)[::-1]

-        # start Ryu controller
-        self.startRyu()
+        # monitoring agent
+        if monitor:
+            self.monitor_agent = DCNetworkMonitor(self)
+        else:
+            self.monitor_agent = None
 
         # initialize resource model registrar
-        self.rm_registrar = ResourceModelRegistrar(dc_emulation_max_cpu)
+        self.rm_registrar = ResourceModelRegistrar(
+            dc_emulation_max_cpu, dc_emulation_max_mem)

-    def addDatacenter(self, label, metadata={}):
+    def addDatacenter(self, label, metadata={}, resource_log_path=None):
         """
         Create and add a logical cloud data center to the network.
         """
         if label in self.dcs:
             raise Exception("Data center label already exists: %s" % label)
         """
         Create and add a logical cloud data center to the network.
         """
         if label in self.dcs:
             raise Exception("Data center label already exists: %s" % label)
-        dc = Datacenter(label, metadata=metadata)
+        dc = Datacenter(label, metadata=metadata, resource_log_path=resource_log_path)
         dc.net = self  # set reference to network
         self.dcs[label] = dc
         dc.create()  # finally create the data center in our Mininet instance
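As an aside on the VLAN pool set up in __init__ above (self.vlans = range(4096)[::-1]), a standalone Python 2 sketch of how _chainAddFlow draws tags from it:

    vlans = range(4096)[::-1]     # Python 2 list: [4095, 4094, ..., 1, 0]

    tag = vlans.pop()             # pop() takes the *last* element, so the first tag is 0
    next_tag = vlans.pop()        # then 1, 2, ...
    # one tag is popped per chained path that crosses more than one switch;
    # nothing in this patch returns a tag to the pool, not even on 'del-flows'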
@@ -107,11 +122,51 @@ class DCNetwork(Dockernet):
 
         link = Dockernet.addLink(self, node1, node2, **params)

+        # try to give container interfaces a default id
+        node1_port_id = node1.ports[link.intf1]
+        if isinstance(node1, Docker):
+            if "id" in params["params1"]:
+                node1_port_id = params["params1"]["id"]
+        node1_port_name = link.intf1.name
+
+        node2_port_id = node2.ports[link.intf2]
+        if isinstance(node2, Docker):
+            if "id" in params["params2"]:
+                node2_port_id = params["params2"]["id"]
+        node2_port_name = link.intf2.name
+
+
         # add edge and assigned port number to graph in both directions between node1 and node2
-        self.DCNetwork_graph.add_edge(node1.name, node2.name, \
-                                      {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]})
-        self.DCNetwork_graph.add_edge(node2.name, node1.name, \
-                                       {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]})
+        # port_id: id given in descriptor (if available, otherwise same as port)
+        # port: portnumber assigned by Dockernet
+
+        attr_dict = {}
+        # possible weight metrics allowed by TClink class:
+        weight_metrics = ['bw', 'delay', 'jitter', 'loss']
+        edge_attributes = [p for p in params if p in weight_metrics]
+        for attr in edge_attributes:
+            # if delay: strip ms (need number as weight in graph)
+            match = re.search('([0-9]*\.?[0-9]+)', params[attr])
+            if match:
+                attr_number = match.group(1)
+            else:
+                attr_number = None
+            attr_dict[attr] = attr_number
+
+
+        attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
+                      'src_port_name': node1_port_name,
+                     'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
+                      'dst_port_name': node2_port_name}
+        attr_dict2.update(attr_dict)
+        self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)
+
+        attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
+                      'src_port_name': node2_port_name,
+                     'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
+                      'dst_port_name': node1_port_name}
+        attr_dict2.update(attr_dict)
+        self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)
 
         return link
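With the attributes stored by addLink above, the chaining code can look up port mappings and link weights directly from the MultiDiGraph. An illustrative query, assuming a running DCNetwork "net" with a link between a VNF "vnf1" and a switch "s1" (names are placeholders):

    # first parallel edge between the two nodes in the emulator graph
    edge = net.DCNetwork_graph['vnf1']['s1'][0]

    src = (edge['src_port_id'], edge['src_port_nr'], edge['src_port_name'])
    dst = (edge['dst_port_id'], edge['dst_port_nr'], edge['dst_port_name'])
    delay = edge.get('delay')   # unit-stripped number, only present if the link set delay=...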
 
 
@@ -153,67 +208,189 @@ class DCNetwork(Dockernet):
         Dockernet.start(self)
 
     def stop(self):
-        # stop Ryu controller
-        self.ryu_process.terminate()
-        #self.ryu_process.kill()
+
+        # stop the monitor agent
+        if self.monitor_agent is not None:
+            self.monitor_agent.stop()
+
+        # stop emulator net
         Dockernet.stop(self)
 
+        # stop Ryu controller
+        self.stopRyu()
+
+
     def CLI(self):
         CLI(self)
 
     # to remove chain do setChain( src, dst, cmd='del-flows')
-    def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'):
+    def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
+        cmd = kwargs.get('cmd')
+        if cmd == 'add-flow':
+            ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
+            if kwargs.get('bidirectional'):
+                return ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
+
+        elif cmd == 'del-flows':  # TODO: del-flow to be implemented
+            ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
+            if kwargs.get('bidirectional'):
+                return ret + '\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
+
+        else:
+            return "Command unknown"
+
+
+    def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
+
+        # TODO: this needs to be cleaned up
+        #check if port is specified (vnf:port)
+        if vnf_src_interface is None:
+            # take first interface by default
+            connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
+            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+            vnf_src_interface = link_dict[0]['src_port_id']
+
+        for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
+            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+            for link in link_dict:
+                if link_dict[link]['src_port_id'] == vnf_src_interface:
+                    # found the right link and connected switch
+                    src_sw = connected_sw
+
+                    src_sw_inport_nr = link_dict[link]['dst_port_nr']
+                    break
+
+        if vnf_dst_interface is None:
+            # take first interface by default
+            connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
+            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
+            vnf_dst_interface = link_dict[0]['dst_port_id']
+
+        vnf_dst_name = vnf_dst_name.split(':')[0]
+        for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
+            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
+            for link in link_dict:
+                if link_dict[link]['dst_port_id'] == vnf_dst_interface:
+                    # found the right link and connected switch
+                    dst_sw = connected_sw
+                    dst_sw_outport_nr = link_dict[link]['src_port_nr']
+                    break
+
+
         # get shortest path
-        path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
+        try:
+            # returns the first found shortest path
+            # if all shortest paths are wanted, use: all_shortest_paths
+            path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+        except:
+            logging.info("No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name))
+            return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+
         logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
 
         logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
 
-        current_hop = vnf_src_name
+        current_hop = src_sw
+        switch_inport_nr = src_sw_inport_nr
+
+        # choose free vlan if path contains more than 1 switch
+        if len(path) > 1:
+            vlan = self.vlans.pop()
+        else:
+            vlan = None
+
         for i in range(0,len(path)):
-            next_hop = path[path.index(current_hop)+1]
+            current_node = self.getNodeByName(current_hop)
+
+            if path.index(current_hop) < len(path)-1:
+                next_hop = path[path.index(current_hop)+1]
+            else:
+                #last switch reached
+                next_hop = vnf_dst_name
+
             next_node = self.getNodeByName(next_hop)
 
             if next_hop == vnf_dst_name:
-                return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+                switch_outport_nr = dst_sw_outport_nr
+                logging.info("end node reached: {0}".format(vnf_dst_name))
             elif not isinstance( next_node, OVSSwitch ):
                 logging.info("Next node: {0} is not a switch".format(next_hop))
                 return "Next node: {0} is not a switch".format(next_hop)
+            else:
+                # take first link between switches by default
+                index_edge_out = 0
+                switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']


-            switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port']
-            next2_hop = path[path.index(current_hop)+2]
-            switch_outport = self.DCNetwork_graph[next_hop][next2_hop]['src_port']
+            # set flow entry via ovs-ofctl
+            if isinstance( current_node, OVSSwitch ):
+                kwargs['vlan'] = vlan
+                kwargs['path'] = path
+                kwargs['current_hop'] = current_hop
+                self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+                # TODO set entry via Ryu REST api (in case emulator is running remote...)

-            logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport))
-            # set of entry via ovs-ofctl
-            # TODO use rest API of ryu to set flow entries to correct witch dpid
+            # take first link between switches by default
             if isinstance( next_node, OVSSwitch ):
-                match = 'in_port=%s' % switch_inport
-
-                if cmd=='add-flow':
-                    action = 'action=%s' % switch_outport
-                    s = ','
-                    ofcmd = s.join([match,action])
-                elif cmd=='del-flows':
-                    ofcmd = match
-                else:
-                    ofcmd=''
-
-                next_node.dpctl(cmd, ofcmd)
-
-            current_hop = next_hop
-
-        return "destination node: {0} not reached".format(vnf_dst_name)
+                switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
+                current_hop = next_hop
+
+        return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+
+    def _set_flow_entry_dpctl(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
+        match = 'in_port=%s' % switch_inport_nr
+
+        cookie = kwargs.get('cookie')
+        match_input = kwargs.get('match')
+        cmd = kwargs.get('cmd')
+        path = kwargs.get('path')
+        current_hop = kwargs.get('current_hop')
+        vlan = kwargs.get('vlan')
+
+        s = ','
+        if cookie:
+            cookie = 'cookie=%s' % cookie
+            match = s.join([cookie, match])
+        if match_input:
+            match = s.join([match, match_input])
+        if cmd == 'add-flow':
+            action = 'action=%s' % switch_outport_nr
+            if vlan != None:
+                if path.index(current_hop) == 0:  # first node
+                    action = ('action=mod_vlan_vid:%s' % vlan) + (',output=%s' % switch_outport_nr)
+                    match = '-O OpenFlow13 ' + match
+                elif path.index(current_hop) == len(path) - 1:  # last node
+                    match += ',dl_vlan=%s' % vlan
+                    action = 'action=strip_vlan,output=%s' % switch_outport_nr
+                else:  # middle nodes
+                    match += ',dl_vlan=%s' % vlan
+            ofcmd = s.join([match, action])
+        elif cmd == 'del-flows':
+            ofcmd = match
+        else:
+            ofcmd = ''
+
+        node.dpctl(cmd, ofcmd)
+        logging.info("{3} in switch: {0} in_port: {1} out_port: {2}".format(node.name, switch_inport_nr,
+                                                                                 switch_outport_nr, cmd))
 
     # start Ryu Openflow controller as Remote Controller for the DCNetwork
     def startRyu(self):
         # start Ryu controller with rest-API
         python_install_path = site.getsitepackages()[0]
         ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
-        ryu_path2 =  python_install_path + '/ryu/app/ofctl_rest.py'
+        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
         # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
         # Ryu still uses 6633 as default
         ryu_option = '--ofp-tcp-listen-port'
         ryu_of_port = '6653'
-        ryu_cmd =  'ryu-manager'
-        FNULL = open(os.devnull, 'w')
-        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
\ No newline at end of file
+        ryu_cmd = 'ryu-manager'
+        FNULL = open("/tmp/ryu.log", 'w')
+        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+        # no learning switch
+        #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+        time.sleep(1)
+
+    def stopRyu(self):
+        if self.ryu_process is not None:
+            self.ryu_process.terminate()
+            self.ryu_process.kill()
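To make the tagging scheme concrete, the strings that _set_flow_entry_dpctl builds for an add-flow chain across three switches look roughly like this; the tag value and port numbers are made up for the example:

    # illustrative values: VLAN tag 42, ingress port 1 / egress port 2 on each switch
    vlan, inport, outport = 42, 1, 2

    first_hop  = '-O OpenFlow13 in_port=%s,action=mod_vlan_vid:%s,output=%s' % (inport, vlan, outport)
    middle_hop = 'in_port=%s,dl_vlan=%s,action=%s' % (inport, vlan, outport)
    last_hop   = 'in_port=%s,dl_vlan=%s,action=strip_vlan,output=%s' % (inport, vlan, outport)

    # each string is handed to the switch as node.dpctl('add-flow', ofcmd), i.e. to
    # ovs-ofctl: the first hop pushes the tag, middle hops only match on it, and the
    # last hop strips it before the packet reaches the destination VNF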
+