first implementation of the profile command
author	stevenvanrossem <steven.vanrossem@intec.ugent.be>
Wed, 4 May 2016 15:28:08 +0000 (17:28 +0200)
committer	stevenvanrossem <steven.vanrossem@intec.ugent.be>
Wed, 4 May 2016 15:28:08 +0000 (17:28 +0200)
src/emuvim/api/zerorpc/compute.py
src/emuvim/cli/compute.py
src/emuvim/dcemulator/net.py
src/emuvim/dcemulator/node.py
src/emuvim/examples/monitoring_demo_topology.py

src/emuvim/api/zerorpc/compute.py
index 39ccec2..9d59183 100644 (file)
@@ -118,18 +118,25 @@ class MultiDatacenterApi(object):
 
         ## VIM/dummy gatekeeper's tasks:
         # start vnf
-        vnf_status = self.compute_action_start(self, dc_label, compute_name, image,
+        vnf_status = self.compute_action_start( dc_label, compute_name, image,
                                   kwargs.get('network'),
                                   kwargs.get('command'))
         # start traffic source (with fixed IP address, not used for now...)
-        self.compute_action_start(self, dc_label, 'psrc', 'profile_source', [{'id':'output'}], None)
+        self.compute_action_start( dc_label, 'psrc', 'profile_source', [{'id':'output'}], None)
         # link vnf to traffic source
         DCNetwork = self.dcs.get(dc_label).net
-        DCNetwork.setChain()
-
+        DCNetwork.setChain('psrc', compute_name,
+                           vnf_src_interface='output',
+                           vnf_dst_interface=kwargs.get('input'),
+                           cmd='add-flow', weight=None)
 
+        ## SSM/SP tasks:
+        # get monitor data and analyze
 
+        # create table
 
+        ## VIM/dummy gatekeeper's tasks:
+        # remove vnfs and chain
 
 
     def datacenter_list(self):
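
The three trailing comment blocks above (get monitor data and analyze, create table, remove vnfs and chain) are still empty stubs in this commit. A minimal sketch of how they could be filled in, assuming a compute_action_stop counterpart to compute_action_start exists on this API and that the monitoring agent exposes a rate query (get_rate is a placeholder name, not an existing call):

    import time

    ## SSM/SP tasks:
    # get monitor data and analyze
    net = self.dcs.get(dc_label).net
    time.sleep(10)  # give the traffic source some time to generate load
    # get_rate() is an assumed monitoring call, not part of this commit
    rate = net.monitor_agent.get_rate(compute_name, kwargs.get('input'))

    # create table
    profile_table = {'vnf': compute_name, 'rx_rate': rate}

    ## VIM/dummy gatekeeper's tasks:
    # remove vnfs and chain
    net.setChain('psrc', compute_name,
                 vnf_src_interface='output',
                 vnf_dst_interface=kwargs.get('input'),
                 cmd='del-flows', weight=None)
    self.compute_action_stop(dc_label, compute_name)  # assumed stop helper
    self.compute_action_stop(dc_label, 'psrc')
    return profile_table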
src/emuvim/cli/compute.py
index 2636315..0cfb024 100755 (executable)
@@ -120,7 +120,7 @@ class ZeroRpcClient(object):
 parser = argparse.ArgumentParser(description='son-emu compute')
 parser.add_argument(
     "command",
-    choices=['start', 'stop', 'list', 'status'],
+    choices=['start', 'stop', 'list', 'status', 'profile'],
     help="Action to be executed.")
 parser.add_argument(
     "--datacenter", "-d", dest="datacenter",
src/emuvim/dcemulator/net.py
index ed40545..9c5a301 100755 (executable)
@@ -240,7 +240,7 @@ class DCNetwork(Dockernet):
                     #logging.info("conn_sw: {2},{0},{1}".format(link_dict[link]['src_port_id'], vnf_src_interface, connected_sw))
                     src_sw = connected_sw
 
-                    src_sw_inport = link_dict[link]['dst_port']
+                    src_sw_inport_nr = link_dict[link]['dst_port_nr']
                     break
 
         if vnf_dst_interface is None:
@@ -256,7 +256,7 @@ class DCNetwork(Dockernet):
                 if link_dict[link]['dst_port_id'] == vnf_dst_interface:
                     # found the right link and connected switch
                     dst_sw = connected_sw
-                    dst_sw_outport = link_dict[link]['src_port']
+                    dst_sw_outport_nr = link_dict[link]['src_port_nr']
                     break
 
 
@@ -274,7 +274,7 @@ class DCNetwork(Dockernet):
 
         #current_hop = vnf_src_name
         current_hop = src_sw
-        switch_inport = src_sw_inport
+        switch_inport_nr = src_sw_inport_nr
 
         for i in range(0,len(path)):
             current_node = self.getNodeByName(current_hop)
@@ -287,7 +287,7 @@ class DCNetwork(Dockernet):
             next_node = self.getNodeByName(next_hop)
 
             if next_hop == vnf_dst_name:
-                switch_outport = dst_sw_outport
+                switch_outport_nr = dst_sw_outport_nr
                 logging.info("end node reached: {0}".format(vnf_dst_name))
             elif not isinstance( next_node, OVSSwitch ):
                 logging.info("Next node: {0} is not a switch".format(next_hop))
@@ -295,19 +295,19 @@ class DCNetwork(Dockernet):
             else:
                 # take first link between switches by default
                 index_edge_out = 0
-                switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']
+                switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
 
 
-            #logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport, switch_outport))
+            #logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport_nr, switch_outport_nr))
             # set flow entry via ovs-ofctl
             # TODO use rest API of ryu to set flow entries to correct dpid
             # TODO this only sets port in to out, no match, so this will give trouble when multiple services are deployed...
             # TODO need multiple matches to do this (VLAN tags)
             if isinstance( current_node, OVSSwitch ):
-                match = 'in_port=%s' % switch_inport
+                match = 'in_port=%s' % switch_inport_nr
 
                 if cmd=='add-flow':
-                    action = 'action=%s' % switch_outport
+                    action = 'action=%s' % switch_outport_nr
                     s = ','
                     ofcmd = s.join([match,action])
                 elif cmd=='del-flows':
@@ -316,11 +316,11 @@ class DCNetwork(Dockernet):
                     ofcmd=''
 
                 current_node.dpctl(cmd, ofcmd)
-                logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport,
-                                                                                     switch_outport))
+                logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport_nr,
+                                                                                     switch_outport_nr))
             # take first link between switches by default
             if isinstance( next_node, OVSSwitch ):
-                switch_inport = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port']
+                switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
                 current_hop = next_hop
 
         return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
src/emuvim/dcemulator/node.py
index 09dca9c..d533f97 100755 (executable)
@@ -169,7 +169,6 @@ class Datacenter(object):
         # this results in 1 default interface with a default ip address
         for nw in network:
             # TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
-            logging.info('nw: {0}'.format(nw))
             self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
         # do bookkeeping
         self.containers[name] = d
src/emuvim/examples/monitoring_demo_topology.py
index a5cf0b2..fc515a8 100755 (executable)
@@ -29,7 +29,7 @@ def create_topology1():
     """
     1. Create a data center network object (DCNetwork) with monitoring enabled
     """
-    net = DCNetwork(monitor=True)
+    net = DCNetwork(monitor=False)
 
     """
     1b. add a monitoring agent to the DCNetwork
@@ -43,28 +43,28 @@ def create_topology1():
         first prototype)
     """
     dc1 = net.addDatacenter("datacenter1")
-    dc2 = net.addDatacenter("datacenter2")
-    dc3 = net.addDatacenter("long_data_center_name3")
-    dc4 = net.addDatacenter(
-        "datacenter4",
-        metadata={"mydata": "we can also add arbitrary metadata to each DC"})
+    #dc2 = net.addDatacenter("datacenter2")
+    #dc3 = net.addDatacenter("long_data_center_name3")
+    #dc4 = net.addDatacenter(
+    #    "datacenter4",
+    #    metadata={"mydata": "we can also add arbitrary metadata to each DC"})
 
     """
     3. You can add additional SDN switches for data center
        interconnections to the network.
     """
-    s1 = net.addSwitch("s1")
+    #s1 = net.addSwitch("s1")
 
     """
     4. Add links between your data centers and additional switches
       to define your topology.
        These links can use Mininet's features to limit bw, add delay or jitter.
     """
-    net.addLink(dc1, dc2, delay="10ms")
-    net.addLink(dc1, dc2)
-    net.addLink("datacenter1", s1, delay="20ms")
-    net.addLink(s1, dc3)
-    net.addLink(s1, "datacenter4")
+    #net.addLink(dc1, dc2, delay="10ms")
+    #net.addLink(dc1, dc2)
+    #net.addLink("datacenter1", s1, delay="20ms")
+    #net.addLink(s1, dc3)
+    #net.addLink(s1, "datacenter4")
 
 
     """
@@ -81,9 +81,9 @@ def create_topology1():
     zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
     # connect data centers to this endpoint
     zapi1.connectDatacenter(dc1)
-    zapi1.connectDatacenter(dc2)
-    zapi1.connectDatacenter(dc3)
-    zapi1.connectDatacenter(dc4)
+    #zapi1.connectDatacenter(dc2)
+    #zapi1.connectDatacenter(dc3)
+    #zapi1.connectDatacenter(dc4)
     # run API endpoint server (in another thread, don't block)
     zapi1.start()
 
@@ -96,10 +96,10 @@ def create_topology1():
          This will look like a real-world multi PoP/data center deployment
          from the perspective of an orchestrator.
     """
-    zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
-    zapi2.connectDatacenter(dc3)
-    zapi2.connectDatacenter(dc4)
-    zapi2.start()
+    #zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
+    #zapi2.connectDatacenter(dc3)
+    #zapi2.connectDatacenter(dc4)
+    #zapi2.start()
 
     """
     6. Finally we are done and can start our network (the emulator).