Merge pull request #94 from stevenvanrossem/master
author    stevenvanrossem <steven.vanrossem@intec.ugent.be>
Wed, 4 May 2016 15:44:59 +0000 (17:44 +0200)
committer stevenvanrossem <steven.vanrossem@intec.ugent.be>
Wed, 4 May 2016 15:44:59 +0000 (17:44 +0200)
prepare compute functions for monitor functionality

src/emuvim/api/zerorpc/compute.py
src/emuvim/api/zerorpc/network.py
src/emuvim/cli/compute.py
src/emuvim/cli/monitor.py
src/emuvim/dcemulator/net.py
src/emuvim/dcemulator/node.py
src/emuvim/examples/monitoring_demo_topology.py

src/emuvim/api/zerorpc/compute.py
index f5d0799..9d59183 100644 (file)
@@ -56,9 +56,9 @@ class MultiDatacenterApi(object):
     def __init__(self, dcs):
         self.dcs = dcs
 
-    def compute_action_start(self, dc_label, compute_name, image, network=None, command=None):
+    def compute_action_start(self, dc_label, compute_name, image, network, command):
         """
-        Start a new compute instance: A docker container
+        Start a new compute instance: A docker container (note: zerorpc does not support keyword arguments)
         :param dc_label: name of the DC
         :param compute_name: compute container name
         :param image: image name
@@ -71,7 +71,9 @@ class MultiDatacenterApi(object):
         try:
             c = self.dcs.get(dc_label).startCompute(
                 compute_name, image=image, command=command, network=network)
-            return str(c.name)
+            #return str(c.name)
+            # return docker inspect dict
+            return c.getStatus()
         except Exception as ex:
             logging.exception("RPC error.")
             return ex.message
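
The keyword defaults were dropped because zerorpc marshals calls as msgpack messages and only transports positional arguments. A minimal client-side sketch of the new calling convention (address, container and image names are illustrative; the port matches the ZeroRpcApiEndpoint in the example topology below):

    import zerorpc

    # Hypothetical client for the emulator's compute API.
    c = zerorpc.Client()
    c.connect("tcp://127.0.0.1:4242")

    # All arguments are positional now: pass explicit placeholders where the
    # old keyword defaults (network=None, command=None) used to apply.
    status = c.compute_action_start("datacenter1", "vnf1", "some_image",
                                    [{"id": "input"}], None)
    # Returns the container's full getStatus() dict instead of just its name.
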
@@ -111,6 +113,32 @@ class MultiDatacenterApi(object):
             logging.exception("RPC error.")
             return ex.message
 
+    def compute_profile(self, dc_label, compute_name, image, kwargs):
+        # note: zerorpc does not support keyword arguments
+
+        ## VIM/dummy gatekeeper's tasks:
+        # start vnf
+        vnf_status = self.compute_action_start( dc_label, compute_name, image,
+                                  kwargs.get('network'),
+                                  kwargs.get('command'))
+        # start traffic source (with fixed ip address, no use for now...)
+        self.compute_action_start( dc_label, 'psrc', 'profile_source', [{'id':'output'}], None)
+        # link vnf to traffic source
+        DCNetwork = self.dcs.get(dc_label).net
+        DCNetwork.setChain('psrc', compute_name,
+                           vnf_src_interface='output',
+                           vnf_dst_interface=kwargs.get('input'),
+                           cmd='add-flow', weight=None)
+
+        ## SSM/SP tasks:
+        # get monitor data and analyze
+
+        # create table
+
+        ## VIM/dummy gatekeeper's tasks:
+        # remove vnfs and chain
+
+
     def datacenter_list(self):
         logging.debug("RPC CALL: datacenter list")
         try:
@@ -126,3 +154,10 @@ class MultiDatacenterApi(object):
         except Exception as ex:
             logging.exception("RPC error.")
             return ex.message
+
+'''
+if __name__ == "__main__":
+    test = MultiDatacenterApi({})
+    test.compute_profile('dc1', 'vnf1', 'image', {'network': '', 'command': 'test', 'other': 'other'})
+'''
+
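
Read together with the CLI changes below, compute_profile packs its would-be keyword arguments into a plain dict on the client side, again because zerorpc cannot carry them natively. A hedged sketch of the round trip (endpoint and names illustrative):

    import zerorpc

    c = zerorpc.Client()
    c.connect("tcp://127.0.0.1:4242")

    # The server unpacks this dict with kwargs.get(...): 'input' names the
    # profiled VNF's ingress interface, which gets chained to the 'output'
    # interface of the auto-started 'psrc' traffic source container.
    params = {
        "network": [{"id": "input"}, {"id": "output"}],
        "command": None,
        "input": "input",
        "output": "output",
    }
    r = c.compute_profile("datacenter1", "vnf1", "some_image", params)
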
src/emuvim/api/zerorpc/network.py
index 116d13b..b1e5d41 100644 (file)
@@ -117,3 +117,5 @@ class DCNetworkApi(object):
             logging.exception("RPC error.")
             return ex.message
 
+
+
src/emuvim/cli/compute.py
index f2fdc62..0cfb024 100755 (executable)
@@ -35,8 +35,8 @@ class ZeroRpcClient(object):
             args.get("datacenter"),
             args.get("name"),
             args.get("image"),
-            network=nw_list,
-            command=args.get("docker_command")
+            nw_list,
+            args.get("docker_command")
             )
         pp.pprint(r)
 
@@ -79,6 +79,28 @@ class ZeroRpcClient(object):
             args.get("datacenter"), args.get("name"))
         pp.pprint(r)
 
+    def profile(self, args):
+        nw_list = list()
+        if args.get("network") is not None:
+            nw_list = self._parse_network(args.get("network"))
+
+        params = self._create_dict(
+            network=nw_list,
+            command=args.get("docker_command"),
+            input=args.get("input"),
+            output=args.get("output"))
+
+        r = self.c.compute_profile(
+            args.get("datacenter"),
+            args.get("name"),
+            args.get("image"),
+            params
+        )
+        pp.pprint(r)
+
+    def _create_dict(self, **kwargs):
+        return kwargs
+
     def _parse_network(self, network_str):
         '''
         parse the options for all network interfaces of the vnf
@@ -98,7 +120,7 @@ class ZeroRpcClient(object):
 parser = argparse.ArgumentParser(description='son-emu compute')
 parser.add_argument(
     "command",
-    choices=['start', 'stop', 'list', 'status'],
+    choices=['start', 'stop', 'list', 'status', 'profile'],
     help="Action to be executed.")
 parser.add_argument(
     "--datacenter", "-d", dest="datacenter",
@@ -116,6 +138,12 @@ parser.add_argument(
     "--net", dest="network",
     help="Network properties of compute instance e.g. \
           '10.0.0.123/8' or '10.0.0.123/8,11.0.0.123/24' for multiple interfaces.")
+parser.add_argument(
+    "--input", "-in", dest="input",
+    help="input interface of the vnf to profile")
+parser.add_argument(
+    "--output", "-out", dest="output",
+    help="output interface of the vnf to profile")
 
 
 def main(argv):
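
For completeness, a hedged example of driving the new subcommand through main() (this assumes the existing --name/--image flags elided from the hunks above and that main() feeds argv to parser.parse_args(); the --net value follows _parse_network's "(id=...,ip=...)" convention):

    # Roughly what a CLI invocation would pass down, e.g. something like:
    #   son-emu-cli compute profile -d datacenter1 -n vnf1 -i some_image \
    #       --net "(id=input),(id=output)" --input input --output output
    main(["profile",
          "--datacenter", "datacenter1",
          "--name", "vnf1",
          "--image", "some_image",
          "--net", "(id=input),(id=output)",
          "--input", "input",
          "--output", "output"])
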
src/emuvim/cli/monitor.py
index 14cce08..3b667f7 100755 (executable)
@@ -69,6 +69,7 @@ parser.add_argument(
     "--metric", "-m", dest="metric",\r
     help="tx_bytes, rx_bytes, tx_packets, rx_packets")\r
 \r
+\r
 def main(argv):\r
     #print "This is the son-emu monitor CLI."\r
     #print "Arguments: %s" % str(argv)\r
src/emuvim/dcemulator/net.py
index 4028a26..633ec6b 100755 (executable)
@@ -124,12 +124,13 @@ class DCNetwork(Dockernet):
         if isinstance(node1, Docker):
             if "id" in params["params1"]:
                 node1_port_id = params["params1"]["id"]
+        node1_port_name = link.intf1.name
 
         node2_port_id = node2.ports[link.intf2]
         if isinstance(node2, Docker):
             if "id" in params["params2"]:
                 node2_port_id = params["params2"]["id"]
-
+        node2_port_name = link.intf2.name
 
 
         # add edge and assigned port number to graph in both directions between node1 and node2
@@ -150,13 +151,17 @@ class DCNetwork(Dockernet):
             attr_dict[attr] = attr_number
 
 
-        attr_dict2 = {'src_port_id': node1_port_id, 'src_port': node1.ports[link.intf1],
-                     'dst_port_id': node2_port_id, 'dst_port': node2.ports[link.intf2]}
+        attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
+                      'src_port_name': node1_port_name,
+                     'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
+                      'dst_port_name': node2_port_name}
         attr_dict2.update(attr_dict)
         self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)
 
-        attr_dict2 = {'src_port_id': node2_port_id, 'src_port': node2.ports[link.intf2],
-                     'dst_port_id': node1_port_id, 'dst_port': node1.ports[link.intf1]}
+        attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
+                      'src_port_name': node2_port_name,
+                     'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
+                      'dst_port_name': node1_port_name}
         attr_dict2.update(attr_dict)
         self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)
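
Since both directions of every link now carry the port id, number and name, other components can read any of the three straight off the graph. A small sketch (node names illustrative; DCNetwork_graph is a networkx MultiDiGraph, so parallel links between the same nodes are indexed 0, 1, ...):

    edge = net.DCNetwork_graph["vnf1"]["dc1.s1"][0]
    print(edge["src_port_id"])    # interface id given at startCompute, e.g. 'output'
    print(edge["src_port_nr"])    # port number, used below as OpenFlow in_port/action
    print(edge["src_port_name"])  # OS-level name taken from link.intf1.name
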
 
@@ -235,7 +240,7 @@ class DCNetwork(Dockernet):
                     #logging.info("conn_sw: {2},{0},{1}".format(link_dict[link]['src_port_id'], vnf_src_interface, connected_sw))
                     src_sw = connected_sw
 
-                    src_sw_inport = link_dict[link]['dst_port']
+                    src_sw_inport_nr = link_dict[link]['dst_port_nr']
                     break
 
         if vnf_dst_interface is None:
@@ -251,7 +256,7 @@ class DCNetwork(Dockernet):
                 if link_dict[link]['dst_port_id'] == vnf_dst_interface:
                     # found the right link and connected switch
                     dst_sw = connected_sw
-                    dst_sw_outport = link_dict[link]['src_port']
+                    dst_sw_outport_nr = link_dict[link]['src_port_nr']
                     break
 
 
@@ -269,7 +274,7 @@ class DCNetwork(Dockernet):
 
         #current_hop = vnf_src_name
         current_hop = src_sw
-        switch_inport = src_sw_inport
+        switch_inport_nr = src_sw_inport_nr
 
         for i in range(0,len(path)):
             current_node = self.getNodeByName(current_hop)
@@ -282,7 +287,7 @@ class DCNetwork(Dockernet):
             next_node = self.getNodeByName(next_hop)
 
             if next_hop == vnf_dst_name:
-                switch_outport = dst_sw_outport
+                switch_outport_nr = dst_sw_outport_nr
                 logging.info("end node reached: {0}".format(vnf_dst_name))
             elif not isinstance( next_node, OVSSwitch ):
                 logging.info("Next node: {0} is not a switch".format(next_hop))
@@ -290,19 +295,19 @@ class DCNetwork(Dockernet):
             else:
                 # take first link between switches by default
                 index_edge_out = 0
-                switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']
+                switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
 
 
-            #logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport, switch_outport))
+            #logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport_nr, switch_outport_nr))
             # set of entry via ovs-ofctl
             # TODO use rest API of ryu to set flow entries to correct dpid
             # TODO this only sets port in to out, no match, so this will give trouble when multiple services are deployed...
             # TODO need multiple matches to do this (VLAN tags)
             if isinstance( current_node, OVSSwitch ):
-                match = 'in_port=%s' % switch_inport
+                match = 'in_port=%s' % switch_inport_nr
 
                 if cmd=='add-flow':
-                    action = 'action=%s' % switch_outport
+                    action = 'action=%s' % switch_outport_nr
                     s = ','
                     ofcmd = s.join([match,action])
                 elif cmd=='del-flows':
@@ -311,11 +316,11 @@ class DCNetwork(Dockernet):
                     ofcmd=''
 
                 current_node.dpctl(cmd, ofcmd)
-                logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport,
-                                                                                     switch_outport))
+                logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport_nr,
+                                                                                     switch_outport_nr))
             # take first link between switches by default
             if isinstance( next_node, OVSSwitch ):
-                switch_inport = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port']
+                switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
                 current_hop = next_hop
 
         return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
@@ -333,9 +338,9 @@ class DCNetwork(Dockernet):
         ryu_of_port = '6653'
         ryu_cmd = 'ryu-manager'
         FNULL = open("/tmp/ryu.log", 'w')
-        #self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
         # no learning switch
-        self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+        #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
         time.sleep(1)
 
     def stopRyu(self):
src/emuvim/dcemulator/node.py
index 3a4fbc2..d533f97 100755 (executable)
@@ -49,6 +49,7 @@ class EmulatorCompute(Docker):
         status = {}
         status["name"] = self.name
         status["network"] = self.getNetworkStatus()
+        status["docker_network"] = self.dcinfo['NetworkSettings']['IPAddress']
         status["image"] = self.dimage
         status["flavor_name"] = self.flavor_name
         status["cpu_quota"] = self.cpu_quota
@@ -168,7 +169,7 @@ class Datacenter(object):
         # this results in 1 default interface with a default ip address
         for nw in network:
             # TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
-            self.net.addLink(d, self.switch, params1=nw, cls=Link)
+            self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
         # do bookkeeping
         self.containers[name] = d
         return d  # we might use UUIDs for naming later on
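
With the new docker_network key, the status dict handed back to RPC callers might look roughly like this trimmed, made-up excerpt:

    status_example = {
        "name": "vnf1",
        "network": "...",                # per-interface info from getNetworkStatus()
        "docker_network": "172.17.0.2",  # IP from docker inspect's NetworkSettings
        "image": "some_image",
        "flavor_name": None,
        "cpu_quota": None,
    }
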
src/emuvim/examples/monitoring_demo_topology.py
index a5cf0b2..fc515a8 100755 (executable)
@@ -29,7 +29,7 @@ def create_topology1():
     """
     1. Create a data center network object (DCNetwork) with monitoring enabled
     """
-    net = DCNetwork(monitor=True)
+    net = DCNetwork(monitor=False)
 
     """
     1b. add a monitoring agent to the DCNetwork
@@ -43,28 +43,28 @@ def create_topology1():
         first prototype)
     """
     dc1 = net.addDatacenter("datacenter1")
-    dc2 = net.addDatacenter("datacenter2")
-    dc3 = net.addDatacenter("long_data_center_name3")
-    dc4 = net.addDatacenter(
-        "datacenter4",
-        metadata={"mydata": "we can also add arbitrary metadata to each DC"})
+    #dc2 = net.addDatacenter("datacenter2")
+    #dc3 = net.addDatacenter("long_data_center_name3")
+    #dc4 = net.addDatacenter(
+    #    "datacenter4",
+    #    metadata={"mydata": "we can also add arbitrary metadata to each DC"})
 
     """
     3. You can add additional SDN switches for data center
        interconnections to the network.
     """
-    s1 = net.addSwitch("s1")
+    #s1 = net.addSwitch("s1")
 
     """
     4. Add links between your data centers and additional switches
        to define your topology.
        These links can use Mininet's features to limit bw, add delay or jitter.
     """
-    net.addLink(dc1, dc2, delay="10ms")
-    net.addLink(dc1, dc2)
-    net.addLink("datacenter1", s1, delay="20ms")
-    net.addLink(s1, dc3)
-    net.addLink(s1, "datacenter4")
+    #net.addLink(dc1, dc2, delay="10ms")
+    #net.addLink(dc1, dc2)
+    #net.addLink("datacenter1", s1, delay="20ms")
+    #net.addLink(s1, dc3)
+    #net.addLink(s1, "datacenter4")
 
 
     """
@@ -81,9 +81,9 @@ def create_topology1():
     zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
     # connect data centers to this endpoint
     zapi1.connectDatacenter(dc1)
-    zapi1.connectDatacenter(dc2)
-    zapi1.connectDatacenter(dc3)
-    zapi1.connectDatacenter(dc4)
+    #zapi1.connectDatacenter(dc2)
+    #zapi1.connectDatacenter(dc3)
+    #zapi1.connectDatacenter(dc4)
     # run API endpoint server (in another thread, don't block)
     zapi1.start()
 
@@ -96,10 +96,10 @@ def create_topology1():
          This will look like a real-world multi PoP/data center deployment
          from the perspective of an orchestrator.
     """
-    zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
-    zapi2.connectDatacenter(dc3)
-    zapi2.connectDatacenter(dc4)
-    zapi2.start()
+    #zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
+    #zapi2.connectDatacenter(dc3)
+    #zapi2.connectDatacenter(dc4)
+    #zapi2.start()
 
     """
     6. Finally we are done and can start our network (the emulator).