def __init__(self, dcs):
self.dcs = dcs
- def compute_action_start(self, dc_label, compute_name, image, network=None, command=None):
+ def compute_action_start(self, dc_label, compute_name, image, network, command):
"""
- Start a new compute instance: A docker container
+ Start a new compute instance: A docker container (note: zerorpc does not support keyword arguments)
:param dc_label: name of the DC
:param compute_name: compute container name
:param image: image name
try:
c = self.dcs.get(dc_label).startCompute(
compute_name, image=image, command=command, network=network)
- return str(c.name)
+ #return str(c.name)
+ # return docker inspect dict
+ return c.getStatus()
except Exception as ex:
logging.exception("RPC error.")
return ex.message
logging.exception("RPC error.")
return ex.message
+ def compute_profile(self, dc_label, compute_name, image, kwargs):
+ # note: zerorpc does not support keyword arguments
+
+ ## VIM/dummy gatekeeper's tasks:
+ # start vnf
+ vnf_status = self.compute_action_start( dc_label, compute_name, image,
+ kwargs.get('network'),
+ kwargs.get('command'))
+ # start traffic source (with fixed ip address, no use for now...)
+ self.compute_action_start( dc_label, 'psrc', 'profile_source', [{'id':'output'}], None)
+ # link vnf to traffic source
+ DCNetwork = self.dcs.get(dc_label).net
+ DCNetwork.setChain('psrc', compute_name,
+ vnf_src_interface='output',
+ vnf_dst_interface=kwargs.get('input'),
+ cmd='add-flow', weight=None)
+
+ ## SSM/SP tasks:
+ # get monitor data and analyze
+
+ # create table
+
+ ## VIM/dummy gatekeeper's tasks:
+ # remove vnfs and chain
+
+
def datacenter_list(self):
logging.debug("RPC CALL: datacenter list")
try:
except Exception as ex:
logging.exception("RPC error.")
return ex.message
+
+'''
+if __name__ == "__main__":
+ test = MultiDatacenterApi({})
+ test.compute_profile('dc1','vnf1', 'image',network='',command='test',other='other')
+'''
+
logging.exception("RPC error.")
return ex.message
+
+
args.get("datacenter"),
args.get("name"),
args.get("image"),
- network=nw_list,
- command=args.get("docker_command")
+ nw_list,
+ args.get("docker_command")
)
pp.pprint(r)
args.get("datacenter"), args.get("name"))
pp.pprint(r)
+ def profile(self, args):
+ nw_list = list()
+ if args.get("network") is not None:
+ nw_list = self._parse_network(args.get("network"))
+
+ params = self._create_dict(
+ network=nw_list,
+ command=args.get("docker_command"),
+ input=args.get("input"),
+ output=args.get("output"))
+
+ r = self.c.compute_profile(
+ args.get("datacenter"),
+ args.get("name"),
+ args.get("image"),
+ params
+ )
+ pp.pprint(r)
+
+ def _create_dict(self, **kwargs):
+ return kwargs
+
def _parse_network(self, network_str):
'''
parse the options for all network interfaces of the vnf
parser = argparse.ArgumentParser(description='son-emu compute')
parser.add_argument(
"command",
- choices=['start', 'stop', 'list', 'status'],
+ choices=['start', 'stop', 'list', 'status', 'profile'],
help="Action to be executed.")
parser.add_argument(
"--datacenter", "-d", dest="datacenter",
"--net", dest="network",
help="Network properties of compute instance e.g. \
'10.0.0.123/8' or '10.0.0.123/8,11.0.0.123/24' for multiple interfaces.")
+parser.add_argument(
+ "--input", "-in", dest="input",
+ help="input interface of the vnf to profile")
+parser.add_argument(
+ "--output", "-out", dest="output",
+ help="output interface of the vnf to profile")
def main(argv):
"--metric", "-m", dest="metric",\r
help="tx_bytes, rx_bytes, tx_packets, rx_packets")\r
\r
+\r
def main(argv):\r
#print "This is the son-emu monitor CLI."\r
#print "Arguments: %s" % str(argv)\r
if isinstance(node1, Docker):
if "id" in params["params1"]:
node1_port_id = params["params1"]["id"]
+ node1_port_name = link.intf1.name
node2_port_id = node2.ports[link.intf2]
if isinstance(node2, Docker):
if "id" in params["params2"]:
node2_port_id = params["params2"]["id"]
-
+ node2_port_name = link.intf2.name
# add edge and assigned port number to graph in both directions between node1 and node2
attr_dict[attr] = attr_number
- attr_dict2 = {'src_port_id': node1_port_id, 'src_port': node1.ports[link.intf1],
- 'dst_port_id': node2_port_id, 'dst_port': node2.ports[link.intf2]}
+ attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
+ 'src_port_name': node1_port_name,
+ 'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
+ 'dst_port_name': node2_port_name}
attr_dict2.update(attr_dict)
self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)
- attr_dict2 = {'src_port_id': node2_port_id, 'src_port': node2.ports[link.intf2],
- 'dst_port_id': node1_port_id, 'dst_port': node1.ports[link.intf1]}
+ attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
+ 'src_port_name': node2_port_name,
+ 'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
+ 'dst_port_name': node1_port_name}
attr_dict2.update(attr_dict)
self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)
#logging.info("conn_sw: {2},{0},{1}".format(link_dict[link]['src_port_id'], vnf_src_interface, connected_sw))
src_sw = connected_sw
- src_sw_inport = link_dict[link]['dst_port']
+ src_sw_inport_nr = link_dict[link]['dst_port_nr']
break
if vnf_dst_interface is None:
if link_dict[link]['dst_port_id'] == vnf_dst_interface:
# found the right link and connected switch
dst_sw = connected_sw
- dst_sw_outport = link_dict[link]['src_port']
+ dst_sw_outport_nr = link_dict[link]['src_port_nr']
break
#current_hop = vnf_src_name
current_hop = src_sw
- switch_inport = src_sw_inport
+ switch_inport_nr = src_sw_inport_nr
for i in range(0,len(path)):
current_node = self.getNodeByName(current_hop)
next_node = self.getNodeByName(next_hop)
if next_hop == vnf_dst_name:
- switch_outport = dst_sw_outport
+ switch_outport_nr = dst_sw_outport_nr
logging.info("end node reached: {0}".format(vnf_dst_name))
elif not isinstance( next_node, OVSSwitch ):
logging.info("Next node: {0} is not a switch".format(next_hop))
else:
# take first link between switches by default
index_edge_out = 0
- switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']
+ switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
- #logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport, switch_outport))
+ #logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport_nr, switch_outport_nr))
# set of entry via ovs-ofctl
# TODO use rest API of ryu to set flow entries to correct dpid
# TODO this only sets port in to out, no match, so this will give trouble when multiple services are deployed...
# TODO need multiple matches to do this (VLAN tags)
if isinstance( current_node, OVSSwitch ):
- match = 'in_port=%s' % switch_inport
+ match = 'in_port=%s' % switch_inport_nr
if cmd=='add-flow':
- action = 'action=%s' % switch_outport
+ action = 'action=%s' % switch_outport_nr
s = ','
ofcmd = s.join([match,action])
elif cmd=='del-flows':
ofcmd=''
current_node.dpctl(cmd, ofcmd)
- logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport,
- switch_outport))
+ logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport_nr,
+ switch_outport_nr))
# take first link between switches by default
if isinstance( next_node, OVSSwitch ):
- switch_inport = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port']
+ switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
current_hop = next_hop
return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
ryu_of_port = '6653'
ryu_cmd = 'ryu-manager'
FNULL = open("/tmp/ryu.log", 'w')
- #self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+ self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
# no learning switch
- self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+ #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
time.sleep(1)
def stopRyu(self):
status = {}
status["name"] = self.name
status["network"] = self.getNetworkStatus()
+ status["docker_network"] = self.dcinfo['NetworkSettings']['IPAddress']
status["image"] = self.dimage
status["flavor_name"] = self.flavor_name
status["cpu_quota"] = self.cpu_quota
# this results in 1 default interface with a default ip address
for nw in network:
# TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
- self.net.addLink(d, self.switch, params1=nw, cls=Link)
+ self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
# do bookkeeping
self.containers[name] = d
return d # we might use UUIDs for naming later on
"""
1. Create a data center network object (DCNetwork) with monitoring enabled
"""
- net = DCNetwork(monitor=True)
+ net = DCNetwork(monitor=False)
"""
1b. add a monitoring agent to the DCNetwork
first prototype)
"""
dc1 = net.addDatacenter("datacenter1")
- dc2 = net.addDatacenter("datacenter2")
- dc3 = net.addDatacenter("long_data_center_name3")
- dc4 = net.addDatacenter(
- "datacenter4",
- metadata={"mydata": "we can also add arbitrary metadata to each DC"})
+ #dc2 = net.addDatacenter("datacenter2")
+ #dc3 = net.addDatacenter("long_data_center_name3")
+ #dc4 = net.addDatacenter(
+ # "datacenter4",
+ # metadata={"mydata": "we can also add arbitrary metadata to each DC"})
"""
3. You can add additional SDN switches for data center
interconnections to the network.
"""
- s1 = net.addSwitch("s1")
+ #s1 = net.addSwitch("s1")
"""
4. Add links between your data centers and additional switches
to define you topology.
These links can use Mininet's features to limit bw, add delay or jitter.
"""
- net.addLink(dc1, dc2, delay="10ms")
- net.addLink(dc1, dc2)
- net.addLink("datacenter1", s1, delay="20ms")
- net.addLink(s1, dc3)
- net.addLink(s1, "datacenter4")
+ #net.addLink(dc1, dc2, delay="10ms")
+ #net.addLink(dc1, dc2)
+ #net.addLink("datacenter1", s1, delay="20ms")
+ #net.addLink(s1, dc3)
+ #net.addLink(s1, "datacenter4")
"""
zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
# connect data centers to this endpoint
zapi1.connectDatacenter(dc1)
- zapi1.connectDatacenter(dc2)
- zapi1.connectDatacenter(dc3)
- zapi1.connectDatacenter(dc4)
+ #zapi1.connectDatacenter(dc2)
+ #zapi1.connectDatacenter(dc3)
+ #zapi1.connectDatacenter(dc4)
# run API endpoint server (in another thread, don't block)
zapi1.start()
This will look like a real-world multi PoP/data center deployment
from the perspective of an orchestrator.
"""
- zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
- zapi2.connectDatacenter(dc3)
- zapi2.connectDatacenter(dc4)
- zapi2.start()
+ #zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
+ #zapi2.connectDatacenter(dc3)
+ #zapi2.connectDatacenter(dc4)
+ #zapi2.start()
"""
6. Finally we are done and can start our network (the emulator).