diff --git a/src/emuvim/dcemulator/net.py b/src/emuvim/dcemulator/net.py
index 248dc34..faf758c 100755
--- a/src/emuvim/dcemulator/net.py
+++ b/src/emuvim/dcemulator/net.py
@@ -1,55 +1,86 @@
 """
-Distributed Cloud Emulator (dcemulator)
-(c) 2015 by Manuel Peuster
+Copyright (c) 2015 SONATA-NFV and Paderborn University
+ALL RIGHTS RESERVED.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
+nor the names of its contributors may be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+This work has been performed in the framework of the SONATA project,
+funded by the European Commission under Grant number 671517 through
+the Horizon 2020 and 5G-PPP programmes. The authors would like to
+acknowledge the contributions of their colleagues of the SONATA
+partner consortium (www.sonata-nfv.eu).
 """
 import logging
 import site
 import time
 from subprocess import Popen
-import os
 import re
-import urllib2
-from functools import partial
+import requests
 
-from mininet.net import Dockernet
+from mininet.net import Containernet
 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
 from mininet.cli import CLI
 from mininet.link import TCLink
+from mininet.clean import cleanup
 import networkx as nx
 from emuvim.dcemulator.monitoring import DCNetworkMonitor
 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
 
-class DCNetwork(Dockernet):
+LOG = logging.getLogger("dcemulator.net")
+LOG.setLevel(logging.DEBUG)
+
+class DCNetwork(Containernet):
     """
-    Wraps the original Mininet/Dockernet class and provides
+    Wraps the original Mininet/Containernet class and provides
     methods to add data centers, switches, etc.
 
     This class is used by topology definition scripts.
     """
 
     def __init__(self, controller=RemoteController, monitor=False,
+                 enable_learning = True,   # in case of RemoteController (Ryu), learning switch behavior can be turned off/on
                  dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                  dc_emulation_max_mem=512,  # emulation max mem in MB
                  **kwargs):
         """
-        Create an extended version of a Dockernet network
+        Create an extended version of a Containernet network
         :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
         :param kwargs: pass-through for Mininet parameters
         :return:
         """
+        # members
         self.dcs = {}
+        self.ryu_process = None
+
+        # always cleanup environment before we start the emulator
+        self.killRyu()
+        cleanup()
 
         # call original Docker.__init__ and setup default controller
-        Dockernet.__init__(
-            self, switch=OVSKernelSwitch, **kwargs)
+        Containernet.__init__(
+            self, switch=OVSKernelSwitch, controller=controller, **kwargs)
 
         # Ryu management
-        self.ryu_process = None
         if controller == RemoteController:
             # start Ryu controller
-            self.startRyu()
+            self.startRyu(learning_switch=enable_learning)
 
         # add the specified controller
         self.addController('c0', controller=controller)
@@ -61,9 +92,10 @@ class DCNetwork(Dockernet):
         self.vlans = range(4096)[::-1]
 
         # link to Ryu REST_API
-        ryu_ip = '0.0.0.0'
+        ryu_ip = 'localhost'
         ryu_port = '8080'
         self.ryu_REST_api = 'http://{0}:{1}'.format(ryu_ip, ryu_port)
+        self.RyuSession = requests.Session()
 
         # monitoring agent
         if monitor:
@@ -85,7 +117,7 @@ class DCNetwork(Dockernet):
         dc.net = self  # set reference to network
         self.dcs[label] = dc
         dc.create()  # finally create the data center in our Mininet instance
-        logging.info("added data center: %s" % label)
+        LOG.info("added data center: %s" % label)
         return dc
 
     def addLink(self, node1, node2, **params):
@@ -95,7 +127,7 @@ class DCNetwork(Dockernet):
         """
         assert node1 is not None
         assert node2 is not None
-        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
+        LOG.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
         # ensure type of node1
         if isinstance( node1, basestring ):
             if node1 in self.dcs:
@@ -121,11 +153,11 @@ class DCNetwork(Dockernet):
                 params["params2"]["ip"] = self.getNextIp()
         # ensure that we allow TCLinks between data centers
         # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
-        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
+        # see Containernet issue: https://github.com/mpeuster/containernet/issues/3
         if "cls" not in params:
             params["cls"] = TCLink
 
-        link = Dockernet.addLink(self, node1, node2, **params)
+        link = Containernet.addLink(self, node1, node2, **params)
 
         # try to give container interfaces a default id
         node1_port_id = node1.ports[link.intf1]
@@ -143,7 +175,7 @@ class DCNetwork(Dockernet):
 
         # add edge and assigned port number to graph in both directions between node1 and node2
         # port_id: id given in descriptor (if available, otherwise same as port)
-        # port: portnumber assigned by Dockernet
+        # port: port number assigned by Containernet
         attr_dict = {}
         # possible weight metrics allowed by TCLink class:
@@ -180,14 +212,14 @@ class DCNetwork(Dockernet):
         Wrapper for addDocker method to use custom container class.
         """
         self.DCNetwork_graph.add_node(label)
-        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)
+        return Containernet.addDocker(self, label, cls=EmulatorCompute, **params)
 
     def removeDocker( self, label, **params ):
         """
         Wrapper for removeDocker method to update graph.
""" self.DCNetwork_graph.remove_node(label) - return Dockernet.removeDocker(self, label, **params) + return Containernet.removeDocker(self, label, **params) def addSwitch( self, name, add_to_graph=True, **params ): """ @@ -195,7 +227,7 @@ class DCNetwork(Dockernet): """ if add_to_graph: self.DCNetwork_graph.add_node(name) - return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params) + return Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params) def getAllContainers(self): """ @@ -210,7 +242,7 @@ class DCNetwork(Dockernet): # start for dc in self.dcs.itervalues(): dc.start() - Dockernet.start(self) + Containernet.start(self) def stop(self): @@ -219,35 +251,63 @@ class DCNetwork(Dockernet): self.monitor_agent.stop() # stop emulator net - Dockernet.stop(self) + Containernet.stop(self) # stop Ryu controller - self.stopRyu() + self.killRyu() def CLI(self): CLI(self) - # to remove chain do setChain( src, dst, cmd='del-flows') def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs): + """ + Chain 2 vnf interfaces together by installing the flowrules in the switches along their path. + Currently the path is found using the default networkx shortest path function. + Each chain gets a unique vlan id , so different chains wil not interfere. + + :param vnf_src_name: vnf name (string) + :param vnf_dst_name: vnf name (string) + :param vnf_src_interface: source interface name (string) + :param vnf_dst_interface: destination interface name (string) + :param cmd: 'add-flow' (default) to add a chain, 'del-flows' to remove a chain + :param cookie: cookie for the installed flowrules (can be used later as identifier for a set of installed chains) + :param match: custom match entry to be added to the flowrules (default: only in_port and vlan tag) + :param priority: custom flowrule priority + :param path: custom path between the two VNFs (list of switches) + :return: output log string + """ cmd = kwargs.get('cmd') if cmd == 'add-flow': ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs) if kwargs.get('bidirectional'): - return ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs) + if kwargs.get('path') is not None: + kwargs['path'] = list(reversed(kwargs.get('path'))) + ret = ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs) - elif cmd == 'del-flows': # TODO: del-flow to be implemented + elif cmd == 'del-flows': ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs) if kwargs.get('bidirectional'): - return ret + '\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs) + if kwargs.get('path') is not None: + kwargs['path'] = list(reversed(kwargs.get('path'))) + ret = ret + '\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs) else: - return "Command unknown" + ret = "Command unknown" + + return ret def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs): - # TODO: this needs to be cleaned up + src_sw = None + dst_sw = None + src_sw_inport_nr = 0 + dst_sw_outport_nr = 0 + + LOG.debug("call chainAddFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r", + vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface) + 
         #check if port is specified (vnf:port)
         if vnf_src_interface is None:
             # take first interface by default
@@ -258,10 +318,10 @@ class DCNetwork(Dockernet):
         for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
             link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
             for link in link_dict:
-                if link_dict[link]['src_port_id'] == vnf_src_interface:
+                if (link_dict[link]['src_port_id'] == vnf_src_interface or
+                    link_dict[link]['src_port_name'] == vnf_src_interface):  # Fix: we might also get interface names, e.g., from a son-emu-cli call
                     # found the right link and connected switch
                     src_sw = connected_sw
-
                     src_sw_inport_nr = link_dict[link]['dst_port_nr']
                     break
@@ -275,23 +335,30 @@ class DCNetwork(Dockernet):
         for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
             link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
             for link in link_dict:
-                if link_dict[link]['dst_port_id'] == vnf_dst_interface:
+                if link_dict[link]['dst_port_id'] == vnf_dst_interface or \
+                        link_dict[link]['dst_port_name'] == vnf_dst_interface:  # Fix: we might also get interface names, e.g., from a son-emu-cli call
                     # found the right link and connected switch
                     dst_sw = connected_sw
                     dst_sw_outport_nr = link_dict[link]['src_port_nr']
                     break
-
-        # get shortest path
-        try:
-            # returns the first found shortest path
-            # if all shortest paths are wanted, use: all_shortest_paths
-            path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
-        except:
-            logging.info("No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name))
-            return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
-
-        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
+        path = kwargs.get('path')
+        if path is None:
+            # get shortest path
+            try:
+                # returns the first found shortest path
+                # if all shortest paths are wanted, use: all_shortest_paths
+                path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+            except:
+                LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
+                    vnf_src_name, vnf_dst_name, src_sw, dst_sw))
+                LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
+                LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
+                for e, v in self.DCNetwork_graph.edges():
+                    LOG.debug("%r" % self.DCNetwork_graph[e][v])
+                return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+
+        LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
 
         current_hop = src_sw
         switch_inport_nr = src_sw_inport_nr
@@ -306,19 +373,19 @@ class DCNetwork(Dockernet):
         for i in range(0,len(path)):
             current_node = self.getNodeByName(current_hop)
 
-            if path.index(current_hop) < len(path)-1:
-                next_hop = path[path.index(current_hop)+1]
+            if i < len(path) - 1:
+                next_hop = path[i + 1]
             else:
-                #last switch reached
+                # last switch reached
                 next_hop = vnf_dst_name
 
             next_node = self.getNodeByName(next_hop)
 
             if next_hop == vnf_dst_name:
                 switch_outport_nr = dst_sw_outport_nr
-                logging.info("end node reached: {0}".format(vnf_dst_name))
+                LOG.info("end node reached: {0}".format(vnf_dst_name))
             elif not isinstance( next_node, OVSSwitch ):
-                logging.info("Next node: {0} is not a switch".format(next_hop))
+                LOG.info("Next node: {0} is not a switch".format(next_hop))
                 return "Next node: {0} is not a switch".format(next_hop)
             else:
                 # take first link between switches by default
@@ -330,11 +397,16 @@ class DCNetwork(Dockernet):
             if isinstance( current_node, OVSSwitch ):
                 kwargs['vlan'] = vlan
                 kwargs['path'] = path
-                kwargs['current_hop'] = current_hop
-                ## set flow entry via ovs-ofctl
-                #self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
-                ## set flow entry via ryu rest api
-                self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+                kwargs['pathindex'] = i
+
+                if self.controller == RemoteController:
+                    ## set flow entry via ryu rest api
+                    self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+                else:
+                    ## set flow entry via ovs-ofctl
+                    self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+
 
             # take first link between switches by default
             if isinstance( next_node, OVSSwitch ):
@@ -350,8 +422,10 @@ class DCNetwork(Dockernet):
         match_input = kwargs.get('match')
         cmd = kwargs.get('cmd')
         path = kwargs.get('path')
-        current_hop = kwargs.get('current_hop')
+        index = kwargs.get('pathindex')
+
         vlan = kwargs.get('vlan')
+        priority = kwargs.get('priority')
 
         s = ','
         if match_input:
@@ -359,9 +433,11 @@ class DCNetwork(Dockernet):
         flow = {}
         flow['dpid'] = int(node.dpid, 16)
+
         if cookie:
             flow['cookie'] = int(cookie)
-
+        if priority:
+            flow['priority'] = int(priority)
 
         flow['actions'] = []
 
@@ -369,34 +445,37 @@ class DCNetwork(Dockernet):
         # http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#add-a-flow-entry
         if cmd == 'add-flow':
             prefix = 'stats/flowentry/add'
-            action = {}
-            action['type'] = 'OUTPUT'
-            action['port'] = switch_outport_nr
-            flow['actions'].append(action)
             if vlan != None:
-                if path.index(current_hop) == 0: # first node
+                if index == 0: # first node
                     action = {}
                     action['type'] = 'PUSH_VLAN' # Push a new VLAN tag if an input frame is non-VLAN-tagged
                     action['ethertype'] = 33024 # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
+                    flow['actions'].append(action)
+
+                    action = {}
                     action['type'] = 'SET_FIELD'
                     action['field'] = 'vlan_vid'
-                    action['value'] = vlan
+                    # ryu expects the field to be masked with the VLAN-present bit (0x1000)
+                    action['value'] = vlan | 0x1000
                     flow['actions'].append(action)
-                elif path.index(current_hop) == len(path) - 1: # last node
+                elif index == len(path) - 1: # last node
                     match += ',dl_vlan=%s' % vlan
                     action = {}
                     action['type'] = 'POP_VLAN'
                     flow['actions'].append(action)
                 else: # middle nodes
                     match += ',dl_vlan=%s' % vlan
-            #flow['match'] = self._parse_match(match)
+            # output action must come last
+            action = {}
+            action['type'] = 'OUTPUT'
+            action['port'] = switch_outport_nr
+            flow['actions'].append(action)
+
         elif cmd == 'del-flows':
-            #del(flow['actions'])
             prefix = 'stats/flowentry/delete'
+
             if cookie:
-                flow['cookie_mask'] = cookie
-                #if cookie is None:
-                #    flow['match'] = self._parse_match(match)
+                # TODO: add cookie_mask as argument
+                flow['cookie_mask'] = int('0xffffffffffffffff', 16) # need full mask to match complete cookie
 
             action = {}
             action['type'] = 'OUTPUT'
@@ -413,7 +492,7 @@ class DCNetwork(Dockernet):
         match_input = kwargs.get('match')
         cmd = kwargs.get('cmd')
         path = kwargs.get('path')
-        current_hop = kwargs.get('current_hop')
+        index = kwargs.get('pathindex')
         vlan = kwargs.get('vlan')
 
         s = ','
@@ -425,10 +504,10 @@ class DCNetwork(Dockernet):
         if cmd == 'add-flow':
             action = 'action=%s' % switch_outport_nr
             if vlan != None:
-                if path.index(current_hop) == 0: # first node
+                if index == 0: # first node
                     action = ('action=mod_vlan_vid:%s' % vlan) + (',output=%s' % switch_outport_nr)
                     match = '-O OpenFlow13 ' + match
-                elif path.index(current_hop) == len(path) - 1: # last node
+                elif index == len(path) - 1: # last node
                     match += ',dl_vlan=%s' % vlan
                     action = 'action=strip_vlan,output=%s' % switch_outport_nr
                 else: # middle nodes
                     match += ',dl_vlan=%s' % vlan
@@ -440,11 +519,11 @@ class DCNetwork(Dockernet):
             ofcmd = ''
 
         node.dpctl(cmd, ofcmd)
-        logging.info("{3} in switch: {0} in_port: {1} out_port: {2}".format(node.name, switch_inport_nr,
+        LOG.info("{3} in switch: {0} in_port: {1} out_port: {2}".format(node.name, switch_inport_nr,
                                                                            switch_outport_nr, cmd))
 
     # start Ryu Openflow controller as Remote Controller for the DCNetwork
-    def startRyu(self):
+    def startRyu(self, learning_switch=True):
         # start Ryu controller with rest-API
         python_install_path = site.getsitepackages()[0]
         ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
@@ -455,30 +534,56 @@ class DCNetwork(Dockernet):
         ryu_of_port = '6653'
         ryu_cmd = 'ryu-manager'
         FNULL = open("/tmp/ryu.log", 'w')
-        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
-        # no learning switch
-        #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+        if learning_switch:
+            self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+        else:
+            # no learning switch, but with rest api
+            self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
         time.sleep(1)
 
-    def stopRyu(self):
+    def killRyu(self):
+        """
+        Stop the Ryu controller that might be started by son-emu.
+        :return:
+        """
+        # try it nicely
        if self.ryu_process is not None:
             self.ryu_process.terminate()
             self.ryu_process.kill()
+        # ensure its death ;-)
+        Popen(['pkill', '-f', 'ryu-manager'])
 
     def ryu_REST(self, prefix, dpid=None, data=None):
+
         if dpid:
             url = self.ryu_REST_api + '/' + str(prefix) + '/' + str(dpid)
         else:
             url = self.ryu_REST_api + '/' + str(prefix)
         if data:
-            #logging.info('POST: {0}'.format(str(data)))
-            req = urllib2.Request(url, str(data))
+            req = self.RyuSession.post(url, json=data)
         else:
-            req = urllib2.Request(url)
+            req = self.RyuSession.get(url)
 
-        ret = urllib2.urlopen(req).read()
+
+        # do extra logging if status code is not 200 (OK)
+        if req.status_code != requests.codes.ok:
+            LOG.info(
+                'type {0} encoding: {1} text: {2} headers: {3} history: {4}'.format(req.headers['content-type'],
                                                                                     req.encoding, req.text,
                                                                                     req.headers, req.history))
+            LOG.info('url: {0}'.format(str(url)))
+            if data: LOG.info('POST: {0}'.format(str(data)))
+            LOG.info('status: {0} reason: {1}'.format(req.status_code, req.reason))
+
+
+        if 'json' in req.headers['content-type']:
+            ret = req.json()
+            return ret
+
+        ret = req.text.rstrip()
         return ret
 
+
     # need to respect that some match fields must be integers
     # http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#description-of-match-and-actions
     def _parse_match(self, match):
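
---
Usage sketch (illustrative only, not part of the patch): the snippet below shows how the extended DCNetwork/setChain API introduced above might be driven from a topology script. The data center, VNF, and interface names ("dc1", "dc2", "vnf1", "vnf2", "port0") are hypothetical, and the two VNF containers are assumed to have been started via the emulator's compute API before the chain is installed; only DCNetwork, addDatacenter, addLink, setChain, start, and stop are taken from the code above.

    from mininet.node import RemoteController
    from emuvim.dcemulator.net import DCNetwork

    # build a small two-data-center topology; enable_learning toggles the
    # Ryu learning-switch app that startRyu() launches next to ofctl_rest
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    net.addLink(dc1, dc2)
    net.start()

    # chain two (already started, hypothetical) VNFs in both directions; the
    # cookie tags the installed flow rules so the chain can be removed later
    out = net.setChain("vnf1", "vnf2",
                       vnf_src_interface="port0", vnf_dst_interface="port0",
                       cmd="add-flow", bidirectional=True, cookie=10, priority=1000)
    print(out)

    # remove the whole chain again by matching on the cookie set above
    net.setChain("vnf1", "vnf2",
                 vnf_src_interface="port0", vnf_dst_interface="port0",
                 cmd="del-flows", bidirectional=True, cookie=10)

    net.stop()

Each direction of the chain gets its own VLAN id from self.vlans, and the flow rules are pushed per hop: PUSH_VLAN/SET_FIELD at the first switch, dl_vlan matches in the middle, and POP_VLAN at the last switch, either through the Ryu REST API (RemoteController) or via ovs-ofctl (dpctl) otherwise.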