- name: install requests
pip: name=requests state=latest
- - name: install docker-py
- pip: name=docker-py version=1.7.1
+ - name: install docker
+ pip: name=docker version=2.0.2
- name: install prometheus_client
pip: name=prometheus_client state=latest
'pytest',
'Flask',
'flask_restful',
- 'docker-py==1.7.1',
+ 'docker==2.0.2',
'requests',
'prometheus_client',
'urllib3'
"""
global dcs
- def put(self, dc_label, compute_name):
+ def put(self, dc_label, compute_name, resource=None, value=None):
+ # check if resource update
+ if resource and value:
+ c = self._update_resource(dc_label, compute_name, resource, value)
+ return c.getStatus(), 200
+ # deploy new container
# check if json data is a dict
data = request.json
if data is None:
logging.exception("API error.")
return ex.message, 500
+ def _update_resource(self, dc_label, compute_name, resource, value):
+ #check if container exists
+ d = dcs.get(dc_label).net.getNodeByName(compute_name)
+ if resource == 'cpu':
+ cpu_period = int(dcs.get(dc_label).net.cpu_period)
+ cpu_quota = int(cpu_period * float(value))
+ #put default values back
+ if float(value) <= 0:
+ cpu_period = 100000
+ cpu_quota = -1
+ d.updateCpuLimit(cpu_period=cpu_period, cpu_quota=cpu_quota)
+ return d
+
+
def get(self, dc_label, compute_name):
logging.debug("API CALL: compute status")
"""
global net
- def put(self, vnf_name, vnf_interface=None, metric='tx_packets'):
+ def put(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=None):
logging.debug("REST CALL: start monitor VNF interface")
try:
- c = net.monitor_agent.setup_metric(vnf_name, vnf_interface, metric)
+ if cookie:
+ c = net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
+ else:
+ c = net.monitor_agent.setup_metric(vnf_name, vnf_interface, metric)
# return monitor message response
return str(c), 200
except Exception as ex:
logging.exception("API error.")
return ex.message, 500
- def delete(self, vnf_name, vnf_interface=None, metric='tx_packets'):
+ def delete(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=None):
logging.debug("REST CALL: stop monitor VNF interface")
try:
- c = net.monitor_agent.stop_metric(vnf_name, vnf_interface, metric)
+ if cookie:
+ c = net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
+ else:
+ c = net.monitor_agent.stop_metric(vnf_name, vnf_interface, metric)
# return monitor message response
return str(c), 200
except Exception as ex:
c = net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
# return monitor message response
return str(c), 200
+ except Exception as ex:
+ logging.exception("API error.")
+ return ex.message, 500
+
+class MonitorLinkAction(Resource):
+ """
+ Add or remove flow monitoring on chains between VNFs.
+ These chain links are implemented as flow entries in the networks' SDN switches.
+ The monitoring is an extra flow entry on top of the existing chain, with a specific match. (preserving the chaining)
+ The counters of this new monitoring flow are exported
+ :param vnf_src_name: VNF name of the source of the link
+ :param vnf_dst_name: VNF name of the destination of the link
+ :param vnf_src_interface: VNF interface name of the source of the link
+ :param vnf_dst_interface: VNF interface name of the destination of the link
+ :param weight: weight of the link (can be useful for routing calculations)
+ :param match: OpenFlow match format of the flow entry
+ :param bidirectional: boolean value if the link needs to be implemented from src to dst and back
+ :param cookie: cookie value, identifier of the flow entry to be installed.
+ :param priority: integer indicating the priority of the flow entry
+ :param skip_vlan_tag: boolean to indicate whether a new vlan tag should be created for this chain
+ :param monitor: boolean to indicate whether a monitoring flow entry should be installed for this chain
+ :param monitor_placement: 'tx'=place the monitoring flowrule at the beginning of the chain, 'rx'=place at the end of the chain
+ :param metric: tx_packet_rate, tx_byte_rate, rx_packet_rate, rx_byte_rate
+ :return: message string indicating if the chain action is successful or not
+ """
+
+ # the global net is set from the topology file, and connected via connectDCNetwork function in rest_api_endpoint.py
+ global net
+
+ def put(self, vnf_src_name, vnf_dst_name):
+ logging.debug("REST CALL: monitor link flow add")
+
+ try:
+ command = 'add-flow'
+ return self._MonitorLinkAction(vnf_src_name, vnf_dst_name, command=command)
+ except Exception as ex:
+ logging.exception("API error.")
+ return ex.message, 500
+
+ def delete(self, vnf_src_name, vnf_dst_name):
+ logging.debug("REST CALL: monitor link flow remove")
+
+ try:
+ command = 'del-flows'
+ return self._MonitorLinkAction(vnf_src_name, vnf_dst_name, command=command)
+ except Exception as ex:
+ logging.exception("API error.")
+ return ex.message, 500
+
+ def _MonitorLinkAction(self, vnf_src_name, vnf_dst_name, command=None):
+ # call DCNetwork method, not really datacenter specific API for now...
+ # no check if vnfs are really connected to this datacenter...
+ try:
+ # check if json data is a dict
+ data = request.json
+ if data is None:
+ data = {}
+ elif type(data) is not dict:
+ data = json.loads(request.json)
+
+ vnf_src_interface = data.get("vnf_src_interface")
+ vnf_dst_interface = data.get("vnf_dst_interface")
+ weight = data.get("weight")
+ match = data.get("match")
+ bidirectional = data.get("bidirectional")
+ cookie = data.get("cookie")
+ priority = data.get("priority")
+ skip_vlan_tag = data.get("skip_vlan_tag")
+ monitor = data.get("monitor")
+ monitor_placement = data.get("monitor_placement")
+
+ #first install monitor flow
+ c1 = net.setChain(
+ vnf_src_name, vnf_dst_name,
+ vnf_src_interface=vnf_src_interface,
+ vnf_dst_interface=vnf_dst_interface,
+ cmd=command,
+ weight=weight,
+ match=match,
+ bidirectional=bidirectional,
+ cookie=cookie,
+ priority=priority,
+ skip_vlan_tag=skip_vlan_tag,
+ monitor=monitor,
+ monitor_placement=monitor_placement)
+
+ #then export monitor flow
+ metric = data.get("metric")
+ if 'rx' in monitor_placement:
+ vnf_name = vnf_dst_name
+ vnf_interface = vnf_dst_interface
+ elif 'tx' in monitor_placement:
+ vnf_name = vnf_src_name
+ vnf_interface = vnf_src_interface
+
+ c2 = net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
+
+ # return setChain response
+ return (str(c1) + " " + str(c2)), 200
except Exception as ex:
logging.exception("API error.")
return ex.message, 500
\ No newline at end of file
logging.basicConfig(level=logging.INFO)
+# the global net is set from the topology file, and connected via connectDCNetwork function in rest_api_endpoint.py
net = None
:param bidirectional: boolean value if the link needs to be implemented from src to dst and back
:param cookie: cookie value, identifier of the flow entry to be installed.
:param priority: integer indicating the priority of the flow entry
+ :param skip_vlan_tag: boolean to indicate whether a new vlan tag should be created for this chain
+ :param monitor: boolean to indicate whether a monitoring flow entry should be installed for this chain
+ :param monitor_placement: 'tx'=place the monitoring flowrule at the beginning of the chain, 'rx'=place at the end of the chain
:return: message string indicating if the chain action is successful or not
"""
bidirectional = data.get("bidirectional")
cookie = data.get("cookie")
priority = data.get("priority")
+ skip_vlan_tag = data.get("skip_vlan_tag")
+ monitor = data.get("monitor")
+ monitor_placement = data.get("monitor_placement")
+
c = net.setChain(
vnf_src_name, vnf_dst_name,
vnf_src_interface=vnf_src_interface,
match=match,
bidirectional=bidirectional,
cookie=cookie,
- priority=priority)
+ priority=priority,
+ skip_vlan_tag=skip_vlan_tag,
+ monitor=monitor,
+ monitor_placement=monitor_placement)
# return setChain response
return str(c), 200
except Exception as ex:
from network import NetworkAction
import monitor
-from monitor import MonitorInterfaceAction, MonitorFlowAction
+from monitor import MonitorInterfaceAction, MonitorFlowAction, MonitorLinkAction
logging.basicConfig(level=logging.INFO)
# setup endpoints
- self.api.add_resource(Compute, "/restapi/compute/<dc_label>/<compute_name>")
+ # compute related actions (start/stop VNFs, get info)
+ self.api.add_resource(Compute,
+ "/restapi/compute/<dc_label>/<compute_name>",
+ "/restapi/compute/<dc_label>/<compute_name>/<resource>/<value>")
self.api.add_resource(ComputeList,
"/restapi/compute",
"/restapi/compute/<dc_label>")
self.api.add_resource(DatacenterStatus, "/restapi/datacenter/<dc_label>")
self.api.add_resource(DatacenterList, "/restapi/datacenter")
- self.api.add_resource(NetworkAction, "/restapi/network/<vnf_src_name>/<vnf_dst_name>", )
+ # network related actions (setup chaining between VNFs)
+ self.api.add_resource(NetworkAction,
+ "/restapi/network/<vnf_src_name>/<vnf_dst_name>")
+
+
+ # monitoring related actions
+ # export a network interface traffic rate counter
self.api.add_resource(MonitorInterfaceAction,
- "/restapi/monitor/<vnf_name>/<metric>",
- "/restapi/monitor/<vnf_name>/<vnf_interface>/<metric>")
+ "/restapi/monitor/vnf/<vnf_name>/<metric>",
+ "/restapi/monitor/vnf/<vnf_name>/<vnf_interface>/<metric>",
+ "/restapi/monitor/vnf/<vnf_name>/<vnf_interface>/<metric>/<cookie>")
+ # export flow traffic counter, of a manually pre-installed flow entry, specified by its cookie
self.api.add_resource(MonitorFlowAction,
- "/restapi/flowmon/<vnf_name>/<metric>/<cookie>",
- "/restapi/flowmon/<vnf_name>/<vnf_interface>/<metric>/<cookie>")
+ "/restapi/monitor/flow/<vnf_name>/<metric>/<cookie>",
+ "/restapi/monitor/flow/<vnf_name>/<vnf_interface>/<metric>/<cookie>")
+ # install monitoring of a specific flow on a pre-existing link in the service.
+ # the traffic counters of the newly installed monitor flow are exported
+ self.api.add_resource(MonitorLinkAction,
+ "/restapi/monitor/link/<vnf_src_name>/<vnf_dst_name>")
logging.debug("Created API endpoint %s(%s:%d)" % (self.__class__.__name__, self.ip, self.port))
import zipfile
import yaml
import threading
-from docker import Client as DockerClient
+from docker import DockerClient
from flask import Flask, request
import flask_restful as fr
from collections import defaultdict
def __init__(self):
self.services = dict()
self.dcs = dict()
+ self.net = None
self.vnf_counter = 0 # used to generate short names for VNFs (Mininet limitation)
LOG.info("Create SONATA dummy gatekeeper.")
eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]
+ GK.net.deployed_elines.extend(eline_fwd_links)
+ GK.net.deployed_elans.extend(elan_fwd_links)
+
# 4a. deploy E-Line links
# cookie is used as identifier for the flowrules installed by the dummygatekeeper
# eg. different services get a unique cookie for their flowrules
# 4b. deploy E-LAN links
base = 10
for link in elan_fwd_links:
+
+ elan_vnf_list=[]
+
# generate lan ip address
ip = 1
for intf in link["connection_points_reference"]:
if vnf_id in self.sap_identifiers:
src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
vnf_id = src_docker_name
+ else:
+ src_docker_name = vnf_id
vnf_name = vnf_id2vnf_name[vnf_id]
LOG.debug(
"Setting up E-LAN link. %s(%s:%s) -> %s" % (
self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
# increase for the next ip address on this E-LAN
ip += 1
+
+ # add this vnf and interface to the E-LAN for tagging
+ network = self.vnfds[vnf_name].get("dc").net # there should be a cleaner way to find the DCNetwork
+ elan_vnf_list.append({'name':src_docker_name,'interface':intf_name})
+
+
+ # install the VLAN tags for this E-LAN
+ network.setLAN(elan_vnf_list)
# increase the base ip address for the next E-LAN
base += 1
self.package_content_path,
make_relative_path(self.manifest.get("entry_service_template")))
self.nsd = load_yaml(nsd_path)
+ GK.net.deployed_nsds.append(self.nsd)
LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
def _load_vnfd(self):
dc = DockerClient()
for url in self.remote_docker_image_urls.itervalues():
if not FORCE_PULL: # only pull if not present (speedup for development)
- if len(dc.images(name=url)) > 0:
+ if len(dc.images.list(name=url)) > 0:
LOG.debug("Image %r present. Skipping pull." % url)
continue
LOG.info("Pulling image: %r" % url)
:param image_name: name of the docker image
:return:
"""
- return len(DockerClient().images(image_name)) > 0
+ return len(DockerClient().images.list(name=image_name)) > 0
def _calculate_placement(self, algorithm):
"""
def start_rest_api(host, port, datacenters=dict()):
GK.dcs = datacenters
+ GK.net = get_dc_network()
# start the Flask server (not the best performance but ok for our use case)
app.run(host=host,
port=port,
r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
return r
+def get_dc_network():
+ """
+ Retrieve the DCNetwork that this dummy gatekeeper (GK) is connected to.
+ Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
+ :return:
+ """
+ assert (len(GK.dcs) > 0)
+ return GK.dcs.values()[0].net
if __name__ == '__main__':
"""
import pprint
import argparse
import json
+from subprocess import Popen
pp = pprint.PrettyPrinter(indent=4)
if len(c) > 1:
name = c[0]
status = c[1]
- eth0ip = status.get("docker_network", "-")
+ #eth0ip = status.get("docker_network", "-")
+ netw_list = [netw_dict['intf_name'] for netw_dict in status.get("network")]
+ dc_if_list = [netw_dict['dc_portname'] for netw_dict in status.get("network")]
table.append([status.get("datacenter"),
name,
status.get("image"),
- eth0ip,
- status.get("state").get("Status")])
+ ','.join(netw_list),
+ ','.join(dc_if_list)])
+ #status.get("state").get("Status")]
headers = ["Datacenter",
"Container",
"Image",
- "docker0",
- "Status"]
+ "Interface list",
+ "Datacenter interfaces"]
print(tabulate(table, headers=headers, tablefmt="grid"))
def status(self, args):
pp.pprint(list)
+ def xterm(self, args):
+ vnf_names = args.get("vnf_names")
+ for vnf_name in vnf_names:
+ Popen(['xterm', '-xrm', 'XTerm.vt100.allowTitleOps: false', '-T', vnf_name,
+ '-e', "docker exec -it mn.{0} /bin/bash".format(vnf_name)])
parser = argparse.ArgumentParser(description="""son-emu compute
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"command",
- choices=['start', 'stop', 'list', 'status'],
+ choices=['start', 'stop', 'list', 'status', 'xterm'],
help="Action to be executed.")
+parser.add_argument(
+ "vnf_names",
+ nargs='*',
+ help="vnf names to open an xterm for")
parser.add_argument(
"--datacenter", "-d", dest="datacenter",
help="Data center to which the command should be applied.")
from subprocess import Popen
import re
import requests
+import os
from mininet.net import Containernet
from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
LOG = logging.getLogger("dcemulator.net")
LOG.setLevel(logging.DEBUG)
+# default CPU period used for cpu percentage-based cfs values (microseconds)
+CPU_PERIOD = 1000000
+
class DCNetwork(Containernet):
"""
Wraps the original Mininet/Containernet class and provides
"""
def __init__(self, controller=RemoteController, monitor=False,
- enable_learning = True, # in case of RemoteController (Ryu), learning switch behavior can be turned off/on
+ enable_learning=False, # learning switch behavior of the default ovs switches icw Ryu controller can be turned off/on, needed for E-LAN functionality
dc_emulation_max_cpu=1.0, # fraction of overall CPU time for emulation
dc_emulation_max_mem=512, # emulation max mem in MB
**kwargs):
# members
self.dcs = {}
self.ryu_process = None
+ #list of deployed nsds.E_Lines and E_LANs (uploaded from the dummy gatekeeper)
+ self.deployed_nsds = []
+ self.deployed_elines = []
+ self.deployed_elans = []
+ self.installed_chains = []
+
# always cleanup environment before we start the emulator
self.killRyu()
Containernet.__init__(
self, switch=OVSKernelSwitch, controller=controller, **kwargs)
+ # default switch configuration
+ enable_ryu_learning = False
+ if enable_learning :
+ self.failMode = 'standalone'
+ enable_ryu_learning = True
+ else:
+ self.failMode = 'secure'
+
# Ryu management
if controller == RemoteController:
# start Ryu controller
- self.startRyu(learning_switch=enable_learning)
+ self.startRyu(learning_switch=enable_ryu_learning)
# add the specified controller
self.addController('c0', controller=controller)
# initialize resource model registrar
self.rm_registrar = ResourceModelRegistrar(
dc_emulation_max_cpu, dc_emulation_max_mem)
+ self.cpu_period = CPU_PERIOD
def addDatacenter(self, label, metadata={}, resource_log_path=None):
"""
"""
assert node1 is not None
assert node2 is not None
- LOG.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
+
# ensure type of node1
if isinstance( node1, basestring ):
if node1 in self.dcs:
attr_dict2.update(attr_dict)
self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)
+ LOG.debug("addLink: n1={0} intf1={1} -- n2={2} intf2={3}".format(
+ str(node1),node1_port_name, str(node2), node2_port_name))
+
return link
def addDocker( self, label, **params ):
"""
Wrapper for addSwitch method to store switch also in graph.
"""
+
+ # add this switch to the global topology overview
if add_to_graph:
self.DCNetwork_graph.add_node(name)
- return Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
+
+ # set the learning switch behavior
+ if 'failMode' in params :
+ failMode = params['failMode']
+ else :
+ failMode = self.failMode
+
+ s = Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)
+
+ # set flow entry that enables learning switch behavior (needed to enable E-LAN functionality)
+ #LOG.info('failmode {0}'.format(failMode))
+ #if failMode == 'standalone' :
+ # LOG.info('add NORMAL')
+ # s.dpctl('add-flow', 'actions=NORMAL')
+
+ return s
def getAllContainers(self):
"""
def CLI(self):
CLI(self)
+ def setLAN(self, vnf_list):
+ """
+ setup an E-LAN network by assigning the same VLAN tag to each DC interface of the VNFs in the E-LAN
+
+ :param vnf_list: names of the VNFs in this E-LAN [{name:,interface:},...]
+ :return:
+ """
+ src_sw = None
+ src_sw_inport_nr = 0
+ src_sw_inport_name = None
+
+ # get a vlan tag for this E-LAN
+ vlan = self.vlans.pop()
+
+ for vnf in vnf_list:
+ vnf_src_name = vnf['name']
+ vnf_src_interface = vnf['interface']
+
+ # check if port is specified (vnf:port)
+ if vnf_src_interface is None:
+ # take first interface by default
+ connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
+ link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+ vnf_src_interface = link_dict[0]['src_port_id']
+
+ for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
+ link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+ for link in link_dict:
+ if (link_dict[link]['src_port_id'] == vnf_src_interface or
+ link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
+ # found the right link and connected switch
+ src_sw = connected_sw
+ src_sw_inport_nr = link_dict[link]['dst_port_nr']
+ src_sw_inport_name = link_dict[link]['dst_port_name']
+ break
+
+ # set the tag on the dc switch interface
+ LOG.debug('set E-LAN: vnf name: {0} interface: {1} tag: {2}'.format(vnf_src_name, vnf_src_interface,vlan))
+ switch_node = self.getNodeByName(src_sw)
+ self._set_vlan_tag(switch_node, src_sw_inport_name, vlan)
+
+ def _addMonitorFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None,
+ tag=None, **kwargs):
+ """
+ Add a monitoring flow entry that adds a special flowentry/counter at the begin or end of a chain.
+ So this monitoring flowrule exists on top of a previously defined chain rule and uses the same vlan tag/routing.
+ :param vnf_src_name:
+ :param vnf_dst_name:
+ :param vnf_src_interface:
+ :param vnf_dst_interface:
+ :param tag: vlan tag to be used for this chain (same tag as existing chain)
+ :param monitor_placement: 'tx' or 'rx' indicating to place the extra flowentry resp. at the beginning or end of the chain
+ :return:
+ """
+
+ src_sw = None
+ src_sw_inport_nr = 0
+ src_sw_inport_name = None
+ dst_sw = None
+ dst_sw_outport_nr = 0
+ dst_sw_outport_name = None
+
+ LOG.debug("call AddMonitorFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
+ vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
+
+ #check if port is specified (vnf:port)
+ if vnf_src_interface is None:
+ # take first interface by default
+ connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
+ link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+ vnf_src_interface = link_dict[0]['src_port_id']
+
+ for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
+ link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+ for link in link_dict:
+ if (link_dict[link]['src_port_id'] == vnf_src_interface or
+ link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
+ # found the right link and connected switch
+ src_sw = connected_sw
+ src_sw_inport_nr = link_dict[link]['dst_port_nr']
+ src_sw_inport_name = link_dict[link]['dst_port_name']
+ break
+
+ if vnf_dst_interface is None:
+ # take first interface by default
+ connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
+ link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
+ vnf_dst_interface = link_dict[0]['dst_port_id']
+
+ vnf_dst_name = vnf_dst_name.split(':')[0]
+ for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
+ link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
+ for link in link_dict:
+ if link_dict[link]['dst_port_id'] == vnf_dst_interface or \
+ link_dict[link]['dst_port_name'] == vnf_dst_interface: # Fix: we might also get interface names, e.g, from a son-emu-cli call
+ # found the right link and connected switch
+ dst_sw = connected_sw
+ dst_sw_outport_nr = link_dict[link]['src_port_nr']
+ dst_sw_outport_name = link_dict[link]['src_port_name']
+ break
+
+ if not tag >= 0:
+ LOG.exception('tag not valid: {0}'.format(tag))
+
+ # get shortest path
+ try:
+ # returns the first found shortest path
+ # if all shortest paths are wanted, use: all_shortest_paths
+ path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+ except:
+ LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
+ vnf_src_name, vnf_dst_name, src_sw, dst_sw))
+ LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
+ LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
+ for e, v in self.DCNetwork_graph.edges():
+ LOG.debug("%r" % self.DCNetwork_graph[e][v])
+ return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+
+ LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
+
+ current_hop = src_sw
+ switch_inport_nr = src_sw_inport_nr
+
+ cmd = kwargs.get('cmd')
+
+ #iterate through the path to install the flow-entries
+ for i in range(0,len(path)):
+ current_node = self.getNodeByName(current_hop)
+
+ if path.index(current_hop) < len(path)-1:
+ next_hop = path[path.index(current_hop)+1]
+ else:
+ #last switch reached
+ next_hop = vnf_dst_name
+
+ next_node = self.getNodeByName(next_hop)
+
+ if next_hop == vnf_dst_name:
+ switch_outport_nr = dst_sw_outport_nr
+ LOG.info("end node reached: {0}".format(vnf_dst_name))
+ elif not isinstance( next_node, OVSSwitch ):
+ LOG.info("Next node: {0} is not a switch".format(next_hop))
+ return "Next node: {0} is not a switch".format(next_hop)
+ else:
+ # take first link between switches by default
+ index_edge_out = 0
+ switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
+
+
+ # set of entry via ovs-ofctl
+ if isinstance( current_node, OVSSwitch ):
+ kwargs['vlan'] = tag
+ kwargs['path'] = path
+ kwargs['current_hop'] = current_hop
+ kwargs['switch_inport_name'] = src_sw_inport_name
+ kwargs['switch_outport_name'] = dst_sw_outport_name
+ kwargs['skip_vlan_tag'] = True
+
+ monitor_placement = kwargs.get('monitor_placement').strip()
+ # 'tx': place the monitoring flow at the first (src) switch of the chain
+ insert_flow = False
+ if monitor_placement == 'tx' and path.index(current_hop) == 0: # first node:
+ insert_flow = True
+ # 'rx': place the monitoring flow at the last (dst) switch of the chain
+ elif monitor_placement == 'rx' and path.index(current_hop) == len(path) - 1: # last node:
+ insert_flow = True
+ elif monitor_placement not in ['rx', 'tx']:
+ LOG.exception('invalid monitor command: {0}'.format(monitor_placement))
+
+
+ if self.controller == RemoteController and insert_flow:
+ ## set flow entry via ryu rest api
+ self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ break
+ elif insert_flow:
+ ## set flow entry via ovs-ofctl
+ self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ break
+
+ # take first link between switches by default
+ if isinstance( next_node, OVSSwitch ):
+ switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
+ current_hop = next_hop
+
+ return "path {2} between {0} and {1}".format(vnf_src_name, vnf_dst_name, cmd)
+
+
def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
"""
Chain 2 vnf interfaces together by installing the flowrules in the switches along their path.
:param cookie: cookie for the installed flowrules (can be used later as identifier for a set of installed chains)
:param match: custom match entry to be added to the flowrules (default: only in_port and vlan tag)
:param priority: custom flowrule priority
+ :param monitor: boolean to indicate whether this chain is a monitoring chain
+ :param tag: vlan tag to be used for this chain (pre-defined or new one if none is specified)
:param path: custom path between the two VNFs (list of switches)
:return: output log string
"""
+
+ # special procedure for monitoring flows
+ if kwargs.get('monitor'):
+
+ # check if chain already exists
+ found_chains = [chain_dict for chain_dict in self.installed_chains if
+ (chain_dict['vnf_src_name'] == vnf_src_name and chain_dict['vnf_src_interface'] == vnf_src_interface
+ and chain_dict['vnf_dst_name'] == vnf_dst_name and chain_dict['vnf_dst_interface'] == vnf_dst_interface)]
+
+ if len(found_chains) > 0:
+ # this chain exists, so need an extra monitoring flow
+ # assume only 1 chain per vnf/interface pair
+ LOG.debug('*** installing monitoring chain on top of pre-defined chain from {0}:{1} -> {2}:{3}'.
+ format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
+ tag = found_chains[0]['tag']
+ ret = self._addMonitorFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface,
+ tag=tag, table_id=0, **kwargs)
+ return ret
+ else:
+ # no chain existing (or E-LAN) -> install normal chain
+ LOG.warning('*** installing monitoring chain without pre-defined NSD chain from {0}:{1} -> {2}:{3}'.
+ format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
+ pass
+
+
cmd = kwargs.get('cmd')
- if cmd == 'add-flow':
+ if cmd == 'add-flow' or cmd == 'del-flows':
ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
if kwargs.get('bidirectional'):
if kwargs.get('path') is not None:
kwargs['path'] = list(reversed(kwargs.get('path')))
ret = ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
- elif cmd == 'del-flows':
- ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
- if kwargs.get('bidirectional'):
- if kwargs.get('path') is not None:
- kwargs['path'] = list(reversed(kwargs.get('path')))
- ret = ret + '\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
-
else:
ret = "Command unknown"
def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
src_sw = None
- dst_sw = None
src_sw_inport_nr = 0
+ src_sw_inport_name = None
+ dst_sw = None
dst_sw_outport_nr = 0
+ dst_sw_outport_name = None
LOG.debug("call chainAddFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
# found the right link and connected switch
src_sw = connected_sw
src_sw_inport_nr = link_dict[link]['dst_port_nr']
+ src_sw_inport_name = link_dict[link]['dst_port_name']
break
if vnf_dst_interface is None:
# found the right link and connected switch
dst_sw = connected_sw
dst_sw_outport_nr = link_dict[link]['src_port_nr']
+ dst_sw_outport_name = link_dict[link]['src_port_name']
break
path = kwargs.get('path')
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
- # choose free vlan if path contains more than 1 switch
+ # choose free vlan
+ ## if path contains more than 1 switch
cmd = kwargs.get('cmd')
vlan = None
if cmd == 'add-flow':
- if len(path) > 1:
+ if kwargs.get('tag'):
+ # use pre-defined tag
+ vlan = kwargs.get('tag')
+ else:
vlan = self.vlans.pop()
+ # store the used vlan tag to identify this chain
+ if not kwargs.get('monitor'):
+ chain_dict = {}
+ chain_dict['vnf_src_name'] = vnf_src_name
+ chain_dict['vnf_dst_name'] = vnf_dst_name
+ chain_dict['vnf_src_interface'] = vnf_src_interface
+ chain_dict['vnf_dst_interface'] = vnf_dst_interface
+ chain_dict['tag'] = vlan
+ self.installed_chains.append(chain_dict)
+
+ #iterate through the path to install the flow-entries
for i in range(0,len(path)):
current_node = self.getNodeByName(current_hop)
if isinstance( current_node, OVSSwitch ):
kwargs['vlan'] = vlan
kwargs['path'] = path
+ kwargs['current_hop'] = current_hop
+ kwargs['switch_inport_name'] = src_sw_inport_name
+ kwargs['switch_outport_name'] = dst_sw_outport_name
kwargs['pathindex'] = i
if self.controller == RemoteController:
## set flow entry via ovs-ofctl
self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
-
-
# take first link between switches by default
if isinstance( next_node, OVSSwitch ):
switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
vlan = kwargs.get('vlan')
priority = kwargs.get('priority')
+ # flag to not set the ovs port vlan tag
+ skip_vlan_tag = kwargs.get('skip_vlan_tag')
+ # table id to put this flowentry
+ table_id = kwargs.get('table_id')
+ if not table_id:
+ table_id = 0
s = ','
if match_input:
if priority:
flow['priority'] = int(priority)
+ flow['table_id'] = table_id
+
flow['actions'] = []
# possible Ryu actions, match fields:
prefix = 'stats/flowentry/add'
if vlan != None:
if index == 0: # first node
- action = {}
- action['type'] = 'PUSH_VLAN' # Push a new VLAN tag if a input frame is non-VLAN-tagged
- action['ethertype'] = 33024 # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
- flow['actions'].append(action)
- action = {}
- action['type'] = 'SET_FIELD'
- action['field'] = 'vlan_vid'
- # ryu expects the field to be masked
- action['value'] = vlan | 0x1000
- flow['actions'].append(action)
- elif index == len(path) -1: # last node
- match += ',dl_vlan=%s' % vlan
- action = {}
- action['type'] = 'POP_VLAN'
- flow['actions'].append(action)
+ # set vlan tag in ovs instance (to isolate E-LANs)
+ if not skip_vlan_tag:
+ in_port_name = kwargs.get('switch_inport_name')
+ self._set_vlan_tag(node, in_port_name, vlan)
+ # set vlan push action if more than 1 switch in the path
+ if len(path) > 1:
+ action = {}
+ action['type'] = 'PUSH_VLAN' # Push a new VLAN tag if a input frame is non-VLAN-tagged
+ action['ethertype'] = 33024 # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
+ flow['actions'].append(action)
+ action = {}
+ action['type'] = 'SET_FIELD'
+ action['field'] = 'vlan_vid'
+ # ryu expects the field to be masked
+ action['value'] = vlan | 0x1000
+ flow['actions'].append(action)
+
+ elif index == len(path) - 1: # last node
+ # set vlan tag in ovs instance (to isolate E-LANs)
+ if not skip_vlan_tag:
+ out_port_name = kwargs.get('switch_outport_name')
+ self._set_vlan_tag(node, out_port_name, vlan)
+ # set vlan pop action if more than 1 switch in the path
+ if len(path) > 1:
+ match += ',dl_vlan=%s' % vlan
+ action = {}
+ action['type'] = 'POP_VLAN'
+ flow['actions'].append(action)
+
else: # middle nodes
match += ',dl_vlan=%s' % vlan
+
# output action must come last
action = {}
action['type'] = 'OUTPUT'
flow['match'] = self._parse_match(match)
self.ryu_REST(prefix, data=flow)
+ def _set_vlan_tag(self, node, switch_port, tag):
+ node.vsctl('set', 'port {0} tag={1}'.format(switch_port,tag))
+ LOG.debug("set vlan in switch: {0} in_port: {1} vlan tag: {2}".format(node.name, switch_port, tag))
+
def _set_flow_entry_dpctl(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
+
match = 'in_port=%s' % switch_inport_nr
cookie = kwargs.get('cookie')
def startRyu(self, learning_switch=True):
# start Ryu controller with rest-API
python_install_path = site.getsitepackages()[0]
- ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
+ # ryu default learning switch
+ #ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
+ #custom learning switch that installs a default NORMAL action in the ovs switches
+ dir_path = os.path.dirname(os.path.realpath(__file__))
+ ryu_path = dir_path + '/son_emu_simple_switch_13.py'
ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
# change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
# Ryu still uses 6633 as default
FNULL = open("/tmp/ryu.log", 'w')
if learning_switch:
self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+ LOG.debug('starting ryu-controller with {0}'.format(ryu_path))
+ LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
else:
# no learning switch, but with rest api
self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+ LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
time.sleep(1)
def killRyu(self):
dict.update({match[0]:m2})
return dict
+ def find_connected_dc_interface(self, vnf_src_name, vnf_src_interface):
+ for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
+ link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+ for link in link_dict:
+ if (link_dict[link]['src_port_id'] == vnf_src_interface or
+ link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g., from a son-emu-cli call
+ # found the right link and connected switch
+ src_sw = connected_sw
+ src_sw_inport_nr = link_dict[link]['dst_port_nr']
+ src_sw_inport_name = link_dict[link]['dst_port_name']
+ return src_sw_inport_name
DCDPID_BASE = 1000 # start of switch dpid's used for data center switches
-
class EmulatorCompute(Docker):
"""
Emulator specific compute node class.
Helper method to receive information about the virtual networks
this compute instance is connected to.
"""
- # format list of tuples (name, Ip, MAC, isUp, status)
- return [{'intf_name':str(i), 'ip':i.IP(), 'mac':i.MAC(), 'up':i.isUp(), 'status':i.status()}
- for i in self.intfList()]
+ # get all links and find dc switch interface
+ networkStatusList = []
+ for i in self.intfList():
+ vnf_name = self.name
+ vnf_interface = str(i)
+ dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface)
+ # format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
+ intf_dict = {'intf_name': str(i), 'ip': i.IP(), 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
+ networkStatusList.append(intf_dict)
+
+ return networkStatusList
def getStatus(self):
"""
status["memswap_limit"] = self.memswap_limit
status["state"] = self.dcli.inspect_container(self.dc)["State"]
status["id"] = self.dcli.inspect_container(self.dc)["Id"]
+ status["short_id"] = self.dcli.inspect_container(self.dc)["Id"][:12]
status["datacenter"] = (None if self.datacenter is None
else self.datacenter.label)
return status
if len(network) < 1:
network.append({})
+ # apply hard-set resource limits=0
+ cpu_percentage = kwargs.get('cpu_percent')
+ if cpu_percentage:
+ cpu_period = self.net.cpu_period
+ cpu_quota = self.net.cpu_period * float(cpu_percentage)
+ else:
+ cpu_quota = None
+ cpu_period = None
+
# create the container
d = self.net.addDocker(
"%s" % (name),
dcmd=command,
datacenter=self,
flavor_name=flavor_name,
+ cpu_period = cpu_period,
+ cpu_quota = cpu_quota,
+ environment = {'VNF_NAME':name},
**params
)
+
+
# apply resource limits to container if a resource model is defined
if self._resource_model is not None:
try:
--- /dev/null
+# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ryu.base import app_manager
+from ryu.controller import ofp_event
+from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
+from ryu.controller.handler import set_ev_cls
+from ryu.ofproto import ofproto_v1_3
+from ryu.lib.packet import packet
+from ryu.lib.packet import ethernet
+from ryu.lib.packet import ether_types
+from ryu.topology.event import EventSwitchEnter, EventSwitchLeave, EventSwitchReconnected
+
+class SimpleSwitch13(app_manager.RyuApp):
+ OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
+
+ def __init__(self, *args, **kwargs):
+ super(SimpleSwitch13, self).__init__(*args, **kwargs)
+ self.mac_to_port = {}
+
+ @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
+ def switch_features_handler(self, ev):
+ datapath = ev.msg.datapath
+ ofproto = datapath.ofproto
+ parser = datapath.ofproto_parser
+
+ # install table-miss flow entry
+ #
+ # We specify NO BUFFER to max_len of the output action due to
+ # OVS bug. At this moment, if we specify a lesser number, e.g.,
+ # 128, OVS will send Packet-In with invalid buffer_id and
+ # truncated packet data. In that case, we cannot output packets
+ # correctly. The bug has been fixed in OVS v2.1.0.
+ match = parser.OFPMatch()
+ #actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
+ # ofproto.OFPCML_NO_BUFFER)]
+ actions = [parser.OFPActionOutput(ofproto.OFPCML_NO_BUFFER)]
+ self.add_flow(datapath, 0, match, actions)
+
+ def add_flow(self, datapath, priority, match, actions, buffer_id=None, table_id=0):
+ ofproto = datapath.ofproto
+ parser = datapath.ofproto_parser
+
+ inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
+ actions)]
+ if buffer_id:
+ mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
+ priority=priority, match=match,
+ instructions=inst, table_id=table_id)
+ else:
+ mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
+ match=match, instructions=inst, table_id=table_id)
+ datapath.send_msg(mod)
+
+ # new switch detected
+
+ @set_ev_cls([EventSwitchEnter, EventSwitchReconnected])
+ def _ev_switch_enter_handler(self, ev):
+ datapath = ev.switch.dp
+ self.logger.info('registered OF switch id: %s' % datapath.id)
+ ofproto = datapath.ofproto
+ self.logger.info('OF version: {0}'.format(ofproto))
+ # send NORMAL action for all undefined flows
+ ofp_parser = datapath.ofproto_parser
+ actions = [ofp_parser.OFPActionOutput(ofproto_v1_3.OFPP_NORMAL)]
+ self.add_flow(datapath, 0, None, actions, table_id=0)
+
+
+ @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
+ def _packet_in_handler(self, ev):
+ # If you hit this you might want to increase
+ # the "miss_send_length" of your switch
+ if ev.msg.msg_len < ev.msg.total_len:
+ self.logger.debug("packet truncated: only %s of %s bytes",
+ ev.msg.msg_len, ev.msg.total_len)
+ msg = ev.msg
+ datapath = msg.datapath
+ ofproto = datapath.ofproto
+ parser = datapath.ofproto_parser
+ in_port = msg.match['in_port']
+
+ pkt = packet.Packet(msg.data)
+ eth = pkt.get_protocols(ethernet.ethernet)[0]
+
+ if eth.ethertype == ether_types.ETH_TYPE_LLDP:
+ # ignore lldp packet
+ return
+ dst = eth.dst
+ src = eth.src
+
+ dpid = datapath.id
+ self.mac_to_port.setdefault(dpid, {})
+
+ self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
+
+ # learn a mac address to avoid FLOOD next time.
+ self.mac_to_port[dpid][src] = in_port
+
+ if dst in self.mac_to_port[dpid]:
+ out_port = self.mac_to_port[dpid][dst]
+ else:
+ out_port = ofproto.OFPP_FLOOD
+
+ actions = [parser.OFPActionOutput(out_port)]
+
+ # install a flow to avoid packet_in next time
+ if out_port != ofproto.OFPP_FLOOD:
+ match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
+ # verify if we have a valid buffer_id, if yes avoid to send both
+ # flow_mod & packet_out
+ if msg.buffer_id != ofproto.OFP_NO_BUFFER:
+ self.add_flow(datapath, 1, match, actions, msg.buffer_id)
+ return
+ else:
+ self.add_flow(datapath, 1, match, actions)
+ data = None
+ if msg.buffer_id == ofproto.OFP_NO_BUFFER:
+ data = msg.data
+
+ out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
+ in_port=in_port, actions=actions, data=data)
+ datapath.send_msg(out)
def create_topology1():
# create topology
- net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=False)
+ net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
dc1 = net.addDatacenter("dc1")
dc2 = net.addDatacenter("dc2")
s1 = net.addSwitch("s1")
# @unittest.skip("disabled")
def test_GK_Api_start_service(self):
# create network
- self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0)
+ self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0, enable_learning=True)
# setup links
self.net.addLink(self.dc[0], self.h[0])
self.net.addLink(self.dc[0], self.dc[1])
# check compute list result
self.assertEqual(len(self.dc[0].listCompute()), 2)
# check connectivity by using ping
+ ELAN_list=[]
+ for i in [0]:
+ for vnf in self.dc[i].listCompute():
+ # check connection
+ p = self.net.ping([self.h[i], vnf])
+ print p
+ self.assertTrue(p <= 0.0)
+
+ # check E LAN connection
+ network_list = vnf.getNetworkStatus()
+ mgmt_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == 'mgmt']
+ self.assertTrue(len(mgmt_ip) > 0)
+ ip_address = mgmt_ip[0]
+ ELAN_list.append(ip_address)
+ print ip_address
+
+ # check ELAN connection by ping over the mgmt network (needs to be configured as ELAN in the test service)
for vnf in self.dc[0].listCompute():
- p = self.net.ping([self.h[0], vnf])
- self.assertTrue(p <= 50.0)
+ network_list = vnf.getNetworkStatus()
+ mgmt_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == 'mgmt']
+ self.assertTrue(len(mgmt_ip) > 0)
+ ip_address = mgmt_ip[0]
+ print ELAN_list
+ print ip_address
+ test_ip_list = list(ELAN_list)
+ test_ip_list.remove(ip_address)
+ for ip in test_ip_list:
+ p = self.net.ping([vnf],manualdestip=ip)
+ print p
+ self.assertTrue(p <= 0.0)
+
# stop Mininet network
self.stopNet()
initialize_GK()