From: cgeoffroy Date: Thu, 3 Mar 2016 17:18:13 +0000 (+0100) Subject: Moving emuvim into the src directory X-Git-Tag: v3.1~171^2~7 X-Git-Url: https://osm.etsi.org/gitweb/?a=commitdiff_plain;h=3eef9fde234a4379d80e0435bac9ce650407a895;p=osm%2Fvim-emu.git Moving emuvim into the src directory --- diff --git a/README.md b/README.md index c3fa61f..e2b6658 100755 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Contributors: * (This will be replaced / extended by a REST API later) ### Project structure -* **emuvim/** all emulator code +* **src/emuvim/** all emulator code * **api/** Data center API endpoint implementations (zerorpc, OpenStack REST, ...) * **cli/** CLI client to interact with a running emulator * **dcemulator/** Dockernet wrapper that introduces the notion of data centers and API endpoints @@ -44,10 +44,10 @@ Automatic installation is provide through Ansible playbooks. ### Run * First terminal: - * `cd ~/son-emu/emuvim` + * `cd ~/son-emu/src/emuvim` * `sudo python example_topology.py` * Second terminal: - * `cd ~/son-emu/emuvim/cli` + * `cd ~/son-emu/src/emuvim/cli` * `./son-emu-cli compute start -d datacenter1 -n vnf1` * `./son-emu-cli compute start -d datacenter1 -n vnf2` * `./son-emu-cli compute list` @@ -61,7 +61,7 @@ Automatic installation is provide through Ansible playbooks. * `./start_example_chain` sets up an example service chain, using the example docker container from `package_samples` https://github.com/sonata-nfv/packaging_samples/tree/master/VNFs ### Run Unit Tests -* `cd ~/son-emu/emuvim` +* `cd ~/son-emu/src/emuvim` * `sudo python test` or `sudo python test -v` for more outputs ### CLI diff --git a/emuvim/api/__init__.py b/emuvim/api/__init__.py deleted file mode 100755 index e69de29..0000000 diff --git a/emuvim/api/zerorpcapi.py b/emuvim/api/zerorpcapi.py deleted file mode 100755 index 59b960c..0000000 --- a/emuvim/api/zerorpcapi.py +++ /dev/null @@ -1,128 +0,0 @@ -""" -Distributed Cloud Emulator (dcemulator) -(c) 2015 by Manuel Peuster -""" - -import logging -import threading -import zerorpc - -logging.basicConfig(level=logging.INFO) - - -class ZeroRpcApiEndpoint(object): - """ - Simple API endpoint that offers a zerorpc-based - interface. This interface will be used by the - default command line client. - It can be used as a reference to implement - REST interfaces providing the same semantics, - like e.g. OpenStack compute API. - """ - - def __init__(self, listenip, port): - self.dcs = {} - self.ip = listenip - self.port = port - logging.debug("Created API endpoint %s(%s:%d)" % ( - self.__class__.__name__, self.ip, self.port)) - - def connectDatacenter(self, dc): - self.dcs[dc.label] = dc - logging.info("Connected DC(%s) to API endpoint %s(%s:%d)" % ( - dc.label, self.__class__.__name__, self.ip, self.port)) - - def start(self): - thread = threading.Thread(target=self._api_server_thread, args=()) - thread.daemon = True - thread.start() - logging.debug("Started API endpoint %s(%s:%d)" % ( - self.__class__.__name__, self.ip, self.port)) - - def _api_server_thread(self): - s = zerorpc.Server(MultiDatacenterApi(self.dcs)) - s.bind("tcp://%s:%d" % (self.ip, self.port)) - s.run() - - -class MultiDatacenterApi(object): - """ - Just pass through the corresponding request to the - selected data center. Do not implement provisioning - logic here because will will have multiple API - endpoint implementations at the end. 
- """ - - def __init__(self, dcs): - self.dcs = dcs - - def compute_action_start(self, dc_label, compute_name, image, command, network): - """ - Start a new compute instance: A docker container - :param dc_label: name of the DC - :param compute_name: compute container name - :param image: image name - :param command: command to execute - :param network: - :return: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"}) - """ - # TODO what to return UUID / given name / internal name ? - logging.debug("RPC CALL: compute start") - try: - c = self.dcs.get(dc_label).startCompute( - compute_name, image=image, command=command, network=network) - return str(c.name) - except Exception as ex: - logging.exception("RPC error.") - return ex.message - - def compute_action_stop(self, dc_label, compute_name): - logging.debug("RPC CALL: compute stop") - try: - return self.dcs.get(dc_label).stopCompute(compute_name) - except Exception as ex: - logging.exception("RPC error.") - return ex.message - - def compute_list(self, dc_label): - logging.debug("RPC CALL: compute list") - try: - if dc_label is None: - # return list with all compute nodes in all DCs - all_containers = [] - for dc in self.dcs.itervalues(): - all_containers += dc.listCompute() - return [(c.name, c.getStatus()) - for c in all_containers] - else: - # return list of compute nodes for specified DC - return [(c.name, c.getStatus()) - for c in self.dcs.get(dc_label).listCompute()] - except Exception as ex: - logging.exception("RPC error.") - return ex.message - - def compute_status(self, dc_label, compute_name): - logging.debug("RPC CALL: compute status") - try: - return self.dcs.get( - dc_label).containers.get(compute_name).getStatus() - except Exception as ex: - logging.exception("RPC error.") - return ex.message - - def datacenter_list(self): - logging.debug("RPC CALL: datacenter list") - try: - return [d.getStatus() for d in self.dcs.itervalues()] - except Exception as ex: - logging.exception("RPC error.") - return ex.message - - def datacenter_status(self, dc_label): - logging.debug("RPC CALL: datacenter status") - try: - return self.dcs.get(dc_label).getStatus() - except Exception as ex: - logging.exception("RPC error.") - return ex.message diff --git a/emuvim/api/zerorpcapi_DCNetwork.py b/emuvim/api/zerorpcapi_DCNetwork.py deleted file mode 100755 index 27527aa..0000000 --- a/emuvim/api/zerorpcapi_DCNetwork.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -Distributed Cloud Emulator (dcemulator) -(c) 2015 by Manuel Peuster -""" - -import logging -import threading -import zerorpc - - -logging.basicConfig(level=logging.INFO) - - -class ZeroRpcApiEndpointDCNetwork(object): - """ - Simple API endpoint that offers a zerorpc-based - interface. This interface will be used by the - default command line client. - It can be used as a reference to implement - REST interfaces providing the same semantics, - like e.g. OpenStack compute API. 
- """ - - def __init__(self, listenip, port, DCNetwork=None): - if DCNetwork : - self.connectDCNetwork(DCNetwork) - self.ip = listenip - self.port = port - logging.debug("Created monitoring API endpoint %s(%s:%d)" % ( - self.__class__.__name__, self.ip, self.port)) - - def connectDCNetwork(self, net): - self.net = net - logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % ( - self.__class__.__name__, self.ip, self.port)) - - def start(self): - thread = threading.Thread(target=self._api_server_thread, args=()) - thread.daemon = True - thread.start() - logging.debug("Started API endpoint %s(%s:%d)" % ( - self.__class__.__name__, self.ip, self.port)) - - def _api_server_thread(self): - s = zerorpc.Server(DCNetworkApi(self.net)) - s.bind("tcp://%s:%d" % (self.ip, self.port)) - s.run() - - def stop(self): - logging.info("Stop the monitoring API endpoint") - return - - -class DCNetworkApi(object): - """ - The networking and monitoring commands need the scope of the - whole DC network to find the requested vnf. So this API is intended - to work with a DCNetwork. - Just pass through the corresponding request to the - selected data center network. Do not implement provisioning - logic here because will will have multiple API - endpoint implementations at the end. - """ - - def __init__(self, net): - self.net = net - - def network_action_start(self, vnf_src_name, vnf_dst_name): - # call DCNetwork method, not really datacenter specific API for now... - # provided dc name needs to be part of API endpoint - # no check if vnfs are really connected to this datacenter... - logging.debug("RPC CALL: network chain start") - try: - c = self.net.setChain( - vnf_src_name, vnf_dst_name) - return str(c) - except Exception as ex: - logging.exception("RPC error.") - return ex.message - - def network_action_stop(self, vnf_src_name, vnf_dst_name): - # call DCNetwork method, not really datacenter specific API for now... - # provided dc name needs to be part of API endpoint - # no check if vnfs are really connected to this datacenter... - logging.debug("RPC CALL: network chain stop") - try: - c = self.net.setChain( - vnf_src_name, vnf_dst_name, cmd='del-flows') - return c - except Exception as ex: - logging.exception("RPC error.") - return ex.message - - # get egress(default) or ingress rate of a vnf - def monitor_get_rate(self, vnf_name, direction): - logging.debug("RPC CALL: get rate") - try: - c = self.net.monitor_agent.get_rate(vnf_name, direction) - return c - except Exception as ex: - logging.exception("RPC error.") - return ex.message - - diff --git a/emuvim/cli/__init__.py b/emuvim/cli/__init__.py deleted file mode 100755 index e69de29..0000000 diff --git a/emuvim/cli/compute.py b/emuvim/cli/compute.py deleted file mode 100755 index 70de20a..0000000 --- a/emuvim/cli/compute.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -son-emu compute CLI -(c) 2016 by Manuel Peuster -""" - -import argparse -import pprint -from tabulate import tabulate -import zerorpc - - -pp = pprint.PrettyPrinter(indent=4) - - -class ZeroRpcClient(object): - - def __init__(self): - self.c = zerorpc.Client() - self.c.connect("tcp://127.0.0.1:4242") # TODO hard coded for now. we'll change this later - self.cmds = {} - - def execute_command(self, args): - if getattr(self, args["command"]) is not None: - # call the local method with the same name as the command arg - getattr(self, args["command"])(args) - else: - print "Command not implemented." 
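# Usage sketch (commands taken from the README updated in this same commit):
# the positional "command" argument above is dispatched via getattr() to the
# ZeroRpcClient method of the same name, so
#
#   ./son-emu-cli compute start -d datacenter1 -n vnf1
#   ./son-emu-cli compute list
#
# end up calling ZeroRpcClient.start() and ZeroRpcClient.list(), which
# forward the request over zerorpc to the endpoint on tcp://127.0.0.1:4242.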
- - def start(self, args): - nw_list = list() - if args.get("network") is not None: - networks = args.get("network").split(",") - for nw in networks: - nw_list.append({"ip": nw}) - r = self.c.compute_action_start( - args.get("datacenter"), - args.get("name"), - args.get("image"), - args.get("docker_command"), - nw_list) - pp.pprint(r) - - def stop(self, args): - r = self.c.compute_action_stop( - args.get("datacenter"), args.get("name")) - pp.pprint(r) - - def list(self, args): - r = self.c.compute_list( - args.get("datacenter")) - table = [] - for c in r: - # for each container add a line to the output table - if len(c) > 1: - name = c[0] - status = c[1] - eth0ip = None - eth0status = "down" - if len(status.get("network")) > 0: - eth0ip = status.get("network")[0][1] - eth0status = "up" if status.get( - "network")[0][3] else "down" - table.append([status.get("datacenter"), - name, - status.get("image"), - eth0ip, - eth0status, - status.get("state").get("Status")]) - headers = ["Datacenter", - "Container", - "Image", - "eth0 IP", - "eth0 status", - "Status"] - print tabulate(table, headers=headers, tablefmt="grid") - - def status(self, args): - r = self.c.compute_status( - args.get("datacenter"), args.get("name")) - pp.pprint(r) - - -parser = argparse.ArgumentParser(description='son-emu compute') -parser.add_argument( - "command", - choices=['start', 'stop', 'list', 'status'], - help="Action to be executed.") -parser.add_argument( - "--datacenter", "-d", dest="datacenter", - help="Data center to in which the compute instance should be executed") -parser.add_argument( - "--name", "-n", dest="name", - help="Name of compute instance e.g. 'vnf1'") -parser.add_argument( - "--image","-i", dest="image", - help="Name of container image to be used e.g. 'ubuntu'") -parser.add_argument( - "--dcmd", "-c", dest="docker_command", - help="Startup command of the container e.g. './start.sh'") -parser.add_argument( - "--net", dest="network", - help="Network properties of compute instance e.g. \ - '10.0.0.123/8' or '10.0.0.123/8,11.0.0.123/24' for multiple interfaces.") - - -def main(argv): - args = vars(parser.parse_args(argv)) - c = ZeroRpcClient() - c.execute_command(args) diff --git a/emuvim/cli/datacenter.py b/emuvim/cli/datacenter.py deleted file mode 100755 index c3850fc..0000000 --- a/emuvim/cli/datacenter.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -son-emu datacenter CLI -(c) 2016 by Manuel Peuster -""" - -import argparse -import pprint -from tabulate import tabulate -import zerorpc - - -pp = pprint.PrettyPrinter(indent=4) - - -class ZeroRpcClient(object): - - def __init__(self): - self.c = zerorpc.Client() - self.c.connect("tcp://127.0.0.1:4242") # TODO hard coded for now. we'll change this later - self.cmds = {} - - def execute_command(self, args): - if getattr(self, args["command"]) is not None: - # call the local method with the same name as the command arg - getattr(self, args["command"])(args) - else: - print "Command not implemented." 
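# Usage sketch (illustrative; subcommands and flags are taken from the
# argument parser defined below):
#
#   ./son-emu-cli datacenter list
#   ./son-emu-cli datacenter status -d datacenter1
#
# Both calls are routed by execute_command() to list()/status(), which in
# turn call datacenter_list()/datacenter_status() on the zerorpc endpoint.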
- - def list(self, args): - r = self.c.datacenter_list() - table = [] - for d in r: - # for each dc add a line to the output table - if len(d) > 0: - table.append([d.get("label"), - d.get("internalname"), - d.get("switch"), - d.get("n_running_containers"), - len(d.get("metadata"))]) - headers = ["Label", - "Internal Name", - "Switch", - "# Containers", - "# Metadata Items"] - print tabulate(table, headers=headers, tablefmt="grid") - - def status(self, args): - r = self.c.datacenter_status( - args.get("datacenter")) - pp.pprint(r) - - -parser = argparse.ArgumentParser(description='son-emu datacenter') -parser.add_argument( - "command", - choices=['list', 'status'], - help="Action to be executed.") -parser.add_argument( - "--datacenter", "-d", dest="datacenter", - help="Data center to which the command should be applied.") - - -def main(argv): - args = vars(parser.parse_args(argv)) - c = ZeroRpcClient() - c.execute_command(args) diff --git a/emuvim/cli/monitor.py b/emuvim/cli/monitor.py deleted file mode 100755 index 6885a3c..0000000 --- a/emuvim/cli/monitor.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -son-emu network CLI -(c) 2016 by Manuel Peuster -""" - -import argparse -import pprint -from tabulate import tabulate -import zerorpc - - -pp = pprint.PrettyPrinter(indent=4) - -class ZeroRpcClient(object): - - def __init__(self): - self.c = zerorpc.Client() - # TODO connect to DCNetwork API - #self.c.connect("tcp://127.0.0.1:4242") # TODO hard coded for now. we'll change this later - self.c.connect("tcp://127.0.0.1:5151") - self.cmds = {} - - def execute_command(self, args): - if getattr(self, args["command"]) is not None: - # call the local method with the same name as the command arg - getattr(self, args["command"])(args) - else: - print "Command not implemented." - - def get_rate(self, args): - r = self.c.monitor_get_rate( - args.get("vnf_name"), - args.get("direction")) - pp.pprint(r) - - -parser = argparse.ArgumentParser(description='son-emu network') -parser.add_argument( - "command", - help="Action to be executed: get_rate") -parser.add_argument( - "--vnf_name", "-vnf", dest="vnf_name", - help="vnf name to be monitored") -parser.add_argument( - "--direction", "-d", dest="direction", - help="in (ingress rate) or out (egress rate)") - -def main(argv): - print "This is the son-emu monitor CLI." - print "Arguments: %s" % str(argv) - args = vars(parser.parse_args(argv)) - c = ZeroRpcClient() - c.execute_command(args) diff --git a/emuvim/cli/network.py b/emuvim/cli/network.py deleted file mode 100755 index 8d4219b..0000000 --- a/emuvim/cli/network.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -son-emu network CLI -(c) 2016 by Manuel Peuster -""" - -import argparse -import pprint -from tabulate import tabulate -import zerorpc - - -pp = pprint.PrettyPrinter(indent=4) - -class ZeroRpcClient(object): - - def __init__(self): - self.c = zerorpc.Client() - # TODO connect to DCNetwork API - #self.c.connect("tcp://127.0.0.1:4242") # TODO hard coded for now. we'll change this later - self.c.connect("tcp://127.0.0.1:5151") - self.cmds = {} - - def execute_command(self, args): - if getattr(self, args["command"]) is not None: - # call the local method with the same name as the command arg - getattr(self, args["command"])(args) - else: - print "Command not implemented." 
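# Usage sketch (illustrative; flags are taken from the argument parser below,
# vnf1/vnf2 are the container names used in the README example):
#
#   ./son-emu-cli network add -src vnf1 -dst vnf2
#   ./son-emu-cli network remove -src vnf1 -dst vnf2
#
# add/remove are routed to network_action_start()/network_action_stop() on
# the DCNetwork endpoint (tcp://127.0.0.1:5151), which install or delete the
# corresponding flow entries via DCNetwork.setChain().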
- - def add(self, args): - r = self.c.network_action_start( - #args.get("datacenter"), - args.get("source"), - args.get("destination")) - pp.pprint(r) - - def remove(self, args): - r = self.c.network_action_stop( - #args.get("datacenter"), - args.get("source"), - args.get("destination")) - pp.pprint(r) - - -parser = argparse.ArgumentParser(description='son-emu network') -parser.add_argument( - "command", - help="Action to be executed: add|remove") -parser.add_argument( - "--datacenter", "-d", dest="datacenter", - help="Data center to in which the network action should be initiated") -parser.add_argument( - "--source", "-src", dest="source", - help="vnf name of the source of the chain") -parser.add_argument( - "--destination", "-dst", dest="destination", - help="vnf name of the destination of the chain") - -def main(argv): - args = vars(parser.parse_args(argv)) - c = ZeroRpcClient() - c.execute_command(args) diff --git a/emuvim/cli/son-emu-cli b/emuvim/cli/son-emu-cli deleted file mode 100755 index 61cbd43..0000000 --- a/emuvim/cli/son-emu-cli +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/python -""" - Simple CLI client to interact with a running emulator. - - (c) 2016 by Manuel Peuster - - The CLI offers different tools, e.g., compute, network, ... - Each of these tools is implemented as an independent Python - module. - - cli compute start dc1 my_name flavor_a - cli network create dc1 11.0.0.0/24 -""" - -import sys -import compute -import network -import datacenter -import monitor - -def main(): - if len(sys.argv) < 2: - print "Usage: son-emu-cli " - exit(0) - if sys.argv[1] == "compute": - compute.main(sys.argv[2:]) - elif sys.argv[1] == "network": - network.main(sys.argv[2:]) - elif sys.argv[1] == "datacenter": - datacenter.main(sys.argv[2:]) - elif sys.argv[1] == "monitor": - monitor.main(sys.argv[2:]) - -if __name__ == '__main__': - main() diff --git a/emuvim/dcemulator/__init__.py b/emuvim/dcemulator/__init__.py deleted file mode 100755 index 64f6616..0000000 --- a/emuvim/dcemulator/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" -Distributed Cloud Emulator (dcemulator) -(c) 2015 by Manuel Peuster -""" \ No newline at end of file diff --git a/emuvim/dcemulator/link.py b/emuvim/dcemulator/link.py deleted file mode 100755 index e69de29..0000000 diff --git a/emuvim/dcemulator/monitoring.py b/emuvim/dcemulator/monitoring.py deleted file mode 100755 index 094c09b..0000000 --- a/emuvim/dcemulator/monitoring.py +++ /dev/null @@ -1,62 +0,0 @@ -__author__ = 'Administrator' - -import urllib2 -import logging -from mininet.node import OVSSwitch -import ast -logging.basicConfig(level=logging.INFO) - -""" -class to read openflow stats from the Ryu controller of the DCNEtwork -""" - -class DCNetworkMonitor(): - def __init__(self, net): - self.net = net - # link to REST_API - self.ip = '0.0.0.0' - self.port = '8080' - self.REST_api = 'http://{0}:{1}'.format(self.ip,self.port) - - - def get_rate(self, vnf_name, direction='tx'): - try: - vnf_switch = self.net.DCNetwork_graph.neighbors(str(vnf_name)) - - if len(vnf_switch) > 1: - logging.info("vnf: {0} has multiple ports".format(vnf_name)) - return - elif len(vnf_switch) == 0: - logging.info("vnf: {0} is not connected".format(vnf_name)) - return - else: - vnf_switch = vnf_switch[0] - next_node = self.net.getNodeByName(vnf_switch) - - if not isinstance( next_node, OVSSwitch ): - logging.info("vnf: {0} is not connected to switch".format(vnf_name)) - return - - mon_port = self.net.DCNetwork_graph[vnf_name][vnf_switch]['dst_port'] - switch_dpid = x = 
int(str(next_node.dpid),16) - - ret = self.REST_cmd('stats/port', switch_dpid) - port_stat_dict = ast.literal_eval(ret) - for port_stat in port_stat_dict[str(switch_dpid)]: - if port_stat['port_no'] == mon_port: - return port_stat - break - - return ret - - except Exception as ex: - logging.exception("get_txrate error.") - return ex.message - - - - def REST_cmd(self, prefix, dpid): - url = self.REST_api + '/' + str(prefix) + '/' + str(dpid) - req = urllib2.Request(url) - ret = urllib2.urlopen(req).read() - return ret \ No newline at end of file diff --git a/emuvim/dcemulator/net.py b/emuvim/dcemulator/net.py deleted file mode 100755 index 20ab33a..0000000 --- a/emuvim/dcemulator/net.py +++ /dev/null @@ -1,206 +0,0 @@ -""" -Distributed Cloud Emulator (dcemulator) -(c) 2015 by Manuel Peuster -""" -import logging - -import site -from subprocess import Popen -import os - -from mininet.net import Dockernet -from mininet.node import Controller, OVSSwitch, OVSKernelSwitch, Switch, Docker, Host, RemoteController -from mininet.cli import CLI -from mininet.log import setLogLevel, info, debug -from mininet.link import TCLink, Link -import networkx as nx -from monitoring import DCNetworkMonitor - -from node import Datacenter, EmulatorCompute - - -class DCNetwork(Dockernet): - """ - Wraps the original Mininet/Dockernet class and provides - methods to add data centers, switches, etc. - - This class is used by topology definition scripts. - """ - - def __init__(self, **kwargs): - self.dcs = {} - # create a Mininet/Dockernet network - # call original Docker.__init__ and setup default controller - #Dockernet.__init__( - # self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs) - Dockernet.__init__( - self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs) - self.addController('c0', controller=RemoteController) - - # graph of the complete DC network - self.DCNetwork_graph=nx.DiGraph() - - # monitoring agent - self.monitor_agent = DCNetworkMonitor(self) - - # start Ryu controller - self.startRyu() - - - def addDatacenter(self, label, metadata={}): - """ - Create and add a logical cloud data center to the network. - """ - if label in self.dcs: - raise Exception("Data center label already exists: %s" % label) - dc = Datacenter(label, metadata=metadata) - dc.net = self # set reference to network - self.dcs[label] = dc - dc.create() # finally create the data center in our Mininet instance - logging.info("added data center: %s" % label) - return dc - - def addLink(self, node1, node2, **params): - """ - Able to handle Datacenter objects as link - end points. 
- """ - assert node1 is not None - assert node2 is not None - logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2))) - # ensure type of node1 - if isinstance( node1, basestring ): - if node1 in self.dcs: - node1 = self.dcs[node1].switch - if isinstance( node1, Datacenter ): - node1 = node1.switch - # ensure type of node2 - if isinstance( node2, basestring ): - if node2 in self.dcs: - node2 = self.dcs[node2].switch - if isinstance( node2, Datacenter ): - node2 = node2.switch - # try to give containers a default IP - if isinstance( node1, Docker ): - if not "params1" in params: - params["params1"] = {} - if not "ip" in params["params1"]: - params["params1"]["ip"] = self.getNextIp() - if isinstance( node2, Docker ): - if not "params2" in params: - params["params2"] = {} - if not "ip" in params["params2"]: - params["params2"]["ip"] = self.getNextIp() - - link = Dockernet.addLink(self, node1, node2, **params) # TODO we need TCLinks with user defined performance here - - # add edge and assigned port number to graph in both directions between node1 and node2 - self.DCNetwork_graph.add_edge(node1.name, node2.name, \ - {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]}) - self.DCNetwork_graph.add_edge(node2.name, node1.name, \ - {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]}) - - return link - - def addDocker( self, label, **params ): - """ - Wrapper for addDocker method to use custom container class. - """ - self.DCNetwork_graph.add_node(label) - return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params) - - def removeDocker( self, label, **params ): - """ - Wrapper for removeDocker method to update graph. - """ - self.DCNetwork_graph.remove_node(label) - return Dockernet.removeDocker(self, label, **params) - - def addSwitch( self, name, add_to_graph=True, **params ): - """ - Wrapper for addSwitch method to store switch also in graph. - """ - if add_to_graph: - self.DCNetwork_graph.add_node(name) - return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params) - - def getAllContainers(self): - """ - Returns a list with all containers within all data centers. 
- """ - all_containers = [] - for dc in self.dcs.itervalues(): - all_containers += dc.listCompute() - return all_containers - - def start(self): - # start - for dc in self.dcs.itervalues(): - dc.start() - Dockernet.start(self) - - def stop(self): - # stop Ryu controller - self.ryu_process.terminate() - #self.ryu_process.kill() - Dockernet.stop(self) - - def CLI(self): - CLI(self) - - # to remove chain do setChain( src, dst, cmd='del-flows') - def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'): - # get shortest path - path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name) - logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path)) - - current_hop = vnf_src_name - for i in range(0,len(path)): - next_hop = path[path.index(current_hop)+1] - next_node = self.getNodeByName(next_hop) - - if next_hop == vnf_dst_name: - return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name) - elif not isinstance( next_node, OVSSwitch ): - logging.info("Next node: {0} is not a switch".format(next_hop)) - return "Next node: {0} is not a switch".format(next_hop) - - - switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port'] - next2_hop = path[path.index(current_hop)+2] - switch_outport = self.DCNetwork_graph[next_hop][next2_hop]['src_port'] - - logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport)) - # set of entry via ovs-ofctl - # TODO use rest API of ryu to set flow entries to correct witch dpid - if isinstance( next_node, OVSSwitch ): - match = 'in_port=%s' % switch_inport - - if cmd=='add-flow': - action = 'action=%s' % switch_outport - s = ',' - ofcmd = s.join([match,action]) - elif cmd=='del-flows': - ofcmd = match - else: - ofcmd='' - - next_node.dpctl(cmd, ofcmd) - - current_hop = next_hop - - return "destination node: {0} not reached".format(vnf_dst_name) - - # start Ryu Openflow controller as Remote Controller for the DCNetwork - def startRyu(self): - # start Ryu controller with rest-API - python_install_path = site.getsitepackages()[0] - ryu_path = python_install_path + '/ryu/app/simple_switch_13.py' - ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py' - # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet - # Ryu still uses 6633 as default - ryu_option = '--ofp-tcp-listen-port' - ryu_of_port = '6653' - ryu_cmd = 'ryu-manager' - FNULL = open(os.devnull, 'w') - self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL) \ No newline at end of file diff --git a/emuvim/dcemulator/node.py b/emuvim/dcemulator/node.py deleted file mode 100755 index 336126c..0000000 --- a/emuvim/dcemulator/node.py +++ /dev/null @@ -1,167 +0,0 @@ -""" -Distributed Cloud Emulator (dcemulator) -(c) 2015 by Manuel Peuster -""" -from mininet.node import Docker -import logging - - -DCDPID_BASE = 1000 # start of switch dpid's used for data center switches - - -class EmulatorCompute(Docker): - """ - Emulator specific compute node class. - Inherits from Dockernet's Docker host class. - Represents a single container connected to a (logical) - data center. - We can add emulator specific helper functions to it. 
- """ - - def __init__( - self, name, dimage, **kwargs): - logging.debug("Create EmulatorCompute instance: %s" % name) - self.datacenter = None # pointer to current DC - - # call original Docker.__init__ - Docker.__init__(self, name, dimage, **kwargs) - - def getNetworkStatus(self): - """ - Helper method to receive information about the virtual networks - this compute instance is connected to. - """ - # format list of tuples (name, Ip, MAC, isUp, status) - return [(str(i), i.IP(), i.MAC(), i.isUp(), i.status()) - for i in self.intfList()] - - def getStatus(self): - """ - Helper method to receive information about this compute instance. - """ - status = {} - status["name"] = self.name - status["network"] = self.getNetworkStatus() - status["image"] = self.dimage - status["cpu_quota"] = self.cpu_quota - status["cpu_period"] = self.cpu_period - status["cpu_shares"] = self.cpu_shares - status["cpuset"] = self.cpuset - status["mem_limit"] = self.mem_limit - status["memswap_limit"] = self.memswap_limit - status["state"] = self.dcli.inspect_container(self.dc)["State"] - status["id"] = self.dcli.inspect_container(self.dc)["Id"] - status["datacenter"] = (None if self.datacenter is None - else self.datacenter.label) - return status - - -class Datacenter(object): - """ - Represents a logical data center to which compute resources - (Docker containers) can be added at runtime. - - Will also implement resource bookkeeping in later versions. - """ - - DC_COUNTER = 1 - - def __init__(self, label, metadata={}): - self.net = None # DCNetwork to which we belong - # each node (DC) has a short internal name used by Mininet - # this is caused by Mininets naming limitations for swtiches etc. - self.name = "dc%d" % Datacenter.DC_COUNTER - Datacenter.DC_COUNTER += 1 - # use this for user defined names that can be longer than self.name - self.label = label - # dict to store arbitrary metadata (e.g. latitude and longitude) - self.metadata = metadata - self.switch = None # first prototype assumes one "bigswitch" per DC - self.containers = {} # keep track of running containers - - def _get_next_dc_dpid(self): - global DCDPID_BASE - DCDPID_BASE += 1 - return DCDPID_BASE - - def create(self): - """ - Each data center is represented by a single switch to which - compute resources can be connected at run time. - - TODO: This will be changed in the future to support multiple networks - per data center - """ - self.switch = self.net.addSwitch( - "%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:]) - logging.debug("created data center switch: %s" % str(self.switch)) - - def start(self): - pass - - def startCompute(self, name, image=None, command=None, network=None): - """ - Create a new container as compute resource and connect it to this - data center. - :param name: name (string) - :param image: image name (string) - :param command: command (string) - :param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"}) - :return: - """ - assert name is not None - # no duplications - if name in [c.name for c in self.net.getAllContainers()]: - raise Exception("Container with name %s already exists." 
% name) - # set default parameter - if image is None: - image = "ubuntu" - if network is None: - network = {} # {"ip": "10.0.0.254/8"} - if isinstance(network, dict): - network = [network] # if we have only one network, put it in a list - if isinstance(network, list): - if len(network) < 1: - network.append({}) - - # create the container - d = self.net.addDocker("%s" % (name), dimage=image, dcmd=command) - # connect all given networks - for nw in network: - self.net.addLink(d, self.switch, params1=nw) - # do bookkeeping - self.containers[name] = d - d.datacenter = self - return d # we might use UUIDs for naming later on - - def stopCompute(self, name): - """ - Stop and remove a container from this data center. - """ - assert name is not None - if name not in self.containers: - raise Exception("Container with name %s not found." % name) - self.net.removeLink( - link=None, node1=self.containers[name], node2=self.switch) - self.net.removeDocker("%s" % (name)) - del self.containers[name] - return True - - def listCompute(self): - """ - Return a list of all running containers assigned to this - data center. - """ - return list(self.containers.itervalues()) - - def getStatus(self): - """ - Return a dict with status information about this DC. - """ - return { - "label": self.label, - "internalname": self.name, - "switch": self.switch.name, - "n_running_containers": len(self.containers), - "metadata": self.metadata - } diff --git a/emuvim/example_topology.py b/emuvim/example_topology.py deleted file mode 100755 index eba751c..0000000 --- a/emuvim/example_topology.py +++ /dev/null @@ -1,121 +0,0 @@ -""" -This is an example topology for the distributed cloud emulator (dcemulator). -(c) 2015 by Manuel Peuster - - -This is an example that shows how a user of the emulation tool can -define network topologies with multiple emulated cloud data centers. - -The definition is done with a Python API which looks very similar to the -Mininet API (in fact it is a wrapper for it). - -We only specify the topology *between* data centers not within a single -data center (data center internal setups or placements are not of interest, -we want to experiment with VNF chains deployed across multiple PoPs). - -The original Mininet API has to be completely hidden and not be used by this -script. -""" -import logging -from mininet.log import setLogLevel -from dcemulator.net import DCNetwork -from api.zerorpcapi import ZeroRpcApiEndpoint -from api.zerorpcapi_DCNetwork import ZeroRpcApiEndpointDCNetwork - -logging.basicConfig(level=logging.INFO) - - -def create_topology1(): - """ - 1. Create a data center network object (DCNetwork) - """ - net = DCNetwork() - - """ - 1b. add a monitoring agent to the DCNetwork - """ - mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151) - mon_api.connectDCNetwork(net) - mon_api.start() - """ - 2. Add (logical) data centers to the topology - (each data center is one "bigswitch" in our simplified - first prototype) - """ - dc1 = net.addDatacenter("datacenter1") - dc2 = net.addDatacenter("datacenter2") - dc3 = net.addDatacenter("long_data_center_name3") - dc4 = net.addDatacenter( - "datacenter4", - metadata={"mydata": "we can also add arbitrary metadata to each DC"}) - - """ - 3. You can add additional SDN switches for data center - interconnections to the network. - """ - s1 = net.addSwitch("s1") - - """ - 4. Add links between your data centers and additional switches - to define you topology. - These links can use Mininet's features to limit bw, add delay or jitter. 
- """ - net.addLink(dc1, dc2) - net.addLink("datacenter1", s1) - net.addLink(s1, dc3) - net.addLink(s1, "datacenter4") - - """ - 5. We want to access and control our data centers from the outside, - e.g., we want to connect an orchestrator to start/stop compute - resources aka. VNFs (represented by Docker containers in the emulated) - - So we need to instantiate API endpoints (e.g. a zerorpc or REST - interface). Depending on the endpoint implementations, we can connect - one or more data centers to it, which can then be controlled through - this API, e.g., start/stop/list compute instances. - """ - # create a new instance of a endpoint implementation - zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242) - # connect data centers to this endpoint - zapi1.connectDatacenter(dc1) - zapi1.connectDatacenter(dc2) - zapi1.connectDatacenter(dc3) - zapi1.connectDatacenter(dc4) - # run API endpoint server (in another thread, don't block) - zapi1.start() - - """ - 5.1. For our example, we create a second endpoint to illustrate that - this is supported by our design. This feature allows us to have - one API endpoint for each data center. This makes the emulation - environment more realistic because you can easily create one - OpenStack-like REST API endpoint for *each* data center. - This will look like a real-world multi PoP/data center deployment - from the perspective of an orchestrator. - """ - zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343) - zapi2.connectDatacenter(dc3) - zapi2.connectDatacenter(dc4) - zapi2.start() - - """ - 6. Finally we are done and can start our network (the emulator). - We can also enter the Mininet CLI to interactively interact - with our compute resources (just like in default Mininet). - But we can also implement fully automated experiments that - can be executed again and again. - """ - net.start() - net.CLI() - # when the user types exit in the CLI, we stop the emulator - net.stop() - - -def main(): - setLogLevel('info') # set Mininet loglevel - create_topology1() - - -if __name__ == '__main__': - main() diff --git a/emuvim/test/__main__.py b/emuvim/test/__main__.py deleted file mode 100755 index f7fa66d..0000000 --- a/emuvim/test/__main__.py +++ /dev/null @@ -1,7 +0,0 @@ -import runner -import os - - -if __name__ == '__main__': - thisdir = os.path.dirname( os.path.realpath( __file__ ) ) - runner.main(thisdir) diff --git a/emuvim/test/runner.py b/emuvim/test/runner.py deleted file mode 100755 index 469a99e..0000000 --- a/emuvim/test/runner.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python - -""" -Run all tests - -v : verbose output - -e : emulator test only (no API tests) - -a : API tests only -""" - -from unittest import defaultTestLoader, TextTestRunner, TestSuite -import os -import sys -from mininet.util import ensureRoot -from mininet.clean import cleanup -from mininet.log import setLogLevel - - -def runTests( testDir, verbosity=1, emuonly=False, apionly=False ): - "discover and run all tests in testDir" - # ensure inport paths work - sys.path.append("%s/.." 
% testDir) - # ensure root and cleanup before starting tests - ensureRoot() - cleanup() - # discover all tests in testDir - testSuite = defaultTestLoader.discover( testDir ) - if emuonly: - testSuiteFiltered = [s for s in testSuite if "Emulator" in str(s)] - testSuite = TestSuite() - testSuite.addTests(testSuiteFiltered) - if apionly: - testSuiteFiltered = [s for s in testSuite if "Api" in str(s)] - testSuite = TestSuite() - testSuite.addTests(testSuiteFiltered) - - # run tests - TextTestRunner( verbosity=verbosity ).run( testSuite ) - - -def main(thisdir): - setLogLevel( 'warning' ) - # get the directory containing example tests - vlevel = 2 if '-v' in sys.argv else 1 - emuonly = ('-e' in sys.argv) - apionly = ('-a' in sys.argv) - runTests( - testDir=thisdir, verbosity=vlevel, emuonly=emuonly, apionly=apionly) - - -if __name__ == '__main__': - thisdir = os.path.dirname( os.path.realpath( __file__ ) ) - main(thisdir) diff --git a/emuvim/test/test_api_zerorpc.py b/emuvim/test/test_api_zerorpc.py deleted file mode 100755 index 2830872..0000000 --- a/emuvim/test/test_api_zerorpc.py +++ /dev/null @@ -1 +0,0 @@ -#TODO we'll need this at some time. But I'am lazy. A good REST API seems to be more important. diff --git a/emuvim/test/test_emulator.py b/emuvim/test/test_emulator.py deleted file mode 100755 index ef22a22..0000000 --- a/emuvim/test/test_emulator.py +++ /dev/null @@ -1,360 +0,0 @@ -""" -Test suite to automatically test emulator functionalities. -Directly interacts with the emulator through the Mininet-like -Python API. - -Does not test API endpoints. This is done in separated test suites. -""" - -import unittest -import os -import time -import subprocess -import docker -from dcemulator.net import DCNetwork -from dcemulator.node import EmulatorCompute -from mininet.node import Host, Controller, OVSSwitch, Docker -from mininet.link import TCLink -from mininet.topo import SingleSwitchTopo, LinearTopo -from mininet.log import setLogLevel -from mininet.util import quietRun -from mininet.clean import cleanup - - -class simpleTestTopology( unittest.TestCase ): - """ - Helper class to do basic test setups. - s1 -- s2 -- s3 -- ... -- sN - """ - - def __init__(self, *args, **kwargs): - self.net = None - self.s = [] # list of switches - self.h = [] # list of hosts - self.d = [] # list of docker containers - self.dc = [] # list of data centers - self.docker_cli = None - super(simpleTestTopology, self).__init__(*args, **kwargs) - - def createNet( - self, - nswitches=0, ndatacenter=0, nhosts=0, ndockers=0, - autolinkswitches=False): - """ - Creates a Mininet instance and automatically adds some - nodes to it. - """ - self.net = net = DCNetwork() - - # add some switches - for i in range(0, nswitches): - self.s.append(self.net.addSwitch('s%d' % i)) - # if specified, chain all switches - if autolinkswitches: - for i in range(0, len(self.s) - 1): - self.net.addLink(self.s[i], self.s[i + 1]) - # add some data centers - for i in range(0, ndatacenter): - self.dc.append( - self.net.addDatacenter( - 'datacenter%d' % i, - metadata={"unittest_dc": i})) - # add some hosts - for i in range(0, nhosts): - self.h.append(self.net.addHost('h%d' % i)) - # add some dockers - for i in range(0, ndockers): - self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu")) - - def startNet(self): - self.net.start() - - def stopNet(self): - self.net.stop() - - def getDockerCli(self): - """ - Helper to interact with local docker instance. 
- """ - if self.docker_cli is None: - self.docker_cli = docker.Client( - base_url='unix://var/run/docker.sock') - return self.docker_cli - - def getDockernetContainers(self): - """ - List the containers managed by dockernet - """ - return self.getDockerCli().containers(filters={"label": "com.dockernet"}) - - @staticmethod - def setUp(): - pass - - @staticmethod - def tearDown(): - cleanup() - # make sure that all pending docker containers are killed - with open(os.devnull, 'w') as devnull: - subprocess.call( - "sudo docker rm -f $(sudo docker ps --filter 'label=com.dockernet' -a -q)", - stdout=devnull, - stderr=devnull, - shell=True) - - -#@unittest.skip("disabled topology tests for development") -class testEmulatorTopology( simpleTestTopology ): - """ - Tests to check the topology API of the emulator. - """ - - def testSingleDatacenter(self): - """ - Create a single data center and add check if its switch is up - by using manually added hosts. Tests especially the - data center specific addLink method. - """ - # create network - self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - self.net.addLink(self.h[1], self.dc[0]) - # start Mininet network - self.startNet() - # check number of running nodes - assert(len(self.getDockernetContainers()) == 0) - assert(len(self.net.hosts) == 2) - assert(len(self.net.switches) == 1) - # check connectivity by using ping - assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0) - # stop Mininet network - self.stopNet() - - def testMultipleDatacenterDirect(self): - """ - Create a two data centers and interconnect them. - """ - # create network - self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - self.net.addLink(self.h[1], self.dc[1]) - self.net.addLink(self.dc[0], self.dc[1]) - # start Mininet network - self.startNet() - # check number of running nodes - assert(len(self.getDockernetContainers()) == 0) - assert(len(self.net.hosts) == 2) - assert(len(self.net.switches) == 2) - # check connectivity by using ping - assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0) - # stop Mininet network - self.stopNet() - - def testMultipleDatacenterWithIntermediateSwitches(self): - """ - Create a two data centers and interconnect them with additional - switches between them. - """ - # create network - self.createNet( - nswitches=3, ndatacenter=2, nhosts=2, ndockers=0, - autolinkswitches=True) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - self.net.addLink(self.h[1], self.dc[1]) - self.net.addLink(self.dc[0], self.s[0]) - self.net.addLink(self.s[2], self.dc[1]) - # start Mininet network - self.startNet() - # check number of running nodes - assert(len(self.getDockernetContainers()) == 0) - assert(len(self.net.hosts) == 2) - assert(len(self.net.switches) == 5) - # check connectivity by using ping - assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0) - # stop Mininet network - self.stopNet() - - -#@unittest.skip("disabled compute tests for development") -class testEmulatorCompute( simpleTestTopology ): - """ - Tests to check the emulator's API to add and remove - compute resources at runtime. - """ - - def testAddSingleComputeSingleDC(self): - """ - Adds a single compute instance to - a single DC and checks its connectivity with a - manually added host. 
- """ - # create network - self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - # start Mininet network - self.startNet() - # add compute resources - vnf1 = self.dc[0].startCompute("vnf1") - # check number of running nodes - assert(len(self.getDockernetContainers()) == 1) - assert(len(self.net.hosts) == 2) - assert(len(self.net.switches) == 1) - # check compute list result - assert(len(self.dc[0].listCompute()) == 1) - assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute)) - assert(self.dc[0].listCompute()[0].name == "vnf1") - # check connectivity by using ping - assert(self.net.ping([self.h[0], vnf1]) <= 0.0) - # stop Mininet network - self.stopNet() - - def testRemoveSingleComputeSingleDC(self): - """ - Test stop method for compute instances. - Check that the instance is really removed. - """ - # create network - self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - # start Mininet network - self.startNet() - # add compute resources - vnf1 = self.dc[0].startCompute("vnf1") - # check number of running nodes - assert(len(self.getDockernetContainers()) == 1) - assert(len(self.net.hosts) == 2) - assert(len(self.net.switches) == 1) - # check compute list result - assert(len(self.dc[0].listCompute()) == 1) - # check connectivity by using ping - assert(self.net.ping([self.h[0], vnf1]) <= 0.0) - # remove compute resources - self.dc[0].stopCompute("vnf1") - # check number of running nodes - assert(len(self.getDockernetContainers()) == 0) - assert(len(self.net.hosts) == 1) - assert(len(self.net.switches) == 1) - # check compute list result - assert(len(self.dc[0].listCompute()) == 0) - # stop Mininet network - self.stopNet() - - def testGetStatusSingleComputeSingleDC(self): - """ - Check if the getStatus functionality of EmulatorCompute - objects works well. - """ - # create network - self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - # start Mininet network - self.startNet() - # add compute resources - vnf1 = self.dc[0].startCompute("vnf1") - # check number of running nodes - assert(len(self.getDockernetContainers()) == 1) - assert(len(self.net.hosts) == 2) - assert(len(self.net.switches) == 1) - # check compute list result - assert(len(self.dc[0].listCompute()) == 1) - assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute)) - assert(self.dc[0].listCompute()[0].name == "vnf1") - # check connectivity by using ping - assert(self.net.ping([self.h[0], vnf1]) <= 0.0) - # check get status - s = self.dc[0].containers.get("vnf1").getStatus() - assert(s["name"] == "vnf1") - assert(s["state"]["Running"]) - # stop Mininet network - self.stopNet() - - def testConnectivityMultiDC(self): - """ - Test if compute instances started in different data centers - are able to talk to each other. 
- """ - # create network - self.createNet( - nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, - autolinkswitches=True) - # setup links - self.net.addLink(self.dc[0], self.s[0]) - self.net.addLink(self.dc[1], self.s[2]) - # start Mininet network - self.startNet() - # add compute resources - vnf1 = self.dc[0].startCompute("vnf1") - vnf2 = self.dc[1].startCompute("vnf2") - # check number of running nodes - assert(len(self.getDockernetContainers()) == 2) - assert(len(self.net.hosts) == 2) - assert(len(self.net.switches) == 5) - # check compute list result - assert(len(self.dc[0].listCompute()) == 1) - assert(len(self.dc[1].listCompute()) == 1) - # check connectivity by using ping - assert(self.net.ping([vnf1, vnf2]) <= 0.0) - # stop Mininet network - self.stopNet() - - def testInterleavedAddRemoveMultiDC(self): - """ - Test multiple, interleaved add and remove operations and ensure - that always all expected compute instances are reachable. - """ - # create network - self.createNet( - nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, - autolinkswitches=True) - # setup links - self.net.addLink(self.dc[0], self.s[0]) - self.net.addLink(self.dc[1], self.s[2]) - # start Mininet network - self.startNet() - # add compute resources - vnf1 = self.dc[0].startCompute("vnf1") - vnf2 = self.dc[1].startCompute("vnf2") - # check number of running nodes - assert(len(self.getDockernetContainers()) == 2) - assert(len(self.net.hosts) == 2) - assert(len(self.net.switches) == 5) - # check compute list result - assert(len(self.dc[0].listCompute()) == 1) - assert(len(self.dc[1].listCompute()) == 1) - # check connectivity by using ping - assert(self.net.ping([vnf1, vnf2]) <= 0.0) - # remove compute resources - self.dc[0].stopCompute("vnf1") - # check number of running nodes - assert(len(self.getDockernetContainers()) == 1) - assert(len(self.net.hosts) == 1) - assert(len(self.net.switches) == 5) - # check compute list result - assert(len(self.dc[0].listCompute()) == 0) - assert(len(self.dc[1].listCompute()) == 1) - # add compute resources - vnf3 = self.dc[0].startCompute("vnf3") - vnf4 = self.dc[0].startCompute("vnf4") - # check compute list result - assert(len(self.dc[0].listCompute()) == 2) - assert(len(self.dc[1].listCompute()) == 1) - assert(self.net.ping([vnf3, vnf2]) <= 0.0) - assert(self.net.ping([vnf4, vnf2]) <= 0.0) - # remove compute resources - self.dc[0].stopCompute("vnf3") - self.dc[0].stopCompute("vnf4") - self.dc[1].stopCompute("vnf2") - # check compute list result - assert(len(self.dc[0].listCompute()) == 0) - assert(len(self.dc[1].listCompute()) == 0) - # stop Mininet network - self.stopNet() - -if __name__ == '__main__': - unittest.main() diff --git a/src/emuvim/api/__init__.py b/src/emuvim/api/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/src/emuvim/api/zerorpcapi.py b/src/emuvim/api/zerorpcapi.py new file mode 100755 index 0000000..59b960c --- /dev/null +++ b/src/emuvim/api/zerorpcapi.py @@ -0,0 +1,128 @@ +""" +Distributed Cloud Emulator (dcemulator) +(c) 2015 by Manuel Peuster +""" + +import logging +import threading +import zerorpc + +logging.basicConfig(level=logging.INFO) + + +class ZeroRpcApiEndpoint(object): + """ + Simple API endpoint that offers a zerorpc-based + interface. This interface will be used by the + default command line client. + It can be used as a reference to implement + REST interfaces providing the same semantics, + like e.g. OpenStack compute API. 
+ """ + + def __init__(self, listenip, port): + self.dcs = {} + self.ip = listenip + self.port = port + logging.debug("Created API endpoint %s(%s:%d)" % ( + self.__class__.__name__, self.ip, self.port)) + + def connectDatacenter(self, dc): + self.dcs[dc.label] = dc + logging.info("Connected DC(%s) to API endpoint %s(%s:%d)" % ( + dc.label, self.__class__.__name__, self.ip, self.port)) + + def start(self): + thread = threading.Thread(target=self._api_server_thread, args=()) + thread.daemon = True + thread.start() + logging.debug("Started API endpoint %s(%s:%d)" % ( + self.__class__.__name__, self.ip, self.port)) + + def _api_server_thread(self): + s = zerorpc.Server(MultiDatacenterApi(self.dcs)) + s.bind("tcp://%s:%d" % (self.ip, self.port)) + s.run() + + +class MultiDatacenterApi(object): + """ + Just pass through the corresponding request to the + selected data center. Do not implement provisioning + logic here because will will have multiple API + endpoint implementations at the end. + """ + + def __init__(self, dcs): + self.dcs = dcs + + def compute_action_start(self, dc_label, compute_name, image, command, network): + """ + Start a new compute instance: A docker container + :param dc_label: name of the DC + :param compute_name: compute container name + :param image: image name + :param command: command to execute + :param network: + :return: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"}) + """ + # TODO what to return UUID / given name / internal name ? + logging.debug("RPC CALL: compute start") + try: + c = self.dcs.get(dc_label).startCompute( + compute_name, image=image, command=command, network=network) + return str(c.name) + except Exception as ex: + logging.exception("RPC error.") + return ex.message + + def compute_action_stop(self, dc_label, compute_name): + logging.debug("RPC CALL: compute stop") + try: + return self.dcs.get(dc_label).stopCompute(compute_name) + except Exception as ex: + logging.exception("RPC error.") + return ex.message + + def compute_list(self, dc_label): + logging.debug("RPC CALL: compute list") + try: + if dc_label is None: + # return list with all compute nodes in all DCs + all_containers = [] + for dc in self.dcs.itervalues(): + all_containers += dc.listCompute() + return [(c.name, c.getStatus()) + for c in all_containers] + else: + # return list of compute nodes for specified DC + return [(c.name, c.getStatus()) + for c in self.dcs.get(dc_label).listCompute()] + except Exception as ex: + logging.exception("RPC error.") + return ex.message + + def compute_status(self, dc_label, compute_name): + logging.debug("RPC CALL: compute status") + try: + return self.dcs.get( + dc_label).containers.get(compute_name).getStatus() + except Exception as ex: + logging.exception("RPC error.") + return ex.message + + def datacenter_list(self): + logging.debug("RPC CALL: datacenter list") + try: + return [d.getStatus() for d in self.dcs.itervalues()] + except Exception as ex: + logging.exception("RPC error.") + return ex.message + + def datacenter_status(self, dc_label): + logging.debug("RPC CALL: datacenter status") + try: + return self.dcs.get(dc_label).getStatus() + except Exception as ex: + logging.exception("RPC error.") + return ex.message diff --git a/src/emuvim/api/zerorpcapi_DCNetwork.py b/src/emuvim/api/zerorpcapi_DCNetwork.py new file mode 100755 index 0000000..27527aa --- /dev/null +++ b/src/emuvim/api/zerorpcapi_DCNetwork.py @@ -0,0 +1,104 @@ +""" +Distributed Cloud Emulator (dcemulator) +(c) 2015 by Manuel Peuster +""" + +import logging 
+import threading +import zerorpc + + +logging.basicConfig(level=logging.INFO) + + +class ZeroRpcApiEndpointDCNetwork(object): + """ + Simple API endpoint that offers a zerorpc-based + interface. This interface will be used by the + default command line client. + It can be used as a reference to implement + REST interfaces providing the same semantics, + like e.g. OpenStack compute API. + """ + + def __init__(self, listenip, port, DCNetwork=None): + if DCNetwork : + self.connectDCNetwork(DCNetwork) + self.ip = listenip + self.port = port + logging.debug("Created monitoring API endpoint %s(%s:%d)" % ( + self.__class__.__name__, self.ip, self.port)) + + def connectDCNetwork(self, net): + self.net = net + logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % ( + self.__class__.__name__, self.ip, self.port)) + + def start(self): + thread = threading.Thread(target=self._api_server_thread, args=()) + thread.daemon = True + thread.start() + logging.debug("Started API endpoint %s(%s:%d)" % ( + self.__class__.__name__, self.ip, self.port)) + + def _api_server_thread(self): + s = zerorpc.Server(DCNetworkApi(self.net)) + s.bind("tcp://%s:%d" % (self.ip, self.port)) + s.run() + + def stop(self): + logging.info("Stop the monitoring API endpoint") + return + + +class DCNetworkApi(object): + """ + The networking and monitoring commands need the scope of the + whole DC network to find the requested vnf. So this API is intended + to work with a DCNetwork. + Just pass through the corresponding request to the + selected data center network. Do not implement provisioning + logic here because will will have multiple API + endpoint implementations at the end. + """ + + def __init__(self, net): + self.net = net + + def network_action_start(self, vnf_src_name, vnf_dst_name): + # call DCNetwork method, not really datacenter specific API for now... + # provided dc name needs to be part of API endpoint + # no check if vnfs are really connected to this datacenter... + logging.debug("RPC CALL: network chain start") + try: + c = self.net.setChain( + vnf_src_name, vnf_dst_name) + return str(c) + except Exception as ex: + logging.exception("RPC error.") + return ex.message + + def network_action_stop(self, vnf_src_name, vnf_dst_name): + # call DCNetwork method, not really datacenter specific API for now... + # provided dc name needs to be part of API endpoint + # no check if vnfs are really connected to this datacenter... 
+ logging.debug("RPC CALL: network chain stop") + try: + c = self.net.setChain( + vnf_src_name, vnf_dst_name, cmd='del-flows') + return c + except Exception as ex: + logging.exception("RPC error.") + return ex.message + + # get egress(default) or ingress rate of a vnf + def monitor_get_rate(self, vnf_name, direction): + logging.debug("RPC CALL: get rate") + try: + c = self.net.monitor_agent.get_rate(vnf_name, direction) + return c + except Exception as ex: + logging.exception("RPC error.") + return ex.message + + diff --git a/src/emuvim/cli/__init__.py b/src/emuvim/cli/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/src/emuvim/cli/compute.py b/src/emuvim/cli/compute.py new file mode 100755 index 0000000..70de20a --- /dev/null +++ b/src/emuvim/cli/compute.py @@ -0,0 +1,109 @@ +""" +son-emu compute CLI +(c) 2016 by Manuel Peuster +""" + +import argparse +import pprint +from tabulate import tabulate +import zerorpc + + +pp = pprint.PrettyPrinter(indent=4) + + +class ZeroRpcClient(object): + + def __init__(self): + self.c = zerorpc.Client() + self.c.connect("tcp://127.0.0.1:4242") # TODO hard coded for now. we'll change this later + self.cmds = {} + + def execute_command(self, args): + if getattr(self, args["command"]) is not None: + # call the local method with the same name as the command arg + getattr(self, args["command"])(args) + else: + print "Command not implemented." + + def start(self, args): + nw_list = list() + if args.get("network") is not None: + networks = args.get("network").split(",") + for nw in networks: + nw_list.append({"ip": nw}) + r = self.c.compute_action_start( + args.get("datacenter"), + args.get("name"), + args.get("image"), + args.get("docker_command"), + nw_list) + pp.pprint(r) + + def stop(self, args): + r = self.c.compute_action_stop( + args.get("datacenter"), args.get("name")) + pp.pprint(r) + + def list(self, args): + r = self.c.compute_list( + args.get("datacenter")) + table = [] + for c in r: + # for each container add a line to the output table + if len(c) > 1: + name = c[0] + status = c[1] + eth0ip = None + eth0status = "down" + if len(status.get("network")) > 0: + eth0ip = status.get("network")[0][1] + eth0status = "up" if status.get( + "network")[0][3] else "down" + table.append([status.get("datacenter"), + name, + status.get("image"), + eth0ip, + eth0status, + status.get("state").get("Status")]) + headers = ["Datacenter", + "Container", + "Image", + "eth0 IP", + "eth0 status", + "Status"] + print tabulate(table, headers=headers, tablefmt="grid") + + def status(self, args): + r = self.c.compute_status( + args.get("datacenter"), args.get("name")) + pp.pprint(r) + + +parser = argparse.ArgumentParser(description='son-emu compute') +parser.add_argument( + "command", + choices=['start', 'stop', 'list', 'status'], + help="Action to be executed.") +parser.add_argument( + "--datacenter", "-d", dest="datacenter", + help="Data center to in which the compute instance should be executed") +parser.add_argument( + "--name", "-n", dest="name", + help="Name of compute instance e.g. 'vnf1'") +parser.add_argument( + "--image","-i", dest="image", + help="Name of container image to be used e.g. 'ubuntu'") +parser.add_argument( + "--dcmd", "-c", dest="docker_command", + help="Startup command of the container e.g. './start.sh'") +parser.add_argument( + "--net", dest="network", + help="Network properties of compute instance e.g. 
\ + '10.0.0.123/8' or '10.0.0.123/8,11.0.0.123/24' for multiple interfaces.") + + +def main(argv): + args = vars(parser.parse_args(argv)) + c = ZeroRpcClient() + c.execute_command(args) diff --git a/src/emuvim/cli/datacenter.py b/src/emuvim/cli/datacenter.py new file mode 100755 index 0000000..c3850fc --- /dev/null +++ b/src/emuvim/cli/datacenter.py @@ -0,0 +1,66 @@ +""" +son-emu datacenter CLI +(c) 2016 by Manuel Peuster +""" + +import argparse +import pprint +from tabulate import tabulate +import zerorpc + + +pp = pprint.PrettyPrinter(indent=4) + + +class ZeroRpcClient(object): + + def __init__(self): + self.c = zerorpc.Client() + self.c.connect("tcp://127.0.0.1:4242") # TODO hard coded for now. we'll change this later + self.cmds = {} + + def execute_command(self, args): + if getattr(self, args["command"]) is not None: + # call the local method with the same name as the command arg + getattr(self, args["command"])(args) + else: + print "Command not implemented." + + def list(self, args): + r = self.c.datacenter_list() + table = [] + for d in r: + # for each dc add a line to the output table + if len(d) > 0: + table.append([d.get("label"), + d.get("internalname"), + d.get("switch"), + d.get("n_running_containers"), + len(d.get("metadata"))]) + headers = ["Label", + "Internal Name", + "Switch", + "# Containers", + "# Metadata Items"] + print tabulate(table, headers=headers, tablefmt="grid") + + def status(self, args): + r = self.c.datacenter_status( + args.get("datacenter")) + pp.pprint(r) + + +parser = argparse.ArgumentParser(description='son-emu datacenter') +parser.add_argument( + "command", + choices=['list', 'status'], + help="Action to be executed.") +parser.add_argument( + "--datacenter", "-d", dest="datacenter", + help="Data center to which the command should be applied.") + + +def main(argv): + args = vars(parser.parse_args(argv)) + c = ZeroRpcClient() + c.execute_command(args) diff --git a/src/emuvim/cli/monitor.py b/src/emuvim/cli/monitor.py new file mode 100755 index 0000000..6885a3c --- /dev/null +++ b/src/emuvim/cli/monitor.py @@ -0,0 +1,53 @@ +""" +son-emu network CLI +(c) 2016 by Manuel Peuster +""" + +import argparse +import pprint +from tabulate import tabulate +import zerorpc + + +pp = pprint.PrettyPrinter(indent=4) + +class ZeroRpcClient(object): + + def __init__(self): + self.c = zerorpc.Client() + # TODO connect to DCNetwork API + #self.c.connect("tcp://127.0.0.1:4242") # TODO hard coded for now. we'll change this later + self.c.connect("tcp://127.0.0.1:5151") + self.cmds = {} + + def execute_command(self, args): + if getattr(self, args["command"]) is not None: + # call the local method with the same name as the command arg + getattr(self, args["command"])(args) + else: + print "Command not implemented." + + def get_rate(self, args): + r = self.c.monitor_get_rate( + args.get("vnf_name"), + args.get("direction")) + pp.pprint(r) + + +parser = argparse.ArgumentParser(description='son-emu network') +parser.add_argument( + "command", + help="Action to be executed: get_rate") +parser.add_argument( + "--vnf_name", "-vnf", dest="vnf_name", + help="vnf name to be monitored") +parser.add_argument( + "--direction", "-d", dest="direction", + help="in (ingress rate) or out (egress rate)") + +def main(argv): + print "This is the son-emu monitor CLI." 
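+    # Illustrative invocation (vnf1 is a placeholder name; the direction value
+    # is passed through to DCNetworkMonitor.get_rate, which defaults to 'tx'):
+    #   ./son-emu-cli monitor get_rate --vnf_name vnf1 --direction tx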
+ print "Arguments: %s" % str(argv) + args = vars(parser.parse_args(argv)) + c = ZeroRpcClient() + c.execute_command(args) diff --git a/src/emuvim/cli/network.py b/src/emuvim/cli/network.py new file mode 100755 index 0000000..8d4219b --- /dev/null +++ b/src/emuvim/cli/network.py @@ -0,0 +1,62 @@ +""" +son-emu network CLI +(c) 2016 by Manuel Peuster +""" + +import argparse +import pprint +from tabulate import tabulate +import zerorpc + + +pp = pprint.PrettyPrinter(indent=4) + +class ZeroRpcClient(object): + + def __init__(self): + self.c = zerorpc.Client() + # TODO connect to DCNetwork API + #self.c.connect("tcp://127.0.0.1:4242") # TODO hard coded for now. we'll change this later + self.c.connect("tcp://127.0.0.1:5151") + self.cmds = {} + + def execute_command(self, args): + if getattr(self, args["command"]) is not None: + # call the local method with the same name as the command arg + getattr(self, args["command"])(args) + else: + print "Command not implemented." + + def add(self, args): + r = self.c.network_action_start( + #args.get("datacenter"), + args.get("source"), + args.get("destination")) + pp.pprint(r) + + def remove(self, args): + r = self.c.network_action_stop( + #args.get("datacenter"), + args.get("source"), + args.get("destination")) + pp.pprint(r) + + +parser = argparse.ArgumentParser(description='son-emu network') +parser.add_argument( + "command", + help="Action to be executed: add|remove") +parser.add_argument( + "--datacenter", "-d", dest="datacenter", + help="Data center to in which the network action should be initiated") +parser.add_argument( + "--source", "-src", dest="source", + help="vnf name of the source of the chain") +parser.add_argument( + "--destination", "-dst", dest="destination", + help="vnf name of the destination of the chain") + +def main(argv): + args = vars(parser.parse_args(argv)) + c = ZeroRpcClient() + c.execute_command(args) diff --git a/src/emuvim/cli/son-emu-cli b/src/emuvim/cli/son-emu-cli new file mode 100755 index 0000000..61cbd43 --- /dev/null +++ b/src/emuvim/cli/son-emu-cli @@ -0,0 +1,35 @@ +#!/usr/bin/python +""" + Simple CLI client to interact with a running emulator. + + (c) 2016 by Manuel Peuster + + The CLI offers different tools, e.g., compute, network, ... + Each of these tools is implemented as an independent Python + module. 
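+ The tools dispatched below are: compute, network, datacenter, monitor.
+ Illustrative calls (names and values are placeholders):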
+
+    cli compute start dc1 my_name flavor_a
+    cli network create dc1 11.0.0.0/24
+"""
+
+import sys
+import compute
+import network
+import datacenter
+import monitor
+
+def main():
+    if len(sys.argv) < 2:
+        print "Usage: son-emu-cli compute|network|datacenter|monitor <arguments>"
+        exit(0)
+    if sys.argv[1] == "compute":
+        compute.main(sys.argv[2:])
+    elif sys.argv[1] == "network":
+        network.main(sys.argv[2:])
+    elif sys.argv[1] == "datacenter":
+        datacenter.main(sys.argv[2:])
+    elif sys.argv[1] == "monitor":
+        monitor.main(sys.argv[2:])
+
+if __name__ == '__main__':
+    main()
diff --git a/src/emuvim/dcemulator/__init__.py b/src/emuvim/dcemulator/__init__.py
new file mode 100755
index 0000000..64f6616
--- /dev/null
+++ b/src/emuvim/dcemulator/__init__.py
@@ -0,0 +1,4 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster
+"""
\ No newline at end of file
diff --git a/src/emuvim/dcemulator/link.py b/src/emuvim/dcemulator/link.py
new file mode 100755
index 0000000..e69de29
diff --git a/src/emuvim/dcemulator/monitoring.py b/src/emuvim/dcemulator/monitoring.py
new file mode 100755
index 0000000..094c09b
--- /dev/null
+++ b/src/emuvim/dcemulator/monitoring.py
@@ -0,0 +1,62 @@
+__author__ = 'Administrator'
+
+import urllib2
+import logging
+from mininet.node import OVSSwitch
+import ast
+logging.basicConfig(level=logging.INFO)
+
+"""
+Class to read OpenFlow stats from the Ryu controller of the DCNetwork
+"""
+
+class DCNetworkMonitor():
+    def __init__(self, net):
+        self.net = net
+        # REST API of the Ryu controller (ofctl_rest)
+        self.ip = '0.0.0.0'
+        self.port = '8080'
+        self.REST_api = 'http://{0}:{1}'.format(self.ip, self.port)
+
+
+    def get_rate(self, vnf_name, direction='tx'):
+        try:
+            vnf_switch = self.net.DCNetwork_graph.neighbors(str(vnf_name))
+
+            if len(vnf_switch) > 1:
+                logging.info("vnf: {0} has multiple ports".format(vnf_name))
+                return
+            elif len(vnf_switch) == 0:
+                logging.info("vnf: {0} is not connected".format(vnf_name))
+                return
+            else:
+                vnf_switch = vnf_switch[0]
+            next_node = self.net.getNodeByName(vnf_switch)
+
+            if not isinstance( next_node, OVSSwitch ):
+                logging.info("vnf: {0} is not connected to switch".format(vnf_name))
+                return
+
+            mon_port = self.net.DCNetwork_graph[vnf_name][vnf_switch]['dst_port']
+            switch_dpid = int(str(next_node.dpid), 16)
+
+            ret = self.REST_cmd('stats/port', switch_dpid)
+            port_stat_dict = ast.literal_eval(ret)
+            # return the stats of the monitored port; fall back to the raw reply
+            for port_stat in port_stat_dict[str(switch_dpid)]:
+                if port_stat['port_no'] == mon_port:
+                    return port_stat
+
+            return ret
+
+        except Exception as ex:
+            logging.exception("get_rate error.")
+            return ex.message
+
+
+    def REST_cmd(self, prefix, dpid):
+        url = self.REST_api + '/' + str(prefix) + '/' + str(dpid)
+        req = urllib2.Request(url)
+        ret = urllib2.urlopen(req).read()
+        return ret
\ No newline at end of file
diff --git a/src/emuvim/dcemulator/net.py b/src/emuvim/dcemulator/net.py
new file mode 100755
index 0000000..20ab33a
--- /dev/null
+++ b/src/emuvim/dcemulator/net.py
@@ -0,0 +1,206 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster
+"""
+import logging
+
+import site
+from subprocess import Popen
+import os
+
+from mininet.net import Dockernet
+from mininet.node import Controller, OVSSwitch, OVSKernelSwitch, Switch, Docker, Host, RemoteController
+from mininet.cli import CLI
+from mininet.log import setLogLevel, info, debug
+from mininet.link import TCLink, Link
+import networkx as nx
+from monitoring import DCNetworkMonitor
+
+from node import Datacenter, EmulatorCompute
+
+
+class DCNetwork(Dockernet):
+    """
+    Wraps the original
Mininet/Dockernet class and provides + methods to add data centers, switches, etc. + + This class is used by topology definition scripts. + """ + + def __init__(self, **kwargs): + self.dcs = {} + # create a Mininet/Dockernet network + # call original Docker.__init__ and setup default controller + #Dockernet.__init__( + # self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs) + Dockernet.__init__( + self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs) + self.addController('c0', controller=RemoteController) + + # graph of the complete DC network + self.DCNetwork_graph=nx.DiGraph() + + # monitoring agent + self.monitor_agent = DCNetworkMonitor(self) + + # start Ryu controller + self.startRyu() + + + def addDatacenter(self, label, metadata={}): + """ + Create and add a logical cloud data center to the network. + """ + if label in self.dcs: + raise Exception("Data center label already exists: %s" % label) + dc = Datacenter(label, metadata=metadata) + dc.net = self # set reference to network + self.dcs[label] = dc + dc.create() # finally create the data center in our Mininet instance + logging.info("added data center: %s" % label) + return dc + + def addLink(self, node1, node2, **params): + """ + Able to handle Datacenter objects as link + end points. + """ + assert node1 is not None + assert node2 is not None + logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2))) + # ensure type of node1 + if isinstance( node1, basestring ): + if node1 in self.dcs: + node1 = self.dcs[node1].switch + if isinstance( node1, Datacenter ): + node1 = node1.switch + # ensure type of node2 + if isinstance( node2, basestring ): + if node2 in self.dcs: + node2 = self.dcs[node2].switch + if isinstance( node2, Datacenter ): + node2 = node2.switch + # try to give containers a default IP + if isinstance( node1, Docker ): + if not "params1" in params: + params["params1"] = {} + if not "ip" in params["params1"]: + params["params1"]["ip"] = self.getNextIp() + if isinstance( node2, Docker ): + if not "params2" in params: + params["params2"] = {} + if not "ip" in params["params2"]: + params["params2"]["ip"] = self.getNextIp() + + link = Dockernet.addLink(self, node1, node2, **params) # TODO we need TCLinks with user defined performance here + + # add edge and assigned port number to graph in both directions between node1 and node2 + self.DCNetwork_graph.add_edge(node1.name, node2.name, \ + {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]}) + self.DCNetwork_graph.add_edge(node2.name, node1.name, \ + {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]}) + + return link + + def addDocker( self, label, **params ): + """ + Wrapper for addDocker method to use custom container class. + """ + self.DCNetwork_graph.add_node(label) + return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params) + + def removeDocker( self, label, **params ): + """ + Wrapper for removeDocker method to update graph. + """ + self.DCNetwork_graph.remove_node(label) + return Dockernet.removeDocker(self, label, **params) + + def addSwitch( self, name, add_to_graph=True, **params ): + """ + Wrapper for addSwitch method to store switch also in graph. + """ + if add_to_graph: + self.DCNetwork_graph.add_node(name) + return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params) + + def getAllContainers(self): + """ + Returns a list with all containers within all data centers. 
+ """ + all_containers = [] + for dc in self.dcs.itervalues(): + all_containers += dc.listCompute() + return all_containers + + def start(self): + # start + for dc in self.dcs.itervalues(): + dc.start() + Dockernet.start(self) + + def stop(self): + # stop Ryu controller + self.ryu_process.terminate() + #self.ryu_process.kill() + Dockernet.stop(self) + + def CLI(self): + CLI(self) + + # to remove chain do setChain( src, dst, cmd='del-flows') + def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'): + # get shortest path + path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name) + logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path)) + + current_hop = vnf_src_name + for i in range(0,len(path)): + next_hop = path[path.index(current_hop)+1] + next_node = self.getNodeByName(next_hop) + + if next_hop == vnf_dst_name: + return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name) + elif not isinstance( next_node, OVSSwitch ): + logging.info("Next node: {0} is not a switch".format(next_hop)) + return "Next node: {0} is not a switch".format(next_hop) + + + switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port'] + next2_hop = path[path.index(current_hop)+2] + switch_outport = self.DCNetwork_graph[next_hop][next2_hop]['src_port'] + + logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport)) + # set of entry via ovs-ofctl + # TODO use rest API of ryu to set flow entries to correct witch dpid + if isinstance( next_node, OVSSwitch ): + match = 'in_port=%s' % switch_inport + + if cmd=='add-flow': + action = 'action=%s' % switch_outport + s = ',' + ofcmd = s.join([match,action]) + elif cmd=='del-flows': + ofcmd = match + else: + ofcmd='' + + next_node.dpctl(cmd, ofcmd) + + current_hop = next_hop + + return "destination node: {0} not reached".format(vnf_dst_name) + + # start Ryu Openflow controller as Remote Controller for the DCNetwork + def startRyu(self): + # start Ryu controller with rest-API + python_install_path = site.getsitepackages()[0] + ryu_path = python_install_path + '/ryu/app/simple_switch_13.py' + ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py' + # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet + # Ryu still uses 6633 as default + ryu_option = '--ofp-tcp-listen-port' + ryu_of_port = '6653' + ryu_cmd = 'ryu-manager' + FNULL = open(os.devnull, 'w') + self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL) \ No newline at end of file diff --git a/src/emuvim/dcemulator/node.py b/src/emuvim/dcemulator/node.py new file mode 100755 index 0000000..336126c --- /dev/null +++ b/src/emuvim/dcemulator/node.py @@ -0,0 +1,167 @@ +""" +Distributed Cloud Emulator (dcemulator) +(c) 2015 by Manuel Peuster +""" +from mininet.node import Docker +import logging + + +DCDPID_BASE = 1000 # start of switch dpid's used for data center switches + + +class EmulatorCompute(Docker): + """ + Emulator specific compute node class. + Inherits from Dockernet's Docker host class. + Represents a single container connected to a (logical) + data center. + We can add emulator specific helper functions to it. 
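+    Helpers currently provided: getNetworkStatus() and getStatus().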
+ """ + + def __init__( + self, name, dimage, **kwargs): + logging.debug("Create EmulatorCompute instance: %s" % name) + self.datacenter = None # pointer to current DC + + # call original Docker.__init__ + Docker.__init__(self, name, dimage, **kwargs) + + def getNetworkStatus(self): + """ + Helper method to receive information about the virtual networks + this compute instance is connected to. + """ + # format list of tuples (name, Ip, MAC, isUp, status) + return [(str(i), i.IP(), i.MAC(), i.isUp(), i.status()) + for i in self.intfList()] + + def getStatus(self): + """ + Helper method to receive information about this compute instance. + """ + status = {} + status["name"] = self.name + status["network"] = self.getNetworkStatus() + status["image"] = self.dimage + status["cpu_quota"] = self.cpu_quota + status["cpu_period"] = self.cpu_period + status["cpu_shares"] = self.cpu_shares + status["cpuset"] = self.cpuset + status["mem_limit"] = self.mem_limit + status["memswap_limit"] = self.memswap_limit + status["state"] = self.dcli.inspect_container(self.dc)["State"] + status["id"] = self.dcli.inspect_container(self.dc)["Id"] + status["datacenter"] = (None if self.datacenter is None + else self.datacenter.label) + return status + + +class Datacenter(object): + """ + Represents a logical data center to which compute resources + (Docker containers) can be added at runtime. + + Will also implement resource bookkeeping in later versions. + """ + + DC_COUNTER = 1 + + def __init__(self, label, metadata={}): + self.net = None # DCNetwork to which we belong + # each node (DC) has a short internal name used by Mininet + # this is caused by Mininets naming limitations for swtiches etc. + self.name = "dc%d" % Datacenter.DC_COUNTER + Datacenter.DC_COUNTER += 1 + # use this for user defined names that can be longer than self.name + self.label = label + # dict to store arbitrary metadata (e.g. latitude and longitude) + self.metadata = metadata + self.switch = None # first prototype assumes one "bigswitch" per DC + self.containers = {} # keep track of running containers + + def _get_next_dc_dpid(self): + global DCDPID_BASE + DCDPID_BASE += 1 + return DCDPID_BASE + + def create(self): + """ + Each data center is represented by a single switch to which + compute resources can be connected at run time. + + TODO: This will be changed in the future to support multiple networks + per data center + """ + self.switch = self.net.addSwitch( + "%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:]) + logging.debug("created data center switch: %s" % str(self.switch)) + + def start(self): + pass + + def startCompute(self, name, image=None, command=None, network=None): + """ + Create a new container as compute resource and connect it to this + data center. + :param name: name (string) + :param image: image name (string) + :param command: command (string) + :param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"}) + :return: + """ + assert name is not None + # no duplications + if name in [c.name for c in self.net.getAllContainers()]: + raise Exception("Container with name %s already exists." 
% name) + # set default parameter + if image is None: + image = "ubuntu" + if network is None: + network = {} # {"ip": "10.0.0.254/8"} + if isinstance(network, dict): + network = [network] # if we have only one network, put it in a list + if isinstance(network, list): + if len(network) < 1: + network.append({}) + + # create the container + d = self.net.addDocker("%s" % (name), dimage=image, dcmd=command) + # connect all given networks + for nw in network: + self.net.addLink(d, self.switch, params1=nw) + # do bookkeeping + self.containers[name] = d + d.datacenter = self + return d # we might use UUIDs for naming later on + + def stopCompute(self, name): + """ + Stop and remove a container from this data center. + """ + assert name is not None + if name not in self.containers: + raise Exception("Container with name %s not found." % name) + self.net.removeLink( + link=None, node1=self.containers[name], node2=self.switch) + self.net.removeDocker("%s" % (name)) + del self.containers[name] + return True + + def listCompute(self): + """ + Return a list of all running containers assigned to this + data center. + """ + return list(self.containers.itervalues()) + + def getStatus(self): + """ + Return a dict with status information about this DC. + """ + return { + "label": self.label, + "internalname": self.name, + "switch": self.switch.name, + "n_running_containers": len(self.containers), + "metadata": self.metadata + } diff --git a/src/emuvim/example_topology.py b/src/emuvim/example_topology.py new file mode 100755 index 0000000..eba751c --- /dev/null +++ b/src/emuvim/example_topology.py @@ -0,0 +1,121 @@ +""" +This is an example topology for the distributed cloud emulator (dcemulator). +(c) 2015 by Manuel Peuster + + +This is an example that shows how a user of the emulation tool can +define network topologies with multiple emulated cloud data centers. + +The definition is done with a Python API which looks very similar to the +Mininet API (in fact it is a wrapper for it). + +We only specify the topology *between* data centers not within a single +data center (data center internal setups or placements are not of interest, +we want to experiment with VNF chains deployed across multiple PoPs). + +The original Mininet API has to be completely hidden and not be used by this +script. +""" +import logging +from mininet.log import setLogLevel +from dcemulator.net import DCNetwork +from api.zerorpcapi import ZeroRpcApiEndpoint +from api.zerorpcapi_DCNetwork import ZeroRpcApiEndpointDCNetwork + +logging.basicConfig(level=logging.INFO) + + +def create_topology1(): + """ + 1. Create a data center network object (DCNetwork) + """ + net = DCNetwork() + + """ + 1b. add a monitoring agent to the DCNetwork + """ + mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151) + mon_api.connectDCNetwork(net) + mon_api.start() + """ + 2. Add (logical) data centers to the topology + (each data center is one "bigswitch" in our simplified + first prototype) + """ + dc1 = net.addDatacenter("datacenter1") + dc2 = net.addDatacenter("datacenter2") + dc3 = net.addDatacenter("long_data_center_name3") + dc4 = net.addDatacenter( + "datacenter4", + metadata={"mydata": "we can also add arbitrary metadata to each DC"}) + + """ + 3. You can add additional SDN switches for data center + interconnections to the network. + """ + s1 = net.addSwitch("s1") + + """ + 4. Add links between your data centers and additional switches + to define you topology. + These links can use Mininet's features to limit bw, add delay or jitter. 
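+    An illustrative Mininet-style call would be
+    net.addLink(dc1, dc2, delay="10ms", bw=100), but note the TODO in
+    DCNetwork.addLink: TCLink parameters are not wired through yet in this
+    commit, so the links below are created without such limits.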
+    """
+    net.addLink(dc1, dc2)
+    net.addLink("datacenter1", s1)
+    net.addLink(s1, dc3)
+    net.addLink(s1, "datacenter4")
+
+    """
+    5. We want to access and control our data centers from the outside,
+    e.g., we want to connect an orchestrator to start/stop compute
+    resources, a.k.a. VNFs (represented by Docker containers in the emulator).
+
+    So we need to instantiate API endpoints (e.g. a zerorpc or REST
+    interface). Depending on the endpoint implementations, we can connect
+    one or more data centers to it, which can then be controlled through
+    this API, e.g., start/stop/list compute instances.
+    """
+    # create a new instance of an endpoint implementation
+    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
+    # connect data centers to this endpoint
+    zapi1.connectDatacenter(dc1)
+    zapi1.connectDatacenter(dc2)
+    zapi1.connectDatacenter(dc3)
+    zapi1.connectDatacenter(dc4)
+    # run API endpoint server (in another thread, don't block)
+    zapi1.start()
+
+    """
+    5.1. For our example, we create a second endpoint to illustrate that
+    this is supported by our design. This feature allows us to have
+    one API endpoint for each data center. This makes the emulation
+    environment more realistic because you can easily create one
+    OpenStack-like REST API endpoint for *each* data center.
+    This will look like a real-world multi PoP/data center deployment
+    from the perspective of an orchestrator.
+    """
+    zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
+    zapi2.connectDatacenter(dc3)
+    zapi2.connectDatacenter(dc4)
+    zapi2.start()
+
+    """
+    6. Finally we are done and can start our network (the emulator).
+    We can also enter the Mininet CLI to interact with our
+    compute resources (just like in default Mininet).
+    But we can also implement fully automated experiments that
+    can be executed again and again.
+    """
+    net.start()
+    net.CLI()
+    # when the user types exit in the CLI, we stop the emulator
+    net.stop()
+
+
+def main():
+    setLogLevel('info')  # set Mininet loglevel
+    create_topology1()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/emuvim/test/__main__.py b/src/emuvim/test/__main__.py
new file mode 100755
index 0000000..f7fa66d
--- /dev/null
+++ b/src/emuvim/test/__main__.py
@@ -0,0 +1,7 @@
+import runner
+import os
+
+
+if __name__ == '__main__':
+    thisdir = os.path.dirname( os.path.realpath( __file__ ) )
+    runner.main(thisdir)
diff --git a/src/emuvim/test/runner.py b/src/emuvim/test/runner.py
new file mode 100755
index 0000000..469a99e
--- /dev/null
+++ b/src/emuvim/test/runner.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+"""
+Run all tests
+ -v : verbose output
+ -e : emulator test only (no API tests)
+ -a : API tests only
+"""
+
+from unittest import defaultTestLoader, TextTestRunner, TestSuite
+import os
+import sys
+from mininet.util import ensureRoot
+from mininet.clean import cleanup
+from mininet.log import setLogLevel
+
+
+def runTests( testDir, verbosity=1, emuonly=False, apionly=False ):
+    "discover and run all tests in testDir"
+    # ensure import paths work
+    sys.path.append("%s/.."
% testDir) + # ensure root and cleanup before starting tests + ensureRoot() + cleanup() + # discover all tests in testDir + testSuite = defaultTestLoader.discover( testDir ) + if emuonly: + testSuiteFiltered = [s for s in testSuite if "Emulator" in str(s)] + testSuite = TestSuite() + testSuite.addTests(testSuiteFiltered) + if apionly: + testSuiteFiltered = [s for s in testSuite if "Api" in str(s)] + testSuite = TestSuite() + testSuite.addTests(testSuiteFiltered) + + # run tests + TextTestRunner( verbosity=verbosity ).run( testSuite ) + + +def main(thisdir): + setLogLevel( 'warning' ) + # get the directory containing example tests + vlevel = 2 if '-v' in sys.argv else 1 + emuonly = ('-e' in sys.argv) + apionly = ('-a' in sys.argv) + runTests( + testDir=thisdir, verbosity=vlevel, emuonly=emuonly, apionly=apionly) + + +if __name__ == '__main__': + thisdir = os.path.dirname( os.path.realpath( __file__ ) ) + main(thisdir) diff --git a/src/emuvim/test/test_api_zerorpc.py b/src/emuvim/test/test_api_zerorpc.py new file mode 100755 index 0000000..2830872 --- /dev/null +++ b/src/emuvim/test/test_api_zerorpc.py @@ -0,0 +1 @@ +#TODO we'll need this at some time. But I'am lazy. A good REST API seems to be more important. diff --git a/src/emuvim/test/test_emulator.py b/src/emuvim/test/test_emulator.py new file mode 100755 index 0000000..ef22a22 --- /dev/null +++ b/src/emuvim/test/test_emulator.py @@ -0,0 +1,360 @@ +""" +Test suite to automatically test emulator functionalities. +Directly interacts with the emulator through the Mininet-like +Python API. + +Does not test API endpoints. This is done in separated test suites. +""" + +import unittest +import os +import time +import subprocess +import docker +from dcemulator.net import DCNetwork +from dcemulator.node import EmulatorCompute +from mininet.node import Host, Controller, OVSSwitch, Docker +from mininet.link import TCLink +from mininet.topo import SingleSwitchTopo, LinearTopo +from mininet.log import setLogLevel +from mininet.util import quietRun +from mininet.clean import cleanup + + +class simpleTestTopology( unittest.TestCase ): + """ + Helper class to do basic test setups. + s1 -- s2 -- s3 -- ... -- sN + """ + + def __init__(self, *args, **kwargs): + self.net = None + self.s = [] # list of switches + self.h = [] # list of hosts + self.d = [] # list of docker containers + self.dc = [] # list of data centers + self.docker_cli = None + super(simpleTestTopology, self).__init__(*args, **kwargs) + + def createNet( + self, + nswitches=0, ndatacenter=0, nhosts=0, ndockers=0, + autolinkswitches=False): + """ + Creates a Mininet instance and automatically adds some + nodes to it. + """ + self.net = net = DCNetwork() + + # add some switches + for i in range(0, nswitches): + self.s.append(self.net.addSwitch('s%d' % i)) + # if specified, chain all switches + if autolinkswitches: + for i in range(0, len(self.s) - 1): + self.net.addLink(self.s[i], self.s[i + 1]) + # add some data centers + for i in range(0, ndatacenter): + self.dc.append( + self.net.addDatacenter( + 'datacenter%d' % i, + metadata={"unittest_dc": i})) + # add some hosts + for i in range(0, nhosts): + self.h.append(self.net.addHost('h%d' % i)) + # add some dockers + for i in range(0, ndockers): + self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu")) + + def startNet(self): + self.net.start() + + def stopNet(self): + self.net.stop() + + def getDockerCli(self): + """ + Helper to interact with local docker instance. 
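+        (Used by getDockernetContainers() below, which filters on the
+        'com.dockernet' label.)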
+ """ + if self.docker_cli is None: + self.docker_cli = docker.Client( + base_url='unix://var/run/docker.sock') + return self.docker_cli + + def getDockernetContainers(self): + """ + List the containers managed by dockernet + """ + return self.getDockerCli().containers(filters={"label": "com.dockernet"}) + + @staticmethod + def setUp(): + pass + + @staticmethod + def tearDown(): + cleanup() + # make sure that all pending docker containers are killed + with open(os.devnull, 'w') as devnull: + subprocess.call( + "sudo docker rm -f $(sudo docker ps --filter 'label=com.dockernet' -a -q)", + stdout=devnull, + stderr=devnull, + shell=True) + + +#@unittest.skip("disabled topology tests for development") +class testEmulatorTopology( simpleTestTopology ): + """ + Tests to check the topology API of the emulator. + """ + + def testSingleDatacenter(self): + """ + Create a single data center and add check if its switch is up + by using manually added hosts. Tests especially the + data center specific addLink method. + """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + self.net.addLink(self.h[1], self.dc[0]) + # start Mininet network + self.startNet() + # check number of running nodes + assert(len(self.getDockernetContainers()) == 0) + assert(len(self.net.hosts) == 2) + assert(len(self.net.switches) == 1) + # check connectivity by using ping + assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0) + # stop Mininet network + self.stopNet() + + def testMultipleDatacenterDirect(self): + """ + Create a two data centers and interconnect them. + """ + # create network + self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + self.net.addLink(self.h[1], self.dc[1]) + self.net.addLink(self.dc[0], self.dc[1]) + # start Mininet network + self.startNet() + # check number of running nodes + assert(len(self.getDockernetContainers()) == 0) + assert(len(self.net.hosts) == 2) + assert(len(self.net.switches) == 2) + # check connectivity by using ping + assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0) + # stop Mininet network + self.stopNet() + + def testMultipleDatacenterWithIntermediateSwitches(self): + """ + Create a two data centers and interconnect them with additional + switches between them. + """ + # create network + self.createNet( + nswitches=3, ndatacenter=2, nhosts=2, ndockers=0, + autolinkswitches=True) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + self.net.addLink(self.h[1], self.dc[1]) + self.net.addLink(self.dc[0], self.s[0]) + self.net.addLink(self.s[2], self.dc[1]) + # start Mininet network + self.startNet() + # check number of running nodes + assert(len(self.getDockernetContainers()) == 0) + assert(len(self.net.hosts) == 2) + assert(len(self.net.switches) == 5) + # check connectivity by using ping + assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0) + # stop Mininet network + self.stopNet() + + +#@unittest.skip("disabled compute tests for development") +class testEmulatorCompute( simpleTestTopology ): + """ + Tests to check the emulator's API to add and remove + compute resources at runtime. + """ + + def testAddSingleComputeSingleDC(self): + """ + Adds a single compute instance to + a single DC and checks its connectivity with a + manually added host. 
+ """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + # check number of running nodes + assert(len(self.getDockernetContainers()) == 1) + assert(len(self.net.hosts) == 2) + assert(len(self.net.switches) == 1) + # check compute list result + assert(len(self.dc[0].listCompute()) == 1) + assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute)) + assert(self.dc[0].listCompute()[0].name == "vnf1") + # check connectivity by using ping + assert(self.net.ping([self.h[0], vnf1]) <= 0.0) + # stop Mininet network + self.stopNet() + + def testRemoveSingleComputeSingleDC(self): + """ + Test stop method for compute instances. + Check that the instance is really removed. + """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + # check number of running nodes + assert(len(self.getDockernetContainers()) == 1) + assert(len(self.net.hosts) == 2) + assert(len(self.net.switches) == 1) + # check compute list result + assert(len(self.dc[0].listCompute()) == 1) + # check connectivity by using ping + assert(self.net.ping([self.h[0], vnf1]) <= 0.0) + # remove compute resources + self.dc[0].stopCompute("vnf1") + # check number of running nodes + assert(len(self.getDockernetContainers()) == 0) + assert(len(self.net.hosts) == 1) + assert(len(self.net.switches) == 1) + # check compute list result + assert(len(self.dc[0].listCompute()) == 0) + # stop Mininet network + self.stopNet() + + def testGetStatusSingleComputeSingleDC(self): + """ + Check if the getStatus functionality of EmulatorCompute + objects works well. + """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + # check number of running nodes + assert(len(self.getDockernetContainers()) == 1) + assert(len(self.net.hosts) == 2) + assert(len(self.net.switches) == 1) + # check compute list result + assert(len(self.dc[0].listCompute()) == 1) + assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute)) + assert(self.dc[0].listCompute()[0].name == "vnf1") + # check connectivity by using ping + assert(self.net.ping([self.h[0], vnf1]) <= 0.0) + # check get status + s = self.dc[0].containers.get("vnf1").getStatus() + assert(s["name"] == "vnf1") + assert(s["state"]["Running"]) + # stop Mininet network + self.stopNet() + + def testConnectivityMultiDC(self): + """ + Test if compute instances started in different data centers + are able to talk to each other. 
+ """ + # create network + self.createNet( + nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, + autolinkswitches=True) + # setup links + self.net.addLink(self.dc[0], self.s[0]) + self.net.addLink(self.dc[1], self.s[2]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + vnf2 = self.dc[1].startCompute("vnf2") + # check number of running nodes + assert(len(self.getDockernetContainers()) == 2) + assert(len(self.net.hosts) == 2) + assert(len(self.net.switches) == 5) + # check compute list result + assert(len(self.dc[0].listCompute()) == 1) + assert(len(self.dc[1].listCompute()) == 1) + # check connectivity by using ping + assert(self.net.ping([vnf1, vnf2]) <= 0.0) + # stop Mininet network + self.stopNet() + + def testInterleavedAddRemoveMultiDC(self): + """ + Test multiple, interleaved add and remove operations and ensure + that always all expected compute instances are reachable. + """ + # create network + self.createNet( + nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, + autolinkswitches=True) + # setup links + self.net.addLink(self.dc[0], self.s[0]) + self.net.addLink(self.dc[1], self.s[2]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + vnf2 = self.dc[1].startCompute("vnf2") + # check number of running nodes + assert(len(self.getDockernetContainers()) == 2) + assert(len(self.net.hosts) == 2) + assert(len(self.net.switches) == 5) + # check compute list result + assert(len(self.dc[0].listCompute()) == 1) + assert(len(self.dc[1].listCompute()) == 1) + # check connectivity by using ping + assert(self.net.ping([vnf1, vnf2]) <= 0.0) + # remove compute resources + self.dc[0].stopCompute("vnf1") + # check number of running nodes + assert(len(self.getDockernetContainers()) == 1) + assert(len(self.net.hosts) == 1) + assert(len(self.net.switches) == 5) + # check compute list result + assert(len(self.dc[0].listCompute()) == 0) + assert(len(self.dc[1].listCompute()) == 1) + # add compute resources + vnf3 = self.dc[0].startCompute("vnf3") + vnf4 = self.dc[0].startCompute("vnf4") + # check compute list result + assert(len(self.dc[0].listCompute()) == 2) + assert(len(self.dc[1].listCompute()) == 1) + assert(self.net.ping([vnf3, vnf2]) <= 0.0) + assert(self.net.ping([vnf4, vnf2]) <= 0.0) + # remove compute resources + self.dc[0].stopCompute("vnf3") + self.dc[0].stopCompute("vnf4") + self.dc[1].stopCompute("vnf2") + # check compute list result + assert(len(self.dc[0].listCompute()) == 0) + assert(len(self.dc[1].listCompute()) == 0) + # stop Mininet network + self.stopNet() + +if __name__ == '__main__': + unittest.main()
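+
+# Note: this suite is driven by src/emuvim/test/runner.py, which calls
+# ensureRoot() and cleanup() before discovering the tests, so it must be run
+# with root privileges; the -v, -e and -a flags documented there select
+# verbosity and the emulator-only / API-only subsets.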