'requests',
'prometheus_client',
'urllib3',
- 'ipaddress'
+ 'ipaddress',
+ 'simplejson'
],
zip_safe=False,
entry_points={
+++ /dev/null
-This is a placeholder. This folder will contain a OpenStack/HEAT like interface to the emulator.
\ No newline at end of file
--- /dev/null
+import json
+import logging
+import copy
+
+from mininet.node import OVSSwitch
+
+from flask import Flask
+from flask import Response, request
+from flask_restful import Api, Resource
+from mininet.link import Link
+import uuid
+
+
class ChainApi(Resource):
    """
    The chain API is a component that is not used in OpenStack.
    It is a custom built REST API that can be used to create network chains and loadbalancers.

    It embeds a Flask application, registers all chain-, loadbalancer- and
    topology-related REST resources on it and serves them at the given
    IP address and port.
    """

    def __init__(self, inc_ip, inc_port, manage):
        # setup Flask
        self.app = Flask(__name__)
        self.api = Api(self.app)
        # IP address and TCP port the REST endpoint binds to
        self.ip = inc_ip
        self.port = inc_port
        # management object that performs the actual chaining / LB operations
        self.manage = manage
        # every incoming request with a body is appended to this file as a
        # replayable curl command (see dump_playbook below)
        self.playbook_file = '/tmp/son-emu-requests.log'
        self.api.add_resource(ChainVersionsList, "/",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(ChainList, "/v1/chain/list",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(ChainVnfInterfaces, "/v1/chain/<src_vnf>/<src_intfs>/<dst_vnf>/<dst_intfs>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(ChainVnfDcStackInterfaces,
                              "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(BalanceHostList, "/v1/lb/list",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(BalanceHost, "/v1/lb/<vnf_src_name>/<vnf_src_interface>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(BalanceHostDcStack, "/v1/lb/<src_dc>/<src_stack>/<vnf_src_name>/<vnf_src_interface>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(QueryTopology, "/v1/topo",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(Shutdown, "/shutdown")

        # allow cross-origin requests (e.g. from a browser-based dashboard)
        @self.app.after_request
        def add_access_control_header(response):
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

    def _start_flask(self):
        """Start the Flask development server (blocking call)."""
        logging.info("Starting %s endpoint @ http://%s:%d" % ("ChainDummyApi", self.ip, self.port))
        if self.app is not None:
            # record every request in the playbook before it is handled
            self.app.before_request(self.dump_playbook)
            self.app.run(self.ip, self.port, debug=True, use_reloader=False)

    def dump_playbook(self):
        """Append the current request to the playbook log as a curl command."""
        # the lock is shared with the management object; presumably it guards
        # concurrent writers of the playbook file — confirm against callers
        with self.manage.lock:
            with open(self.playbook_file, 'a') as logfile:
                if len(request.data) > 0:
                    data = "# CHAIN API\n"
                    data += "curl -X {type} -H \"Content-type: application/json\" -d '{data}' {url}".format(type=request.method,
                                                                                                           data=request.data,
                                                                                                           url=request.url)
                    logfile.write(data + "\n")
+
+
class Shutdown(Resource):
    """
    Handles GET requests to "/shutdown".

    Stops the embedded werkzeug development server that serves the chain API.
    """

    def get(self):
        # fix: typo "beeing" -> "being" in the log message
        logging.debug("%s is being shut down" % __name__)
        # werkzeug exposes its shutdown hook via the WSGI environ
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()
+
+
class ChainVersionsList(Resource):
    '''
    Entrypoint to find versions of the chain api.
    '''

    def __init__(self, api):
        # back-reference to the ChainApi that registered this resource
        self.api = api

    def get(self):
        '''
        :return: flask.Response containing the openstack like description of the chain api
        '''
        # at least let it look like an open stack function
        try:
            resp = """
                {
                    "versions": [
                        {
                            "id": "v1",
                            "links": [
                                {
                                    "href": "http://%s:%d/v1/",
                                    "rel": "self"
                                }
                            ],
                            "status": "CURRENT",
                            "version": "1",
                            "min_version": "1",
                            "updated": "2013-07-23T11:33:21Z"
                        }
                    ]
                }
            """ % (self.api.ip, self.api.port)

            return Response(resp, status=200, mimetype="application/json")

        except Exception as ex:
            logging.exception(u"%s: Could not show list of versions." % __name__)
            # fix: Exception.message does not exist on Python 3; str(ex)
            # works on both Python 2 and 3
            return str(ex), 500
+
+
class ChainList(Resource):
    '''
    Will retrieve all chains including their paths.
    '''

    def __init__(self, api):
        # back-reference to the ChainApi that registered this resource
        self.api = api

    def get(self):
        '''
        :return: flask.Response containing all live chains
        '''
        # at least let it look like an open stack function
        try:
            resp = {"chains": list()}

            # full_chain_data maps chain identifiers to their description dicts
            for chain in self.api.manage.full_chain_data.values():
                resp["chains"].append(chain)

            return Response(json.dumps(resp), status=200, mimetype="application/json")

        except Exception as ex:
            logging.exception(u"%s: Could not list all network chains." % __name__)
            # fix: Exception.message does not exist on Python 3; str(ex)
            # works on both Python 2 and 3
            return str(ex), 500
+
+
class BalanceHostList(Resource):
    '''
    Will retrieve all loadbalance rules including their paths.
    '''

    def __init__(self, api):
        # back-reference to the ChainApi that registered this resource
        self.api = api

    def get(self):
        '''
        :return: flask.Response containing all live loadbalancer rules
        '''
        # at least let it look like an open stack function
        try:
            resp = {"loadbalancers": list()}

            # full_lb_data maps loadbalancer identifiers to their description dicts
            for lb in self.api.manage.full_lb_data.values():
                resp["loadbalancers"].append(lb)

            return Response(json.dumps(resp), status=200, mimetype="application/json")

        except Exception as ex:
            logging.exception(u"%s: Could not list all live loadbalancers." % __name__)
            # fix: Exception.message does not exist on Python 3; str(ex)
            # works on both Python 2 and 3
            return str(ex), 500
+
+
class ChainVnfInterfaces(Resource):
    """
    Handles requests targeted at: "/v1/chain/<src_vnf>/<src_intfs>/<dst_vnf>/<dst_intfs>"
    Requests are for tearing down or setting up a chain between two vnfs
    """

    def __init__(self, api):
        # back-reference to the ChainApi that registered this resource
        self.api = api

    def put(self, src_vnf, src_intfs, dst_vnf, dst_intfs):
        """
        A put request to "/v1/chain/<src_vnf>/<src_intfs>/<dst_vnf>/<dst_intfs>"
        will create a chain between two interfaces at the specified vnfs.

        Note:
           Does not allow a custom path. Uses ``.post``
           Internally just makes a POST request with no POST data!

        :param src_vnf: Name of the source VNF
        :type src_vnf: ``str``
        :param src_intfs: Name of the source VNF interface to chain on
        :type src_intfs: ``str``
        :param dst_vnf: Name of the destination VNF
        :type dst_vnf: ``str``
        :param dst_intfs: Name of the destination VNF interface to chain on
        :type dst_intfs: ``str``
        :return: flask.Response 200 if set up correctly else 500 also returns the cookie as dict {'cookie': value}
         501 if one of the VNF / intfs does not exist
        :rtype: :class:`flask.Response`
        """
        return self.post(src_vnf, src_intfs, dst_vnf, dst_intfs)

    def post(self, src_vnf, src_intfs, dst_vnf, dst_intfs):
        """
        A post request to "/v1/chain/<src_vnf>/<src_intfs>/<dst_vnf>/<dst_intfs>"
        will create a chain between two interfaces at the specified vnfs.
        The POST data contains the path like this.
        { "path": ["dc1.s1", "s1", "dc4.s1"]}
        path specifies the destination vnf and interface and contains a list of switches
        that the path traverses. The path may not contain single hop loops like:
        [s1, s2, s1].
        This is a limitation of Ryu, as Ryu does not allow the `INPUT_PORT` action!

        :param src_vnf: Name of the source VNF
        :type src_vnf: ``str``
        :param src_intfs: Name of the source VNF interface to chain on
        :type src_intfs: ``str``
        :param dst_vnf: Name of the destination VNF
        :type dst_vnf: ``str``
        :param dst_intfs: Name of the destination VNF interface to chain on
        :type dst_intfs: ``str``
        :return: flask.Response 200 if set up correctly else 500 also returns the cookie as dict {'cookie': value}
         501 if one of the VNF / intfs does not exist
        :rtype: :class:`flask.Response`

        """

        # optional JSON body: custom switch path and layer-2 flag
        if request.is_json:
            path = request.json.get('path')
            layer2 = request.json.get('layer2', True)
        else:
            path = None
            layer2 = True

        # check if both VNFs exist
        if not self.api.manage.check_vnf_intf_pair(src_vnf, src_intfs):
            return Response(u"VNF %s or intfs %s does not exist" % (src_vnf, src_intfs), status=501,
                            mimetype="application/json")
        if not self.api.manage.check_vnf_intf_pair(dst_vnf, dst_intfs):
            return Response(u"VNF %s or intfs %s does not exist" % (dst_vnf, dst_intfs), status=501,
                            mimetype="application/json")
        try:
            # install the (bidirectional) chain; the returned cookie
            # identifies the installed flow rules
            cookie = self.api.manage.network_action_start(src_vnf, dst_vnf, vnf_src_interface=src_intfs,
                                                          vnf_dst_interface=dst_intfs, bidirectional=True,
                                                          path=path, layer2=layer2)
            resp = {'cookie': cookie}
            return Response(json.dumps(resp), status=200, mimetype="application/json")

        except Exception as e:
            logging.exception(u"%s: Error setting up the chain.\n %s" % (__name__, e))
            return Response(u"Error setting up the chain", status=500, mimetype="application/json")

    def delete(self, src_vnf, src_intfs, dst_vnf, dst_intfs):
        """
        A DELETE request to "/v1/chain/<src_vnf>/<src_intfs>/<dst_vnf>/<dst_intfs>"
        will delete a previously created chain.

        :param src_vnf: Name of the source VNF
        :type src_vnf: ``str``
        :param src_intfs: Name of the source VNF interface to chain on
        :type src_intfs: ``str``
        :param dst_vnf: Name of the destination VNF
        :type dst_vnf: ``str``
        :param dst_intfs: Name of the destination VNF interface to chain on
        :type dst_intfs: ``str``
        :return: flask.Response 200 if set up correctly else 500\
         also returns the cookie as dict {'cookie': value}
         501 if one of the VNF / intfs does not exist
        :rtype: :class:`flask.Response`

        """
        # check if both VNFs exist
        if not self.api.manage.check_vnf_intf_pair(src_vnf, src_intfs):
            return Response(u"VNF %s or intfs %s does not exist" % (src_vnf, src_intfs), status=501,
                            mimetype="application/json")
        if not self.api.manage.check_vnf_intf_pair(dst_vnf, dst_intfs):
            return Response(u"VNF %s or intfs %s does not exist" % (dst_vnf, dst_intfs), status=501,
                            mimetype="application/json")
        try:
            # tear down the bidirectional chain; returns the cookie of the
            # removed flow rules
            cookie = self.api.manage.network_action_stop(src_vnf, dst_vnf, vnf_src_interface=src_intfs,
                                                         vnf_dst_interface=dst_intfs, bidirectional=True)
            return Response(json.dumps(cookie), status=200, mimetype="application/json")
        except Exception as e:
            logging.exception(u"%s: Error deleting the chain.\n %s" % (__name__, e))
            return Response(u"Error deleting the chain", status=500, mimetype="application/json")
+
+
class ChainVnfDcStackInterfaces(Resource):
    '''
    Handles requests targeted at: "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>"
    Handles tearing down or setting up a chain between two vnfs for stacks.
    '''

    def __init__(self, api):
        # back-reference to the ChainApi that registered this resource
        self.api = api

    def put(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
        """
        A PUT request to "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>"
        will set up chain.

        :Note: PUT Requests can not set up custom paths!

        :param src_dc: Name of the source datacenter
        :type src_dc: `str`
        :param src_stack: Name of the source stack
        :type src_stack: `str`
        :param src_vnf: Name of the source VNF
        :type src_vnf: ``str``
        :param src_intfs: Name of the source VNF interface to chain on
        :type src_intfs: ``str``
        :param dst_dc: Name of the destination datacenter
        :type dst_dc: ``str``
        :param dst_stack: Name of the destination stack
        :type dst_stack: ``str``
        :param dst_vnf: Name of the destination VNF
        :type dst_vnf: ``str``
        :param dst_intfs: Name of the destination VNF interface to chain on
        :type dst_intfs: ``str``
        :return: flask.Response 200 if set up correctly else 500\
         also returns the cookie as dict {'cookie': value}
         501 if VNF or intfs does not exist
        :rtype: :class:`flask.Response`

        """
        # search for real names
        real_names = self._findNames(src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
        if type(real_names) is not tuple:
            # something went wrong
            return real_names

        container_src, container_dst, interface_src, interface_dst = real_names

        # check if both VNFs exist
        if not self.api.manage.check_vnf_intf_pair(container_src, interface_src):
            return Response(u"VNF %s or intfs %s does not exist" % (container_src, interface_src), status=501,
                            mimetype="application/json")
        if not self.api.manage.check_vnf_intf_pair(container_dst, interface_dst):
            return Response(u"VNF %s or intfs %s does not exist" % (container_dst, interface_dst), status=501,
                            mimetype="application/json")

        try:
            # install the bidirectional chain (default path, layer 2)
            cookie = self.api.manage.network_action_start(container_src, container_dst, vnf_src_interface=interface_src,
                                                          vnf_dst_interface=interface_dst, bidirectional=True,
                                                          layer2=True)
            resp = {'cookie': cookie}
            return Response(json.dumps(resp), status=200, mimetype="application/json")

        except Exception as e:
            logging.exception(u"%s: Error setting up the chain.\n %s" % (__name__, e))
            return Response(u"Error setting up the chain", status=500, mimetype="application/json")

    def post(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
        """
        A post request to "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>"
        will create a chain between two interfaces at the specified vnfs.
        The POST data contains the path like this.
        { "path": ["dc1.s1", "s1", "dc4.s1"]}
        path specifies the destination vnf and interface and contains a list of switches
        that the path traverses. The path may not contain single hop loops like:
        [s1, s2, s1].
        This is a limitation of Ryu, as Ryu does not allow the `INPUT_PORT` action!

        :param src_dc: Name of the source datacenter
        :type src_dc: ``str``
        :param src_stack: Name of the source stack
        :type src_stack: ``str``
        :param src_vnf: Name of the source VNF
        :type src_vnf: ``str``
        :param src_intfs: Name of the source VNF interface to chain on
        :type src_intfs: ``str``
        :param dst_dc: Name of the destination datacenter
        :type dst_dc: ``str``
        :param dst_stack: Name of the destination stack
        :type dst_stack: ``str``
        :param dst_vnf: Name of the destination VNF
        :type dst_vnf: ``str``
        :param dst_intfs: Name of the destination VNF interface to chain on
        :type dst_intfs: ``str``
        :return: flask.Response 200 if set up correctly else 500 also returns the cookie as dict {'cookie': value}
         501 if vnf / intfs do not exist
        :rtype: :class:`flask.Response`

        """
        # optional JSON body: custom switch path and layer-2 flag
        if request.is_json:
            path = request.json.get('path')
            layer2 = request.json.get('layer2', True)
        else:
            path = None
            layer2 = True

        # search for real names
        real_names = self._findNames(src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
        if type(real_names) is not tuple:
            # something went wrong
            return real_names

        container_src, container_dst, interface_src, interface_dst = real_names

        try:
            cookie = self.api.manage.network_action_start(container_src, container_dst, vnf_src_interface=interface_src,
                                                          vnf_dst_interface=interface_dst, bidirectional=True,
                                                          path=path, layer2=layer2)
            resp = {'cookie': cookie}
            return Response(json.dumps(resp), status=200, mimetype="application/json")

        except Exception as e:
            logging.exception(u"%s: Error setting up the chain.\n %s" % (__name__, e))
            return Response(u"Error setting up the chain", status=500, mimetype="application/json")

    def delete(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
        """
        A DELETE request to "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>"
        will delete a previously created chain.

        :param src_dc: Name of the source datacenter
        :type src_dc: `str`
        :param src_stack: Name of the source stack
        :type src_stack: `str`
        :param src_vnf: Name of the source VNF
        :type src_vnf: ``str``
        :param src_intfs: Name of the source VNF interface to chain on
        :type src_intfs: ``str``
        :param dst_dc: Name of the destination datacenter
        :type dst_dc: ``str``
        :param dst_stack: Name of the destination stack
        :type dst_stack: ``str``
        :param dst_vnf: Name of the destination VNF
        :type dst_vnf: ``str``
        :param dst_intfs: Name of the destination VNF interface to chain on
        :type dst_intfs: ``str``
        :return: flask.Response 200 if set up correctly else 500\
         also returns the cookie as dict {'cookie': value}
         501 if one of the VNF / intfs does not exist
        :rtype: :class:`flask.Response`

        """
        # search for real names
        real_names = self._findNames(src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
        if type(real_names) is not tuple:
            # something went wrong, real_names is a Response object
            return real_names

        container_src, container_dst, interface_src, interface_dst = real_names

        try:
            cookie = self.api.manage.network_action_stop(container_src, container_dst, vnf_src_interface=interface_src,
                                                         vnf_dst_interface=interface_dst, bidirectional=True)
            return Response(json.dumps(cookie), status=200, mimetype="application/json")
        except Exception as e:
            logging.exception(u"%s: Error deleting the chain.\n %s" % (__name__, e))
            return Response(u"Error deleting the chain", status=500, mimetype="application/json")

    # Tries to find real container and interface names according to heat template names
    # Returns a tuple of 4 (source container, destination container,
    # source interface, destination interface) or a flask.Response object
    # describing the first lookup that failed
    def _findNames(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
        # search for datacenters
        if src_dc not in self.api.manage.net.dcs or dst_dc not in self.api.manage.net.dcs:
            return Response(u"At least one DC does not exist", status=500, mimetype="application/json")
        dc_src = self.api.manage.net.dcs[src_dc]
        dc_dst = self.api.manage.net.dcs[dst_dc]
        # search for related OpenStackAPIs
        api_src = None
        api_dst = None
        # NOTE(review): local import — presumably avoids a circular import
        # between this module and openstack_api_endpoint; confirm
        from openstack_api_endpoint import OpenstackApiEndpoint
        for api in OpenstackApiEndpoint.dc_apis:
            if api.compute.dc == dc_src:
                api_src = api
            if api.compute.dc == dc_dst:
                api_dst = api
        if api_src is None or api_dst is None:
            return Response(u"At least one OpenStackAPI does not exist", status=500, mimetype="application/json")
        # search for stacks
        stack_src = None
        stack_dst = None
        for stack in api_src.compute.stacks.values():
            if stack.stack_name == src_stack:
                stack_src = stack
        for stack in api_dst.compute.stacks.values():
            if stack.stack_name == dst_stack:
                stack_dst = stack
        if stack_src is None or stack_dst is None:
            return Response(u"At least one Stack does not exist", status=500, mimetype="application/json")
        # search for servers
        server_src = None
        server_dst = None
        for server in stack_src.servers.values():
            if server.template_name == src_vnf:
                server_src = server
                break
        for server in stack_dst.servers.values():
            if server.template_name == dst_vnf:
                server_dst = server
                break
        if server_src is None or server_dst is None:
            return Response(u"At least one VNF does not exist", status=500, mimetype="application/json")

        container_src = server_src.name
        container_dst = server_dst.name

        # search for ports
        port_src = None
        port_dst = None
        if src_intfs in server_src.port_names:
            port_src = stack_src.ports[src_intfs]
        if dst_intfs in server_dst.port_names:
            port_dst = stack_dst.ports[dst_intfs]
        if port_src is None or port_dst is None:
            return Response(u"At least one Port does not exist", status=500, mimetype="application/json")

        interface_src = port_src.intf_name
        interface_dst = port_dst.intf_name

        return container_src, container_dst, interface_src, interface_dst
+
+
class BalanceHostDcStack(Resource):
    """
    Handles requests to "/v1/lb/<src_dc>/<src_stack>/<vnf_src_name>/<vnf_src_interface>"
    Sets up LoadBalancers for VNFs that are belonging to a certain stack.
    """

    def __init__(self, api):
        # back-reference to the ChainApi that registered this resource
        self.api = api

    def post(self, src_dc, src_stack, vnf_src_name, vnf_src_interface):
        """
        A POST request to "/v1/lb/<src_dc>/<src_stack>/<vnf_src_name>/<vnf_src_interface>"
        will set up a loadbalancer. The target VNFs and interfaces are in the post data.

        :Example:
         See :class:`heat.chain_api.BalanceHost.post`

        :param src_dc: Name of the source VNF
        :type src_dc: ``str``
        :param src_stack: Name of the source VNF interface to chain on
        :type src_stack: ``str``
         * src_stack == "floating" sets up a new floating node, so only use this name if you know what you are doing.
        :param vnf_src_name:
        :type vnf_src_name: ``str``
        :param vnf_src_interface:
        :type vnf_src_interface: ``str``
        :return: flask.Response 200 if set up correctly else 500
        :rtype: :class:`flask.Response`

        """
        try:
            req = request.json
            if req is None or len(req) == 0 or "dst_vnf_interfaces" not in req:
                return Response(u"You have to specify destination vnfs via the POST data.",
                                status=500, mimetype="application/json")

            dst_vnfs = req.get('dst_vnf_interfaces')
            container_src = None
            interface_src = None

            # check src vnf/port
            if src_stack != "floating":
                real_src = self._findName(src_dc, src_stack, vnf_src_name, vnf_src_interface)
                if type(real_src) is not tuple:
                    # something went wrong, real_src is a Response object
                    return real_src

                container_src, interface_src = real_src

            # resolve each destination (pop/stack/server/port) to its real
            # container and interface name; destinations with missing keys
            # are silently skipped
            real_dst_dict = {}
            for dst_vnf in dst_vnfs:
                dst_dc = dst_vnf.get('pop', None)
                dst_stack = dst_vnf.get('stack', None)
                dst_server = dst_vnf.get('server', None)
                dst_port = dst_vnf.get('port', None)
                if dst_dc is not None and dst_stack is not None and dst_server is not None and dst_port is not None:
                    real_dst = self._findName(dst_dc, dst_stack, dst_server, dst_port)
                    if type(real_dst) is not tuple:
                        # something went wrong, real_dst is a Response object
                        return real_dst
                    real_dst_dict[real_dst[0]] = real_dst[1]

            input_object = {"dst_vnf_interfaces": real_dst_dict, "path": req.get("path", None)}

            if src_stack != "floating":
                self.api.manage.add_loadbalancer(container_src, interface_src, lb_data=input_object)
                return Response(u"Loadbalancer set up at %s:%s" % (container_src, interface_src),
                                status=200, mimetype="application/json")
            else:
                # "floating": create a new floating loadbalancer node in src_dc
                cookie, floating_ip = self.api.manage.add_floating_lb(src_dc, lb_data=input_object)

                return Response(json.dumps({"cookie": "%d" % cookie, "floating_ip": "%s" % floating_ip}),
                                status=200, mimetype="application/json")

        except Exception as e:
            logging.exception(u"%s: Error setting up the loadbalancer at %s %s %s:%s.\n %s" %
                              (__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface, e))
            return Response(u"%s: Error setting up the loadbalancer at %s %s %s:%s.\n %s" %
                            (__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface, e), status=500,
                            mimetype="application/json")

    def delete(self, src_dc, src_stack, vnf_src_name, vnf_src_interface):
        """
        Will delete a load balancer that sits behind a specified interface at a vnf for a specific stack

        :param src_dc: Name of the source VNF
        :type src_dc: ``str``
        :param src_stack: Name of the source VNF interface to chain on
        :type src_stack: ``str``
        :param vnf_src_name:
        :type vnf_src_name: ``str``
        :param vnf_src_interface:
        :type vnf_src_interface: ``str``
        :return: flask.Response 200 if set up correctly else 500
        :rtype: :class:`flask.Response`

        """
        try:
            # check src vnf/port
            if src_stack != "floating":
                real_src = self._findName(src_dc, src_stack, vnf_src_name, vnf_src_interface)
                if type(real_src) is not tuple:
                    # something went wrong, real_src is a Response object
                    return real_src

                container_src, interface_src = real_src

                self.api.manage.delete_loadbalancer(container_src, interface_src)
                return Response(u"Loadbalancer deleted at %s:%s" % (vnf_src_name, vnf_src_interface),
                                status=200, mimetype="application/json")
            else:
                # for floating loadbalancers vnf_src_name carries the cookie
                cookie = vnf_src_name
                self.api.manage.delete_floating_lb(cookie)
                return Response(u"Floating loadbalancer with cookie %s deleted" % (cookie),
                                status=200, mimetype="application/json")

        except Exception as e:
            logging.exception(u"%s: Error deleting the loadbalancer at %s %s %s%s.\n %s" %
                              (__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface, e))
            return Response(u"%s: Error deleting the loadbalancer at %s %s %s%s." %
                            (__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface), status=500,
                            mimetype="application/json")

    # Tries to find real container and port name according to heat template names
    # Returns a tuple of (container name, interface name) or a flask.Response
    # object describing the first lookup that failed
    def _findName(self, dc, stack, vnf, port):
        # search for datacenters
        if dc not in self.api.manage.net.dcs:
            return Response(u"DC does not exist", status=500, mimetype="application/json")
        dc_real = self.api.manage.net.dcs[dc]
        # search for related OpenStackAPIs
        api_real = None
        # NOTE(review): local import — presumably avoids a circular import
        # between this module and openstack_api_endpoint; confirm
        from openstack_api_endpoint import OpenstackApiEndpoint
        for api in OpenstackApiEndpoint.dc_apis:
            if api.compute.dc == dc_real:
                api_real = api
        if api_real is None:
            return Response(u"OpenStackAPI does not exist", status=500, mimetype="application/json")
        # search for stacks
        stack_real = None
        for stackObj in api_real.compute.stacks.values():
            if stackObj.stack_name == stack:
                stack_real = stackObj
        if stack_real is None:
            return Response(u"Stack does not exist", status=500, mimetype="application/json")
        # search for servers
        server_real = None
        for server in stack_real.servers.values():
            if server.template_name == vnf:
                server_real = server
                break
        if server_real is None:
            return Response(u"VNF does not exist", status=500, mimetype="application/json")

        container_real = server_real.name

        # search for ports
        port_real = None
        if port in server_real.port_names:
            port_real = stack_real.ports[port]
        if port_real is None:
            return Response(u"At least one Port does not exist", status=500, mimetype="application/json")

        interface_real = port_real.intf_name

        return container_real, interface_real
+
+
class BalanceHost(Resource):
    """
    Handles requests at "/v1/lb/<vnf_src_name>/<vnf_src_interface>"
    to set up or delete Load Balancers.
    """

    def __init__(self, api):
        # back-reference to the ChainApi that registered this resource
        self.api = api

    def post(self, vnf_src_name, vnf_src_interface):
        """
        Will set up a Load balancer behind an interface at a specified vnf
        We need both to avoid naming conflicts as interface names are not unique

        :param vnf_src_name: Name of the source VNF
        :type vnf_src_name: ``str``
        :param vnf_src_interface: Name of the source VNF interface to chain on
        :type vnf_src_interface: ``str``
        :return: flask.Response 200 if set up correctly else 500
         501 if VNF or intfs does not exist
        :rtype: :class:`flask.Response`

        """
        try:
            req = request.json
            if req is None or len(req) == 0 or "dst_vnf_interfaces" not in req:
                return Response(u"You have to specify destination vnfs via the POST data.",
                                status=500, mimetype="application/json")

            if vnf_src_name != "floating":
                # check if VNF exist
                if not self.api.manage.check_vnf_intf_pair(vnf_src_name, vnf_src_interface):
                    return Response(u"VNF %s or intfs %s does not exist" % (vnf_src_name, vnf_src_interface),
                                    status=501,
                                    mimetype="application/json")
                self.api.manage.add_loadbalancer(vnf_src_name, vnf_src_interface, lb_data=req)

                return Response(u"Loadbalancer set up at %s:%s" % (vnf_src_name, vnf_src_interface),
                                status=200, mimetype="application/json")
            else:
                # "floating": vnf_src_interface carries the datacenter name here
                cookie, floating_ip = self.api.manage.add_floating_lb(vnf_src_interface, lb_data=req)

                return Response(json.dumps({"cookie": "%d" % cookie, "floating_ip": "%s" % floating_ip}),
                                status=200, mimetype="application/json")
        except Exception as e:
            logging.exception(u"%s: Error setting up the loadbalancer at %s:%s.\n %s" %
                              (__name__, vnf_src_name, vnf_src_interface, e))
            return Response(u"%s: Error setting up the loadbalancer at %s:%s.\n %s" %
                            (__name__, vnf_src_name, vnf_src_interface, e), status=500, mimetype="application/json")

    def delete(self, vnf_src_name, vnf_src_interface):
        """
        Will delete a load balancer that sits behind a specified interface at a vnf

        :param vnf_src_name: Name of the source VNF
        :type vnf_src_name: ``str``
        :param vnf_src_interface: Name of the source VNF interface to chain on
        :type vnf_src_interface: ``str``
        :return: flask.Response 200 if set up correctly else 500
         501 if VNF or intfs does not exist
        :rtype: :class:`flask.Response`

        """
        # check if VNF exist
        # fix: only perform this check for regular loadbalancers — for
        # floating loadbalancers vnf_src_name carries a cookie, not a VNF
        # name, so the unconditional check made the floating branch below
        # unreachable
        if vnf_src_name != "floating" and \
                not self.api.manage.check_vnf_intf_pair(vnf_src_name, vnf_src_interface):
            return Response(u"VNF %s or intfs %s does not exist" % (vnf_src_name, vnf_src_interface), status=501,
                            mimetype="application/json")
        try:
            logging.debug("Deleting loadbalancer at %s: interface: %s" % (vnf_src_name, vnf_src_interface))
            net = self.api.manage.net

            if vnf_src_name != "floating":
                # check if VNF exists
                if vnf_src_name not in net:
                    # fix: the original message had no %s placeholder, so the
                    # "%" operator raised TypeError instead of responding
                    return Response(u"Source VNF %s or interface can not be found." % vnf_src_name,
                                    status=404, mimetype="application/json")

                self.api.manage.delete_loadbalancer(vnf_src_name, vnf_src_interface)

                return Response(u"Loadbalancer deleted at %s:%s" % (vnf_src_name, vnf_src_interface),
                                status=200, mimetype="application/json")
            else:
                # floating loadbalancers are identified by their cookie
                cookie = vnf_src_name
                self.api.manage.delete_floating_lb(cookie)
                return Response(u"Floating loadbalancer with cookie %s removed" % (cookie),
                                status=200, mimetype="application/json")
        except Exception as e:
            logging.exception(u"%s: Error deleting the loadbalancer at %s%s.\n %s" %
                              (__name__, vnf_src_name, vnf_src_interface, e))
            return Response(u"%s: Error deleting the loadbalancer at %s%s." %
                            (__name__, vnf_src_name, vnf_src_interface), status=500, mimetype="application/json")
+
+
class QueryTopology(Resource):
    """
    Handles requests at "/v1/topo/"
    """

    def __init__(self, api):
        # back-reference to the ChainApi that registered this resource
        self.api = api

    def get(self):
        """
        Answers GET requests for the current network topology at "/v1/topo".
        This will only return switches and datacenters and ignore currently deployed VNFs.

        :return: 200 if successful with the network graph as json dict, else 500

        """
        try:
            logging.debug("Querying topology")
            graph = self.api.manage.net.DCNetwork_graph
            net = self.api.manage.net
            # root node is nodes
            topology = {"nodes": list()}

            for n in graph:
                # remove root node as well as the floating switch fs1
                if n != "root" and n != "fs1":
                    # we only want to return switches!
                    if not isinstance(net[n], OVSSwitch):
                        continue
                    node = dict()

                    # get real datacenter label
                    for dc in self.api.manage.net.dcs.values():
                        if str(dc.switch) == str(n):
                            node["name"] = str(n)
                            node["type"] = "Datacenter"
                            node["label"] = str(dc.label)
                            break

                    # node is not a datacenter. It has to be a switch
                    if node.get("type", "") != "Datacenter":
                        node["name"] = str(n)
                        node["type"] = "Switch"

                    node["links"] = list()
                    # add links to the topology
                    for graph_node, data in graph[n].items():
                        # only add links to the topology that connect switches
                        if not isinstance(net[graph_node], OVSSwitch):
                            continue
                        # do not add any links to the floating switch to the topology!
                        # fix: this check sat inside the per-edge loop, but the
                        # link was still appended afterwards, so fs1 links
                        # leaked into the output
                        if graph_node == "fs1":
                            continue
                        # we allow multiple edges between switches, so add them all
                        # with their unique keys
                        # fix: deep copy — a shallow copy shares the per-edge
                        # attribute dicts, so stringifying below mutated the
                        # live network graph
                        link = copy.deepcopy(data)
                        for edge in link:
                            # the translator wants everything as a string!
                            for key, value in link[edge].items():
                                link[edge][key] = str(value)
                            # name of the destination
                            link[edge]["name"] = graph_node
                        node["links"].append(link)

                    topology["nodes"].append(node)

            return Response(json.dumps(topology),
                            status=200, mimetype="application/json")
        except Exception as e:
            logging.exception(u"%s: Error querying topology.\n %s" %
                              (__name__, e))
            return Response(u"%s: Error querying topology.\n %s" %
                            (__name__, e), status=500, mimetype="application/json")
--- /dev/null
+from mininet.link import Link
+from resources import *
+from docker import DockerClient
+import logging
+import threading
+import uuid
+import time
+import ip_handler as IP
+
+
class HeatApiStackInvalidException(Exception):
    """
    Raised when a submitted stack does not pass the validity checks.

    The offending value is kept on the instance so callers can inspect it.
    """

    def __init__(self, value):
        # store the value that triggered the exception
        self.value = value

    def __str__(self):
        # render the stored value exactly as repr() would
        return "%r" % (self.value,)
+
+
+class OpenstackCompute(object):
+ """
+ This class is a datacenter specific compute object that tracks all containers that are running in a datacenter,
+ as well as networks and configured ports.
+ It has some stack dependet logic and can check if a received stack is valid.
+
+ It also handles start and stop of containers.
+ """
+
    def __init__(self):
        # datacenter this compute instance belongs to (assigned externally)
        self.dc = None
        # all known stacks, indexed by stack id
        self.stacks = dict()
        # all servers of all stacks, indexed by server id
        self.computeUnits = dict()
        self.routers = dict()
        self.flavors = dict()
        # cache of known docker images, indexed by (short) tag; refreshed
        # lazily via the `images` property
        self._images = dict()
        # all networks of all stacks, indexed by network id
        self.nets = dict()
        # all ports of all stacks, indexed by port id
        self.ports = dict()
        # presumably maps network names to emulator-side network data —
        # TODO confirm against callers
        self.compute_nets = dict()
        # docker daemon client used to query locally available images
        self.dcli = DockerClient(base_url='unix://var/run/docker.sock')
+
    @property
    def images(self):
        """
        Updates the known images. Asks the docker daemon for a list of all known images and returns
        the new dictionary.

        Note: accessing this property has the side effect of refreshing the
        internal image cache from the docker daemon.

        :return: Returns the new image dictionary.
        :rtype: ``dict``
        """
        for image in self.dcli.images.list():
            # images without tags cannot be referenced by name — skip them
            if len(image.tags) > 0:
                for t in image.tags:
                    t = t.replace(":latest", "")  # only use short tag names for OSM compatibility
                    if t not in self._images:
                        self._images[t] = Image(t)
        return self._images
+
+ def add_stack(self, stack):
+ """
+ Adds a new stack to the compute node.
+
+ :param stack: Stack dictionary.
+ :type stack: :class:`heat.resources.stack`
+ """
+ if not self.check_stack(stack):
+ self.clean_broken_stack(stack)
+ raise HeatApiStackInvalidException("Stack did not pass validity checks")
+ self.stacks[stack.id] = stack
+
+ def clean_broken_stack(self, stack):
+ for port in stack.ports.values():
+ if port.id in self.ports:
+ del self.ports[port.id]
+ for server in stack.servers.values():
+ if server.id in self.computeUnits:
+ del self.computeUnits[server.id]
+ for net in stack.nets.values():
+ if net.id in self.nets:
+ del self.nets[net.id]
+
+ def check_stack(self, stack):
+ """
+ Checks all dependencies of all servers, ports and routers and their most important parameters.
+
+ :param stack: A reference of the stack that should be checked.
+ :type stack: :class:`heat.resources.stack`
+ :return: * *True*: If the stack is completely fine.
+ * *False*: Else
+ :rtype: ``bool``
+ """
+ everything_ok = True
+ for server in stack.servers.values():
+ for port_name in server.port_names:
+ if port_name not in stack.ports:
+ logging.warning("Server %s of stack %s has a port named %s that is not known." %
+ (server.name, stack.stack_name, port_name))
+ everything_ok = False
+ if server.image is None:
+ logging.warning("Server %s holds no image." % (server.name))
+ everything_ok = False
+ if server.command is None:
+ logging.warning("Server %s holds no command." % (server.name))
+ everything_ok = False
+ for port in stack.ports.values():
+ if port.net_name not in stack.nets:
+ logging.warning("Port %s of stack %s has a network named %s that is not known." %
+ (port.name, stack.stack_name, port.net_name))
+ everything_ok = False
+ if port.intf_name is None:
+ logging.warning("Port %s has no interface name." % (port.name))
+ everything_ok = False
+ if port.ip_address is None:
+ logging.warning("Port %s has no IP address." % (port.name))
+ everything_ok = False
+ for router in stack.routers.values():
+ for subnet_name in router.subnet_names:
+ found = False
+ for net in stack.nets.values():
+ if net.subnet_name == subnet_name:
+ found = True
+ break
+ if not found:
+ logging.warning("Router %s of stack %s has a network named %s that is not known." %
+ (router.name, stack.stack_name, subnet_name))
+ everything_ok = False
+ return everything_ok
+
+ def add_flavor(self, name, cpu, memory, memory_unit, storage, storage_unit):
+ """
+ Adds a flavor to the stack.
+
+ :param name: Specifies the name of the flavor.
+ :type name: ``str``
+ :param cpu:
+ :type cpu: ``str``
+ :param memory:
+ :type memory: ``str``
+ :param memory_unit:
+ :type memory_unit: ``str``
+ :param storage:
+ :type storage: ``str``
+ :param storage_unit:
+ :type storage_unit: ``str``
+ """
+ flavor = InstanceFlavor(name, cpu, memory, memory_unit, storage, storage_unit)
+ self.flavors[flavor.name] = flavor
+ return flavor
+
+ def deploy_stack(self, stackid):
+ """
+ Deploys the stack and starts the emulation.
+
+ :param stackid: An UUID str of the stack
+ :type stackid: ``str``
+ :return: * *False*: If the Datacenter is None
+ * *True*: Else
+ :rtype: ``bool``
+ """
+ if self.dc is None:
+ return False
+
+ stack = self.stacks[stackid]
+ self.update_compute_dicts(stack)
+
+ # Create the networks first
+ for server in stack.servers.values():
+ self._start_compute(server)
+ return True
+
+ def delete_stack(self, stack_id):
+ """
+ Delete a stack and all its components.
+
+ :param stack_id: An UUID str of the stack
+ :type stack_id: ``str``
+ :return: * *False*: If the Datacenter is None
+ * *True*: Else
+ :rtype: ``bool``
+ """
+ if self.dc is None:
+ return False
+
+ # Stop all servers and their links of this stack
+ for server in self.stacks[stack_id].servers.values():
+ self.stop_compute(server)
+ self.delete_server(server)
+ for net in self.stacks[stack_id].nets.values():
+ self.delete_network(net.id)
+ for port in self.stacks[stack_id].ports.values():
+ self.delete_port(port.id)
+
+ del self.stacks[stack_id]
+ return True
+
+ def update_stack(self, old_stack_id, new_stack):
+ """
+ Determines differences within the old and the new stack and deletes, create or changes only parts that
+ differ between the two stacks.
+
+ :param old_stack_id: The ID of the old stack.
+ :type old_stack_id: ``str``
+ :param new_stack: A reference of the new stack.
+ :type new_stack: :class:`heat.resources.stack`
+ :return: * *True*: if the old stack could be updated to the new stack without any error.
+ * *False*: else
+ :rtype: ``bool``
+ """
+ if old_stack_id not in self.stacks:
+ return False
+ old_stack = self.stacks[old_stack_id]
+
+ # Update Stack IDs
+ for server in old_stack.servers.values():
+ if server.name in new_stack.servers:
+ new_stack.servers[server.name].id = server.id
+ for net in old_stack.nets.values():
+ if net.name in new_stack.nets:
+ new_stack.nets[net.name].id = net.id
+ for subnet in new_stack.nets.values():
+ if subnet.subnet_name == net.subnet_name:
+ subnet.subnet_id = net.subnet_id
+ break
+ for port in old_stack.ports.values():
+ if port.name in new_stack.ports:
+ new_stack.ports[port.name].id = port.id
+ for router in old_stack.routers.values():
+ if router.name in new_stack.routers:
+ new_stack.routers[router.name].id = router.id
+
+ # Update the compute dicts to now contain the new_stack components
+ self.update_compute_dicts(new_stack)
+
+ self.update_ip_addresses(old_stack, new_stack)
+
+ # Update all interface names - after each port has the correct UUID!!
+ for port in new_stack.ports.values():
+ port.create_intf_name()
+
+ if not self.check_stack(new_stack):
+ return False
+
+ # Remove unnecessary networks
+ for net in old_stack.nets.values():
+ if not net.name in new_stack.nets:
+ self.delete_network(net.id)
+
+ # Remove all unnecessary servers
+ for server in old_stack.servers.values():
+ if server.name in new_stack.servers:
+ if not server.compare_attributes(new_stack.servers[server.name]):
+ self.stop_compute(server)
+ else:
+ # Delete unused and changed links
+ for port_name in server.port_names:
+ if port_name in old_stack.ports and port_name in new_stack.ports:
+ if not old_stack.ports.get(port_name) == new_stack.ports.get(port_name):
+ my_links = self.dc.net.links
+ for link in my_links:
+ if str(link.intf1) == old_stack.ports[port_name].intf_name and \
+ str(link.intf1.ip) == \
+ old_stack.ports[port_name].ip_address.split('/')[0]:
+ self._remove_link(server.name, link)
+
+ # Add changed link
+ self._add_link(server.name,
+ new_stack.ports[port_name].ip_address,
+ new_stack.ports[port_name].intf_name,
+ new_stack.ports[port_name].net_name)
+ break
+ else:
+ my_links = self.dc.net.links
+ for link in my_links:
+ if str(link.intf1) == old_stack.ports[port_name].intf_name and \
+ str(link.intf1.ip) == old_stack.ports[port_name].ip_address.split('/')[0]:
+ self._remove_link(server.name, link)
+ break
+
+ # Create new links
+ for port_name in new_stack.servers[server.name].port_names:
+ if port_name not in server.port_names:
+ self._add_link(server.name,
+ new_stack.ports[port_name].ip_address,
+ new_stack.ports[port_name].intf_name,
+ new_stack.ports[port_name].net_name)
+ else:
+ self.stop_compute(server)
+
+ # Start all new servers
+ for server in new_stack.servers.values():
+ if server.name not in self.dc.containers:
+ self._start_compute(server)
+ else:
+ server.emulator_compute = self.dc.containers.get(server.name)
+
+ del self.stacks[old_stack_id]
+ self.stacks[new_stack.id] = new_stack
+ return True
+
+ def update_ip_addresses(self, old_stack, new_stack):
+ """
+ Updates the subnet and the port IP addresses - which should always be in this order!
+
+ :param old_stack: The currently running stack
+ :type old_stack: :class:`heat.resources.stack`
+ :param new_stack: The new created stack
+ :type new_stack: :class:`heat.resources.stack`
+ """
+ self.update_subnet_cidr(old_stack, new_stack)
+ self.update_port_addresses(old_stack, new_stack)
+
+ def update_port_addresses(self, old_stack, new_stack):
+ """
+ Updates the port IP addresses. First resets all issued addresses. Then get all IP addresses from the old
+ stack and sets them to the same ports in the new stack. Finally all new or changed instances will get new
+ IP addresses.
+
+ :param old_stack: The currently running stack
+ :type old_stack: :class:`heat.resources.stack`
+ :param new_stack: The new created stack
+ :type new_stack: :class:`heat.resources.stack`
+ """
+ for net in new_stack.nets.values():
+ net.reset_issued_ip_addresses()
+
+ for old_port in old_stack.ports.values():
+ for port in new_stack.ports.values():
+ if port.compare_attributes(old_port):
+ for net in new_stack.nets.values():
+ if net.name == port.net_name:
+ if net.assign_ip_address(old_port.ip_address, port.name):
+ port.ip_address = old_port.ip_address
+ port.mac_address = old_port.mac_address
+ else:
+ port.ip_address = net.get_new_ip_address(port.name)
+
+ for port in new_stack.ports.values():
+ for net in new_stack.nets.values():
+ if port.net_name == net.name and not net.is_my_ip(port.ip_address, port.name):
+ port.ip_address = net.get_new_ip_address(port.name)
+
+ def update_subnet_cidr(self, old_stack, new_stack):
+ """
+ Updates the subnet IP addresses. If the new stack contains subnets from the old stack it will take those
+ IP addresses. Otherwise it will create new IP addresses for the subnet.
+
+ :param old_stack: The currently running stack
+ :type old_stack: :class:`heat.resources.stack`
+ :param new_stack: The new created stack
+ :type new_stack: :class:`heat.resources.stack`
+ """
+ for old_subnet in old_stack.nets.values():
+ IP.free_cidr(old_subnet.get_cidr(), old_subnet.subnet_id)
+
+ for subnet in new_stack.nets.values():
+ subnet.clear_cidr()
+ for old_subnet in old_stack.nets.values():
+ if subnet.subnet_name == old_subnet.subnet_name:
+ if IP.assign_cidr(old_subnet.get_cidr(), subnet.subnet_id):
+ subnet.set_cidr(old_subnet.get_cidr())
+
+ for subnet in new_stack.nets.values():
+ if IP.is_cidr_issued(subnet.get_cidr()):
+ continue
+
+ cird = IP.get_new_cidr(subnet.subnet_id)
+ subnet.set_cidr(cird)
+ return
+
+ def update_compute_dicts(self, stack):
+ """
+ Update and add all stack components tho the compute dictionaries.
+
+ :param stack: A stack reference, to get all required components.
+ :type stack: :class:`heat.resources.stack`
+ """
+ for server in stack.servers.values():
+ self.computeUnits[server.id] = server
+ if isinstance(server.flavor, dict):
+ self.add_flavor(server.flavor['flavorName'],
+ server.flavor['vcpu'],
+ server.flavor['ram'], 'MB',
+ server.flavor['storage'], 'GB')
+ server.flavor = server.flavor['flavorName']
+ for router in stack.routers.values():
+ self.routers[router.id] = router
+ for net in stack.nets.values():
+ self.nets[net.id] = net
+ for port in stack.ports.values():
+ self.ports[port.id] = port
+
+ def _start_compute(self, server):
+ """
+ Starts a new compute object (docker container) inside the emulator.
+ Should only be called by stack modifications and not directly.
+
+ :param server: Specifies the compute resource.
+ :type server: :class:`heat.resources.server`
+ """
+ logging.debug("Starting new compute resources %s" % server.name)
+ network = list()
+
+ for port_name in server.port_names:
+ network_dict = dict()
+ port = self.find_port_by_name_or_id(port_name)
+ if port is not None:
+ network_dict['id'] = port.intf_name
+ network_dict['ip'] = port.ip_address
+ network_dict[network_dict['id']] = self.find_network_by_name_or_id(port.net_name).name
+ network.append(network_dict)
+ self.compute_nets[server.name] = network
+ c = self.dc.startCompute(server.name, image=server.image, command=server.command,
+ network=network, flavor_name=server.flavor)
+ server.emulator_compute = c
+
+ for intf in c.intfs.values():
+ for port_name in server.port_names:
+ port = self.find_port_by_name_or_id(port_name)
+ if port is not None:
+ if intf.name == port.intf_name:
+ # wait up to one second for the intf to come up
+ self.timeout_sleep(intf.isUp, 1)
+ if port.mac_address is not None:
+ intf.setMAC(port.mac_address)
+ else:
+ port.mac_address = intf.MAC()
+
+ # Start the real emulator command now as specified in the dockerfile
+ # ENV SON_EMU_CMD
+ config = c.dcinfo.get("Config", dict())
+ env = config.get("Env", list())
+ for env_var in env:
+ if "SON_EMU_CMD=" in env_var:
+ cmd = str(env_var.split("=")[1])
+ server.son_emu_command = cmd
+ # execute command in new thread to ensure that GK is not blocked by VNF
+ t = threading.Thread(target=c.cmdPrint, args=(cmd,))
+ t.daemon = True
+ t.start()
+
+ def stop_compute(self, server):
+ """
+ Determines which links should be removed before removing the server itself.
+
+ :param server: The server that should be removed
+ :type server: ``heat.resources.server``
+ """
+ logging.debug("Stopping container %s with full name %s" % (server.name, server.full_name))
+ link_names = list()
+ for port_name in server.port_names:
+ link_names.append(self.find_port_by_name_or_id(port_name).intf_name)
+ my_links = self.dc.net.links
+ for link in my_links:
+ if str(link.intf1) in link_names:
+ # Remove all self created links that connect the server to the main switch
+ self._remove_link(server.name, link)
+
+ # Stop the server and the remaining connection to the datacenter switch
+ self.dc.stopCompute(server.name)
+ # Only now delete all its ports and the server itself
+ for port_name in server.port_names:
+ self.delete_port(port_name)
+ self.delete_server(server)
+
+ def find_server_by_name_or_id(self, name_or_id):
+ """
+ Tries to find the server by ID and if this does not succeed then tries to find it via name.
+
+ :param name_or_id: UUID or name of the server.
+ :type name_or_id: ``str``
+ :return: Returns the server reference if it was found or None
+ :rtype: :class:`heat.resources.server`
+ """
+ if name_or_id in self.computeUnits:
+ return self.computeUnits[name_or_id]
+
+ for server in self.computeUnits.values():
+ if server.name == name_or_id or server.template_name == name_or_id or server.full_name == name_or_id:
+ return server
+ return None
+
+ def create_server(self, name, stack_operation=False):
+ """
+ Creates a server with the specified name. Raises an exception when a server with the given name already
+ exists!
+
+ :param name: Name of the new server.
+ :type name: ``str``
+ :param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
+ :type stack_operation: ``bool``
+ :return: Returns the created server.
+ :rtype: :class:`heat.resources.server`
+ """
+ if self.find_server_by_name_or_id(name) is not None and not stack_operation:
+ raise Exception("Server with name %s already exists." % name)
+ server = Server(name)
+ server.id = str(uuid.uuid4())
+ if not stack_operation:
+ self.computeUnits[server.id] = server
+ return server
+
+ def delete_server(self, server):
+ """
+ Deletes the given server from the stack dictionary and the computeUnits dictionary.
+
+ :param server: Reference of the server that should be deleted.
+ :type server: :class:`heat.resources.server`
+ :return: * *False*: If the server name is not in the correct format ('datacentername_stackname_servername') \
+ or when no stack with the correct stackname was found.
+ * *True*: Else
+ :rtype: ``bool``
+ """
+ if server is None:
+ return False
+ name_parts = server.name.split('_')
+ if len(name_parts) < 3:
+ return False
+
+ for stack in self.stacks.values():
+ if stack.stack_name == name_parts[1]:
+ stack.servers.pop(server.id, None)
+ if self.computeUnits.pop(server.id, None) is None:
+ return False
+ return True
+
+ def find_network_by_name_or_id(self, name_or_id):
+ """
+ Tries to find the network by ID and if this does not succeed then tries to find it via name.
+
+ :param name_or_id: UUID or name of the network.
+ :type name_or_id: ``str``
+ :return: Returns the network reference if it was found or None
+ :rtype: :class:`heat.resources.net`
+ """
+ if name_or_id in self.nets:
+ return self.nets[name_or_id]
+ for net in self.nets.values():
+ if net.name == name_or_id:
+ return net
+
+ return None
+
+ def create_network(self, name, stack_operation=False):
+ """
+ Creates a new network with the given name. Raises an exception when a network with the given name already
+ exists!
+
+ :param name: Name of the new network.
+ :type name: ``str``
+ :param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
+ :type stack_operation: ``bool``
+ :return: :class:`heat.resources.net`
+ """
+ logging.debug("Creating network with name %s" % name)
+ if self.find_network_by_name_or_id(name) is not None and not stack_operation:
+ logging.warning("Creating network with name %s failed, as it already exists" % name)
+ raise Exception("Network with name %s already exists." % name)
+ network = Net(name)
+ network.id = str(uuid.uuid4())
+ if not stack_operation:
+ self.nets[network.id] = network
+ return network
+
+ def delete_network(self, name_or_id):
+ """
+ Deletes the given network.
+
+ :param name_or_id: Name or UUID of the network.
+ :type name_or_id: ``str``
+ """
+ net = self.find_network_by_name_or_id(name_or_id)
+ if net is None:
+ raise Exception("Network with name or id %s does not exists." % name_or_id)
+
+ for stack in self.stacks.values():
+ stack.nets.pop(net.name, None)
+
+ self.nets.pop(net.id, None)
+
+ def create_port(self, name, stack_operation=False):
+ """
+ Creates a new port with the given name. Raises an exception when a port with the given name already
+ exists!
+
+ :param name: Name of the new port.
+ :type name: ``str``
+ :param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
+ :type stack_operation: ``bool``
+ :return: Returns the created port.
+ :rtype: :class:`heat.resources.port`
+ """
+ port = self.find_port_by_name_or_id(name)
+ if port is not None and not stack_operation:
+ logging.warning("Creating port with name %s failed, as it already exists" % name)
+ raise Exception("Port with name %s already exists." % name)
+ logging.debug("Creating port with name %s" % name)
+ port = Port(name)
+ if not stack_operation:
+ self.ports[port.id] = port
+ port.create_intf_name()
+ return port
+
+ def find_port_by_name_or_id(self, name_or_id):
+ """
+ Tries to find the port by ID and if this does not succeed then tries to find it via name.
+
+ :param name_or_id: UUID or name of the network.
+ :type name_or_id: ``str``
+ :return: Returns the port reference if it was found or None
+ :rtype: :class:`heat.resources.port`
+ """
+ if name_or_id in self.ports:
+ return self.ports[name_or_id]
+ for port in self.ports.values():
+ if port.name == name_or_id or port.template_name == name_or_id:
+ return port
+
+ return None
+
+ def delete_port(self, name_or_id):
+ """
+ Deletes the given port. Raises an exception when the port was not found!
+
+ :param name_or_id: UUID or name of the port.
+ :type name_or_id: ``str``
+ """
+ port = self.find_port_by_name_or_id(name_or_id)
+ if port is None:
+ raise Exception("Port with name or id %s does not exists." % name_or_id)
+
+ my_links = self.dc.net.links
+ for link in my_links:
+ if str(link.intf1) == port.intf_name and \
+ str(link.intf1.ip) == port.ip_address.split('/')[0]:
+ self._remove_link(link.intf1.node.name, link)
+ break
+
+ self.ports.pop(port.id, None)
+ for stack in self.stacks.values():
+ stack.ports.pop(port.name, None)
+
+ def _add_link(self, node_name, ip_address, link_name, net_name):
+ """
+ Adds a new link between datacenter switch and the node with the given name.
+
+ :param node_name: Name of the required node.
+ :type node_name: ``str``
+ :param ip_address: IP-Address of the node.
+ :type ip_address: ``str``
+ :param link_name: Link name.
+ :type link_name: ``str``
+ :param net_name: Network name.
+ :type net_name: ``str``
+ """
+ node = self.dc.net.get(node_name)
+ params = {'params1': {'ip': ip_address,
+ 'id': link_name,
+ link_name: net_name},
+ 'intfName1': link_name,
+ 'cls': Link}
+ link = self.dc.net.addLink(node, self.dc.switch, **params)
+ OpenstackCompute.timeout_sleep(link.intf1.isUp, 1)
+
+ def _remove_link(self, server_name, link):
+ """
+ Removes a link between server and datacenter switch.
+
+ :param server_name: Specifies the server where the link starts.
+ :type server_name: ``str``
+ :param link: A reference of the link which should be removed.
+ :type link: :class:`mininet.link`
+ """
+ self.dc.switch.detach(link.intf2)
+ del self.dc.switch.intfs[self.dc.switch.ports[link.intf2]]
+ del self.dc.switch.ports[link.intf2]
+ del self.dc.switch.nameToIntf[link.intf2.name]
+ self.dc.net.removeLink(link=link)
+ self.dc.net.DCNetwork_graph.remove_edge(server_name, self.dc.switch.name)
+ self.dc.net.DCNetwork_graph.remove_edge(self.dc.switch.name, server_name)
+ for intf_key in self.dc.net[server_name].intfs.keys():
+ if self.dc.net[server_name].intfs[intf_key].link == link:
+ self.dc.net[server_name].intfs[intf_key].delete()
+ del self.dc.net[server_name].intfs[intf_key]
+
+ @staticmethod
+ def timeout_sleep(function, max_sleep):
+ """
+ This function will execute a function all 0.1 seconds until it successfully returns.
+ Will return after `max_sleep` seconds if not successful.
+
+ :param function: The function to execute. Should return true if done.
+ :type function: ``function``
+ :param max_sleep: Max seconds to sleep. 1 equals 1 second.
+ :type max_sleep: ``float``
+ """
+ current_time = time.time()
+ stop_time = current_time + max_sleep
+ while not function() and current_time < stop_time:
+ current_time = time.time()
+ time.sleep(0.1)
--- /dev/null
+from docker import DockerClient, APIClient
+import time
+import re
+
+
def docker_container_id(container_name):
    """
    Resolve a docker container name to its full container ID.

    :param container_name: The full name of the docker container.
    :type container_name: ``str``
    :return: The container ID, or None if the container is not running.
    :rtype: ``str``
    """
    details = APIClient().inspect_container(container_name)
    if not details["State"]["Running"]:
        return None
    return details['Id']
+
+
def docker_abs_cpu(container_id):
    """
    Returns the used CPU time since container startup and the system time in nanoseconds and returns the number
    of available CPU cores.

    :param container_id: The full ID of the docker container.
    :type container_id: ``str``
    :return: Returns a dict with CPU_used in nanoseconds, the current system time in nanoseconds and the number of
        CPU cores available.
    :rtype: ``dict``
    """
    # cpuacct.usage_percpu holds one cumulative nanosecond counter per core.
    with open('/sys/fs/cgroup/cpuacct/docker/' + container_id + '/cpuacct.usage_percpu', 'r') as f:
        line = f.readline()
    # Timestamp taken right after the read so both values line up closely.
    sys_time = int(time.time() * 1000000000)
    numbers = [int(x) for x in line.split()]
    # sum() replaces the original manual accumulation loop.
    return {'CPU_used': sum(numbers), 'CPU_used_systime': sys_time, 'CPU_cores': len(numbers)}
+
+
def docker_mem_used(container_id):
    """
    Bytes of memory used from the docker container.

    Note: If you have problems with this command you have to enable memory control group.
    For this you have to add the following kernel parameters: `cgroup_enable=memory swapaccount=1`.
    See: https://docs.docker.com/engine/admin/runmetrics/

    :param container_id: The full ID of the docker container.
    :type container_id: ``str``
    :return: Returns the memory utilization in bytes.
    :rtype: ``int``
    """
    # The docstring previously claimed ``str``; the value is parsed to int here.
    with open('/sys/fs/cgroup/memory/docker/' + container_id + '/memory.usage_in_bytes', 'r') as f:
        return int(f.readline())
+
+
def docker_max_mem(container_id):
    """
    Bytes of memory the docker container could use.

    The cgroup limit can be effectively "unlimited" (a huge number), so the
    result is capped at the total system memory from /proc/meminfo.

    :param container_id: The full ID of the docker container.
    :type container_id: ``str``
    :return: Returns the bytes of memory the docker container could use.
    :rtype: ``int``
    """
    with open('/sys/fs/cgroup/memory/docker/' + container_id + '/memory.limit_in_bytes', 'r') as f:
        mem_limit = int(f.readline())
    with open('/proc/meminfo', 'r') as f:
        line = f.readline().split()
    # First meminfo line: "MemTotal: <value> kB".
    sys_value = int(line[1])
    unit = line[2]
    if unit == 'kB':
        sys_value *= 1024
    elif unit == 'MB':
        sys_value *= 1024 * 1024

    # The container can never use more memory than the host actually has.
    return min(sys_value, mem_limit)
+
+
def docker_mem(container_id):
    """
    Calculates the current, maximal and fractional memory usage of the
    specified docker container.

    :param container_id: The full ID of the docker container.
    :type container_id: ``str``
    :return: Dictionary with the total memory usage, the maximal available
        memory and the used/limit ratio.
    :rtype: ``dict``
    """
    used = docker_mem_used(container_id)
    limit = docker_max_mem(container_id)
    return {
        'MEM_used': used,
        'MEM_limit': limit,
        'MEM_%': float(used) / float(limit),
    }
+
+
def docker_abs_net_io(container_id):
    """
    Network traffic of all network interfaces within the controller.

    :param container_id: The full ID of the docker container.
    :type container_id: ``str``
    :return: Returns the absolute network I/O till container startup, in bytes. The return dict also contains the
        system time. NET_in/NET_out are None when ifconfig output could not be parsed.
    :rtype: ``dict``
    """
    c = APIClient()
    command = c.exec_create(container_id, 'ifconfig')
    ifconfig = c.exec_start(command['Id'])
    sys_time = int(time.time() * 1000000000)

    # Raw strings so "\d" is a proper regex escape and not a (deprecated)
    # string escape sequence.
    rx_matches = re.findall(r'RX bytes:(\d+)', str(ifconfig))
    in_bytes = sum(int(number) for number in rx_matches) if rx_matches else None

    tx_matches = re.findall(r'TX bytes:(\d+)', str(ifconfig))
    out_bytes = sum(int(number) for number in tx_matches) if tx_matches else None

    return {'NET_in': in_bytes, 'NET_out': out_bytes, 'NET_systime': sys_time}
+
+
def docker_block_rw(container_id):
    """
    Determines the disk read and write access from the controller since startup.

    :param container_id: The full ID of the docker container.
    :type container_id: ``str``
    :return: Returns a dictionary with the total disc I/O since container startup, in bytes.
    :rtype: ``dict``
    """
    # Lines look like "8:0 Read 1234" / "8:0 Write 5678".
    with open('/sys/fs/cgroup/blkio/docker/' + container_id + '/blkio.throttle.io_service_bytes', 'r') as f:
        read = f.readline().split()
        write = f.readline().split()
    rw_dict = dict()
    rw_dict['BLOCK_systime'] = int(time.time() * 1000000000)
    # Cast to int so callers always receive numbers; previously the values
    # were strings when present but int 0 when missing.
    rw_dict['BLOCK_read'] = int(read[2]) if len(read) >= 3 else 0
    rw_dict['BLOCK_write'] = int(write[2]) if len(write) >= 3 else 0
    return rw_dict
+
+
def docker_PIDS(container_id):
    """
    Determines the number of processes within the docker container.

    :param container_id: The full ID of the docker container.
    :type container_id: ``str``
    :return: Returns the number of PIDS within a dictionary.
    :rtype: ``dict``
    """
    # The cgroup "tasks" file lists one PID per line (plus a trailing newline).
    with open('/sys/fs/cgroup/cpuacct/docker/' + container_id + '/tasks', 'r') as f:
        content = f.read()
    return {'PIDS': len(content.split('\n')) - 1}
+
+
def monitoring_over_time(container_id):
    """
    Calculates the cpu workload and the network traffic per second.

    Takes two samples one second apart and derives per-second rates from the
    deltas (timestamps are in nanoseconds, hence the 1e9 factors).

    :param container_id: The full docker container ID
    :type container_id: ``str``
    :return: A dictionary with disk read and write per second, network traffic per second (in and out),
        the cpu workload and the number of cpu cores available.
    :rtype: ``dict``
    """
    cpu_first = docker_abs_cpu(container_id)
    net_first = docker_abs_net_io(container_id)
    disk_first = docker_block_rw(container_id)
    time.sleep(1)
    cpu_second = docker_abs_cpu(container_id)
    net_second = docker_abs_net_io(container_id)
    disk_second = docker_block_rw(container_id)

    # Disk access
    elapsed = int(disk_second['BLOCK_systime']) - int(disk_first['BLOCK_systime'])
    delta_read = int(disk_second['BLOCK_read']) - int(disk_first['BLOCK_read'])
    delta_write = int(disk_second['BLOCK_write']) - int(disk_first['BLOCK_write'])
    result = {'BLOCK_read/s': int(delta_read * 1000000000 / float(elapsed) + 0.5),
              'BLOCK_write/s': int(delta_write * 1000000000 / float(elapsed) + 0.5)}

    # Network traffic
    elapsed = int(net_second['NET_systime']) - int(net_first['NET_systime'])
    delta_in = int(net_second['NET_in']) - int(net_first['NET_in'])
    delta_out = int(net_second['NET_out']) - int(net_first['NET_out'])
    result.update({'NET_in/s': int(delta_in * 1000000000 / float(elapsed) + 0.5),
                   'NET_out/s': int(delta_out * 1000000000 / float(elapsed) + 0.5)})

    # CPU utilization
    elapsed = int(cpu_second['CPU_used_systime']) - int(cpu_first['CPU_used_systime'])
    delta_cpu = int(cpu_second['CPU_used']) - int(cpu_first['CPU_used'])
    result.update({'CPU_%': delta_cpu / float(elapsed), 'CPU_cores': cpu_first['CPU_cores']})
    return result
--- /dev/null
+from __future__ import print_function # TODO remove when print is no longer needed for debugging
+from resources import *
+from datetime import datetime
+import re
+import sys
+import uuid
+import logging
+import ip_handler as IP
+
+
+class HeatParser:
+ """
+ The HeatParser will parse a heat dictionary and create a stack and its components, to instantiate it within son-emu.
+ """
+
    def __init__(self, compute):
        """
        Creates a parser bound to a compute manager.

        :param compute: The compute manager used to create networks, ports and servers
            while parsing resources.
        """
        # Template sections; populated by parse_input().
        self.description = None
        self.parameter_groups = None
        self.parameters = None
        self.resources = None
        self.outputs = None
        self.compute = compute
        # Resources whose dependencies were unresolved; retried by parse_input().
        self.bufferResource = list()
+
+ def parse_input(self, input_dict, stack, dc_label, stack_update=False):
+ """
+ It will parse the input dictionary into the corresponding classes, which are then stored within the stack.
+
+ :param input_dict: Dictionary with the template version and resources.
+ :type input_dict: ``dict``
+ :param stack: Reference of the stack that should finally contain all created classes.
+ :type stack: :class:`heat.resources.stack`
+ :param dc_label: String that contains the label of the used data center.
+ :type dc_label: ``str``
+ :param stack_update: Specifies if a new stack will be created or a older one will be updated
+ :type stack_update: ``bool``
+ :return: * *True*: If the template version is supported and all resources could be created.
+ * *False*: Else
+ :rtype: ``bool``
+ """
+ if not self.check_template_version(str(input_dict['heat_template_version'])):
+ print('Unsupported template version: ' + input_dict['heat_template_version'], file=sys.stderr)
+ return False
+
+ self.description = input_dict.get('description', None)
+ self.parameter_groups = input_dict.get('parameter_groups', None)
+ self.parameters = input_dict.get('parameters', None)
+ self.resources = input_dict.get('resources', None)
+ self.outputs = input_dict.get('outputs', None)
+ # clear bufferResources
+ self.bufferResource = list()
+
+ for resource in self.resources.values():
+ self.handle_resource(resource, stack, dc_label, stack_update=stack_update)
+
+ # This loop tries to create all classes which had unresolved dependencies.
+ unresolved_resources_last_round = len(self.bufferResource) + 1
+ while len(self.bufferResource) > 0 and unresolved_resources_last_round > len(self.bufferResource):
+ unresolved_resources_last_round = len(self.bufferResource)
+ number_of_items = len(self.bufferResource)
+ while number_of_items > 0:
+ self.handle_resource(self.bufferResource.pop(0), stack, dc_label, stack_update=stack_update)
+ number_of_items -= 1
+
+ if len(self.bufferResource) > 0:
+ print(str(len(self.bufferResource)) +
+ ' classes could not be created, because the dependencies could not be found.')
+ return False
+ return True
+
def handle_resource(self, resource, stack, dc_label, stack_update=False):
    """
    Take one resource dict from a Heat template, determine its type and create
    the corresponding emulator object (network, subnet, port, server, router, ...)
    inside the given stack.
    If the resource cannot be created yet because it references another resource
    that has not been handled so far, it is appended to 'self.bufferResource'
    so the caller can retry it in a later pass.

    :param resource: Dict which contains all important information about the type and parameters.
    :type resource: ``dict``
    :param stack: Reference of the stack that should finally contain the created class.
    :type stack: :class:`heat.resources.stack`
    :param dc_label: String that contains the label of the used data center
    :type dc_label: ``str``
    :param stack_update: Specifies if a new stack will be created or an older one will be updated
    :type stack_update: ``bool``
    :return: void
    :rtype: ``None``
    """
    # NOTE: the branches below are matched by substring, and each branch
    # returns - exactly one resource type is handled per call.
    if "OS::Neutron::Net" in resource['type']:
        try:
            net_name = resource['properties']['name']
            if net_name not in stack.nets:
                stack.nets[net_name] = self.compute.create_network(net_name, True)

        except Exception as e:
            # e.message is Python-2-only; kept as-is since this file targets Py2
            logging.warning('Could not create Net: ' + e.message)
        return

    # 'and "Net" not in ...' guards against 'OS::Neutron::Net' resources whose
    # type string would otherwise also match the Subnet substring test
    if 'OS::Neutron::Subnet' in resource['type'] and "Net" not in resource['type']:
        try:
            net_name = resource['properties']['network']['get_resource']
            if net_name not in stack.nets:
                # network resource not seen yet - create a placeholder net
                net = self.compute.create_network(net_name, stack_update)
                stack.nets[net_name] = net
            else:
                net = stack.nets[net_name]

            net.subnet_name = resource['properties']['name']
            if 'gateway_ip' in resource['properties']:
                net.gateway_ip = resource['properties']['gateway_ip']
            net.subnet_id = resource['properties'].get('id', str(uuid.uuid4()))
            net.subnet_creation_time = str(datetime.now())
            if not stack_update:
                # only allocate a fresh CIDR for brand-new stacks
                net.set_cidr(IP.get_new_cidr(net.subnet_id))
        except Exception as e:
            logging.warning('Could not create Subnet: ' + e.message)
        return

    if 'OS::Neutron::Port' in resource['type']:
        try:
            port_name = resource['properties']['name']
            if port_name not in stack.ports:
                port = self.compute.create_port(port_name, stack_update)
                stack.ports[port_name] = port
            else:
                port = stack.ports[port_name]

            if resource['properties']['network']['get_resource'] in stack.nets:
                net = stack.nets[resource['properties']['network']['get_resource']]
                if net.subnet_id is not None:
                    port.net_name = net.name
                    port.ip_address = net.get_new_ip_address(port.name)
                    # fully resolved - done
                    return
        except Exception as e:
            logging.warning('Could not create Port: ' + e.message)
        # network/subnet not available yet: retry this port in a later pass
        self.bufferResource.append(resource)
        return

    if 'OS::Nova::Server' in resource['type']:
        try:
            # full name: "<dc>_<stack>_<template name>"; the shortened variant
            # is used as the key because container names are length-limited
            compute_name = str(dc_label) + '_' + str(stack.stack_name) + '_' + str(resource['properties']['name'])
            shortened_name = str(dc_label) + '_' + str(stack.stack_name) + '_' + \
                             self.shorten_server_name(str(resource['properties']['name']), stack)
            nw_list = resource['properties']['networks']

            if shortened_name not in stack.servers:
                server = self.compute.create_server(shortened_name, stack_update)
                stack.servers[shortened_name] = server
            else:
                server = stack.servers[shortened_name]

            server.full_name = compute_name
            server.template_name = str(resource['properties']['name'])
            server.command = resource['properties'].get('command', '/bin/sh')
            server.image = resource['properties']['image']
            server.flavor = resource['properties']['flavor']

            for port in nw_list:
                port_name = port['port']['get_resource']
                # just create a port
                # we don't know which network it belongs to yet, but the resource will appear later in a valid
                # template
                if port_name not in stack.ports:
                    stack.ports[port_name] = self.compute.create_port(port_name, stack_update)
                server.port_names.append(port_name)
            return
        except Exception as e:
            logging.warning('Could not create Server: ' + e.message)
            return

    if 'OS::Neutron::RouterInterface' in resource['type']:
        try:
            router_name = None
            subnet_name = resource['properties']['subnet']['get_resource']

            # the router may be referenced by resource or directly by name
            if 'get_resource' in resource['properties']['router']:
                router_name = resource['properties']['router']['get_resource']
            else:
                router_name = resource['properties']['router']

            if router_name not in stack.routers:
                stack.routers[router_name] = Router(router_name)

            for tmp_net in stack.nets.values():
                if tmp_net.subnet_name == subnet_name:
                    stack.routers[router_name].add_subnet(subnet_name)
                    return
        except Exception as e:
            logging.warning('Could not create RouterInterface: ' + e.__repr__())
        # subnet not created yet: retry in a later pass
        self.bufferResource.append(resource)
        return

    if 'OS::Neutron::FloatingIP' in resource['type']:
        try:
            port_name = resource['properties']['port_id']['get_resource']
            floating_network_id = resource['properties']['floating_network_id']
            if port_name not in stack.ports:
                stack.ports[port_name] = self.compute.create_port(port_name, stack_update)

            stack.ports[port_name].floating_ip = floating_network_id
        except Exception as e:
            logging.warning('Could not create FloatingIP: ' + e.message)
        return

    if 'OS::Neutron::Router' in resource['type']:
        try:
            name = resource['properties']['name']
            if name not in stack.routers:
                stack.routers[name] = Router(name)
        except Exception as e:
            # NOTE(review): this branch prints instead of logging like the others
            print('Could not create Router: ' + e.message)
        return

    # none of the known substrings matched
    logging.warning('Could not determine resource type!')
    return
+
def shorten_server_name(self, server_name, stack):
    """
    Shorten a server name to at most 12 characters; if that name is already
    taken inside the stack, append an increasing numeric suffix until the
    result is unused.

    :param server_name: The original server name.
    :type server_name: ``str``
    :param stack: A reference to the used stack.
    :type stack: :class:`heat.resources.stack`
    :return: A string with max. 12 characters plus iterator string.
    :rtype: ``str``
    """
    candidate = self.shorten_name(server_name, 12)
    suffix = 0
    # keep re-deriving the candidate from the 12-char base plus a counter
    while candidate in stack.servers:
        candidate = candidate[:12] + str(suffix)
        suffix += 1
    return candidate
+
def shorten_name(self, name, max_size):
    """
    Shorten a name to max_size characters, dropping anything after the first
    ':' and replacing every '-' with '_'.

    :param name: The original string.
    :type name: ``str``
    :param max_size: The number of allowed characters.
    :type max_size: ``int``
    :return: String with at most max_size characters and without '-'.
    :rtype: ``str``
    """
    base = name.split(':', 1)[0]
    return base.replace("-", "_")[:max_size]
+
def check_template_version(self, version_string):
    """
    Check whether a heat_template_version date is 2015-04-30 or later.

    Fixes over the previous version:
    * the month/day literals were written as ``04`` (octal syntax, a
      SyntaxError on Python 3);
    * the pattern was neither raw nor anchored, so input like
      '2015-04-30xyz' passed the regex and then crashed ``int()``.

    :param version_string: String with the version.
    :type version_string: ``str``
    :return: * *True*: if the version is equal or later than 2015-04-30.
             * *False*: else (including malformed version strings).
    :rtype: ``bool``
    """
    # anchored, raw pattern: reject anything that is not exactly YYYY-MM-DD
    if re.match(r'^\d{4}-\d{2}-\d{2}$', version_string) is None:
        return False

    year, month, day = map(int, version_string.split('-', 2))
    # lexicographic tuple comparison replaces the nested if-cascade
    return (year, month, day) >= (2015, 4, 30)
--- /dev/null
+from resources.net import Net
+import threading
+
# Module-wide lock guarding the allocation state below.
lock = threading.Lock()

# Maps issued subnet base addresses (as ints) -> owning subnet UUID.
__issued_ips = dict()
__default_subnet_size = 256      # one /24 (256 addresses) per subnet
__default_subnet_bitmask = 24
__first_ip = Net.ip_2_int('10.0.0.0')        # allocation pool start: 10.0.0.0/8
__last_ip = Net.ip_2_int('10.255.255.255')   # allocation pool end (exclusive for new CIDRs)
__current_ip = __first_ip                    # next candidate base address
+
+
def get_new_cidr(uuid):
    """
    Calculates an unused /24 CIDR (within 10.0.0.0/8) for a subnet.

    Bug fix: the previous version returned None on pool exhaustion *without*
    releasing the module lock, which deadlocked every subsequent caller. The
    lock is now held via a ``with`` block so it is always released.

    :param uuid: The UUID of the subnet - thus it can store which subnet gets which CIDR
    :type uuid: ``str``
    :return: Returns None if all available CIDR are used. Otherwise returns a valid CIDR.
    :rtype: ``str``
    """
    global __current_ip
    with lock:
        # skip base addresses that are already issued
        while __first_ip <= __current_ip < __last_ip and __current_ip in __issued_ips:
            __current_ip += __default_subnet_size

        if __current_ip >= __last_ip or __current_ip < __first_ip or __current_ip in __issued_ips:
            # pool exhausted - the 'with' block releases the lock (the old
            # code leaked it here)
            return None

        __issued_ips[__current_ip] = uuid
        return Net.int_2_ip(__current_ip) + '/' + str(__default_subnet_bitmask)
+
+
def free_cidr(cidr, uuid):
    """
    Frees an issued CIDR so it can be reused.

    :param cidr: The currently used CIDR.
    :type cidr: ``str``
    :param uuid: The UUID of the Subnet, which uses this CIDR.
    :type uuid: ``str``
    :return: Returns False if the CIDR is None or the UUID did not correspond to the used CIDR.
        Else it returns True.
    :rtype: ``bool``
    """
    if cidr is None:
        return False

    global __current_ip
    int_ip = Net.cidr_2_int(cidr)

    lock.acquire()
    try:
        if int_ip not in __issued_ips or __issued_ips[int_ip] != uuid:
            return False
        del __issued_ips[int_ip]
        # rewind the allocation cursor so the freed range is found again
        if int_ip < __current_ip:
            __current_ip = int_ip
        return True
    finally:
        lock.release()
+
+
def is_cidr_issued(cidr):
    """
    Tells whether a CIDR is currently in use.

    :param cidr: The requested CIDR.
    :type cidr: ``str``
    :return: Returns True if the CIDR is used, else False.
    :rtype: ``bool``
    """
    if cidr is None:
        return False
    return Net.cidr_2_int(cidr) in __issued_ips
+
+
def is_my_cidr(cidr, uuid):
    """
    Checks whether the given subnet UUID owns the given CIDR.

    :param cidr: The issued CIDR.
    :type cidr: ``str``
    :param uuid: The Subnet UUID.
    :type uuid: ``str``
    :return: Returns False if the CIDR is None or if the CIDR is not issued. Else returns True.
    :rtype: ``bool``
    """
    if cidr is None:
        return False

    int_ip = Net.cidr_2_int(cidr)
    # explicit membership check so an unissued CIDR never matches
    if int_ip in __issued_ips and __issued_ips[int_ip] == uuid:
        return True
    return False
+
+
def assign_cidr(cidr, uuid):
    """
    Allows a subnet to request a specific CIDR.

    Bug fix: the availability check now happens *inside* the lock. Previously
    two threads could both pass the unlocked membership test and the second
    one silently overwrote the first one's reservation.

    :param cidr: The requested CIDR.
    :type cidr: ``str``
    :param uuid: The Subnet UUID.
    :type uuid: ``str``
    :return: Returns False if the CIDR is None or if the CIDR is already issued. Returns True if the CIDR could be
        assigned to the UUID.
    :rtype: ``bool``
    """
    if cidr is None:
        return False

    int_ip = Net.cidr_2_int(cidr)

    with lock:
        # check-and-set atomically under the lock
        if int_ip in __issued_ips:
            return False
        __issued_ips[int_ip] = uuid
    return True
--- /dev/null
+"""Openstack manage component of PG Sandman.
+
+.. module:: manage
+ :synopsis: Module containing the OpenstackManage class.
+.. moduleauthor: PG Sandman
+
+"""
+
+import logging
+import threading
+import uuid
+import networkx as nx
+import chain_api
+import json
+import random
+from emuvim.api.openstack.resources import Net, Port
+from mininet.node import OVSSwitch, RemoteController, Node
+from emuvim.api.openstack.monitor_api import MonitorDummyApi
+
+
+class OpenstackManage(object):
+ """
+ OpenstackManage is a singleton and management component for the emulator.
+ It is the brain of the Openstack component and manages everything that is not datacenter specific like
+ network chains or load balancers.
+ """
+ __instance = None
+
+ def __new__(cls):
+ if OpenstackManage.__instance is None:
+ OpenstackManage.__instance = object.__new__(cls)
+ return OpenstackManage.__instance
+
def __init__(self, ip="0.0.0.0", port=4000):
    """
    Initialize the singleton's state and start the chain and monitoring REST
    APIs in daemon threads. Re-running __init__ on the already-initialized
    singleton is a no-op (guarded by the 'init' attribute).

    :param ip: IP address the chain REST API binds to.
    :type ip: ``str``
    :param port: Port of the chain REST API (monitoring uses port 3000).
    :type port: ``int``
    """
    # we are a singleton, only initialize once!
    # NOTE(review): a *fresh* Lock is assigned on every call before the guard,
    # so concurrent first calls each lock their own lock and the guard is not
    # actually thread-safe; a class-level lock would be - confirm intended.
    self.lock = threading.Lock()
    with self.lock:
        if hasattr(self, "init"):
            return
        self.init = True

    # registered OpenStack API endpoints, keyed by "ip:port"
    self.endpoints = dict()
    # issued OpenFlow cookies; 0 is reserved so real cookies start at 1
    self.cookies = set()
    self.cookies.add(0)
    self.ip = ip
    self.port = port
    self._net = None
    # to keep track which src_vnf(input port on the switch) handles a load balancer
    self.lb_flow_cookies = dict()
    self.chain_flow_cookies = dict()

    # for the visualization also store the complete chain data incl. paths
    self.full_chain_data = dict()
    self.full_lb_data = dict()

    # flow groups could be handled for each switch separately, but this global group counter should be easier to
    # debug and to maintain
    self.flow_groups = dict()

    # we want one global chain api. this should not be datacenter dependent!
    self.chain = chain_api.ChainApi(ip, port, self)
    self.thread = threading.Thread(target=self.chain._start_flask, args=())
    self.thread.daemon = True
    self.thread.name = self.chain.__class__
    self.thread.start()

    # monitoring REST API, fixed to port 3000, also in a daemon thread
    self.monitoring = MonitorDummyApi(self.ip, 3000)
    self.thread = threading.Thread(target=self.monitoring._start_flask, args=())
    self.thread.daemon = True
    self.thread.name = self.monitoring.__class__
    self.thread.start()

    # floating ip network setup; populated lazily by init_floating_network()
    self.floating_switch = None
    self.floating_network = None
    self.floating_netmask = "192.168.100.0/24"
    self.floating_nodes = dict()
    self.floating_cookies = dict()
    self.floating_intf = None
    self.floating_links = dict()
+
@property
def net(self):
    # The emulator network (DCNetwork) this manager operates on; None until set.
    return self._net

@net.setter
def net(self, value):
    # The very first assignment also triggers the one-time floating-network
    # setup; later assignments simply replace the reference.
    if self._net is None:
        self._net = value
        self.init_floating_network()
    self._net = value
+
def init_floating_network(self):
    """
    Initialize the floating network component for the emulator:
    a Net with one subnet, a dedicated switch ("fs1") and a root-namespace
    host interface carrying the first IP of the floating range.
    Will not do anything if already initialized (floating_switch set) or if
    no emulator network is attached yet.
    """
    if self.net is not None and self.floating_switch is None:
        # create a floating network
        fn = self.floating_network = Net("default")
        fn.id = str(uuid.uuid4())
        fn.set_cidr(self.floating_netmask)

        # create a subnet
        fn.subnet_id = str(uuid.uuid4())
        fn.subnet_name = fn.name + "-sub"

        # create a port for the host
        port = Port("root-port")
        # port.id = str(uuid.uuid4())
        port.net_name = fn.name

        # get next free ip
        root_ip = fn.get_new_ip_address(port.name)
        port.ip_address = root_ip
        # floating ip network setup
        # weird way of getting a datacenter object
        # NOTE(review): .values()[0] is Python-2-only; Py3 needs list(...)[0]
        first_dc = self.net.dcs.values()[0]
        # set a dpid for the switch. for this we have to get the id of the next possible dc
        self.floating_switch = self.net.addSwitch("fs1", dpid=hex(first_dc._get_next_dc_dpid())[2:])
        # this is the interface appearing on the physical host
        self.floating_root = Node('root', inNamespace=False)
        self.net.hosts.append(self.floating_root)
        self.net.nameToNode['root'] = self.floating_root
        self.floating_intf = self.net.addLink(self.floating_root, self.floating_switch).intf1
        self.floating_root.setIP(root_ip, intf=self.floating_intf)
        self.floating_nodes[(self.floating_root.name, root_ip)] = self.floating_root
+
+
+ def stop_floating_network(self):
+ self._net = None
+ self.floating_switch = None
+
+ def add_endpoint(self, ep):
+ """
+ Registers an openstack endpoint with manage
+
+ :param ep: Openstack API endpoint
+ :type ep: :class:`heat.openstack_api_endpoint`
+ """
+ key = "%s:%s" % (ep.ip, ep.port)
+ self.endpoints[key] = ep
+
+ def get_cookie(self):
+ """
+ Get an unused cookie.
+
+ :return: Cookie
+ :rtype: ``int``
+ """
+ cookie = int(max(self.cookies) + 1)
+ self.cookies.add(cookie)
+ return cookie
+
+ def get_flow_group(self, src_vnf_name, src_vnf_interface):
+ """
+ Gets free group that is not currently used by any other flow for the specified interface / VNF.
+
+ :param src_vnf_name: Source VNF name
+ :type src_vnf_name: ``str``
+ :param src_vnf_interface: Source VNF interface name
+ :type src_vnf_interface: ``str``
+ :return: Flow group identifier.
+ :rtype: ``int``
+ """
+ if (src_vnf_name, src_vnf_interface) not in self.flow_groups:
+ grp = int(len(self.flow_groups) + 1)
+ self.flow_groups[(src_vnf_name, src_vnf_interface)] = grp
+ else:
+ grp = self.flow_groups[(src_vnf_name, src_vnf_interface)]
+ return grp
+
+ def check_vnf_intf_pair(self, vnf_name, vnf_intf_name):
+ """
+ Checks if a VNF exists and has the given interface
+
+ :param vnf_name: Name of the VNF to be checked
+ :type vnf_name: ``str``
+ :param vnf_intf_name: Name of the interface that belongst to the VNF
+ :type vnf_intf_name: ``str``
+ :return: ``True`` if it is valid pair, else ``False``
+ :rtype: ``bool``
+ """
+
+ if vnf_name in self.net:
+ vnf = self.net.getNodeByName(vnf_name)
+ return vnf_intf_name in vnf.nameToIntf
+
def network_action_start(self, vnf_src_name, vnf_dst_name, **kwargs):
    """
    Starts a network chain for a source destination pair.

    :param vnf_src_name: Name of the source VNF
    :type vnf_src_name: ``str``
    :param vnf_dst_name: Name of the destination VNF
    :type vnf_dst_name: ``str``
    :param \**kwargs: See below

    :Keyword Arguments:
        * *vnf_src_interface* (``str``): Name of source interface.
        * *vnf_dst_interface* (``str``): Name of destination interface.
        * *weight* (``int``): This value is fed into the shortest path computation if no path is specified.
        * *match* (``str``): A custom match entry for the openflow flow rules. Only vlanid or port possible.
        * *bidirectional* (``bool``): If set the chain will be set in both directions, else it will just set up \
          from source to destination.
        * *cookie* (``int``): Cookie value used by openflow. Used to identify the flows in the switches to be \
          able to modify the correct flows.
        * *no_route* (``bool``): If set a layer 3 route to the target interface will not be set up.
        * *layer2* (``bool``): Defaults to True; installs an ARP responder and pins the match to the dst MAC.
        * *path* (``list``): Optional explicit list of switches the chain traverses.
    :return: The cookie chosen for the flow.
    :rtype: ``int``
    """
    try:
        vnf_src_interface = kwargs.get('vnf_src_interface')
        vnf_dst_interface = kwargs.get('vnf_dst_interface')
        layer2 = kwargs.get('layer2', True)
        match = kwargs.get('match')
        # a chain is identified by its (src vnf, src intf, dst vnf, dst intf) tuple
        flow = (vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
        if flow in self.chain_flow_cookies:
            raise Exception("There is already a chain at the specified src/dst pair!")
        # set up a layer 2 chain, this allows multiple chains for the same interface
        src_node = self.net.getNodeByName(vnf_src_name)
        dst_node = self.net.getNodeByName(vnf_dst_name)
        dst_intf = dst_node.intf(vnf_dst_interface)
        if layer2:
            # answer ARP for the destination right at the ingress switch and
            # restrict the flow match to the destination MAC
            switch, inport = self._get_connected_switch_data(vnf_src_name, vnf_src_interface)
            self.setup_arp_reply_at(switch, inport, dst_intf.IP(), dst_intf.MAC())
            if isinstance(match, str):
                match += ",dl_dst=%s" % dst_intf.MAC()
            else:
                match = "dl_dst=%s" % dst_intf.MAC()

        cookie = kwargs.get('cookie', self.get_cookie())
        self.cookies.add(cookie)
        # NOTE(review): the return value of setChain is never inspected -
        # confirm it cannot signal a failure that should be handled here.
        c = self.net.setChain(
            vnf_src_name, vnf_dst_name,
            vnf_src_interface=vnf_src_interface,
            vnf_dst_interface=vnf_dst_interface,
            cmd='add-flow',
            weight=kwargs.get('weight'),
            match=match,
            bidirectional=False,
            cookie=cookie,
            path=kwargs.get('path'))

        # to keep this logic seperate of the core son-emu do the housekeeping here
        data = dict()
        data["src_vnf"] = vnf_src_name
        data["src_intf"] = vnf_src_interface
        data["dst_vnf"] = vnf_dst_name
        data["dst_intf"] = vnf_dst_interface
        data["cookie"] = cookie
        data["layer2"] = layer2
        if kwargs.get('path') is not None:
            data["path"] = kwargs.get('path')
        else:
            data["path"] = self._get_path(vnf_src_name, vnf_dst_name, vnf_src_interface,
                                          vnf_dst_interface)[0]

        # add route to dst ip to this interface
        # this might block on containers that are still setting up, so start a new thread
        if not kwargs.get('no_route'):
            # son_emu does not like concurrent commands for a container so we need to lock this if multiple chains
            # on the same interface are created
            src_node.setHostRoute(dst_node.intf(vnf_dst_interface).IP(), vnf_src_interface)

        # merge the destination IP into the chain data stored on the container
        try:
            son_emu_data = json.loads(self.get_son_emu_chain_data(vnf_src_name))
        except:
            # nothing stored yet or unparsable content - start fresh
            son_emu_data = dict()
        if "son_emu_data" not in son_emu_data:
            son_emu_data["son_emu_data"] = dict()
        if "interfaces" not in son_emu_data["son_emu_data"]:
            son_emu_data["son_emu_data"]["interfaces"] = dict()
        if vnf_src_interface not in son_emu_data["son_emu_data"]["interfaces"]:
            son_emu_data["son_emu_data"]["interfaces"][vnf_src_interface] = list()
        son_emu_data["son_emu_data"]["interfaces"][vnf_src_interface].append(dst_intf.IP())

        self.set_son_emu_chain_data(vnf_src_name, son_emu_data)

        if kwargs.get('bidirectional', False):
            # call the reverse direction
            path = kwargs.get('path')
            if path is not None:
                path = list(reversed(path))
            self.network_action_start(vnf_dst_name, vnf_src_name, vnf_src_interface=vnf_dst_interface,
                                      vnf_dst_interface=vnf_src_interface, bidirectional=False,
                                      layer2=kwargs.get('layer2', False), path=path,
                                      no_route=kwargs.get('no_route'))

        self.full_chain_data[flow] = data
        self.chain_flow_cookies[flow] = cookie
        return cookie
    except Exception as ex:
        logging.exception("RPC error.")
        # NOTE(review): ex.message is Python-2-only; str(ex) would be portable
        raise Exception(ex.message)
+
def network_action_stop(self, vnf_src_name, vnf_dst_name, **kwargs):
    """
    Stops a network chain for a source destination pair.

    :param vnf_src_name: Name of the source VNF
    :type vnf_src_name: ``str``
    :param vnf_dst_name: Name of the destination VNF
    :type vnf_dst_name: ``str``
    :param \**kwargs: See below

    :Keyword Arguments:
        * *vnf_src_interface* (``str``): Name of source interface.
        * *vnf_dst_interface* (``str``): Name of destination interface.
        * *bidirectional* (``bool``): If set the chain will be torn down in both directions, else it will just\
          be torn down from source to destination.
        * *cookie* (``int``): Cookie value used by openflow. Used to identify the flows in the switches to be \
          able to modify the correct flows.
    """
    try:
        # a cookie uniquely identifies the chain - prefer it if given
        if 'cookie' in kwargs:
            return self.delete_flow_by_cookie(kwargs.get('cookie'))

        if kwargs.get('bidirectional', False):
            # tear down the reverse direction first
            self.delete_chain_by_intf(vnf_dst_name, kwargs.get('vnf_dst_interface'),
                                      vnf_src_name, kwargs.get('vnf_src_interface'))

        return self.delete_chain_by_intf(vnf_src_name, kwargs.get('vnf_src_interface'),
                                         vnf_dst_name, kwargs.get('vnf_dst_interface'))
    except Exception as ex:
        logging.exception("RPC error.")
        return ex.message
+
def set_son_emu_chain_data(self, vnf_name, data):
    """
    Store son-emu chain data on the node: the full structure as JSON in
    /tmp/son_emu_data.json and a flat list of destination IPs in
    /tmp/son_emu_data.

    :param vnf_name: The name of the vnf where the data is stored.
    :type vnf_name: ``str``
    :param data: Raw data to store on the node.
    :type data: ``str``
    """
    node = self.net.getNodeByName(vnf_name)
    node.cmd("echo \'%s\' > /tmp/son_emu_data.json" % json.dumps(data))

    # flatten all interface IP lists into one newline-separated file
    ip_list = []
    for iface_ips in data['son_emu_data']['interfaces'].values():
        ip_list.extend(iface_ips)
    node.cmd("echo \'%s\' > /tmp/son_emu_data" % "\n".join(ip_list))
+
def get_son_emu_chain_data(self, vnf_name):
    """
    Read back the son-emu chain data stored on this node
    (/tmp/son_emu_data.json).

    :param vnf_name: The name of the vnf where the data is stored.
    :type vnf_name: ``str``
    :return: raw data stored on the node
    :rtype: ``str``
    """
    node = self.net.getNodeByName(vnf_name)
    return node.cmd("cat /tmp/son_emu_data.json")
+
+ def _get_connected_switch_data(self, vnf_name, vnf_interface):
+ """
+ Get the switch an interface is connected to
+ :param vnf_name: Name of the VNF
+ :type vnf_name: ``str``
+ :param vnf_interface: Name of the VNF interface
+ :type vnf_interface: ``str``
+ :return: List containing the switch, and the inport number
+ :rtype: [``str``, ``int``]
+ """
+ src_sw = None
+ src_sw_inport_nr = None
+ for connected_sw in self.net.DCNetwork_graph.neighbors(vnf_name):
+ link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
+ for link in link_dict:
+ if (link_dict[link]['src_port_id'] == vnf_interface or
+ link_dict[link][
+ 'src_port_name'] == vnf_interface):
+ # found the right link and connected switch
+ src_sw = connected_sw
+ src_sw_inport_nr = link_dict[link]['dst_port_nr']
+ break
+
+ return src_sw, src_sw_inport_nr
+
def _get_path(self, src_vnf, dst_vnf, src_vnf_intf, dst_vnf_intf):
    """
    Own implementation of the get_path function from DCNetwork, because we just want the path and not set up
    flows on the way.

    :param src_vnf: Name of the source VNF
    :type src_vnf: ``str``
    :param dst_vnf: Name of the destination VNF
    :type dst_vnf: ``str``
    :param src_vnf_intf: Name of the source VNF interface
    :type src_vnf_intf: ``str``
    :param dst_vnf_intf: Name of the destination VNF interface
    :type dst_vnf_intf: ``str``
    :return: (path, src_sw, dst_sw) on success. NOTE: on failure a plain error
        *string* is returned instead of the tuple - callers must check the type.
    :rtype: ``list``, ``str``, ``str``
    """
    # modified version of the _chainAddFlow from emuvim.dcemulator.net._chainAddFlow
    src_sw = None
    dst_sw = None
    logging.debug("Find shortest path from vnf %s to %s",
                  src_vnf, dst_vnf)

    # locate the switch attached to the source interface
    for connected_sw in self.net.DCNetwork_graph.neighbors(src_vnf):
        link_dict = self.net.DCNetwork_graph[src_vnf][connected_sw]
        for link in link_dict:
            if (link_dict[link]['src_port_id'] == src_vnf_intf or
                    link_dict[link][
                        'src_port_name'] == src_vnf_intf):
                # found the right link and connected switch
                src_sw = connected_sw
                break

    # locate the switch attached to the destination interface
    for connected_sw in self.net.DCNetwork_graph.neighbors(dst_vnf):
        link_dict = self.net.DCNetwork_graph[connected_sw][dst_vnf]
        for link in link_dict:
            if link_dict[link]['dst_port_id'] == dst_vnf_intf or \
                    link_dict[link][
                        'dst_port_name'] == dst_vnf_intf:
                # found the right link and connected
                dst_sw = connected_sw
                break
    logging.debug("From switch %s to %s " % (src_sw, dst_sw))

    # get shortest path
    try:
        # returns the first found shortest path
        # if all shortest paths are wanted, use: all_shortest_paths
        path = nx.shortest_path(self.net.DCNetwork_graph, src_sw, dst_sw)
    except:
        logging.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
            src_vnf, dst_vnf, src_sw, dst_sw))
        logging.debug("Graph nodes: %r" % self.net.DCNetwork_graph.nodes())
        logging.debug("Graph edges: %r" % self.net.DCNetwork_graph.edges())
        for e, v in self.net.DCNetwork_graph.edges():
            logging.debug("%r" % self.net.DCNetwork_graph[e][v])
        return "No path could be found between {0} and {1}".format(src_vnf, dst_vnf)

    logging.info("Shortest path between {0} and {1}: {2}".format(src_vnf, dst_vnf, path))
    return path, src_sw, dst_sw
+
+ def add_loadbalancer(self, src_vnf_name, src_vnf_interface, lb_data):
+ """
+ This function will set up a loadbalancer at the given interface.
+
+ :param src_vnf_name: Name of the source VNF
+ :type src_vnf_name: ``str``
+ :param src_vnf_interface: Name of the destination VNF
+ :type src_vnf_interface: ``str``
+ :param lb_data: A dictionary containing the destination data as well as custom path settings
+ :type lb_data: ``dict``
+
+ :Example:
+ lbdata = {"dst_vnf_interfaces": {"dc2_man_web0": "port-man-2",
+ "dc3_man_web0": "port-man-4","dc4_man_web0": "port-man-6"}, "path": {"dc2_man_web0": {"port-man-2": [ "dc1.s1",\
+ "s1", "dc2.s1"]}}}
+ """
+ net = self.net
+ src_sw_inport_nr = 0
+ src_sw = None
+ dest_intfs_mapping = lb_data.get('dst_vnf_interfaces', dict())
+ # a custom path can be specified as a list of switches
+ custom_paths = lb_data.get('path', dict())
+ dest_vnf_outport_nrs = list()
+
+ logging.debug("Call to add_loadbalancer at %s intfs:%s" % (src_vnf_name, src_vnf_interface))
+
+ if not self.check_vnf_intf_pair(src_vnf_name, src_vnf_interface):
+ raise Exception(u"Source VNF %s or intfs %s does not exist" % (src_vnf_name, src_vnf_interface))
+
+ # find the switch belonging to the source interface, as well as the inport nr
+ for connected_sw in net.DCNetwork_graph.neighbors(src_vnf_name):
+ link_dict = net.DCNetwork_graph[src_vnf_name][connected_sw]
+ for link in link_dict:
+ if link_dict[link]['src_port_name'] == src_vnf_interface:
+ src_sw = connected_sw
+ src_sw_inport_nr = link_dict[link]['dst_port_nr']
+ break
+
+ if src_sw is None or src_sw_inport_nr == 0:
+ raise Exception(u"Source VNF or interface can not be found.")
+
+ # get all target interface outport numbers
+ for vnf_name in dest_intfs_mapping:
+ if vnf_name not in net.DCNetwork_graph:
+ raise Exception(u"Target VNF %s is not known." % vnf_name)
+ for connected_sw in net.DCNetwork_graph.neighbors(vnf_name):
+ link_dict = net.DCNetwork_graph[vnf_name][connected_sw]
+ for link in link_dict:
+ if link_dict[link]['src_port_name'] == dest_intfs_mapping[vnf_name]:
+ dest_vnf_outport_nrs.append(int(link_dict[link]['dst_port_nr']))
+ # get first switch
+ if (src_vnf_name, src_vnf_interface) not in self.lb_flow_cookies:
+ self.lb_flow_cookies[(src_vnf_name, src_vnf_interface)] = list()
+
+ src_intf = None
+ src_ip = None
+ src_mac = None
+ for intf in net[src_vnf_name].intfs.values():
+ if intf.name == src_vnf_interface:
+ src_mac = intf.mac
+ src_ip = intf.ip
+ src_intf = intf
+
+ # set up paths for each destination vnf individually
+ index = 0
+ cookie = self.get_cookie()
+ main_cmd = "add-flow -OOpenFlow13"
+ self.lb_flow_cookies[(src_vnf_name, src_vnf_interface)].append(cookie)
+
+ # bookkeeping
+ data = dict()
+ data["src_vnf"] = src_vnf_name
+ data["src_intf"] = src_vnf_interface
+ data["paths"] = list()
+ data["cookie"] = cookie
+
+ # lb mac for src -> target connections
+ lb_mac = "31:33:70:%02x:%02x:%02x" % (random.randint(0, 255),random.randint(0, 255),random.randint(0, 255))
+
+ # calculate lb ip as src_intf.ip +1
+ octets = src_ip.split('.')
+ octets[3] = str(int(octets[3]) + 1)
+ plus_one = '.'.join(octets)
+
+ # set up arp reply as well as add the route to the interface
+ self.setup_arp_reply_at(src_sw, src_sw_inport_nr, plus_one, lb_mac, cookie=cookie)
+ net.getNodeByName(src_vnf_name).setHostRoute(plus_one, src_vnf_interface)
+
+ for dst_vnf_name, dst_vnf_interface in dest_intfs_mapping.items():
+ path, src_sw, dst_sw = self._get_path(src_vnf_name, dst_vnf_name,
+ src_vnf_interface, dst_vnf_interface)
+
+ # use custom path if one is supplied
+ # json does not support hashing on tuples so we use nested dicts
+ if custom_paths is not None and dst_vnf_name in custom_paths:
+ if dst_vnf_interface in custom_paths[dst_vnf_name]:
+ path = custom_paths[dst_vnf_name][dst_vnf_interface]
+ logging.debug("Taking custom path from %s to %s: %s" % (src_vnf_name, dst_vnf_name, path))
+
+ if not self.check_vnf_intf_pair(dst_vnf_name, dst_vnf_interface):
+ self.delete_loadbalancer(src_vnf_name, src_vnf_interface)
+ raise Exception(u"VNF %s or intfs %s does not exist" % (dst_vnf_name, dst_vnf_interface))
+ if isinstance(path, dict):
+ self.delete_loadbalancer(src_vnf_name, src_vnf_interface)
+ raise Exception(u"Can not find a valid path. Are you specifying the right interfaces?.")
+
+ target_mac = "fa:17:00:03:13:37"
+ target_ip = "0.0.0.0"
+ for intf in net[dst_vnf_name].intfs.values():
+ if intf.name == dst_vnf_interface:
+ target_mac = str(intf.mac)
+ target_ip = str(intf.ip)
+ dst_sw_outport_nr = dest_vnf_outport_nrs[index]
+ current_hop = src_sw
+ switch_inport_nr = src_sw_inport_nr
+
+ #self.setup_arp_reply_at(src_sw, src_sw_inport_nr, target_ip, target_mac, cookie=cookie)
+ net.getNodeByName(dst_vnf_name).setHostRoute(src_ip, dst_vnf_interface)
+
+ # choose free vlan if path contains more than 1 switch
+ if len(path) > 1:
+ vlan = net.vlans.pop()
+ if vlan == 0:
+ vlan = net.vlans.pop()
+ else:
+ vlan = None
+
+ single_flow_data = dict()
+ single_flow_data["dst_vnf"] = dst_vnf_name
+ single_flow_data["dst_intf"] = dst_vnf_interface
+ single_flow_data["path"] = path
+ single_flow_data["vlan"] = vlan
+ single_flow_data["cookie"] = cookie
+
+ data["paths"].append(single_flow_data)
+
+ # src to target
+ for i in range(0, len(path)):
+ if i < len(path) - 1:
+ next_hop = path[i + 1]
+ else:
+ # last switch reached
+ next_hop = dst_vnf_name
+ next_node = net.getNodeByName(next_hop)
+ if next_hop == dst_vnf_name:
+ switch_outport_nr = dst_sw_outport_nr
+ logging.info("end node reached: {0}".format(dst_vnf_name))
+ elif not isinstance(next_node, OVSSwitch):
+ logging.info("Next node: {0} is not a switch".format(next_hop))
+ return "Next node: {0} is not a switch".format(next_hop)
+ else:
+ # take first link between switches by default
+ index_edge_out = 0
+ switch_outport_nr = net.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
+
+ cmd = 'priority=1,in_port=%s,cookie=%s' % (switch_inport_nr, cookie)
+ cmd_back = 'priority=1,in_port=%s,cookie=%s' % (switch_outport_nr, cookie)
+ # if a vlan is picked, the connection is routed through multiple switches
+ if vlan is not None:
+ if path.index(current_hop) == 0: # first node
+ # flow #index set up
+ cmd = 'in_port=%s' % src_sw_inport_nr
+ cmd += ',cookie=%s' % cookie
+ cmd += ',table=%s' % cookie
+ cmd += ',ip'
+ cmd += ',reg1=%s' % index
+ cmd += ',actions='
+ # set vlan id
+ cmd += ',push_vlan:0x8100'
+ masked_vlan = vlan | 0x1000
+ cmd += ',set_field:%s->vlan_vid' % masked_vlan
+ cmd += ',set_field:%s->eth_dst' % target_mac
+ cmd += ',set_field:%s->ip_dst' % target_ip
+ cmd += ',output:%s' % switch_outport_nr
+
+ # last switch for reverse route
+ # remove any vlan tags
+ cmd_back += ',dl_vlan=%s' % vlan
+ cmd_back += ',actions=pop_vlan,output:%s' % switch_inport_nr
+ elif next_hop == dst_vnf_name: # last switch
+ # remove any vlan tags
+ cmd += ',dl_vlan=%s' % vlan
+ cmd += ',actions=pop_vlan,output:%s' % switch_outport_nr
+ # set up arp replys at the port so the dst nodes know the src
+ self.setup_arp_reply_at(current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
+
+ # reverse route
+ cmd_back = 'in_port=%s' % switch_outport_nr
+ cmd_back += ',cookie=%s' % cookie
+ cmd_back += ',ip'
+ cmd_back += ',actions='
+ cmd_back += 'push_vlan:0x8100'
+ masked_vlan = vlan | 0x1000
+ cmd_back += ',set_field:%s->vlan_vid' % masked_vlan
+ cmd_back += ',set_field:%s->eth_src' % lb_mac
+ cmd_back += ',set_field:%s->ip_src' % plus_one
+ cmd_back += ',output:%s' % switch_inport_nr
+ else: # middle nodes
+ # if we have a circle in the path we need to specify this, as openflow will ignore the packet
+ # if we just output it on the same port as it came in
+ if switch_inport_nr == switch_outport_nr:
+ cmd += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
+ cmd_back += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
+ else:
+ cmd += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_outport_nr)
+ cmd_back += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_inport_nr)
+ # output the packet at the correct outport
+ else:
+ cmd = 'in_port=%s' % src_sw_inport_nr
+ cmd += ',cookie=%s' % cookie
+ cmd += ',table=%s' % cookie
+ cmd += ',ip'
+ cmd += ',reg1=%s' % index
+ cmd += ',actions='
+ cmd += ',set_field:%s->eth_dst' % target_mac
+ cmd += ',set_field:%s->ip_dst' % target_ip
+ cmd += ',output:%s' % switch_outport_nr
+
+ # reverse route
+ cmd_back = 'in_port=%s' % switch_outport_nr
+ cmd_back += ',cookie=%s' % cookie
+ cmd_back += ',ip'
+ cmd_back += ',actions='
+ cmd_back += ',set_field:%s->eth_src' % lb_mac
+ cmd_back += ',set_field:%s->ip_src' % plus_one
+ cmd_back += ',output:%s' % src_sw_inport_nr
+
+ self.setup_arp_reply_at(current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
+
+ # excecute the command on the target switch
+ logging.debug(cmd)
+ cmd = "\"%s\"" % cmd
+ cmd_back = "\"%s\"" % cmd_back
+ net[current_hop].dpctl(main_cmd, cmd)
+ net[current_hop].dpctl(main_cmd, cmd_back)
+
+ # set next hop for the next iteration step
+ if isinstance(next_node, OVSSwitch):
+ switch_inport_nr = net.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
+ current_hop = next_hop
+
+ # advance to next destination
+ index += 1
+
+ # set up the actual load balancing rule as a multipath on the very first switch
+ cmd = '"in_port=%s' % src_sw_inport_nr
+ cmd += ',cookie=%s' % (cookie)
+ cmd += ',ip'
+ cmd += ',actions='
+ # push 0x01 into the first register
+ cmd += 'load:0x1->NXM_NX_REG0[]'
+ # load balance modulo n over all dest interfaces
+ # TODO: in newer openvswitch implementations this should be changed to symmetric_l3l4+udp
+ # to balance any kind of traffic
+ cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(dest_intfs_mapping)
+ # reuse the cookie as table entry as it will be unique
+ cmd += ',resubmit(, %s)"' % cookie
+
+ # actually add the flow
+ logging.debug("Switch: %s, CMD: %s" % (src_sw, cmd))
+ net[src_sw].dpctl(main_cmd, cmd)
+
+ # finally add all flow data to the internal data storage
+ self.full_lb_data[(src_vnf_name, src_vnf_interface)] = data
+
+    def add_floating_lb(self, datacenter, lb_data):
+        """
+        This function will set up a loadbalancer at the given datacenter.
+        This function returns the floating ip assigned to the loadbalancer as multiple ones are possible.
+
+        :param datacenter: The datacenter entrypoint
+        :type datacenter: ``str``
+        :param lb_data: A dictionary containing the destination data as well as custom path settings
+        :type lb_data: ``dict``
+        :return: The flow cookie used for this loadbalancer and the assigned floating ip
+        :rtype: ``tuple`` of (``int``, ``str``)
+
+        :Example:
+        lbdata = {"dst_vnf_interfaces": {"dc2_man_web0": "port-man-2",
+        "dc3_man_web0": "port-man-4","dc4_man_web0": "port-man-6"}, "path": {"dc2_man_web0": {"port-man-2": [ "dc1.s1",\
+        "s1", "dc2.s1"]}}}
+        """
+        net = self.net
+        # the floating switch reaches the rest of the network via port 1
+        src_sw_inport_nr = 1
+        src_sw = self.floating_switch.name
+        dest_intfs_mapping = lb_data.get('dst_vnf_interfaces', dict())
+        # a custom path can be specified as a list of switches
+        custom_paths = lb_data.get('path', dict())
+        dest_vnf_outport_nrs = list()
+
+        if datacenter not in self.net.dcs:
+            raise Exception(u"Source datacenter can not be found.")
+
+        # get all target interface outport numbers
+        for vnf_name in dest_intfs_mapping:
+            if vnf_name not in net.DCNetwork_graph:
+                raise Exception(u"Target VNF %s is not known." % vnf_name)
+            for connected_sw in net.DCNetwork_graph.neighbors(vnf_name):
+                link_dict = net.DCNetwork_graph[vnf_name][connected_sw]
+                for link in link_dict:
+                    # match the requested interface name against the graph's link data
+                    if link_dict[link]['src_port_name'] == dest_intfs_mapping[vnf_name]:
+                        dest_vnf_outport_nrs.append(int(link_dict[link]['dst_port_nr']))
+
+        if len(dest_vnf_outport_nrs) == 0:
+            raise Exception("There are no paths specified for the loadbalancer")
+        src_ip = self.floating_intf.IP()
+        src_mac = self.floating_intf.MAC()
+
+        # set up paths for each destination vnf individually
+        index = 0
+        cookie = self.get_cookie()
+        main_cmd = "add-flow -OOpenFlow13"
+        # strip the netmask suffix ("a.b.c.d/nn" -> "a.b.c.d")
+        floating_ip = self.floating_network.get_new_ip_address("floating-ip").split("/")[0]
+
+        for dst_vnf_name, dst_vnf_interface in dest_intfs_mapping.items():
+            path = None
+            # use custom path if one is supplied
+            # json does not support hashing on tuples so we use nested dicts
+            if custom_paths is not None and dst_vnf_name in custom_paths:
+                if dst_vnf_interface in custom_paths[dst_vnf_name]:
+                    path = custom_paths[dst_vnf_name][dst_vnf_interface]
+                    logging.debug("Taking custom path to %s: %s" % (dst_vnf_name, path))
+            else:
+                # lazily wire the floating switch to the datacenter on first use
+                if datacenter not in self.floating_links:
+                    self.floating_links[datacenter] = \
+                        net.addLink(self.floating_switch, datacenter)
+                path = \
+                    self._get_path(self.floating_root.name, dst_vnf_name, self.floating_intf.name, dst_vnf_interface)[0]
+
+            # _get_path returns a dict on failure; roll back the flows installed so far
+            if isinstance(path, dict):
+                self.delete_flow_by_cookie(cookie)
+                raise Exception(u"Can not find a valid path. Are you specifying the right interfaces?.")
+
+            intf = net[dst_vnf_name].nameToIntf[dst_vnf_interface]
+            target_mac = str(intf.MAC())
+            target_ip = str(intf.IP())
+            dst_sw_outport_nr = dest_vnf_outport_nrs[index]
+            current_hop = src_sw
+            switch_inport_nr = src_sw_inport_nr
+            # each destination gets its own vlan so the per-destination flows stay apart
+            vlan = net.vlans.pop()
+
+            # iterate all switches on the path
+            for i in range(0, len(path)):
+                if i < len(path) - 1:
+                    next_hop = path[i + 1]
+                else:
+                    # last switch reached
+                    next_hop = dst_vnf_name
+                next_node = net.getNodeByName(next_hop)
+
+                # sanity checks
+                if next_hop == dst_vnf_name:
+                    switch_outport_nr = dst_sw_outport_nr
+                    logging.info("end node reached: {0}".format(dst_vnf_name))
+                elif not isinstance(next_node, OVSSwitch):
+                    logging.info("Next node: {0} is not a switch".format(next_hop))
+                    return "Next node: {0} is not a switch".format(next_hop)
+                else:
+                    # take first link between switches by default
+                    index_edge_out = 0
+                    switch_outport_nr = net.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
+
+                # default filters, just overwritten on the first node and last node
+                cmd = 'priority=1,in_port=%s,cookie=%s' % (switch_inport_nr, cookie)
+                cmd_back = 'priority=1,in_port=%s,cookie=%s' % (switch_outport_nr, cookie)
+                if i == 0:  # first node
+                    # match only traffic to this loadbalancer's floating ip, in the
+                    # table selected by the multipath rule, for this destination (reg1)
+                    cmd = 'in_port=%s' % src_sw_inport_nr
+                    cmd += ',cookie=%s' % cookie
+                    cmd += ',table=%s' % cookie
+                    cmd += ',ip'
+                    cmd += ',ip_dst=%s' % floating_ip
+                    cmd += ',reg1=%s' % index
+                    cmd += ',actions='
+                    # set vlan id
+                    # NOTE(review): this produces "actions=,push_vlan:..." with a leading
+                    # comma -- appears to rely on ovs-ofctl tolerating it; verify
+                    cmd += ',push_vlan:0x8100'
+                    # vlan_vid needs the CFI/present bit (0x1000) set
+                    masked_vlan = vlan | 0x1000
+                    cmd += ',set_field:%s->vlan_vid' % masked_vlan
+                    cmd += ',set_field:%s->eth_dst' % target_mac
+                    cmd += ',set_field:%s->ip_dst' % target_ip
+                    cmd += ',output:%s' % switch_outport_nr
+
+                    # last switch for reverse route
+                    # remove any vlan tags
+                    cmd_back += ',dl_vlan=%s' % vlan
+                    cmd_back += ',actions=pop_vlan,output:%s' % switch_inport_nr
+                    # answer ARP requests for the floating ip at the entry port
+                    self.setup_arp_reply_at(current_hop, src_sw_inport_nr, floating_ip, target_mac, cookie=cookie)
+                elif next_hop == dst_vnf_name:  # last switch
+                    # remove any vlan tags
+                    cmd += ',dl_vlan=%s' % vlan
+                    cmd += ',actions=pop_vlan,output:%s' % switch_outport_nr
+                    # set up arp replies at the port so the dst nodes know the src
+                    self.setup_arp_reply_at(current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
+
+                    # reverse route: re-tag and rewrite the source back to the floating ip
+                    cmd_back = 'in_port=%s' % switch_outport_nr
+                    cmd_back += ',cookie=%s' % cookie
+                    cmd_back += ',ip'
+                    cmd_back += ',actions='
+                    cmd_back += 'push_vlan:0x8100'
+                    masked_vlan = vlan | 0x1000
+                    cmd_back += ',set_field:%s->vlan_vid' % masked_vlan
+                    cmd_back += ',set_field:%s->eth_src' % src_mac
+                    cmd_back += ',set_field:%s->ip_src' % floating_ip
+                    cmd_back += ',output:%s' % switch_inport_nr
+                    # make the destination route replies for src_ip via this interface
+                    net.getNodeByName(dst_vnf_name).setHostRoute(src_ip, dst_vnf_interface)
+                else:  # middle node
+                    # if we have a circle in the path we need to specify this, as openflow will ignore the packet
+                    # if we just output it on the same port as it came in
+                    if switch_inport_nr == switch_outport_nr:
+                        cmd += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
+                        cmd_back += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
+                    else:
+                        cmd += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_outport_nr)
+                        cmd_back += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_inport_nr)
+
+                # execute the command on the target switch
+                logging.debug(cmd)
+                cmd = "\"%s\"" % cmd
+                cmd_back = "\"%s\"" % cmd_back
+                net[current_hop].dpctl(main_cmd, cmd)
+                net[current_hop].dpctl(main_cmd, cmd_back)
+
+                # set next hop for the next iteration step
+                if isinstance(next_node, OVSSwitch):
+                    switch_inport_nr = net.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
+                    current_hop = next_hop
+
+            # advance to next destination
+            index += 1
+
+        # set up the actual load balancing rule as a multipath on the very first switch
+        cmd = '"in_port=%s' % src_sw_inport_nr
+        cmd += ',cookie=%s' % (cookie)
+        cmd += ',ip'
+        cmd += ',actions='
+        # push 0x01 into the first register
+        cmd += 'load:0x1->NXM_NX_REG0[]'
+        # load balance modulo n over all dest interfaces
+        # TODO: in newer openvswitch implementations this should be changed to symmetric_l3l4+udp
+        # to balance any kind of traffic
+        cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(dest_intfs_mapping)
+        # reuse the cookie as table entry as it will be unique
+        cmd += ',resubmit(, %s)"' % cookie
+
+        # actually add the flow
+        logging.debug("Switch: %s, CMD: %s" % (src_sw, cmd))
+        net[src_sw].dpctl(main_cmd, cmd)
+
+        self.floating_cookies[cookie] = floating_ip
+
+        return cookie, floating_ip
+
+    def setup_arp_reply_at(self, switch, port_nr, target_ip, target_mac, cookie=None):
+        """
+        Sets up a custom ARP reply at a switch.
+        An ARP request coming in on the `port_nr` for `target_ip` will be answered with target IP/MAC.
+
+        :param switch: The switch belonging to the interface
+        :type switch: ``str``
+        :param port_nr: The port number at the switch that is connected to the interface
+        :type port_nr: ``int``
+        :param target_ip: The IP for which to set up the ARP reply
+        :type target_ip: ``str``
+        :param target_mac: The MAC address of the target interface
+        :type target_mac: ``str``
+        :param cookie: cookie to identify the ARP request, if None a new one will be picked
+        :type cookie: ``int`` or ``None``
+        :return: cookie
+        :rtype: ``int``
+        """
+        if cookie is None:
+            cookie = self.get_cookie()
+        main_cmd = "add-flow -OOpenFlow13"
+
+        # first set up ARP requests for the source node, so it will always 'find' a partner
+        # the reply is crafted entirely inside the switch by rewriting the request packet
+        cmd = '"in_port=%s' % port_nr
+        cmd += ',cookie=%s' % cookie
+        cmd += ',arp'
+        # only answer for target ip arp requests
+        cmd += ',arp_tpa=%s' % target_ip
+        cmd += ',actions='
+        # set message type to ARP reply
+        cmd += 'load:0x2->NXM_OF_ARP_OP[]'
+        # set src ip as dst ip
+        cmd += ',move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[]'
+        # set src mac
+        cmd += ',set_field:%s->eth_src' % target_mac
+        # set src as target
+        cmd += ',move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[]'
+        # set target mac as hex
+        cmd += ',load:0x%s->NXM_NX_ARP_SHA[]' % "".join(target_mac.split(':'))
+        # set target ip as hex
+        octets = target_ip.split('.')
+        dst_ip_hex = '{:02X}{:02X}{:02X}{:02X}'.format(*map(int, octets))
+        cmd += ',load:0x%s->NXM_OF_ARP_SPA[]' % dst_ip_hex
+        # output to incoming port remember the closing "
+        cmd += ',IN_PORT"'
+        self.net[switch].dpctl(main_cmd, cmd)
+        logging.debug(
+            "Set up ARP reply at %s port %s." % (switch, port_nr))
+        # fix: the docstring promises the cookie as return value, but the
+        # original implementation fell off the end and returned None
+        return cookie
+
+    def delete_flow_by_cookie(self, cookie):
+        """
+        Removes a flow identified by the cookie
+
+        :param cookie: The cookie for the specified flow
+        :type cookie: ``int``
+        :return: True if successful, else false
+        :rtype: ``bool``
+        """
+        # falsy cookie (None or 0) cannot identify a flow
+        if not cookie:
+            return False
+        logging.debug("Deleting flow by cookie %d" % (cookie))
+        flows = list()
+        # we have to call delete-group for each switch
+        for node in self.net.switches:
+            flow = dict()
+            flow["dpid"] = int(node.dpid, 16)
+            flow["cookie"] = cookie
+            # full 64-bit mask: match the cookie exactly
+            flow['cookie_mask'] = int('0xffffffffffffffff', 16)
+
+            flows.append(flow)
+        for flow in flows:
+            logging.debug("Deleting flowentry with cookie %d" % (
+                flow["cookie"]))
+            # deletion goes through the Ryu REST API; only done when a remote
+            # controller class is configured
+            if self.net.controller == RemoteController:
+                self.net.ryu_REST('stats/flowentry/delete', data=flow)
+
+        # NOTE(review): list.remove raises ValueError if the cookie was never
+        # registered (e.g. via get_cookie) -- confirm callers only pass known cookies
+        self.cookies.remove(cookie)
+        return True
+
+    def delete_chain_by_intf(self, src_vnf_name, src_vnf_intf, dst_vnf_name, dst_vnf_intf):
+        """
+        Removes a flow identified by the vnf_name/vnf_intf pairs
+
+        :param src_vnf_name: The vnf name for the specified flow
+        :type src_vnf_name: ``str``
+        :param src_vnf_intf: The interface name for the specified flow
+        :type src_vnf_intf: ``str``
+        :param dst_vnf_name: The vnf name for the specified flow
+        :type dst_vnf_name: ``str``
+        :param dst_vnf_intf: The interface name for the specified flow
+        :type dst_vnf_intf: ``str``
+        :return: True if successful, else false
+        :rtype: ``bool``
+        """
+        logging.debug("Deleting flow for vnf/intf pair %s %s" % (src_vnf_name, src_vnf_intf))
+        # both endpoints must be valid vnf/interface pairs
+        if not self.check_vnf_intf_pair(src_vnf_name, src_vnf_intf):
+            return False
+        if not self.check_vnf_intf_pair(dst_vnf_name, dst_vnf_intf):
+            return False
+        target_flow = (src_vnf_name, src_vnf_intf, dst_vnf_name, dst_vnf_intf)
+        # only chains that were registered through this manager can be deleted
+        if not target_flow in self.chain_flow_cookies:
+            return False
+
+        success = self.delete_flow_by_cookie(self.chain_flow_cookies[target_flow])
+
+        # drop the bookkeeping entries only after the flow deletion succeeded
+        if success:
+            del self.chain_flow_cookies[target_flow]
+            del self.full_chain_data[target_flow]
+            return True
+        return False
+
+    def delete_loadbalancer(self, vnf_src_name, vnf_src_interface):
+        '''
+        Removes a loadbalancer that is configured for the node and interface
+
+        :param vnf_src_name: Name of the source VNF
+        :type vnf_src_name: ``str``
+        :param vnf_src_interface: Name of the source VNF interface the loadbalancer is attached to
+        :type vnf_src_interface: ``str``
+        '''
+        flows = list()
+        # we have to call delete-group for each switch
+        delete_group = list()
+        group_id = self.get_flow_group(vnf_src_name, vnf_src_interface)
+        for node in self.net.switches:
+            # one flow-delete entry per (switch, cookie) combination
+            for cookie in self.lb_flow_cookies[(vnf_src_name, vnf_src_interface)]:
+                flow = dict()
+                flow["dpid"] = int(node.dpid, 16)
+                flow["cookie"] = cookie
+                # full 64-bit mask: match the cookie exactly
+                flow['cookie_mask'] = int('0xffffffffffffffff', 16)
+
+                flows.append(flow)
+            group_del = dict()
+            group_del["dpid"] = int(node.dpid, 16)
+            group_del["group_id"] = group_id
+            delete_group.append(group_del)
+
+        for flow in flows:
+            logging.debug("Deleting flowentry with cookie %d belonging to lb at %s:%s" % (
+                flow["cookie"], vnf_src_name, vnf_src_interface))
+            # deletion goes through the Ryu REST API when a remote controller is used
+            if self.net.controller == RemoteController:
+                self.net.ryu_REST('stats/flowentry/delete', data=flow)
+
+        logging.debug("Deleting group with id %s" % group_id)
+        for switch_del_group in delete_group:
+            if self.net.controller == RemoteController:
+                self.net.ryu_REST("stats/groupentry/delete", data=switch_del_group)
+
+        # unmap groupid from the interface
+        target_pair = (vnf_src_name, vnf_src_interface)
+        if target_pair in self.flow_groups:
+            del self.flow_groups[target_pair]
+        if target_pair in self.full_lb_data:
+            del self.full_lb_data[target_pair]
+        # NOTE(review): the lb_flow_cookies entry for target_pair is not removed
+        # here, so the cookies remain recorded after deletion -- confirm intended
+
+    def delete_floating_lb(self, cookie):
+        """
+        Delete a floating loadbalancer.
+        Floating loadbalancers are different from normal ones as there are multiple ones on the same interface.
+
+        :param cookie: The cookie of the loadbalancer
+        :type cookie: ``int``
+        :raises Exception: If no floating loadbalancer is known for the given cookie
+        """
+        cookie = int(cookie)
+        if cookie not in self.floating_cookies:
+            raise Exception("Can not delete floating loadbalancer as the flowcookie is not known")
+
+        self.delete_flow_by_cookie(cookie)
+        floating_ip = self.floating_cookies[cookie]
+        self.floating_network.withdraw_ip_address(floating_ip)
+        # fix: also drop the cookie -> floating_ip mapping; the original left a
+        # stale entry behind, so the deleted loadbalancer still appeared known
+        del self.floating_cookies[cookie]
+
+    def set_arp_entry(self, vnf_name, vnf_interface, ip, mac):
+        """
+        Sets an arp entry on the specified VNF. This is done on the node directly and not by open vswitch!
+
+        :param vnf_name: Name of the VNF
+        :type vnf_name: ``str``
+        :param vnf_interface: Name of the interface
+        :type vnf_interface: ``str``
+        :param ip: IP to reply to
+        :type ip: ``str``
+        :param mac: Answer with this MAC
+        :type mac: ``str``
+        """
+        node = self.net.getNodeByName(vnf_name)
+        # runs the net-tools "arp" binary inside the node; the arguments are
+        # interpolated into a shell command, so values are assumed to come from
+        # trusted (emulator-internal) callers -- TODO confirm
+        node.cmd("arp -i %s -s %s %s" % (vnf_interface, ip, mac))
--- /dev/null
+from flask_restful import Resource
+from flask import Response, request
+from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
+import emuvim.api.openstack.docker_util as DockerUtil
+import logging
+import json
+import time
+
+
+class MonitorDummyApi(BaseOpenstackDummy):
+    """
+    REST endpoint exposing monitoring data (network, cpu, memory) for the
+    emulated docker containers. This API is not part of real OpenStack.
+    """
+
+    def __init__(self, inc_ip, inc_port):
+        super(MonitorDummyApi, self).__init__(inc_ip, inc_port)
+
+        # each resource gets a back-reference to this endpoint via kwargs
+        self.api.add_resource(MonitorVersionsList, "/",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(MonitorVnf, "/v1/monitor/<vnf_name>",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(MonitorVnfAbs, "/v1/monitor/abs/<vnf_name>",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(MonitorVnfDcStack, "/v1/monitor/<dc>/<stack>/<vnf_name>",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(Shutdown, "/shutdown")
+
+    def _start_flask(self):
+        """Run the flask app for this endpoint (blocking; meant to run in a thread)."""
+        logging.info("Starting %s endpoint @ http://%s:%d" % ("MonitorDummyApi", self.ip, self.port))
+        if self.app is not None:
+            # threaded=True since monitoring requests may block for about a
+            # second each (see MonitorVnf.get)
+            self.app.run(self.ip, self.port, debug=True, use_reloader=False, threaded=True)
+
+
+class Shutdown(Resource):
+    """
+    A get request to /shutdown will shut down this endpoint.
+    """
+
+    def get(self):
+        """Stop the werkzeug server that serves this API endpoint."""
+        # fix: corrected typo in the log message ("beeing" -> "being")
+        logging.debug(("%s is being shut down") % (__name__))
+        func = request.environ.get('werkzeug.server.shutdown')
+        if func is None:
+            raise RuntimeError('Not running with the Werkzeug Server')
+        func()
+
+
+class MonitorVersionsList(Resource):
+    """Lists the available versions of the monitoring API."""
+
+    def __init__(self, api):
+        # back-reference to the MonitorDummyApi (for ip/port in version links)
+        self.api = api
+
+
+    def get(self):
+        """
+        List API versions.
+
+        :return: Returns the api versions.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+
+        # at least let it look like an open stack function
+        try:
+            resp = dict()
+            resp['versions'] = dict()
+            resp['versions'] = [{
+                "id": "v1",
+                "links": [{
+                    "href": "http://%s:%d/v1/" % (self.api.ip, self.api.port),
+                    "rel": "self"
+                }],
+                "status": "CURRENT",
+                "version": "1",
+                "min_version": "1",
+                "updated": "2013-07-23T11:33:21Z"
+            }]
+
+            return Response(json.dumps(resp), status=200, mimetype="application/json")
+
+        except Exception as ex:
+            logging.exception(u"%s: Could not show list of versions." % __name__)
+            # NOTE(review): ex.message exists only in Python 2 -- this module is py2-era
+            return ex.message, 500
+
+
+class MonitorVnf(Resource):
+    """Returns rate-based monitoring data (measured over time) for one container."""
+
+    def __init__(self, api):
+        # back-reference to the MonitorDummyApi
+        self.api = api
+
+    def get(self, vnf_name):
+        """
+        Calculates the workload for the specified docker container. Requires at least one second, to calculate
+        the network traffic and cpu usage over time.
+
+        :param vnf_name: Specifies the docker container via name.
+        :type vnf_name: ``str``
+        :return: Returns a json response with network, cpu and memory usage over time, and specifies the storage
+            access, the number of running processes and the current system time.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+        # normalize the name to the mininet naming scheme ("mn." prefix)
+        if len(vnf_name) < 3 or 'mn.' != vnf_name[:3]:
+            vnf_name = 'mn.' + vnf_name
+
+        # look for the vnf in any of the registered datacenter networks
+        found = False
+        from emuvim.api.heat.openstack_api_endpoint import OpenstackApiEndpoint
+        for api in OpenstackApiEndpoint.dc_apis:
+            if vnf_name[3:] in api.compute.dc.net:
+                found = True
+                break
+
+        if not found:
+            return Response(u"MonitorAPI: VNF %s does not exist.\n" % (vnf_name[3:]),
+                            status=500,
+                            mimetype="application/json")
+        try:
+            docker_id = DockerUtil.docker_container_id(vnf_name)
+            out_dict = dict()
+            # merge the individual measurements into one flat response dict
+            out_dict.update(DockerUtil.monitoring_over_time(docker_id))
+            out_dict.update(DockerUtil.docker_mem(docker_id))
+            out_dict.update(DockerUtil.docker_PIDS(docker_id))
+            # current system time in nanoseconds
+            out_dict['SYS_time'] = int(time.time() * 1000000000)
+
+            response = Response(json.dumps(out_dict) + '\n', status=200, mimetype="application/json")
+            # allow cross-origin access (e.g. for browser dashboards)
+            response.headers['Access-Control-Allow-Origin'] = '*'
+            return response
+        except Exception as e:
+            logging.exception(u"%s: Error getting monitoring information.\n %s" % (__name__, e))
+            return Response(u"Error getting monitoring information.\n", status=500, mimetype="application/json")
+
+
+class MonitorVnfAbs(Resource):
+    """Returns absolute (since container start) monitoring data for one container."""
+
+    def __init__(self, api):
+        # back-reference to the MonitorDummyApi
+        self.api = api
+
+    def get(self, vnf_name):
+        """
+        Calculates the workload for the specified docker container, at this point in time.
+
+        :param vnf_name: Specifies the docker container via name.
+        :type vnf_name: ``str``
+        :return: Returns a json response with network, cpu, memory usage and storage access, as absolute values from
+            startup till this point of time. It also contains the number of running processes and the current
+            system time.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+        # normalize the name to the mininet naming scheme ("mn." prefix)
+        if len(vnf_name) < 3 or 'mn.' != vnf_name[:3]:
+            vnf_name = 'mn.' + vnf_name
+
+        # look for the vnf in any of the registered datacenter networks
+        found = False
+        from emuvim.api.heat.openstack_api_endpoint import OpenstackApiEndpoint
+        for api in OpenstackApiEndpoint.dc_apis:
+            if vnf_name[3:] in api.compute.dc.net:
+                found = True
+                break
+        if not found:
+            return Response(u"MonitorAPI: VNF %s does not exist\n" % vnf_name[3:],
+                            status=500,
+                            mimetype="application/json")
+        try:
+            docker_id = DockerUtil.docker_container_id(vnf_name)
+            out_dict = dict()
+            # merge the individual absolute measurements into one response dict
+            out_dict.update(DockerUtil.docker_abs_cpu(docker_id))
+            out_dict.update(DockerUtil.docker_mem(docker_id))
+            out_dict.update(DockerUtil.docker_abs_net_io(docker_id))
+            out_dict.update(DockerUtil.docker_block_rw(docker_id))
+            out_dict.update(DockerUtil.docker_PIDS(docker_id))
+            # current system time in nanoseconds
+            out_dict['SYS_time'] = int(time.time() * 1000000000)
+
+            response = Response(json.dumps(out_dict) + '\n', status=200, mimetype="application/json")
+            # allow cross-origin access (e.g. for browser dashboards)
+            response.headers['Access-Control-Allow-Origin'] = '*'
+            return response
+        except Exception as e:
+            logging.exception(u"%s: Error getting monitoring information.\n %s" % (__name__, e))
+            return Response(u"Error getting monitoring information.\n", status=500, mimetype="application/json")
+
+
+class MonitorVnfDcStack(Resource):
+    """Monitoring endpoint addressed by datacenter/stack/template-name instead of container name."""
+
+    def __init__(self, api):
+        # back-reference to the MonitorDummyApi
+        self.api = api
+
+    def get(self, dc, stack, vnf_name):
+        """
+        Calculates the workload for the specified docker container, at this point in time.
+        This api call is for the translator to monitor a vnfs of a specific datacenter and stack.
+
+        :param dc: Target datacenter.
+        :type dc: ``str``
+        :param stack: Target stack
+        :type stack: ``str``
+        :param vnf_name: Specifies the docker container via name.
+        :type vnf_name: ``str``
+        :return: Returns a json response with network, cpu, memory usage and storage access, as absolute values from
+            startup till this point of time. It also contains the number of running processes and the current
+            system time.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+
+        # search for real name
+        vnf_name = self._findName(dc, stack, vnf_name)
+
+        # NOTE(review): py2 -- a unicode container name would fail this check;
+        # confirm _findName always yields a byte string on success
+        if type(vnf_name) is not str:
+            # something went wrong, vnf_name is a Response object
+            return vnf_name
+
+        try:
+            docker_id = DockerUtil.docker_container_id(vnf_name)
+            out_dict = dict()
+            # merge the individual measurements into one flat response dict
+            out_dict.update(DockerUtil.monitoring_over_time(docker_id))
+            out_dict.update(DockerUtil.docker_mem(docker_id))
+            out_dict.update(DockerUtil.docker_PIDS(docker_id))
+            # current system time in nanoseconds
+            out_dict['SYS_time'] = int(time.time() * 1000000000)
+
+            response = Response(json.dumps(out_dict) + '\n', status=200, mimetype="application/json")
+            # allow cross-origin access (e.g. for browser dashboards)
+            response.headers['Access-Control-Allow-Origin'] = '*'
+            return response
+        except Exception as e:
+            logging.exception(u"%s: Error getting monitoring information.\n %s" % (__name__, e))
+            return Response(u"Error getting monitoring information.\n", status=500, mimetype="application/json")
+
+    # Tries to find real container name according to heat template names
+    # Returns a string or a Response object
+    def _findName(self, dc, stack, vnf):
+        """
+        Resolve (datacenter, stack, template name) to the real container name.
+
+        :param dc: Datacenter label to search for
+        :param stack: Stack name within that datacenter's API
+        :param vnf: Server template name within the stack
+        :return: Container name (``'mn.' + server name``) on success,
+            or a 500 :class:`flask.Response` describing what was not found.
+        """
+        dc_real = None
+        from emuvim.api.heat.openstack_api_endpoint import OpenstackApiEndpoint
+        for api in OpenstackApiEndpoint.dc_apis:
+            # search for datacenters
+            if dc in api.manage.net.dcs:
+                dc_real = api.manage.net.dcs[dc]
+                break
+        if dc_real is None:
+            return Response(u"DC %s does not exist\n" % (dc), status=500, mimetype="application/json")
+
+        # search for related OpenStackAPIs
+        api_real = None
+        for api in OpenstackApiEndpoint.dc_apis:
+            if api.compute.dc == dc_real:
+                api_real = api
+        if api_real is None:
+            return Response(u"OpenStackAPI does not exist\n", status=500, mimetype="application/json")
+        # search for stacks
+        stack_real = None
+        for stackObj in api_real.compute.stacks.values():
+            if stackObj.stack_name == stack:
+                stack_real = stackObj
+        if stack_real is None:
+            return Response(u"Stack %s does not exist\n" % (stack), status=500, mimetype="application/json")
+        # search for servers
+        server_real = None
+        for server in stack_real.servers.values():
+            if server.template_name == vnf:
+                server_real = server
+                break
+        if server_real is None:
+            return Response(u"VNF %s does not exist\n" % (vnf), status=500, mimetype="application/json")
+        # containers are prefixed with "mn." in the emulator
+        container_real = 'mn.' + server_real.name
+        return container_real
--- /dev/null
+from manage import OpenstackManage
+from openstack_dummies import *
+import logging
+import threading
+import compute
+import requests
+
+
+class OpenstackApiEndpoint():
+    """
+    Base class for an OpenStack datacenter.
+    It holds information about all connected endpoints.
+    """
+    # class-level registry of every endpoint instance ever created
+    dc_apis = []
+
+    def __init__(self, listenip, port):
+        self.ip = listenip
+        self.port = port
+        self.compute = compute.OpenstackCompute()
+        # one dummy service per OpenStack component, each on its own port
+        # (fixed offsets relative to the base port; presumably chosen so the
+        # default base port yields the standard OpenStack ports -- confirm)
+        self.openstack_endpoints = dict()
+        self.openstack_endpoints['keystone'] = KeystoneDummyApi(self.ip, self.port)
+        self.openstack_endpoints['neutron'] = NeutronDummyApi(self.ip, self.port + 4696, self.compute)
+        self.openstack_endpoints['nova'] = NovaDummyApi(self.ip, self.port + 3774, self.compute)
+        self.openstack_endpoints['heat'] = HeatDummyApi(self.ip, self.port + 3004, self.compute)
+        self.openstack_endpoints['glance'] = GlanceDummyApi(self.ip, self.port + 4242, self.compute)
+
+        self.rest_threads = list()
+        self.manage = OpenstackManage()
+        self.manage.add_endpoint(self)
+        OpenstackApiEndpoint.dc_apis.append(self)
+
+    def connect_datacenter(self, dc):
+        """
+        Connect a datacenter to this endpoint.
+        An endpoint can only be connected to a single datacenter.
+
+        :param dc: Datacenter object
+        :type dc: :class:`dc`
+        """
+        self.compute.dc = dc
+        # give every dummy service access to the shared manage instance
+        for ep in self.openstack_endpoints.values():
+            ep.manage = self.manage
+        logging.info \
+            ("Connected DC(%s) to API endpoint %s(%s:%d)" % (dc.label, self.__class__.__name__, self.ip, self.port))
+
+    def connect_dc_network(self, dc_network):
+        """
+        Connect the datacenter network to the endpoint.
+
+        :param dc_network: Datacenter network reference
+        :type dc_network: :class:`.net`
+        """
+        self.manage.net = dc_network
+        # register the floating network so it is visible to the compute service
+        self.compute.nets[self.manage.floating_network.id] = self.manage.floating_network
+        logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def start(self):
+        """
+        Start all connected OpenStack endpoints that are connected to this API endpoint.
+        """
+        # each dummy service runs its own flask server in a daemon thread
+        for component in self.openstack_endpoints.values():
+            component.compute = self.compute
+            component.manage = self.manage
+            thread = threading.Thread(target=component._start_flask, args=())
+            thread.daemon = True
+            thread.name = component.__class__
+            thread.start()
+
+    def stop(self):
+        """
+        Stop all connected OpenStack endpoints that are connected to this API endpoint.
+        """
+        # each service exposes a /shutdown route that stops its werkzeug server
+        for component in self.openstack_endpoints.values():
+            url = "http://" + component.ip + ":" + str(component.port) + "/shutdown"
+            requests.get(url)
--- /dev/null
+from glance_dummy_api import GlanceDummyApi
+from heat_dummy_api import HeatDummyApi
+from keystone_dummy_api import KeystoneDummyApi
+from neutron_dummy_api import NeutronDummyApi
+from nova_dummy_api import NovaDummyApi
+
--- /dev/null
+from flask import Flask, request
+from flask_restful import Api, Resource
+import logging
+
+
+class BaseOpenstackDummy(Resource):
+    """
+    This class is the base class for all openstack entrypoints of son-emu.
+    It owns a flask app plus a flask-restful Api and logs every incoming
+    request as a replayable curl command into a playbook file.
+    """
+
+    def __init__(self, listenip, port):
+        self.ip = listenip
+        self.port = port
+        # set later by OpenstackApiEndpoint before the service is started
+        self.compute = None
+        self.manage = None
+        self.playbook_file = '/tmp/son-emu-requests.log'
+        # truncate the playbook on every instantiation
+        with open(self.playbook_file, 'w'):
+            pass
+
+        # setup Flask
+        self.app = Flask(__name__)
+        self.api = Api(self.app)
+
+    def _start_flask(self):
+        """Run the flask app for this endpoint (blocking; meant to run in a thread)."""
+        logging.info("Starting %s endpoint @ http://%s:%d" % (__name__, self.ip, self.port))
+        if self.app is not None:
+            # record every request before it is handled
+            self.app.before_request(self.dump_playbook)
+            self.app.run(self.ip, self.port, debug=True, use_reloader=False)
+
+    def dump_playbook(self):
+        """Append the current request to the playbook as a curl command (if it has a body)."""
+        # the manage lock serializes writes from the concurrently running endpoints
+        with self.manage.lock:
+            with open(self.playbook_file, 'a') as logfile:
+                if len(request.data) > 0:
+                    data = "# %s API\n" % str(self.__class__).split('.')[-1].rstrip('\'>')
+                    data += "curl -X {type} -H \"Content-type: application/json\" -d '{data}' {url}".format(type=request.method,
+                                                                                                           data=request.data,
+                                                                                                           url=request.url)
+                    logfile.write(data + "\n")
--- /dev/null
+from flask_restful import Resource
+from flask import Response, request
+from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
+import logging
+import json
+
+
+class GlanceDummyApi(BaseOpenstackDummy):
+    """
+    Fake Glance (image service) endpoint. Serves the locally registered
+    docker images through v1 and v2 style routes.
+    """
+
+    def __init__(self, in_ip, in_port, compute):
+        super(GlanceDummyApi, self).__init__(in_ip, in_port)
+        self.compute = compute
+        self.api.add_resource(Shutdown,
+                              "/shutdown")
+        self.api.add_resource(GlanceListApiVersions,
+                              "/versions")
+        # all schema routes share one catch-all resource
+        self.api.add_resource(GlanceSchema,
+                              "/v2/schemas/image",
+                              "/v2/schemas/metadefs/namespace",
+                              "/v2/schemas/metadefs/resource_type")
+        # v1 and v2 image listings are served by the same resource
+        self.api.add_resource(GlanceListImagesApi,
+                              "/v1/images",
+                              "/v1/images/detail",
+                              "/v2/images",
+                              "/v2/images/detail",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(GlanceImageByIdApi,
+                              "/v1/images/<id>",
+                              "/v2/images/<id>",
+                              resource_class_kwargs={'api': self})
+
+    def _start_flask(self):
+        """Run the flask app for this endpoint (blocking; meant to run in a thread)."""
+        logging.info("Starting %s endpoint @ http://%s:%d" % ("GlanceDummyApi", self.ip, self.port))
+        if self.app is not None:
+            # record every request into the playbook before handling it
+            self.app.before_request(self.dump_playbook)
+            self.app.run(self.ip, self.port, debug=True, use_reloader=False)
+
+
+class Shutdown(Resource):
+    """A get request to /shutdown will shut down this endpoint."""
+
+    def get(self):
+        """Stop the werkzeug server that serves this API endpoint."""
+        # fix: corrected typo in the log message ("beeing" -> "being")
+        logging.debug(("%s is being shut down") % (__name__))
+        func = request.environ.get('werkzeug.server.shutdown')
+        if func is None:
+            raise RuntimeError('Not running with the Werkzeug Server')
+        func()
+
+
+class GlanceListApiVersions(Resource):
+    """Lists the available Glance API versions (only v2 is advertised)."""
+
+    def get(self):
+        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+        resp = dict()
+        resp['versions'] = dict()
+        versions = [{
+            "status": "CURRENT",
+            "id": "v2",
+            "links": [
+                {
+                    "href": request.url_root + '/v2',
+                    "rel": "self"
+                }
+            ]
+        }]
+        resp['versions'] = versions
+        return Response(json.dumps(resp), status=200, mimetype='application/json')
+
+
class GlanceSchema(Resource):
    """Serve a minimal schema document so Glance clients do not choke."""

    def get(self):
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        # just an ugly hack to allow the openstack client to work
        resp = {'name': 'someImageName', 'properties': dict()}
        return Response(json.dumps(resp), status=200, mimetype='application/json')
+
+
class GlanceListImagesApi(Resource):
    """
    Lists the emulator's Docker images in Glance format and fakes the
    image-creation endpoint so orchestrators (e.g. OSM) are satisfied.
    """

    def __init__(self, api):
        # api: the owning GlanceDummyApi; provides the compute backend.
        self.api = api

    def get(self):
        """
        Return a Glance-style listing of all emulator images.

        :return: 200 with a JSON image list, or 500 with the exception message.
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            resp = dict()
            resp['next'] = None
            resp['first'] = "/v2/images"
            resp['schema'] = "/v2/schemas/images"
            resp['images'] = list()
            limit = 18
            count = 0
            for image in self.api.compute.images.values():
                f = dict()
                f['id'] = image.id
                f['name'] = str(image.name).replace(":latest", "")
                # everything below is static filler so clients see a
                # plausible-looking image record
                f['checksum'] = "2dad48f09e2a447a9bf852bcd93548c1"
                f['container_format'] = "docker"
                f['disk_format'] = "raw"
                f['size'] = 1
                f['created_at'] = "2016-03-15T15:09:07.000000"
                f['deleted'] = False
                f['deleted_at'] = None
                f['is_public'] = True
                f['min_disk'] = 1
                f['min_ram'] = 128
                f['owner'] = "3dad48f09e2a447a9bf852bcd93548c1"
                f['properties'] = {}
                f['protected'] = False
                f['status'] = "active"
                f['updated_at'] = "2016-03-15T15:09:07.000000"
                f['virtual_size'] = 1
                f['marker'] = None
                resp['images'].append(f)
                count += 1
                if count > limit:  # ugly hack to stop buggy glance client to do infinite requests
                    break
            if "marker" in request.args:  # ugly hack to fix pageination of openstack client
                resp['images'] = None
            return Response(json.dumps(resp), status=200, mimetype="application/json")

        except Exception as ex:
            logging.exception(u"%s: Could not retrieve the list of images." % __name__)
            return ex.message, 500

    def post(self):
        """
        This one is a real fake! It does not really create anything and the mentioned image
        should already be registered with Docker. However, this function returns a reply that looks
        like the image was just created to make orchestrators, like OSM, happy.
        """
        logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
        # lets see what we should create
        img_name = request.headers.get("X-Image-Meta-Name")
        img_size = request.headers.get("X-Image-Meta-Size")
        img_disk_format = request.headers.get("X-Image-Meta-Disk-Format")
        img_is_public = request.headers.get("X-Image-Meta-Is-Public")
        img_container_format = request.headers.get("X-Image-Meta-Container-Format")
        # try to find ID of already existing image (matched by name)
        # FIX: guard against a missing X-Image-Meta-Name header — previously
        # `None in image.name` raised a TypeError and produced a 500.
        img_id = None
        if img_name is not None:
            for image in self.api.compute.images.values():
                if img_name in image.name:
                    img_id = image.id
        logging.debug("Image name: %s" % img_name)
        logging.debug("Image id: %s" % img_id)
        # build a response body that looks like a real one
        resp = dict()
        f = dict()
        f['id'] = img_id
        f['name'] = img_name
        f['checksum'] = "2dad48f09e2a447a9bf852bcd93548c1"
        f['container_format'] = img_container_format
        f['disk_format'] = img_disk_format
        f['size'] = img_size
        f['created_at'] = "2016-03-15T15:09:07.000000"
        f['deleted'] = False
        f['deleted_at'] = None
        f['is_public'] = img_is_public
        f['min_disk'] = 1
        f['min_ram'] = 128
        f['owner'] = "3dad48f09e2a447a9bf852bcd93548c1"
        f['properties'] = {}
        f['protected'] = False
        f['status'] = "active"
        f['updated_at'] = "2016-03-15T15:09:07.000000"
        f['virtual_size'] = 1
        resp['image'] = f
        # build actual response with headers and everything
        r = Response(json.dumps(resp), status=201, mimetype="application/json")
        r.headers.add("Location", "http://%s:%d/v1/images/%s" % (self.api.ip,
                                                                 self.api.port,
                                                                 img_id))
        return r
+
+
class GlanceImageByIdApi(Resource):
    """Access a single image by its ID (served on both v1 and v2 routes)."""

    def __init__(self, api):
        # api: the owning GlanceDummyApi; provides the compute backend.
        self.api = api

    def get(self, id):
        """
        Show one image; delegates to the Nova image view, which renders the
        same structure for a single image id.
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        # FIX: import from the openstack_dummies package like every other
        # module here; 'emuvim.api.heat.openstack_dummies' is a stale path.
        from emuvim.api.openstack.openstack_dummies.nova_dummy_api import NovaListImages
        nova = NovaListImages(self.api)
        return nova.get(id)

    def put(self, id):
        """Image update is not supported by this dummy; log and return None."""
        logging.debug("API CALL: %s " % str(self.__class__.__name__))
        logging.warning("Endpoint not implemented")
        return None
+
+
--- /dev/null
+from flask import request, Response
+from flask_restful import Resource
+from emuvim.api.openstack.resources import Stack
+from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
+from datetime import datetime
+from emuvim.api.openstack.heat_parser import HeatParser
+import logging
+import json
+
+
class HeatDummyApi(BaseOpenstackDummy):
    """
    Fake Heat (orchestration) API endpoint.

    Registers REST resources for creating, listing, showing, updating and
    deleting stacks on the emulated compute backend.
    """

    def __init__(self, in_ip, in_port, compute):
        # in_ip/in_port: listen address; compute: emulator backend owning the stacks.
        super(HeatDummyApi, self).__init__(in_ip, in_port)
        self.compute = compute

        self.api.add_resource(Shutdown, "/shutdown")
        self.api.add_resource(HeatListAPIVersions, "/",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(HeatCreateStack, "/v1/<tenant_id>/stacks",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(HeatShowStack, "/v1/<tenant_id>/stacks/<stack_name_or_id>",
                              "/v1/<tenant_id>/stacks/<stack_name_or_id>/<stack_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(HeatUpdateStack, "/v1/<tenant_id>/stacks/<stack_name_or_id>",
                              "/v1/<tenant_id>/stacks/<stack_name_or_id>/<stack_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(HeatDeleteStack, "/v1/<tenant_id>/stacks/<stack_name_or_id>",
                              "/v1/<tenant_id>/stacks/<stack_name_or_id>/<stack_id>",
                              resource_class_kwargs={'api': self})

        @self.app.after_request
        def add_access_control_header(response):
            # allow cross-origin requests (e.g. from a browser-based dashboard)
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response


    def _start_flask(self):
        """Run the Flask app; every request is logged to the playbook first."""
        logging.info("Starting %s endpoint @ http://%s:%d" % (__name__, self.ip, self.port))
        if self.app is not None:
            self.app.before_request(self.dump_playbook)
            self.app.run(self.ip, self.port, debug=True, use_reloader=False)
+
+
class Shutdown(Resource):
    """
    Shut this endpoint down via a GET request on /shutdown.
    """

    def get(self):
        logging.debug("%s is beeing shut down" % __name__)
        stop = request.environ.get('werkzeug.server.shutdown')
        if stop is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        stop()
+
+
class HeatListAPIVersions(Resource):
    """Expose the orchestration API versions offered by this endpoint."""

    def __init__(self, api):
        self.api = api

    def get(self):
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        api_version = {
            "status": "CURRENT",
            "id": "v1.0",
            "links": [
                {
                    "href": "http://%s:%d/v2.0" % (self.api.ip, self.api.port),
                    "rel": "self"
                }
            ]
        }
        resp = dict()
        resp['versions'] = [api_version]
        return Response(json.dumps(resp), status=200, mimetype="application/json")
+
+
class HeatCreateStack(Resource):
    # Handles POST (create+deploy) and GET (list) on /v1/<tenant_id>/stacks.

    def __init__(self, api):
        # api: the owning HeatDummyApi; provides the compute backend.
        self.api = api

    def post(self, tenant_id):
        """
        Create and deploy a new stack.

        :param tenant_id:
        :return: 409, if the stack name was already used.
            400, if the heat template could not be parsed properly.
            500, if any exception occurred while creation.
            201, if everything worked out.
        """
        logging.debug("API CALL: %s POST" % str(self.__class__.__name__))

        try:
            stack_dict = json.loads(request.data)
            # reject duplicate stack names with 409 Conflict
            for stack in self.api.compute.stacks.values():
                if stack.stack_name == stack_dict['stack_name']:
                    return [], 409
            stack = Stack()
            stack.stack_name = stack_dict['stack_name']
            reader = HeatParser(self.api.compute)

            # the template may arrive JSON-encoded as a string (Python 2: str or unicode)
            if isinstance(stack_dict['template'], str) or isinstance(stack_dict['template'], unicode):
                stack_dict['template'] = json.loads(stack_dict['template'])
            if not reader.parse_input(stack_dict['template'], stack, self.api.compute.dc.label):
                # roll back anything the parser already allocated for this stack
                self.api.compute.clean_broken_stack(stack)
                return 'Could not create stack.', 400

            stack.creation_time = str(datetime.now())
            stack.status = "CREATE_COMPLETE"

            return_dict = {"stack": {"id": stack.id,
                                     "links": [
                                         {
                                             "href": "http://%s:%s/v1/%s/stacks/%s"
                                                     % (self.api.ip, self.api.port, tenant_id, stack.id),
                                             "rel": "self"
                                         }]}}

            # register the stack first, then actually deploy its resources
            self.api.compute.add_stack(stack)
            self.api.compute.deploy_stack(stack.id)
            return Response(json.dumps(return_dict), status=201, mimetype="application/json")

        except Exception as ex:
            logging.exception("Heat: Create Stack exception.")
            return ex.message, 500

    def get(self, tenant_id):
        """
        Calculates information about the requested stack.

        :param tenant_id:
        :return: Returns a json response which contains information like the stack id, name, status, creation time.
            500, if any exception occurred.
            200, if everything worked out.
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            return_stacks = dict()
            return_stacks['stacks'] = list()
            # one summary entry per known stack
            for stack in self.api.compute.stacks.values():
                return_stacks['stacks'].append(
                    {"creation_time": stack.creation_time,
                     "description": "desc of " + stack.id,
                     "id": stack.id,
                     "links": [],
                     "stack_name": stack.stack_name,
                     "stack_status": stack.status,
                     "stack_status_reason": "Stack CREATE completed successfully",
                     "updated_time": stack.update_time,
                     "tags": ""
                     })

            return Response(json.dumps(return_stacks), status=200, mimetype="application/json")
        except Exception as ex:
            logging.exception("Heat: List Stack exception.")
            return ex.message, 500
+
+
class HeatShowStack(Resource):
    def __init__(self, api):
        self.api = api

    def get(self, tenant_id, stack_name_or_id, stack_id=None):
        """
        Produce the detailed description of a single stack.

        :param tenant_id:
        :param stack_name_or_id: stack id (dict key) or stack name to resolve
        :param stack_id:
        :return: JSON with id, name, status, creation time and static filler.
                 404, if the stack is unknown.
                 500, if any exception occurred.
                 200, if everything worked out.
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            # resolve by id first; otherwise scan names (last match wins)
            stack = self.api.compute.stacks.get(stack_name_or_id)
            if stack is None:
                for candidate in self.api.compute.stacks.values():
                    if candidate.stack_name == stack_name_or_id:
                        stack = candidate
            if stack is None:
                return 'Could not resolve Stack - ID', 404

            self_link = {
                "href": "http://%s:%s/v1/%s/stacks/%s"
                        % (self.api.ip, self.api.port, tenant_id, stack.id),
                "rel": "self"
            }
            return_stack = {
                "stack": {
                    "capabilities": [],
                    "creation_time": stack.creation_time,
                    "description": "desc of " + stack.stack_name,
                    "disable_rollback": True,
                    "id": stack.id,
                    "links": [self_link],
                    "notification_topics": [],
                    "outputs": [],
                    "parameters": {
                        "OS::project_id": "3ab5b02f-a01f-4f95-afa1-e254afc4a435",  # add real project id
                        "OS::stack_id": stack.id,
                        "OS::stack_name": stack.stack_name
                    },
                    "stack_name": stack.stack_name,
                    "stack_owner": "The owner of the stack.",  # add stack owner
                    "stack_status": stack.status,
                    "stack_status_reason": "The reason for the current status of the stack.",  # add status reason
                    "template_description": "The description of the stack template.",
                    "stack_user_project_id": "The project UUID of the stack user.",
                    "timeout_mins": "",
                    "updated_time": "",
                    "parent": "",
                    "tags": ""
                }
            }

            return Response(json.dumps(return_stack), status=200, mimetype="application/json")

        except Exception as ex:
            logging.exception("Heat: Show stack exception.")
            return ex.message, 500
+
+
class HeatUpdateStack(Resource):
    def __init__(self, api):
        # api: the owning HeatDummyApi; provides the compute backend.
        self.api = api

    def put(self, tenant_id, stack_name_or_id, stack_id=None):
        """
        Updates an existing stack with a new heat template.

        :param tenant_id:
        :param stack_name_or_id: Specifies the stack, which should be updated.
        :param stack_id:
        :return: 404, if the requested stack could not be found.
            400, if the stack creation (because of errors in the heat template) or the stack update failed.
            500, if any exception occurred while updating.
            202, if everything worked out.
        """
        logging.debug("API CALL: %s PUT" % str(self.__class__.__name__))
        try:
            # resolve the stack by id first, then by name (last match wins)
            old_stack = None
            if stack_name_or_id in self.api.compute.stacks:
                old_stack = self.api.compute.stacks[stack_name_or_id]
            else:
                for tmp_stack in self.api.compute.stacks.values():
                    if tmp_stack.stack_name == stack_name_or_id:
                        old_stack = tmp_stack
            if old_stack is None:
                return 'Could not resolve Stack - ID', 404

            stack_dict = json.loads(request.data)

            # build a replacement stack that keeps the old identity and creation time
            stack = Stack()
            stack.stack_name = old_stack.stack_name
            stack.id = old_stack.id
            stack.creation_time = old_stack.creation_time
            stack.update_time = str(datetime.now())
            stack.status = "UPDATE_COMPLETE"

            reader = HeatParser(self.api.compute)
            # the template may arrive JSON-encoded as a string (Python 2: str or unicode)
            if isinstance(stack_dict['template'], str) or isinstance(stack_dict['template'], unicode):
                stack_dict['template'] = json.loads(stack_dict['template'])
            if not reader.parse_input(stack_dict['template'], stack, self.api.compute.dc.label, stack_update=True):
                return 'Could not create stack.', 400

            if not self.api.compute.update_stack(old_stack.id, stack):
                return 'Could not update stack.', 400

            # 202 Accepted with empty body, mirroring the real Heat API
            return Response(status=202, mimetype="application/json")

        except Exception as ex:
            logging.exception("Heat: Update Stack exception")
            return ex.message, 500
+
+
class HeatDeleteStack(Resource):
    def __init__(self, api):
        # api: the owning HeatDummyApi; provides the compute backend.
        self.api = api

    def delete(self, tenant_id, stack_name_or_id, stack_id=None):
        """
        Deletes an existing stack.

        :param tenant_id:
        :param stack_name_or_id: Specifies the stack, which should be deleted.
        :param stack_id:
        :return: 404, if the stack could not be found.
                 500, if any exception occurred while deletion.
                 204, if everything worked out.
        """
        logging.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
        try:
            # fast path: the caller passed the stack id
            if stack_name_or_id in self.api.compute.stacks:
                self.api.compute.delete_stack(stack_name_or_id)
                return Response('Deleted Stack: ' + stack_name_or_id, 204)

            # slow path: resolve by stack name
            for stack in self.api.compute.stacks.values():
                if stack.stack_name == stack_name_or_id:
                    self.api.compute.delete_stack(stack.id)
                    return Response('Deleted Stack: ' + stack_name_or_id, 204)

            # FIX: previously fell through and implicitly returned None
            # (serialized as an empty 200); report 404 like the other
            # stack endpoints do for an unknown stack.
            return 'Could not resolve Stack - ID', 404

        except Exception as ex:
            logging.exception("Heat: Delete Stack exception")
            return ex.message, 500
--- /dev/null
+from flask_restful import Resource
+from flask import request, Response
+from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
+import logging
+import json
+
+
class KeystoneDummyApi(BaseOpenstackDummy):
    """
    Fake Keystone (identity) endpoint: serves version documents, the service
    catalog and static tokens without any real authentication.
    """

    def __init__(self, in_ip, in_port):
        # in_ip/in_port: listen address of this endpoint.
        super(KeystoneDummyApi, self).__init__(in_ip, in_port)

        self.api.add_resource(KeystoneListVersions, "/", resource_class_kwargs={'api': self})
        self.api.add_resource(Shutdown, "/shutdown")
        self.api.add_resource(KeystoneShowAPIv2, "/v2.0", resource_class_kwargs={'api': self})
        self.api.add_resource(KeystoneGetToken, "/v2.0/tokens", resource_class_kwargs={'api': self})

    def _start_flask(self):
        """Run the Flask app; every request is logged to the playbook first."""
        logging.info("Starting %s endpoint @ http://%s:%d" % (__name__, self.ip, self.port))
        if self.app is not None:
            self.app.before_request(self.dump_playbook)
            self.app.run(self.ip, self.port, debug=True, use_reloader=False)
+
+
class Shutdown(Resource):
    """
    Handles GET /shutdown by terminating the werkzeug server behind this API.
    """

    def get(self):
        logging.debug("%s is beeing shut down" % __name__)
        terminate = request.environ.get('werkzeug.server.shutdown')
        if terminate is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        terminate()
+
+
class KeystoneListVersions(Resource):
    """
    Advertises the identity API versions available on this fake Keystone.
    Only v2.0 is offered (hardcoded).
    """

    def __init__(self, api):
        self.api = api

    def get(self):
        """
        Return the hard-coded list of identity API versions.

        :return: Static JSON document describing v2.0.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        v2_entry = {
            "id": "v2.0",
            "links": [
                {
                    "href": "http://%s:%d/v2.0" % (self.api.ip, self.api.port),
                    "rel": "self"
                }
            ],
            "media-types": [
                {
                    "base": "application/json",
                    "type": "application/vnd.openstack.identity-v2.0+json"
                }
            ],
            "status": "stable",
            "updated": "2014-04-17T00:00:00Z"
        }
        resp = {'versions': {'values': [v2_entry]}}
        return Response(json.dumps(resp), status=200, mimetype='application/json')
+
+
class KeystoneShowAPIv2(Resource):
    """
    Entrypoint for all openstack clients: advertises every endpoint currently
    running on son-emu.
    """

    def __init__(self, api):
        self.api = api

    def get(self):
        """
        Describe the v2.0 identity API together with links to all services.

        :return: openstack-style JSON response listing the entrypoints.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))

        # sibling services run at fixed offsets from the keystone port
        neutron_port = self.api.port + 4696
        heat_port = self.api.port + 3004

        keystone_base = "http://%s:%d" % (self.api.ip, self.api.port)
        neutron_base = "http://%s:%d" % (self.api.ip, neutron_port)
        heat_base = "http://%s:%d" % (self.api.ip, heat_port)
        hrefs = [
            keystone_base + "/v2.0",
            keystone_base + "/v2.0/tokens",
            neutron_base + "/v2.0/networks",
            neutron_base + "/v2.0/subnets",
            neutron_base + "/v2.0/ports",
            heat_base + "/v1/<tenant_id>/stacks",
        ]

        resp = dict()
        resp['version'] = {
            "status": "stable",
            "media-types": [
                {
                    "base": "application/json",
                    "type": "application/vnd.openstack.identity-v2.0+json"
                }
            ],
            "id": "v2.0",
            "links": [{"href": href, "rel": "self"} for href in hrefs]
        }

        return Response(json.dumps(resp), status=200, mimetype='application/json')
+
+
class KeystoneGetToken(Resource):
    """
    Returns a static keystone token.
    We don't do any validation so we don't care.
    """

    def __init__(self, api):
        # api: the owning KeystoneDummyApi; supplies ip/port for the catalog.
        self.api = api

    def post(self):
        """
        List API entrypoints.

        This is hardcoded. For a working "authentication" use these ENVVARS:

        * OS_AUTH_URL=http://<ip>:<port>/v2.0
        * OS_IDENTITY_API_VERSION=2.0
        * OS_TENANT_ID=fc394f2ab2df4114bde39905f800dc57
        * OS_REGION_NAME=RegionOne
        * OS_USERNAME=bla
        * OS_PASSWORD=bla

        :return: Returns an openstack style response for all entrypoints.
        :rtype: :class:`flask.response`
        """

        logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
        try:
            ret = dict()
            req = json.loads(request.data)
            ret['access'] = dict()
            ret['access']['token'] = dict()
            token = ret['access']['token']

            # static token with an (effectively) infinite lifetime
            token['issued_at'] = "2014-01-30T15:30:58.819Z"
            token['expires'] = "2999-01-30T15:30:58.819Z"
            token['id'] = req['auth'].get('token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
            token['tenant'] = dict()
            token['tenant']['description'] = None
            token['tenant']['enabled'] = True
            token['tenant']['id'] = req['auth'].get('tenantId', 'fc394f2ab2df4114bde39905f800dc57')
            token['tenant']['name'] = "tenantName"

            ret['access']['user'] = dict()
            user = ret['access']['user']
            user['username'] = req.get('username', "username")
            user['name'] = "tenantName"
            user['roles_links'] = list()
            user['id'] = token['tenant'].get('id', "fc394f2ab2df4114bde39905f800dc57")
            user['roles'] = [{'name': 'Member'}]

            ret['access']['region_name'] = "RegionOne"

            # service catalog pointing at the other dummy endpoints; each
            # sibling service listens at a fixed offset from the keystone port
            ret['access']['serviceCatalog'] = [{
                "endpoints": [
                    {
                        "adminURL": "http://%s:%s/v2.1/%s" % (self.api.ip, self.api.port + 3774, user['id']),
                        "region": "RegionOne",
                        "internalURL": "http://%s:%s/v2.1/%s" % (self.api.ip, self.api.port + 3774, user['id']),
                        "id": "2dad48f09e2a447a9bf852bcd93548ef",
                        "publicURL": "http://%s:%s/v2.1/%s" % (self.api.ip, self.api.port + 3774, user['id'])
                    }
                ],
                "endpoints_links": [],
                "type": "compute",
                "name": "nova"
            },
                {
                    "endpoints": [
                        {
                            "adminURL": "http://%s:%s/v2.0" % (self.api.ip, self.api.port),
                            "region": "RegionOne",
                            "internalURL": "http://%s:%s/v2.0" % (self.api.ip, self.api.port),
                            "id": "2dad48f09e2a447a9bf852bcd93543fc",
                            "publicURL": "http://%s:%s/v2" % (self.api.ip, self.api.port)
                        }
                    ],
                    "endpoints_links": [],
                    "type": "identity",
                    "name": "keystone"
                },
                {
                    "endpoints": [
                        {
                            "adminURL": "http://%s:%s" % (self.api.ip, self.api.port + 4696),
                            "region": "RegionOne",
                            "internalURL": "http://%s:%s" % (self.api.ip, self.api.port + 4696),
                            "id": "2dad48f09e2a447a9bf852bcd93548cf",
                            "publicURL": "http://%s:%s" % (self.api.ip, self.api.port + 4696)
                        }
                    ],
                    "endpoints_links": [],
                    "type": "network",
                    "name": "neutron"
                },
                {
                    "endpoints": [
                        {
                            "adminURL": "http://%s:%s" % (self.api.ip, self.api.port + 4242),
                            "region": "RegionOne",
                            "internalURL": "http://%s:%s" % (self.api.ip, self.api.port + 4242),
                            "id": "2dad48f09e2a447a9bf852bcd93548cf",
                            "publicURL": "http://%s:%s" % (self.api.ip, self.api.port + 4242)
                        }
                    ],
                    "endpoints_links": [],
                    "type": "image",
                    "name": "glance"
                },
                {
                    "endpoints": [
                        {
                            "adminURL": "http://%s:%s/v1/%s" % (self.api.ip, self.api.port + 3004, user['id']),
                            "region": "RegionOne",
                            "internalURL": "http://%s:%s/v1/%s" % (self.api.ip, self.api.port + 3004, user['id']),
                            "id": "2dad48f09e2a447a9bf852bcd93548bf",
                            "publicURL": "http://%s:%s/v1/%s" % (self.api.ip, self.api.port + 3004, user['id'])
                        }
                    ],
                    "endpoints_links": [],
                    "type": "orchestration",
                    "name": "heat"
                }
            ]

            # FIX: a stray trailing comma after this dict previously turned the
            # assignment into a 1-tuple, so clients received
            # "metadata": [{...}] instead of the plain object.
            ret['access']["metadata"] = {
                "is_admin": 0,
                "roles": [
                    "7598ac3c634d4c3da4b9126a5f67ca2b"
                ]
            }
            ret['access']['trust'] = {
                "id": "394998fa61f14736b1f0c1f322882949",
                "trustee_user_id": "269348fdd9374b8885da1418e0730af1",
                "trustor_user_id": "3ec3164f750146be97f21559ee4d9c51",
                "impersonation": False
            }
            return Response(json.dumps(ret), status=200, mimetype='application/json')

        except Exception as ex:
            logging.exception("Keystone: Get token failed.")
            return ex.message, 500
--- /dev/null
+from flask_restful import Resource
+from flask import request, Response
+from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
+from datetime import datetime
+import logging
+import json
+import uuid
+import copy
+
+
class NeutronDummyApi(BaseOpenstackDummy):
    """
    Fake Neutron (networking) endpoint: CRUD for networks, subnets and ports
    on the emulated topology, plus a floating-IP stub.
    """

    def __init__(self, ip, port, compute):
        # ip/port: listen address; compute: emulator backend owning nets/ports.
        super(NeutronDummyApi, self).__init__(ip, port)
        self.compute = compute

        # every resource is registered for both the bare path and the
        # '.json' suffix variant that some clients request
        self.api.add_resource(NeutronListAPIVersions, "/")
        self.api.add_resource(Shutdown, "/shutdown")
        self.api.add_resource(NeutronShowAPIv2Details, "/v2.0")
        self.api.add_resource(NeutronListNetworks, "/v2.0/networks.json", "/v2.0/networks",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronShowNetwork, "/v2.0/networks/<network_id>.json", "/v2.0/networks/<network_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronCreateNetwork, "/v2.0/networks.json", "/v2.0/networks",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronUpdateNetwork, "/v2.0/networks/<network_id>.json", "/v2.0/networks/<network_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronDeleteNetwork, "/v2.0/networks/<network_id>.json", "/v2.0/networks/<network_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronListSubnets, "/v2.0/subnets.json", "/v2.0/subnets",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronShowSubnet, "/v2.0/subnets/<subnet_id>.json", "/v2.0/subnets/<subnet_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronCreateSubnet, "/v2.0/subnets.json", "/v2.0/subnets",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronUpdateSubnet, "/v2.0/subnets/<subnet_id>.json", "/v2.0/subnets/<subnet_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronDeleteSubnet, "/v2.0/subnets/<subnet_id>.json", "/v2.0/subnets/<subnet_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronListPorts, "/v2.0/ports.json", "/v2.0/ports",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronShowPort, "/v2.0/ports/<port_id>.json", "/v2.0/ports/<port_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronCreatePort, "/v2.0/ports.json", "/v2.0/ports",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronUpdatePort, "/v2.0/ports/<port_id>.json", "/v2.0/ports/<port_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronDeletePort, "/v2.0/ports/<port_id>.json", "/v2.0/ports/<port_id>",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(NeutronAddFloatingIp, "/v2.0/floatingips.json", "/v2.0/floatingips",
                              resource_class_kwargs={'api': self})

    def _start_flask(self):
        """Run the Flask app; every request is logged to the playbook first."""
        logging.info("Starting %s endpoint @ http://%s:%d" % (__name__, self.ip, self.port))
        if self.app is not None:
            self.app.before_request(self.dump_playbook)
            self.app.run(self.ip, self.port, debug=True, use_reloader=False)
+
+
class Shutdown(Resource):
    """GET /shutdown stops the werkzeug server serving this endpoint."""

    def get(self):
        logging.debug("%s is beeing shut down" % __name__)
        halt = request.environ.get('werkzeug.server.shutdown')
        if halt is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        halt()
+
+
class NeutronListAPIVersions(Resource):
    def get(self):
        """
        List the supported Neutron API versions (only v2.0).

        :return: JSON response with the version catalogue.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: Neutron - List API Versions")
        version_v2 = {
            "status": "CURRENT",
            "id": "v2.0",
            "links": [
                {"href": request.url_root + '/v2.0', "rel": "self"}
            ]
        }
        resp = {'versions': [version_v2]}
        return Response(json.dumps(resp), status=200, mimetype='application/json')
+
+
class NeutronShowAPIv2Details(Resource):
    def get(self):
        """
        Describe the v2.0 API: one entry per exposed resource collection.

        :return: JSON response with the resource descriptions.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))

        # (name, collection, href) triplets; order and spellings mirror
        # what clients expect from this endpoint
        resource_specs = [
            ("subnet", "subnets", request.url_root + 'v2.0/subnets'),
            ("network", "networks", request.url_root + 'v2.0/networks'),
            ("ports", "ports", request.url_root + 'v2.0/ports'),
        ]

        resp = dict()
        resp['resources'] = [
            {
                "links": [{"href": href, "rel": "self"}],
                "name": name,
                "collection": collection
            }
            for name, collection, href in resource_specs
        ]

        return Response(json.dumps(resp), status=200, mimetype='application/json')
+
+
class NeutronListNetworks(Resource):
    def __init__(self, api):
        self.api = api

    def get(self):
        """
        List the emulator's networks. A 'name' query argument or a single
        'id' argument delegates to the single-network view; multiple 'id'
        arguments filter the listing.

        :return: JSON response rooted at 'networks'.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            name_filter = request.args.get('name')
            if name_filter:
                return NeutronShowNetwork(self.api).get_network(name_filter, True)
            requested_ids = request.args.getlist('id')
            if len(requested_ids) == 1:
                return NeutronShowNetwork(self.api).get_network(request.args.get('id'), True)

            network_list = list()
            for net in self.api.compute.nets.values():
                # with ids given, keep only the requested networks
                if requested_ids and net.id not in requested_ids:
                    continue
                net_dict = net.create_network_dict()
                if net_dict not in network_list:  # avoid duplicate entries
                    network_list.append(net_dict)

            return Response(json.dumps({"networks": network_list}), status=200, mimetype='application/json')

        except Exception as ex:
            logging.exception("Neutron: List networks exception.")
            return Response(ex.message, status=500, mimetype='application/json')
+
+
class NeutronShowNetwork(Resource):
    def __init__(self, api):
        self.api = api

    def get(self, network_id):
        """
        Show the network identified by 'network_id'.

        :param network_id: The unique ID string of the network.
        :type network_id: ``str``
        :return: JSON response rooted at 'network'.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        return self.get_network(network_id, False)

    def get_network(self, network_name_or_id, as_list):
        """
        Render one network, looked up by name or id.

        :param network_name_or_id: selector for the requested network
        :type network_name_or_id: ``str``
        :param as_list: root the response at 'networks' (list) instead of 'network'
        :type as_list: ``bool``
        :return: JSON response with the single network description.
        :rtype: :class:`flask.response`
        """
        try:
            net = self.api.compute.find_network_by_name_or_id(network_name_or_id)
            if net is None:
                return Response(u'Network not found.\n', status=404, mimetype='application/json')

            net_dict = net.create_network_dict()
            if as_list:
                body = {"networks": [net_dict]}
            else:
                body = {"network": net_dict}

            return Response(json.dumps(body), status=200, mimetype='application/json')

        except Exception as ex:
            logging.exception("Neutron: Show network exception.")
            return Response(ex.message, status=500, mimetype='application/json')
+
+
class NeutronCreateNetwork(Resource):
    def __init__(self, api):
        self.api = api

    def post(self):
        """
        Creates a network named as given in the request body under
        ['network']['name'].

        :return: * 400, if the network already exists.
                 * 500, if any exception occurred while creation.
                 * 201, if everything worked out.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
        try:
            body = json.loads(request.data)
            requested_name = body['network']['name']
            if self.api.compute.find_network_by_name_or_id(requested_name) is not None:
                return Response('Network already exists.\n', status=400, mimetype='application/json')

            new_net = self.api.compute.create_network(requested_name)
            payload = json.dumps({"network": new_net.create_network_dict()})
            return Response(payload, status=201, mimetype='application/json')
        except Exception as ex:
            logging.exception("Neutron: Create network excepiton.")
            return Response(ex.message, status=500, mimetype='application/json')
+
+
class NeutronUpdateNetwork(Resource):
    def __init__(self, api):
        # api: the owning NeutronDummyApi; provides the compute backend.
        self.api = api

    def put(self, network_id):  # TODO currently only the name will be changed
        """
        Updates the existing network with the given parameters.

        :param network_id: The indicator string, which specifies the requested network.
        :type network_id: ``str``
        :return: * 404, if the network could not be found.
            * 500, if any exception occurred while updating the network.
            * 200, if everything worked out.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s PUT" % str(self.__class__.__name__))
        try:
            if network_id in self.api.compute.nets:
                net = self.api.compute.nets[network_id]
                network_dict = json.loads(request.data)
                # NOTE(review): shallow snapshot appears unused below — TODO confirm
                old_net = copy.copy(net)

                # only 'status' and 'name' are actually applied; the other
                # recognized fields are accepted but ignored
                if "status" in network_dict["network"]:
                    net.status = network_dict["network"]["status"]
                if "subnets" in network_dict["network"]:
                    pass  # tmp_network_dict["subnets"] = None
                if "name" in network_dict["network"] and net.name != network_dict["network"]["name"]:
                    net.name = network_dict["network"]["name"]
                if "admin_state_up" in network_dict["network"]:
                    pass  # tmp_network_dict["admin_state_up"] = True
                if "tenant_id" in network_dict["network"]:
                    pass  # tmp_network_dict["tenant_id"] = "c1210485b2424d48804aad5d39c61b8f"
                if "shared" in network_dict["network"]:
                    pass  # tmp_network_dict["shared"] = False

                # echoes the request body back as the update result
                return Response(json.dumps(network_dict), status=200, mimetype='application/json')

            return Response('Network not found.\n', status=404, mimetype='application/json')

        except Exception as ex:
            logging.exception("Neutron: Show networks exception.")
            return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronDeleteNetwork(Resource):
+ def __init__(self, api):
+ self.api = api
+
+ def delete(self, network_id):
+ """
+ Deletes the specified network and all its subnets.
+
+ :param network_id: The indicator string, which specifies the requested network.
+ :type network_id: ``str``
+ :return: * 404, if the network or the subnet could not be removed.
+ * 500, if any exception occurred while deletion.
+ * 204, if everything worked out.
+ :rtype: :class:`flask.response`
+ """
+ logging.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
+ try:
+ if network_id not in self.api.compute.nets:
+ return Response('Could not find network. (' + network_id + ')\n',
+ status=404, mimetype='application/json')
+
+ net = self.api.compute.nets[network_id]
+ delete_subnet = NeutronDeleteSubnet(self.api)
+ resp = delete_subnet.delete(net.subnet_id)
+
+ if not '204' in resp.status and not '404' in resp.status:
+ return resp
+
+ self.api.compute.delete_network(network_id)
+
+ return Response('Network ' + str(network_id) + ' deleted.\n', status=204, mimetype='application/json')
+ except Exception as ex:
+ logging.exception("Neutron: Delete network exception.")
+ return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronListSubnets(Resource):
+ def __init__(self, api):
+ self.api = api
+
+ def get(self):
+ """
+ Lists all subnets, used in son-emu. If a 'name' or one or more 'id's are specified, it will only list the
+ subnet with the name, or the subnets specified via id.
+
+ :return: Returns a json response, starting with 'subnets' as root node.
+ :rtype: :class:`flask.response`
+ """
+ logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+ try:
+ if request.args.get('name'):
+ show_subnet = NeutronShowSubnet(self.api)
+ return show_subnet.get_subnet(request.args.get('name'), True)
+ id_list = request.args.getlist('id')
+ if len(id_list) == 1:
+ show_subnet = NeutronShowSubnet(self.api)
+ return show_subnet.get_subnet(id_list[0], True)
+
+ subnet_list = list()
+ subnet_dict = dict()
+
+ if len(id_list) == 0:
+ for net in self.api.compute.nets.values():
+ if net.subnet_id is not None:
+ tmp_subnet_dict = net.create_subnet_dict()
+ subnet_list.append(tmp_subnet_dict)
+ else:
+ for net in self.api.compute.nets.values():
+ if net.subnet_id in id_list:
+ tmp_subnet_dict = net.create_subnet_dict()
+ subnet_list.append(tmp_subnet_dict)
+
+ subnet_dict["subnets"] = subnet_list
+
+ return Response(json.dumps(subnet_dict), status=200, mimetype='application/json')
+
+ except Exception as ex:
+ logging.exception("Neutron: List subnets exception.")
+ return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronShowSubnet(Resource):
+ def __init__(self, api):
+ self.api = api
+
+ def get(self, subnet_id):
+ """
+ Returns the subnet, specified via 'subnet_id'.
+
+ :param subnet_id: The unique ID string of the subnet.
+ :type subnet_id: ``str``
+ :return: Returns a json response, starting with 'subnet' as root node and one subnet description.
+ :rtype: :class:`flask.response`
+ """
+ logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+ return self.get_subnet(subnet_id, False)
+
+ def get_subnet(self, subnet_name_or_id, as_list):
+ """
+ Returns one subnet description of the subnet, specified via 'subnet_name_or_id'.
+
+ :param subnet_name_or_id: The indicator string, which specifies the requested subnet.
+ :type subnet_name_or_id: ``str``
+ :param as_list: Determines if the subnet description should start with the root node 'subnet' or 'subnets'.
+ :type as_list: ``bool``
+ :return: Returns a json response, with one subnet description.
+ :rtype: :class:`flask.response`
+ """
+ try:
+ for net in self.api.compute.nets.values():
+ if net.subnet_id == subnet_name_or_id or net.subnet_name == subnet_name_or_id:
+ tmp_subnet_dict = net.create_subnet_dict()
+ tmp_dict = dict()
+ if as_list:
+ tmp_dict["subnets"] = [tmp_subnet_dict]
+ else:
+ tmp_dict["subnet"] = tmp_subnet_dict
+ return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
+
+ return Response('Subnet not found. (' + subnet_name_or_id + ')\n', status=404, mimetype='application/json')
+
+ except Exception as ex:
+ logging.exception("Neutron: Show subnet exception.")
+ return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronCreateSubnet(Resource):
+ def __init__(self, api):
+ self.api = api
+
+ def post(self):
+ """
+ Creates a subnet with the name, specified within the request under ['subnet']['name'].
+
+ :return: * 400, if the 'CIDR' format is wrong or it does not exist.
+ * 404, if the network was not found.
+ * 409, if the corresponding network already has one subnet.
+ * 500, if any exception occurred while creation and
+ * 201, if everything worked out.
+ :rtype: :class:`flask.response`
+ """
+ logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
+ try:
+ subnet_dict = json.loads(request.data)
+ net = self.api.compute.find_network_by_name_or_id(subnet_dict['subnet']['network_id'])
+
+ if net is None:
+ return Response('Could not find network.\n', status=404, mimetype='application/json')
+
+ net.subnet_name = subnet_dict["subnet"].get('name', str(net.name) + '-sub')
+ if net.subnet_id is not None:
+ return Response('Only one subnet per network is supported\n', status=409, mimetype='application/json')
+
+ if "id" in subnet_dict["subnet"]:
+ net.subnet_id = subnet_dict["subnet"]["id"]
+ else:
+ net.subnet_id = str(uuid.uuid4())
+ import emuvim.api.heat.ip_handler as IP
+ net.set_cidr(IP.get_new_cidr(net.subnet_id))
+
+ if "tenant_id" in subnet_dict["subnet"]:
+ pass
+ if "allocation_pools" in subnet_dict["subnet"]:
+ pass
+ if "gateway_ip" in subnet_dict["subnet"]:
+ net.gateway_ip = subnet_dict["subnet"]["gateway_ip"]
+ if "ip_version" in subnet_dict["subnet"]:
+ pass
+ if "enable_dhcp" in subnet_dict["subnet"]:
+ pass
+
+ return Response(json.dumps({'subnet': net.create_subnet_dict()}), status=201, mimetype='application/json')
+
+ except Exception as ex:
+ logging.exception("Neutron: Create network excepiton.")
+ return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronUpdateSubnet(Resource):
+    def __init__(self, api):
+        self.api = api
+
+    def put(self, subnet_id):
+        """
+        Updates the existing subnet with the given parameters.
+
+        :param subnet_id: The indicator string, which specifies the requested subnet.
+        :type subnet_id: ``str``
+        :return: * 404, if the network could not be found.
+                 * 500, if any exception occurred while updating the network.
+                 * 200, if everything worked out.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s PUT" % str(self.__class__.__name__))
+        try:
+            for net in self.api.compute.nets.values():
+                if net.subnet_id == subnet_id:
+                    subnet_dict = json.loads(request.data)
+
+                    if "name" in subnet_dict["subnet"]:
+                        net.subnet_name = subnet_dict["subnet"]["name"]
+                    if "network_id" in subnet_dict["subnet"]:
+                        # NOTE(review): this overwrites the *network* id, not the subnet id - confirm intended
+                        net.id = subnet_dict["subnet"]["network_id"]
+                    if "tenant_id" in subnet_dict["subnet"]:
+                        pass  # accepted but not emulated
+                    if "allocation_pools" in subnet_dict["subnet"]:
+                        pass  # accepted but not emulated
+                    if "gateway_ip" in subnet_dict["subnet"]:
+                        net.gateway_ip = subnet_dict["subnet"]["gateway_ip"]
+                    if "ip_version" in subnet_dict["subnet"]:
+                        pass  # accepted but not emulated
+                    if "cidr" in subnet_dict["subnet"]:
+                        net.set_cidr(subnet_dict["subnet"]["cidr"])
+                    if "id" in subnet_dict["subnet"]:
+                        net.subnet_id = subnet_dict["subnet"]["id"]
+                    if "enable_dhcp" in subnet_dict["subnet"]:
+                        pass  # accepted but not emulated
+
+                    # 'datetime' must be imported at module level - TODO confirm (import not visible in this chunk)
+                    net.subnet_update_time = str(datetime.now())
+                    tmp_dict = {'subnet': net.create_subnet_dict()}
+                    return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
+
+            return Response('Network not found.\n', status=404, mimetype='application/json')
+
+        except Exception as ex:
+            logging.exception("Neutron: Show networks exception.")
+            return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronDeleteSubnet(Resource):
+    def __init__(self, api):
+        self.api = api
+
+    def delete(self, subnet_id):
+        """
+        Deletes the specified subnet and detaches all server ports that use it.
+
+        :param subnet_id: The indicator string, which specifies the requested subnet.
+        :type subnet_id: ``str``
+        :return: * 404, if the subnet could not be removed.
+                 * 500, if any exception occurred while deletion.
+                 * 204, if everything worked out.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
+        try:
+            for net in self.api.compute.nets.values():
+                if net.subnet_id == subnet_id:
+                    # detach every server port that sits on this subnet's network
+                    for server in self.api.compute.computeUnits.values():
+                        for port_name in server.port_names:
+                            port = self.api.compute.find_port_by_name_or_id(port_name)
+                            if port.net_name == net.name:
+                                port.ip_address = None
+                                # tear down the mininet link between the container and the DC switch
+                                # NOTE(review): removes one link per matching port - confirm behavior
+                                # for servers attached with multiple ports on this network
+                                self.api.compute.dc.net.removeLink(
+                                    link=None,
+                                    node1=self.api.compute.dc.containers[server.name],
+                                    node2=self.api.compute.dc.switch)
+                                port.net_name = None
+
+                    # drop the subnet bookkeeping from the network itself
+                    net.delete_subnet()
+
+                    return Response('Subnet ' + str(subnet_id) + ' deleted.\n',
+                                    status=204, mimetype='application/json')
+
+            return Response('Could not find subnet.', status=404, mimetype='application/json')
+        except Exception as ex:
+            logging.exception("Neutron: Delete subnet exception.")
+            return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronListPorts(Resource):
+ def __init__(self, api):
+ self.api = api
+
+ def get(self):
+ """
+ Lists all ports, used in son-emu. If a 'name' or one or more 'id's are specified, it will only list the
+ port with the name, or the ports specified via id.
+
+ :return: Returns a json response, starting with 'ports' as root node.
+ :rtype: :class:`flask.response`
+ """
+ logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+ try:
+ if request.args.get('name'):
+ show_port = NeutronShowPort(self.api)
+ return show_port.get_port(request.args.get('name'), True)
+ id_list = request.args.getlist('id')
+ if len(id_list) == 1:
+ show_port = NeutronShowPort(self.api)
+ return show_port.get_port(request.args.get('id'), True)
+
+ port_list = list()
+ port_dict = dict()
+
+ if len(id_list) == 0:
+ for port in self.api.compute.ports.values():
+ tmp_port_dict = port.create_port_dict(self.api.compute)
+ port_list.append(tmp_port_dict)
+ else:
+ for port in self.api.compute.ports.values():
+ if port.id in id_list:
+ tmp_port_dict = port.create_port_dict(self.api.compute)
+ port_list.append(tmp_port_dict)
+
+ port_dict["ports"] = port_list
+
+ return Response(json.dumps(port_dict), status=200, mimetype='application/json')
+
+ except Exception as ex:
+ logging.exception("Neutron: List ports exception.")
+ return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronShowPort(Resource):
+ def __init__(self, api):
+ self.api = api
+
+ def get(self, port_id):
+ """
+ Returns the port, specified via 'port_id'.
+
+ :param port_id: The unique ID string of the network.
+ :type port_id: ``str``
+ :return: Returns a json response, starting with 'port' as root node and one network description.
+ :rtype: :class:`flask.response`
+ """
+ logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+ return self.get_port(port_id, False)
+
+ def get_port(self, port_name_or_id, as_list):
+ """
+ Returns one network description of the port, specified via 'port_name_or_id'.
+
+ :param port_name_or_id: The indicator string, which specifies the requested port.
+ :type port_name_or_id: ``str``
+ :param as_list: Determines if the port description should start with the root node 'port' or 'ports'.
+ :type as_list: ``bool``
+ :return: Returns a json response, with one port description.
+ :rtype: :class:`flask.response`
+ """
+ try:
+ port = self.api.compute.find_port_by_name_or_id(port_name_or_id)
+ if port is None:
+ return Response('Port not found. (' + port_name_or_id + ')\n', status=404, mimetype='application/json')
+ tmp_port_dict = port.create_port_dict(self.api.compute)
+ tmp_dict = dict()
+ if as_list:
+ tmp_dict["ports"] = [tmp_port_dict]
+ else:
+ tmp_dict["port"] = tmp_port_dict
+ return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
+ except Exception as ex:
+ logging.exception("Neutron: Show port exception.")
+ return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronCreatePort(Resource):
+ def __init__(self, api):
+ self.api = api
+
+ def post(self):
+ """
+ Creates a port with the name, specified within the request under ['port']['name'].
+
+ :return: * 404, if the network could not be found.
+ * 500, if any exception occurred while creation and
+ * 201, if everything worked out.
+ :rtype: :class:`flask.response`
+ """
+ logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
+ try:
+ port_dict = json.loads(request.data)
+ net_id = port_dict['port']['network_id']
+
+ if net_id not in self.api.compute.nets:
+ return Response('Could not find network.\n', status=404, mimetype='application/json')
+
+ net = self.api.compute.nets[net_id]
+ if 'name' in port_dict['port']:
+ name = port_dict['port']['name']
+ else:
+ num_ports = len(self.api.compute.ports)
+ name = "port:cp%s:man:%s" % (num_ports, str(uuid.uuid4()))
+
+ if self.api.compute.find_port_by_name_or_id(name):
+ return Response("Port with name %s already exists.\n" % name, status=500, mimetype='application/json')
+
+ port = self.api.compute.create_port(name)
+
+ port.net_name = net.name
+ port.ip_address = net.get_new_ip_address(name)
+
+ if "admin_state_up" in port_dict["port"]:
+ pass
+ if "device_id" in port_dict["port"]:
+ pass
+ if "device_owner" in port_dict["port"]:
+ pass
+ if "fixed_ips" in port_dict["port"]:
+ pass
+ if "mac_address" in port_dict["port"]:
+ port.mac_address = port_dict["port"]["mac_address"]
+ if "status" in port_dict["port"]:
+ pass
+ if "tenant_id" in port_dict["port"]:
+ pass
+
+ # add the port to a stack if the specified network is a stack network
+ for stack in self.api.compute.stacks.values():
+ for net in stack.nets.values():
+ if net.id == net_id:
+ stack.ports[name] = port
+
+ return Response(json.dumps({'port': port.create_port_dict(self.api.compute)}), status=201,
+ mimetype='application/json')
+ except Exception as ex:
+ logging.exception("Neutron: Show port exception.")
+ return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronUpdatePort(Resource):
+ def __init__(self, api):
+ self.api = api
+
+ def put(self, port_id):
+ """
+ Updates the existing port with the given parameters.
+
+ :param network_id: The indicator string, which specifies the requested port.
+ :type network_id: ``str``
+ :return: * 404, if the network could not be found.
+ * 500, if any exception occurred while updating the network.
+ * 200, if everything worked out.
+ :rtype: :class:`flask.response`
+ """
+ logging.debug("API CALL: %s PUT" % str(self.__class__.__name__))
+ try:
+ port_dict = json.loads(request.data)
+ port = self.api.compute.find_port_by_name_or_id(port_id)
+ if port is None:
+ return Response("Port with id %s does not exists.\n" % port_id, status=404, mimetype='application/json')
+ old_port = copy.copy(port)
+
+ stack = None
+ for s in self.api.compute.stacks.values():
+ for port in s.ports.values():
+ if port.id == port_id:
+ stack = s
+ if "admin_state_up" in port_dict["port"]:
+ pass
+ if "device_id" in port_dict["port"]:
+ pass
+ if "device_owner" in port_dict["port"]:
+ pass
+ if "fixed_ips" in port_dict["port"]:
+ pass
+ if "id" in port_dict["port"]:
+ port.id = port_dict["port"]["id"]
+ if "mac_address" in port_dict["port"]:
+ port.mac_address = port_dict["port"]["mac_address"]
+ if "name" in port_dict["port"] and port_dict["port"]["name"] != port.name:
+ port.set_name(port_dict["port"]["name"])
+ if stack is not None:
+ if port.net_name in stack.nets:
+ stack.nets[port.net_name].update_port_name_for_ip_address(port.ip_address, port.name)
+ stack.ports[port.name] = stack.ports[old_port.name]
+ del stack.ports[old_port.name]
+ if "network_id" in port_dict["port"]:
+ pass
+ if "status" in port_dict["port"]:
+ pass
+ if "tenant_id" in port_dict["port"]:
+ pass
+
+ return Response(json.dumps({'port': port.create_port_dict(self.api.compute)}), status=200,
+ mimetype='application/json')
+ except Exception as ex:
+ logging.exception("Neutron: Update port exception.")
+ return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronDeletePort(Resource):
+ def __init__(self, api):
+ self.api = api
+
+ def delete(self, port_id):
+ """
+ Deletes the specified port.
+
+ :param port_id: The indicator string, which specifies the requested port.
+ :type port_id: ``str``
+ :return: * 404, if the port could not be found.
+ * 500, if any exception occurred while deletion.
+ * 204, if everything worked out.
+ :rtype: :class:`flask.response`
+ """
+ logging.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
+ try:
+ port = self.api.compute.find_port_by_name_or_id(port_id)
+ if port is None:
+ return Response("Port with id %s does not exists.\n" % port_id, status=404)
+ stack = None
+ for s in self.api.compute.stacks.values():
+ for p in s.ports.values():
+ if p.id == port_id:
+ stack = s
+ if stack is not None:
+ if port.net_name in stack.nets:
+ stack.nets[port.net_name].withdraw_ip_address(port.ip_address)
+ for server in stack.servers.values():
+ try:
+ server.port_names.remove(port.name)
+ except ValueError:
+ pass
+
+ # delete the port
+ self.api.compute.delete_port(port.id)
+
+ return Response('Port ' + port_id + ' deleted.\n', status=204, mimetype='application/json')
+
+ except Exception as ex:
+ logging.exception("Neutron: Delete port exception.")
+ return Response(ex.message, status=500, mimetype='application/json')
+
+
+class NeutronAddFloatingIp(Resource):
+    def __init__(self, api):
+        self.api = api
+
+    def get(self):
+        """
+        Added a quick and dirty fake for the OSM integration. Returns a list of
+        floating IPs. Has nothing to do with the setup inside the emulator.
+        But its enough to make the OSM driver happy.
+        @PG Sandman: Feel free to improve this and let it do something meaningful.
+        """
+        resp = dict()
+        resp["floatingips"] = list()
+        # create a list of floating IP definitions and return it
+        for i in range(100, 110):
+            ip=dict()
+            ip["router_id"] = "router_id"
+            ip["description"] = "hardcoded in api"
+            ip["created_at"] = "router_id"
+            ip["updated_at"] = "router_id"
+            ip["revision_number"] = 1
+            ip["tenant_id"] = "tenant_id"
+            ip["project_id"] = "project_id"
+            ip["floating_network_id"] = str(i)
+            ip["status"] = "ACTIVE"
+            ip["id"] = str(i)
+            ip["port_id"] = "port_id"
+            ip["floating_ip_address"] = "172.0.0.%d" % i
+            ip["fixed_ip_address"] = "10.0.0.%d" % i
+            resp["floatingips"].append(ip)
+        return Response(json.dumps(resp), status=200, mimetype='application/json')
+
+
+    def post(self):
+        """
+        Adds a floating IP to neutron.
+
+        :return: Returns a floating network description.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
+        try:
+            # Fiddle with floating_network !
+            req = json.loads(request.data)
+
+            # only the emulator's single managed floating network is accepted
+            network_id = req["floatingip"]["floating_network_id"]
+            net = self.api.compute.find_network_by_name_or_id(network_id)
+            if net != self.api.manage.floating_network:
+                return Response("You have to specify the existing floating network\n",
+                                status=400, mimetype='application/json')
+
+            port_id = req["floatingip"].get("port_id", None)
+            port = self.api.compute.find_port_by_name_or_id(port_id)
+            if port is not None:
+                if port.net_name != self.api.manage.floating_network.name:
+                    return Response("You have to specify a port in the floating network\n",
+                                    status=400, mimetype='application/json')
+
+                if port.floating_ip is not None:
+                    return Response("We allow only one floating ip per port\n", status=400, mimetype='application/json')
+            else:
+                # no port given: create a fresh port in the floating network
+                num_ports = len(self.api.compute.ports)
+                name = "port:cp%s:fl:%s" % (num_ports, str(uuid.uuid4()))
+                port = self.api.compute.create_port(name)
+                port.net_name = net.name
+                port.ip_address = net.get_new_ip_address(name)
+
+            # the port's fixed address doubles as its floating address
+            port.floating_ip = port.ip_address
+
+            response = dict()
+            resp = response["floatingip"] = dict()
+
+            resp["floating_network_id"] = net.id
+            resp["status"] = "ACTIVE"
+            # NOTE(review): the floating ip 'id' reuses the network id - confirm intended
+            resp["id"] = net.id
+            resp["port_id"] = port.id
+            resp["floating_ip_address"] = port.floating_ip
+            resp["fixed_ip_address"] = port.floating_ip
+
+            return Response(json.dumps(response), status=200, mimetype='application/json')
+        except Exception as ex:
+            logging.exception("Neutron: Create FloatingIP exception %s.", ex)
+            return Response(ex.message, status=500, mimetype='application/json')
--- /dev/null
+from flask_restful import Resource
+from flask import Response, request
+from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
+import logging
+import json
+import uuid
+from mininet.link import Link
+
+
+class NovaDummyApi(BaseOpenstackDummy):
+    """
+    Flask REST endpoint that emulates a minimal subset of the OpenStack Nova
+    API on top of the emulator's compute abstraction.
+    """
+
+    def __init__(self, in_ip, in_port, compute):
+        super(NovaDummyApi, self).__init__(in_ip, in_port)
+        # compute: the emulator's compute resource model backing all endpoints
+        self.compute = compute
+
+        # route table: one Resource class per Nova REST path
+        self.api.add_resource(NovaVersionsList, "/",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(Shutdown, "/shutdown")
+        self.api.add_resource(NovaVersionShow, "/v2.1/<id>",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaListServersApi, "/v2.1/<id>/servers",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaListServersAndPortsApi, "/v2.1/<id>/servers/andPorts",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaListServersDetailed, "/v2.1/<id>/servers/detail",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaShowServerDetails, "/v2.1/<id>/servers/<serverid>",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaInterfaceToServer, "/v2.1/<id>/servers/<serverid>/os-interface",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaShowAndDeleteInterfaceAtServer, "/v2.1/<id>/servers/<serverid>/os-interface/<port_id>",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaListFlavors, "/v2.1/<id>/flavors", "/v2/<id>/flavors",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaListFlavorsDetails, "/v2.1/<id>/flavors/detail", "/v2/<id>/flavors/detail",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaListFlavorById, "/v2.1/<id>/flavors/<flavorid>", "/v2/<id>/flavors/<flavorid>",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaListImages, "/v2.1/<id>/images",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaListImagesDetails, "/v2.1/<id>/images/detail",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(NovaListImageById, "/v2.1/<id>/images/<imageid>",
+                              resource_class_kwargs={'api': self})
+
+    def _start_flask(self):
+        """
+        Registers a set of default flavors and runs the Flask app (blocking call).
+        """
+        logging.info("Starting %s endpoint @ http://%s:%d" % ("NovaDummyApi", self.ip, self.port))
+        # add some flavors for good measure
+        self.compute.add_flavor('m1.tiny', 1, 512, "MB", 1, "GB")
+        self.compute.add_flavor('m1.nano', 1, 64, "MB", 0, "GB")
+        self.compute.add_flavor('m1.micro', 1, 128, "MB", 0, "GB")
+        self.compute.add_flavor('m1.small', 1, 1024, "MB", 2, "GB")
+        if self.app is not None:
+            # log every request to the playbook file before handling it
+            self.app.before_request(self.dump_playbook)
+            # NOTE(review): debug=True on a long-running endpoint - confirm this is wanted outside development
+            self.app.run(self.ip, self.port, debug=True, use_reloader=False)
+
+
+class Shutdown(Resource):
+ """
+ A get request to /shutdown will shut down this endpoint.
+ """
+
+ def get(self):
+ logging.debug(("%s is beeing shut doen") % (__name__))
+ func = request.environ.get('werkzeug.server.shutdown')
+ if func is None:
+ raise RuntimeError('Not running with the Werkzeug Server')
+ func()
+
+
+class NovaVersionsList(Resource):
+    def __init__(self, api):
+        self.api = api
+
+    def get(self):
+        """
+        Lists API versions.
+
+        :return: Returns a json with API versions.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+        try:
+            # static version document; only the endpoint address is substituted
+            resp = """
+                {
+                    "versions": [
+                        {
+                            "id": "v2.1",
+                            "links": [
+                                {
+                                    "href": "http://%s:%d/v2.1/",
+                                    "rel": "self"
+                                }
+                            ],
+                            "status": "CURRENT",
+                            "version": "2.38",
+                            "min_version": "2.1",
+                            "updated": "2013-07-23T11:33:21Z"
+                        }
+                    ]
+                }
+            """ % (self.api.ip, self.api.port)
+
+            response = Response(resp, status=200, mimetype="application/json")
+            response.headers['Access-Control-Allow-Origin'] = '*'
+            return response
+
+        except Exception as ex:
+            logging.exception(u"%s: Could not show list of versions." % __name__)
+            return ex.message, 500
+
+
+class NovaVersionShow(Resource):
+    def __init__(self, api):
+        self.api = api
+
+    def get(self, id):
+        """
+        Returns API details.
+
+        :param id: tenant id (unused; present for URL compatibility)
+        :type id: ``str``
+        :return: Returns a json with API details.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+
+        try:
+            # static version document; only the endpoint address is substituted
+            resp = """
+                {
+                    "version": {
+                        "id": "v2.1",
+                        "links": [
+                            {
+                                "href": "http://%s:%d/v2.1/",
+                                "rel": "self"
+                            },
+                            {
+                                "href": "http://docs.openstack.org/",
+                                "rel": "describedby",
+                                "type": "text/html"
+                            }
+                        ],
+                        "media-types": [
+                            {
+                                "base": "application/json",
+                                "type": "application/vnd.openstack.compute+json;version=2.1"
+                            }
+                        ],
+                        "status": "CURRENT",
+                        "version": "2.38",
+                        "min_version": "2.1",
+                        "updated": "2013-07-23T11:33:21Z"
+                    }
+                }
+            """ % (self.api.ip, self.api.port)
+
+            response = Response(resp, status=200, mimetype="application/json")
+            response.headers['Access-Control-Allow-Origin'] = '*'
+            return response
+
+        except Exception as ex:
+            logging.exception(u"%s: Could not show list of versions." % __name__)
+            return ex.message, 500
+
+
+class NovaListServersApi(Resource):
+    def __init__(self, api):
+        self.api = api
+
+    def get(self, id):
+        """
+        Creates a list with all running servers and their detailed information.
+
+        :param id: Used to create a individual link to quarry further information.
+        :type id: ``str``
+        :return: Returns a json response with a dictionary that contains the server information.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+
+        try:
+            resp = dict()
+            resp['servers'] = list()
+            for server in self.api.compute.computeUnits.values():
+                s = server.create_server_dict(self.api.compute)
+                # per-server self link in OpenStack style
+                s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (self.api.ip,
+                                                                            self.api.port,
+                                                                            id,
+                                                                            server.id)}]
+
+                resp['servers'].append(s)
+
+            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response.headers['Access-Control-Allow-Origin'] = '*'
+            return response
+
+        except Exception as ex:
+            logging.exception(u"%s: Could not retrieve the list of servers." % __name__)
+            return ex.message, 500
+
+    def post(self, id):
+        """
+        Creates a server instance.
+
+        :param id: tenant id, we ignore this most of the time
+        :type id: ``str``
+        :return: Returns a flask response, with detailed information about the just created server.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
+        try:
+            server_dict = json.loads(request.data)['server']
+            networks = server_dict.get('networks', None)
+            # container names are prefixed with the DC label; the requested name is truncated to 12 chars
+            name = str(self.api.compute.dc.label) + "_man_" + server_dict["name"][0:12]
+
+            if self.api.compute.find_server_by_name_or_id(name) is not None:
+                return Response("Server with name %s already exists." % name, status=409)
+            # TODO: not finished!
+            resp = dict()
+
+            server = self.api.compute.create_server(name)
+            server.full_name = str(self.api.compute.dc.label) + "_man_" + server_dict["name"]
+            server.template_name = server_dict["name"]
+
+            # resolve flavor and image references against the compute model
+            for flavor in self.api.compute.flavors.values():
+                if flavor.id == server_dict.get('flavorRef', ''):
+                    server.flavor = flavor.name
+            for image in self.api.compute.images.values():
+                # NOTE(review): substring containment, not equality - confirm imageRef matching is intended
+                if image.id in server_dict['imageRef']:
+                    server.image = image.name
+
+            if networks is not None:
+                for net in networks:
+                    port = self.api.compute.find_port_by_name_or_id(net.get('port', ""))
+                    if port is not None:
+                        server.port_names.append(port.name)
+                    else:
+                        # NOTE(review): server was already created above - it stays registered on this error path
+                        return Response("Currently only networking by port is supported.", status=400)
+
+            self.api.compute._start_compute(server)
+
+            # reuse the details endpoint to build the creation response
+            response = NovaShowServerDetails(self.api).get(id, server.id)
+            response.headers['Access-Control-Allow-Origin'] = '*'
+            return response
+
+        except Exception as ex:
+            logging.exception(u"%s: Could not create the server." % __name__)
+            return ex.message, 500
+
+
+class NovaListServersAndPortsApi(Resource):
+ def __init__(self, api):
+ self.api = api
+
+ def get(self, id):
+ """
+ Creates a list with all running servers and their detailed information. This function also presents all
+ port information of each server.
+
+ :param id: Used to create a individual link to quarry further information.
+ :type id: ``str``
+ :return: Returns a json response with a dictionary that contains the server information.
+ :rtype: :class:`flask.response`
+ """
+ logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+
+ try:
+ resp = dict()
+ resp['servers'] = list()
+ for server in self.api.compute.computeUnits.values():
+ s = server.create_server_dict(self.api.compute)
+ s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (self.api.ip,
+ self.api.port,
+ id,
+ server.id)}]
+
+ s['ports'] = list()
+ for port_name in server.port_names:
+ port = self.api.compute.find_port_by_name_or_id(port_name)
+ if port is None:
+ continue
+
+ tmp = port.create_port_dict(self.api.compute)
+ tmp['intf_name'] = port.intf_name
+ s['ports'].append(tmp)
+
+ resp['servers'].append(s)
+
+ response = Response(json.dumps(resp), status=200, mimetype="application/json")
+ response.headers['Access-Control-Allow-Origin'] = '*'
+ return response
+
+ except Exception as ex:
+ logging.exception(u"%s: Could not retrieve the list of servers." % __name__)
+ return ex.message, 500
+
+
+class NovaListServersDetailed(Resource):
+    def __init__(self, api):
+        self.api = api
+
+    def get(self, id):
+        """
+        As List Servers, it lists all running servers and their details but furthermore it also states the
+        used flavor and the server image.
+
+        :param id: tenant id, used for the 'href' link.
+        :type id: ``str``
+        :return: Returns a flask response, with detailed information aboit the servers and their flavor and image.
+        :rtype: :class:`flask.response`
+        """
+        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
+
+        try:
+            resp = {"servers": list()}
+            for server in self.api.compute.computeUnits.values():
+                s = server.create_server_dict(self.api.compute)
+                s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (self.api.ip,
+                                                                            self.api.port,
+                                                                            id,
+                                                                            server.id)}]
+                # direct dict lookups: a server referencing an unknown flavor/image
+                # raises KeyError here, which the except below turns into a 500
+                flavor = self.api.compute.flavors[server.flavor]
+                s['flavor'] = {
+                    "id": flavor.id,
+                    "links": [
+                        {
+                            "href": "http://%s:%d/v2.1/%s/flavors/%s" % (self.api.ip,
+                                                                         self.api.port,
+                                                                         id,
+                                                                         flavor.id),
+                            "rel": "bookmark"
+                        }
+                    ]
+                }
+                image = self.api.compute.images[server.image]
+                s['image'] = {
+                    "id": image.id,
+                    "links": [
+                        {
+                            "href": "http://%s:%d/v2.1/%s/images/%s" % (self.api.ip,
+                                                                        self.api.port,
+                                                                        id,
+                                                                        image.id),
+                            "rel": "bookmark"
+                        }
+                    ]
+                }
+
+                resp['servers'].append(s)
+
+            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response.headers['Access-Control-Allow-Origin'] = '*'
+            return response
+
+        except Exception as ex:
+            logging.exception(u"%s: Could not retrieve the list of servers." % __name__)
+            return ex.message, 500
+
+
class NovaListFlavors(Resource):
    """
    Nova-compatible endpoint that lists the emulator's flavors and allows
    new flavors to be registered.
    """

    def __init__(self, api):
        self.api = api

    def get(self, id):
        """
        Lists all available flavors.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :return: Returns a flask response with a list of all flavors.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            resp = dict()
            resp['flavors'] = list()
            for flavor in self.api.compute.flavors.values():
                # copy the instance dict so the added keys do not leak into
                # the stored flavor object
                f = flavor.__dict__.copy()
                f['id'] = flavor.id
                f['name'] = flavor.name
                f['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (self.api.ip,
                                                                            self.api.port,
                                                                            id,
                                                                            flavor.id)}]
                resp['flavors'].append(f)

            response = Response(json.dumps(resp), status=200, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        except Exception as ex:
            # fixed: this endpoint serves flavors, not servers
            logging.exception(u"%s: Could not retrieve the list of flavors." % __name__)
            return ex.message, 500

    def post(self, id):
        """
        Creates a new flavor from the request body and registers it with the
        compute component.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :return: Returns a flask response describing the created flavor.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
        data = json.loads(request.data).get("flavor")
        logging.warning("Create Flavor: %s" % str(data))
        # add to internal dict
        f = self.api.compute.add_flavor(
            data.get("name"),
            data.get("vcpus"),
            data.get("ram"), "MB",
            data.get("disk"), "GB")
        # create response based on incoming data
        data["id"] = f.id
        data["links"] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (self.api.ip,
                                                                       self.api.port,
                                                                       id,
                                                                       f.id)}]
        resp = {"flavor": data}
        return Response(json.dumps(resp), status=200, mimetype="application/json")
+
+
class NovaListFlavorsDetails(Resource):
    """
    Nova-compatible endpoint that lists flavors together with their
    resource details (ram, vcpus, disk, ...).
    """

    def __init__(self, api):
        self.api = api

    def get(self, id):
        """
        Lists all flavors with additional information like ram and disk space.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :return: Returns a flask response with a list of all flavors with additional information.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            resp = dict()
            resp['flavors'] = list()
            for flavor in self.api.compute.flavors.values():
                # use the class dict. it should work fine
                # but use a copy so we don't modify the original
                f = flavor.__dict__.copy()
                # add additional expected fields to stay openstack compatible
                f['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (self.api.ip,
                                                                            self.api.port,
                                                                            id,
                                                                            flavor.id)}]
                f['OS-FLV-DISABLED:disabled'] = False
                f['OS-FLV-EXT-DATA:ephemeral'] = 0
                f['os-flavor-access:is_public'] = True
                f['ram'] = flavor.memory
                f['vcpus'] = flavor.cpu
                f['swap'] = 0
                f['disk'] = flavor.storage
                f['rxtx_factor'] = 1.0
                resp['flavors'].append(f)

            response = Response(json.dumps(resp), status=200, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        except Exception as ex:
            # fixed: this endpoint serves flavors, not servers
            logging.exception(u"%s: Could not retrieve the list of flavors." % __name__)
            return ex.message, 500

    def post(self, id):
        """
        Creates a new flavor from the request body and registers it with the
        compute component.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :return: Returns a flask response describing the created flavor.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
        data = json.loads(request.data).get("flavor")
        logging.warning("Create Flavor: %s" % str(data))
        # add to internal dict
        f = self.api.compute.add_flavor(
            data.get("name"),
            data.get("vcpus"),
            data.get("ram"), "MB",
            data.get("disk"), "GB")
        # create response based on incoming data
        data["id"] = f.id
        data["links"] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (self.api.ip,
                                                                       self.api.port,
                                                                       id,
                                                                       f.id)}]
        resp = {"flavor": data}
        return Response(json.dumps(resp), status=200, mimetype="application/json")
+
+
class NovaListFlavorById(Resource):
    """
    Nova-compatible endpoint that returns a single flavor, looked up by
    dictionary key (name) or by flavor id.
    """

    def __init__(self, api):
        self.api = api

    def get(self, id, flavorid):
        """
        Returns details about one flavor.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :param flavorid: Represents the flavor.
        :type flavorid: ``str``
        :return: Returns a flask response with detailed information about the flavor,
            or a 404 response if no flavor matches.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            resp = dict()
            resp['flavor'] = dict()
            # the flavors dict is keyed by name; fall back to an id lookup
            flavor = self.api.compute.flavors.get(flavorid, None)
            if flavor is None:
                for f in self.api.compute.flavors.values():
                    if f.id == flavorid:
                        flavor = f
                        break
            # fixed: previously a missing flavor caused an AttributeError and
            # a 500 response; report a proper 404 instead
            if flavor is None:
                return Response("Flavor with id or name %s does not exists." % flavorid, status=404)
            resp['flavor']['id'] = flavor.id
            resp['flavor']['name'] = flavor.name
            resp['flavor']['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (self.api.ip,
                                                                                     self.api.port,
                                                                                     id,
                                                                                     flavor.id)}]
            response = Response(json.dumps(resp), status=200, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        except Exception as ex:
            logging.exception(u"%s: Could not retrieve flavor with id %s" % (__name__, flavorid))
            return ex.message, 500
+
+
class NovaListImages(Resource):
    """
    Nova-compatible endpoint that lists the images known to the emulator.
    """

    def __init__(self, api):
        self.api = api

    def get(self, id):
        """
        Creates a list of all usable images.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :return: Returns a flask response with a list of available images.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            image_list = list()
            for image in self.api.compute.images.values():
                # strip the docker ":latest" tag from the displayed name
                entry = {
                    'id': image.id,
                    'name': str(image.name).replace(":latest", ""),
                    'links': [{'href': "http://%s:%d/v2.1/%s/images/%s" % (self.api.ip,
                                                                           self.api.port,
                                                                           id,
                                                                           image.id)}]
                }
                image_list.append(entry)
            response = Response(json.dumps({'images': image_list}), status=200, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        except Exception as ex:
            logging.exception(u"%s: Could not retrieve the list of images." % __name__)
            return ex.message, 500
+
+
class NovaListImagesDetails(Resource):
    """
    Nova-compatible endpoint that lists images including extra metadata.
    """

    def __init__(self, api):
        self.api = api

    def get(self, id):
        """
        As List Images but with additional metadata.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :return: Returns a flask response with a list of images and their metadata.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            image_list = list()
            for image in self.api.compute.images.values():
                # work on a copy of the instance dict so the stored image
                # object is not polluted with the extra response fields
                entry = image.__dict__.copy()
                entry['name'] = str(image.name).replace(":latest", "")
                entry['links'] = [{'href': "http://%s:%d/v2.1/%s/images/%s" % (self.api.ip,
                                                                               self.api.port,
                                                                               id,
                                                                               image.id)}]
                # static metadata expected by openstack clients
                entry['metadata'] = {
                    "architecture": "x86_64",
                    "auto_disk_config": "True",
                    "kernel_id": "nokernel",
                    "ramdisk_id": "nokernel"
                }
                image_list.append(entry)

            response = Response(json.dumps({'images': image_list}), status=200, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        except Exception as ex:
            logging.exception(u"%s: Could not retrieve the list of images." % __name__)
            return ex.message, 500
+
+
class NovaListImageById(Resource):
    """
    Nova-compatible endpoint that returns one image by id or name.
    """

    def __init__(self, api):
        self.api = api

    def get(self, id, imageid):
        """
        Gets an image by id from the emulator with openstack nova compliant return values.

        :param id: tenantid, we ignore this most of the time
        :type id: ``str``
        :param imageid: id of the image. If it is 1 the dummy CREATE-IMAGE is returned
        :type imageid: ``str``
        :return: Returns a flask response with the information about one image.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            resp = dict()
            i = resp['image'] = dict()
            for image in self.api.compute.images.values():
                if image.id == imageid or image.name == imageid:
                    i['id'] = image.id
                    i['name'] = image.name

                    response = Response(json.dumps(resp), status=200, mimetype="application/json")
                    # fixed: the success response was the only one in this API
                    # missing the CORS header set by every sibling endpoint
                    response.headers['Access-Control-Allow-Origin'] = '*'
                    return response

            response = Response("Image with id or name %s does not exists." % imageid, status=404)
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        except Exception as ex:
            logging.exception(u"%s: Could not retrieve image with id %s." % (__name__, imageid))
            return ex.message, 500
+
+
class NovaShowServerDetails(Resource):
    """
    Nova-compatible endpoint for reading details about, and deleting, a
    single server instance.
    """

    def __init__(self, api):
        self.api = api

    def get(self, id, serverid):
        """
        Returns detailed information about the specified server.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :param serverid: Specifies the requested server.
        :type serverid: ``str``
        :return: Returns a flask response with details about the server.
        :rtype: :class:`flask.response`
        """
        logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            server = self.api.compute.find_server_by_name_or_id(serverid)
            if server is None:
                return Response("Server with id or name %s does not exists." % serverid, status=404)
            s = server.create_server_dict()
            s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (self.api.ip,
                                                                        self.api.port,
                                                                        id,
                                                                        server.id)}]

            # server.flavor / server.image are keys into the compute dicts
            flavor = self.api.compute.flavors[server.flavor]
            s['flavor'] = {
                "id": flavor.id,
                "links": [
                    {
                        "href": "http://%s:%d/v2.1/%s/flavors/%s" % (self.api.ip,
                                                                     self.api.port,
                                                                     id,
                                                                     flavor.id),
                        "rel": "bookmark"
                    }
                ]
            }
            image = self.api.compute.images[server.image]
            s['image'] = {
                "id": image.id,
                "links": [
                    {
                        "href": "http://%s:%d/v2.1/%s/images/%s" % (self.api.ip,
                                                                    self.api.port,
                                                                    id,
                                                                    image.id),
                        "rel": "bookmark"
                    }
                ]
            }

            response = Response(json.dumps({'server': s}), status=200, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        except Exception as ex:
            logging.exception(u"%s: Could not retrieve the server details." % __name__)
            return ex.message, 500

    def delete(self, id, serverid):
        """
        Delete a server instance.

        :param id: tenant id, we ignore this most of the time
        :type id: ``str``
        :param serverid: The UUID of the server
        :type serverid: ``str``
        :return: Returns 204 (no content) if everything is fine.
        :rtype: :class:`flask.response`
        """
        # fixed: this is the DELETE handler, it previously logged "POST"
        logging.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
        try:
            server = self.api.compute.find_server_by_name_or_id(serverid)
            if server is None:
                return Response('Could not find server.', status=404, mimetype="application/json")

            self.api.compute.stop_compute(server)

            response = Response('Server deleted.', status=204, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        except Exception as ex:
            # fixed: the old message claimed the server could not be *created*
            logging.exception(u"%s: Could not delete the server." % __name__)
            return ex.message, 500
+
+
class NovaInterfaceToServer(Resource):
    """
    Nova-compatible endpoint that attaches a new network interface to a
    running server, either by port id or by network id.
    """

    def __init__(self, api):
        self.api = api

    def post(self, id, serverid):
        """
        Add an interface to the specified server.

        :param id: tenant id, we ignore this most of the time
        :type id: ``str``
        :param serverid: Specifies the server.
        :type serverid: ``str``
        :return: Returns a flask response with information about the attached interface.
        :rtype: :class:`flask.response`
        """
        # fixed: this is the POST handler, it previously logged "GET"
        logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
        try:
            server = self.api.compute.find_server_by_name_or_id(serverid)
            if server is None:
                return Response("Server with id or name %s does not exists." % serverid, status=404)

            if server.emulator_compute is None:
                logging.error("The targeted container does not exist.")
                return Response("The targeted container of %s does not exist." % serverid, status=404)
            data = json.loads(request.data).get("interfaceAttachment")
            resp = dict()
            port = data.get("port_id", None)
            net = data.get("net_id", None)
            dc = self.api.compute.dc
            # network_dict is handed to mininet's addLink as interface params;
            # it maps the interface name to its ip and network name
            network_dict = dict()
            network = None

            if net is not None and port is not None:
                port = self.api.compute.find_port_by_name_or_id(port)
                network = self.api.compute.find_network_by_name_or_id(net)
                network_dict['id'] = port.intf_name
                network_dict['ip'] = port.ip_address
                network_dict[network_dict['id']] = network.name
            elif net is not None:
                network = self.api.compute.find_network_by_name_or_id(net)
                if network is None:
                    return Response("Network with id or name %s does not exists." % net, status=404)
                # no port given: create a fresh one with a unique name
                port = self.api.compute.create_port("port:cp%s:fl:%s" %
                                                    (len(self.api.compute.ports), str(uuid.uuid4())))

                port.net_name = network.name
                port.ip_address = network.get_new_ip_address(port.name)
                network_dict['id'] = port.intf_name
                network_dict['ip'] = port.ip_address
                network_dict[network_dict['id']] = network.name
            elif port is not None:
                port = self.api.compute.find_port_by_name_or_id(port)
                network_dict['id'] = port.intf_name
                network_dict['ip'] = port.ip_address
                network = self.api.compute.find_network_by_name_or_id(port.net_name)
                network_dict[network_dict['id']] = network.name
            else:
                raise Exception("You can only attach interfaces by port or network at the moment")

            # floating network traffic goes through the dedicated floating
            # switch, everything else through the datacenter switch
            if network == self.api.manage.floating_network:
                dc.net.addLink(server.emulator_compute, self.api.manage.floating_switch,
                               params1=network_dict, cls=Link, intfName1=port.intf_name)
            else:
                dc.net.addLink(server.emulator_compute, dc.switch,
                               params1=network_dict, cls=Link, intfName1=port.intf_name)
            resp["port_state"] = "ACTIVE"
            resp["port_id"] = port.id
            resp["net_id"] = self.api.compute.find_network_by_name_or_id(port.net_name).id
            resp["mac_addr"] = port.mac_address
            resp["fixed_ips"] = list()
            fixed_ips = dict()
            fixed_ips["ip_address"] = port.ip_address
            fixed_ips["subnet_id"] = network.subnet_name
            resp["fixed_ips"].append(fixed_ips)
            response = Response(json.dumps({"interfaceAttachment": resp}), status=202, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        except Exception as ex:
            logging.exception(u"%s: Could not add interface to the server." % __name__)
            return ex.message, 500
+
+
class NovaShowAndDeleteInterfaceAtServer(Resource):
    """
    Nova-compatible endpoint that detaches an interface from a server.
    """

    def __init__(self, api):
        self.api = api

    def delete(self, id, serverid, port_id):
        """
        Deletes an existing interface.

        :param id: tenant id, we ignore this most of the time
        :type id: ``str``
        :param serverid: Specifies the server, where the interface will be deleted.
        :type serverid: ``str``
        :param port_id: Specifies the port of the interface.
        :type port_id: ``str``
        :return: Returns a flask response with 202 if everything worked out. Otherwise it will return 404 and an
            error message.
        :rtype: :class:`flask.response`
        """
        # fixed: this is the DELETE handler, it previously logged "GET"
        logging.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
        try:
            server = self.api.compute.find_server_by_name_or_id(serverid)
            if server is None:
                return Response("Server with id or name %s does not exists." % serverid, status=404)
            port = self.api.compute.find_port_by_name_or_id(port_id)
            if port is None:
                return Response("Port with id or name %s does not exists." % port_id, status=404)

            # find the mininet link whose endpoint matches the port's
            # interface name and ip, and remove it
            for link in self.api.compute.dc.net.links:
                if str(link.intf1) == port.intf_name and \
                        str(link.intf1.ip) == port.ip_address.split('/')[0]:
                    self.api.compute.dc.net.removeLink(link)
                    break

            response = Response("", status=202, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        except Exception as ex:
            logging.exception(u"%s: Could not detach interface from the server." % __name__)
            return ex.message, 500
--- /dev/null
+from instance_flavor import InstanceFlavor
+from model import Model
+from net import Net
+from port import Port
+from resource import Resource
+from router import Router
+from server import Server
+from stack import Stack
+from template import Template
+from image import Image
\ No newline at end of file
--- /dev/null
+import uuid
+from datetime import datetime
+
+
class Image:
    """Represents a (docker) image known to the emulator."""

    def __init__(self, name, id=None):
        self.name = name
        # Keep a caller supplied id, otherwise mint a fresh UUID.
        self.id = str(uuid.uuid4()) if id is None else id
        # Creation timestamp stored as a human readable string.
        self.created = str(datetime.now())

    def __eq__(self, other):
        """Two images are considered equal when their names match."""
        return self.name == other.name
--- /dev/null
+import uuid
+
+
class InstanceFlavor:
    """Describes a compute flavor: CPU, memory and storage footprint."""

    def __init__(self, name, cpu=None, memory=None, memory_unit=None, storage=None, storage_unit=None):
        # Every flavor gets a fresh UUID.
        self.id = str(uuid.uuid4())
        self.name = name
        # Resource dimensions; None means "unspecified".
        self.cpu, self.memory, self.memory_unit = cpu, memory, memory_unit
        self.storage, self.storage_unit = storage, storage_unit
--- /dev/null
class LoadBalancer(object):
    """Represents a loadbalancer with its incoming and outgoing ports."""

    def __init__(self, name, id=None, flavor=None, image=None, command=None, nw_list=None):
        # NOTE(review): flavor/image/command/nw_list are accepted for API
        # symmetry with Server but are currently not stored.
        self.name = name
        self.id = id  # stays None unless supplied by the caller
        # Maps of the balancer's incoming / outgoing ports.
        self.in_ports = dict()
        self.out_ports = dict()
--- /dev/null
class Model:
    """Container for a list of heat resources."""

    def __init__(self, resources=None):
        # Avoid the shared-mutable-default pitfall: an omitted or empty
        # argument yields a fresh, private list.
        self.resources = resources or list()
--- /dev/null
+import re
+
+
class Net:
    """
    Represents a network with an optional subnet and tracks which IP
    addresses of the subnet are currently issued to ports.
    """

    def __init__(self, name):
        self.name = name
        self.id = None
        self.subnet_name = None
        self.subnet_id = None
        self.subnet_creation_time = None
        self.subnet_update_time = None
        self.gateway_ip = None
        self.segmentation_id = None  # not set
        self._cidr = None
        # {'start': <ip str>, 'end': <ip str>} of the subnet, or None
        self.start_end_dict = None
        # int(ip) -> port name, for every issued address
        self._issued_ip_addresses = dict()

    def get_short_id(self):
        """
        Returns a shortened UUID, with only the first 6 characters.

        :return: First 6 characters of the UUID
        :rtype: ``str``
        """
        return str(self.id)[:6]

    def get_new_ip_address(self, port_name):
        """
        Calculates the next unused IP address which belongs to the subnet.

        :param port_name: Specifies the port.
        :type port_name: ``str``
        :return: Returns an unused IP address or None if all are in use.
        :rtype: ``str``
        """
        if self.start_end_dict is None:
            return None

        int_start_ip = Net.ip_2_int(self.start_end_dict['start']) + 2  # First address as network address not usable
        # Second one is for gateways only
        int_end_ip = Net.ip_2_int(self.start_end_dict['end']) - 1  # Last address for broadcasts
        while int_start_ip in self._issued_ip_addresses and int_start_ip <= int_end_ip:
            int_start_ip += 1

        if int_start_ip > int_end_ip:
            return None

        self._issued_ip_addresses[int_start_ip] = port_name
        return Net.int_2_ip(int_start_ip) + '/' + self._cidr.rsplit('/', 1)[1]

    def assign_ip_address(self, cidr, port_name):
        """
        Assigns the IP address to the port if it is currently NOT used.

        :param cidr: The cidr used by the port - e.g. 10.0.0.1/24
        :type cidr: ``str``
        :param port_name: The port name
        :type port_name: ``str``
        :return: * *False*: If the IP address is already issued or if it is not within this subnet mask.
            * *True*: Else
        """
        int_ip = Net.cidr_2_int(cidr)
        if int_ip in self._issued_ip_addresses:
            return False

        int_start_ip = Net.ip_2_int(self.start_end_dict['start']) + 1  # First address as network address not usable
        int_end_ip = Net.ip_2_int(self.start_end_dict['end']) - 1  # Last address for broadcasts
        if int_ip < int_start_ip or int_ip > int_end_ip:
            return False

        self._issued_ip_addresses[int_ip] = port_name
        return True

    def is_my_ip(self, cidr, port_name):
        """
        Checks if the IP is registered for this port name.

        :param cidr: The cidr used by the port - e.g. 10.0.0.1/24
        :type cidr: ``str``
        :param port_name: The port name
        :type port_name: ``str``
        :return: Returns true if the IP address belongs to the port name. Else it returns false.
        """
        int_ip = Net.cidr_2_int(cidr)

        if int_ip not in self._issued_ip_addresses:
            return False

        return self._issued_ip_addresses[int_ip] == port_name

    def withdraw_ip_address(self, ip_address):
        """
        Removes the IP address from the list of issued addresses, thus other ports can use it.

        :param ip_address: The issued IP address (plain or CIDR notation).
        :type ip_address: ``str``
        """
        if ip_address is None:
            return

        # accept both "10.0.0.1" and "10.0.0.1/24"
        address = ip_address.rsplit('/', 1)[0] if "/" in ip_address else ip_address
        int_ip_address = Net.ip_2_int(address)
        if int_ip_address in self._issued_ip_addresses:
            del self._issued_ip_addresses[int_ip_address]

    def reset_issued_ip_addresses(self):
        """
        Resets all issued IP addresses.
        """
        self._issued_ip_addresses = dict()

    def update_port_name_for_ip_address(self, ip_address, port_name):
        """
        Updates the port name of the issued IP address.

        :param ip_address: The already issued IP address.
        :type ip_address: ``str``
        :param port_name: The new port name
        :type port_name: ``str``
        """
        address = ip_address.rsplit('/', 1)[0]
        int_ip_address = Net.ip_2_int(address)
        self._issued_ip_addresses[int_ip_address] = port_name

    def set_cidr(self, cidr):
        """
        Sets the CIDR for the subnet. It previously checks for the correct CIDR format.

        :param cidr: The new CIDR for the subnet.
        :type cidr: ``str``
        :return: * *True*: When the new CIDR was set successfully.
            * *False*: If the CIDR format was wrong.
        :rtype: ``bool``
        """
        if cidr is None:
            if self._cidr is not None:
                # local import to avoid a circular dependency with ip_handler
                import emuvim.api.heat.ip_handler as IP
                IP.free_cidr(self._cidr, self.subnet_id)
                self._cidr = None
                self.reset_issued_ip_addresses()
                self.start_end_dict = dict()
            return True
        if not Net.check_cidr_format(cidr):
            return False

        self.reset_issued_ip_addresses()
        self.start_end_dict = Net.calculate_start_and_end_dict(cidr)
        self._cidr = cidr
        return True

    def get_cidr(self):
        """
        Gets the CIDR.

        :return: The CIDR
        :rtype: ``str``
        """
        return self._cidr

    def clear_cidr(self):
        """Drops the CIDR and all issued addresses without freeing it globally."""
        self._cidr = None
        self.start_end_dict = dict()
        self.reset_issued_ip_addresses()

    def delete_subnet(self):
        """Removes the subnet and frees its CIDR."""
        self.subnet_id = None
        self.subnet_name = None
        self.subnet_creation_time = None
        self.subnet_update_time = None
        self.set_cidr(None)

    @staticmethod
    def calculate_start_and_end_dict(cidr):
        """
        Calculates the start and end IP address for the subnet.

        :param cidr: The CIDR for the subnet.
        :type cidr: ``str``
        :return: Dict with start and end ip address
        :rtype: ``dict``
        """
        address, suffix = cidr.rsplit('/', 1)
        int_suffix = int(suffix)
        int_address = Net.ip_2_int(address)
        # Netmask with the top `int_suffix` bits set. Fixes an off-by-one in
        # the previous bit-clearing loop, which left one host bit in the mask
        # and thus produced a wrong start address for non-aligned CIDRs.
        netmask = (0xFFFFFFFF << (32 - int_suffix)) & 0xFFFFFFFF

        start = int_address & netmask
        end = start + (2 ** (32 - int_suffix) - 1)

        return {'start': Net.int_2_ip(start), 'end': Net.int_2_ip(end)}

    @staticmethod
    def cidr_2_int(cidr):
        """Converts the address part of a CIDR string to an int (None-safe)."""
        if cidr is None:
            return None
        ip = cidr.rsplit('/', 1)[0]
        return Net.ip_2_int(ip)

    @staticmethod
    def ip_2_int(ip):
        """
        Converts an IP address to int.

        :param ip: IP address
        :type ip: ``str``
        :return: IP address as int.
        :rtype: ``int``
        """
        # split + explicit int() per octet works on both Python 2 and 3
        # (indexing the result of map() breaks on Python 3)
        octets = [int(o) for o in ip.split('.')]
        return (octets[0] << 24) | (octets[1] << 16) | (octets[2] << 8) | octets[3]

    @staticmethod
    def int_2_ip(int_ip):
        """
        Converts an int IP address to string.

        :param int_ip: Int IP address.
        :type int_ip: ``int``
        :return: IP address
        :rtype: ``str``
        """
        int_ip = int(int_ip)
        return '%d.%d.%d.%d' % ((int_ip >> 24) & 255,
                                (int_ip >> 16) & 255,
                                (int_ip >> 8) & 255,
                                int_ip & 255)

    @staticmethod
    def check_cidr_format(cidr):
        """
        Checks the CIDR format. A valid example is: 192.168.0.0/29

        :param cidr: CIDR to be checked.
        :type cidr: ``str``
        :return: * *True*: If the Format is correct.
            * *False*: If it is not correct.
        :rtype: ``bool``
        """
        # NOTE(review): like the original, this only accepts two-digit
        # suffixes (/10../99); single digit prefixes such as /8 are rejected.
        r = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{2}')
        if r.match(cidr):
            return True
        return False

    def create_network_dict(self):
        """
        Creates the network description dictionary.

        :return: Network description.
        :rtype: ``dict``
        """
        network_dict = dict()
        network_dict["status"] = "ACTIVE"  # TODO do we support inactive networks?
        if self.subnet_id is None:
            network_dict["subnets"] = []
        else:
            network_dict["subnets"] = [self.subnet_id]
        network_dict["name"] = self.name
        network_dict["admin_state_up"] = True  # TODO is it always true?
        network_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"  # TODO what should go in here
        network_dict["id"] = self.id
        network_dict["shared"] = False  # TODO is it always false?
        return network_dict

    def create_subnet_dict(self):
        """
        Creates the subnet description dictionary.

        :return: Subnet description.
        :rtype: ``dict``
        """
        subnet_dict = dict()
        subnet_dict["name"] = self.subnet_name
        subnet_dict["network_id"] = self.id
        subnet_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"  # TODO what should go in here?
        subnet_dict["created_at"] = self.subnet_creation_time
        subnet_dict["dns_nameservers"] = []
        subnet_dict["allocation_pools"] = [self.start_end_dict]
        # NOTE(review): the upstream OpenStack field is "host_routes"; the key
        # is kept as-is because consumers of this dict may rely on it.
        subnet_dict["host_routers"] = []
        subnet_dict["gateway_ip"] = self.gateway_ip
        subnet_dict["ip_version"] = "4"
        subnet_dict["cidr"] = self.get_cidr()
        subnet_dict["updated_at"] = self.subnet_update_time
        subnet_dict["id"] = self.subnet_id
        subnet_dict["enable_dhcp"] = False  # TODO do we support DHCP?
        return subnet_dict

    def __eq__(self, other):
        if self.name == other.name and self.subnet_name == other.subnet_name and \
                self.gateway_ip == other.gateway_ip and \
                self.segmentation_id == other.segmentation_id and \
                self._cidr == other._cidr and \
                self.start_end_dict == other.start_end_dict:
            return True
        return False

    def __hash__(self):
        return hash((self.name,
                     self.subnet_name,
                     self.gateway_ip,
                     self.segmentation_id,
                     self._cidr,
                     self.start_end_dict))
--- /dev/null
+import logging
+import threading
+import uuid
+
# Global registry of interface names currently in use, shared by all ports:
# intf_name -> [owning port id, updated-flag]. Guarded by `lock`.
lock = threading.Lock()
intf_names = dict()


class Port:
    """
    Represents a network port and manages a globally unique, length-limited
    interface name derived from the port name.
    """

    def __init__(self, name, ip_address=None, mac_address=None, floating_ip=None):
        self.name = name
        self.intf_name = None
        self.id = str(uuid.uuid4())
        self.template_name = name
        # ip_address is structured like 10.0.0.1/24
        self.ip_address = ip_address
        self.mac_address = mac_address
        self.floating_ip = floating_ip
        self.net_name = None

    def set_name(self, name):
        """
        Sets the port name and re-derives the interface name.

        :param name: New port name.
        :type name: ``str``
        """
        if self.name == name:
            return

        # Free the old interface name if this port owns it.
        # fixed: previously a missing intf_names entry raised a KeyError
        # while the lock was held, leaking the lock; use get() + finally.
        global lock
        lock.acquire()
        try:
            entry = intf_names.get(self.intf_name)
            if entry is not None and entry[0] == self.id and entry[1] is False:
                del intf_names[self.intf_name]
        finally:
            lock.release()

        self.name = name
        # Create new interface name
        self.create_intf_name()

    def create_intf_name(self):
        """
        Creates the interface name, while using the first 4 letters of the port name, the specification, if it is an
        'in' / 'out' port or something else, and a counter value if the name is already used. The counter starts
        for each name at 0 and can go up to 999. After creating the name each port will post its interface name
        into the global dictionary and adding his full name. Thus each port can determine if his desired interface
        name is already used and choose the next one.
        """
        split_name = self.name.split(':')
        if len(split_name) >= 3:
            if split_name[2] == 'input' or split_name[2] == 'in':
                self.intf_name = split_name[0][:4] + '-' + 'in'
            elif split_name[2] == 'output' or split_name[2] == 'out':
                self.intf_name = split_name[0][:4] + '-' + 'out'
            else:
                self.intf_name = split_name[0][:4] + '-' + split_name[2][:4]
        else:
            self.intf_name = self.name[:9]

        global lock
        lock.acquire()
        try:
            global intf_names
            counter = 0
            intf_len = len(self.intf_name)
            self.intf_name = self.intf_name + '-' + str(counter)[:4]
            while self.intf_name in intf_names and counter < 999 and \
                    not intf_names[self.intf_name][0] == self.id:
                counter += 1
                self.intf_name = self.intf_name[:intf_len] + '-' + str(counter)[:4]

            # fixed: the old guard `counter >= 1000` was unreachable (the loop
            # stops at 999) and called logging.ERROR (an int constant, raising
            # TypeError). Instead, report failure when the final candidate is
            # still owned by another port.
            if self.intf_name in intf_names and intf_names[self.intf_name][0] != self.id:
                logging.error("Port %s could not create unique interface name (%s)",
                              self.name, self.intf_name)
                return

            # mark as 'updated' when this port re-registers its own name
            updated = self.intf_name in intf_names and intf_names[self.intf_name][0] == self.id
            intf_names[self.intf_name] = [self.id, updated]
        finally:
            lock.release()

    def get_short_id(self):
        """
        Gets a shortened ID which only contains first 6 characters.

        :return: The first 6 characters of the UUID.
        :rtype: ``str``
        """
        return str(self.id)[:6]

    def create_port_dict(self, compute):
        """
        Creates the port description dictionary.

        :param compute: Requires the compute resource to determine the used network.
        :type compute: :class:`heat.compute`
        :return: Returns the description dictionary.
        :rtype: ``dict``
        """
        port_dict = dict()
        port_dict["admin_state_up"] = True  # TODO is it always true?
        port_dict["device_id"] = "257614cc-e178-4c92-9c61-3b28d40eca44"  # TODO find real values
        port_dict["device_owner"] = ""  # TODO do we have such things?
        net = compute.find_network_by_name_or_id(self.net_name)
        port_dict["fixed_ips"] = [
            {
                "ip_address": self.ip_address.rsplit('/', 1)[0] if self.ip_address is not None else "",
                "subnet_id": net.subnet_id if net is not None else ""
            }
        ]
        port_dict["id"] = self.id
        port_dict["mac_address"] = self.mac_address
        port_dict["name"] = self.name
        port_dict["network_id"] = net.id if net is not None else ""
        port_dict["status"] = "ACTIVE"  # TODO do we support inactive port?
        port_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"  # TODO find real tenant_id
        return port_dict

    def compare_attributes(self, other):
        """
        Does NOT compare ip_address because this function only exists to check if we can
        update the IP address without any changes

        :param other: The port to compare with
        :type other: :class:`heat.resources.port`
        :return: True if the attributes are the same, else False.
        :rtype: ``bool``
        """
        if other is None:
            return False

        if self.name == other.name and self.floating_ip == other.floating_ip and \
                self.net_name == other.net_name:
            return True
        return False

    def __eq__(self, other):
        if other is None:
            return False

        if self.name == other.name and self.ip_address == other.ip_address and \
                self.mac_address == other.mac_address and \
                self.floating_ip == other.floating_ip and \
                self.net_name == other.net_name:
            return True
        return False

    def __hash__(self):
        return hash((self.name,
                     self.ip_address,
                     self.mac_address,
                     self.floating_ip,
                     self.net_name))

    def __del__(self):
        # Release this port's interface name registration on garbage collection.
        global lock
        lock.acquire()
        try:
            global intf_names
            if self.intf_name in intf_names and intf_names[self.intf_name][0] == self.id:
                if intf_names[self.intf_name][1] is False:
                    del intf_names[self.intf_name]
                else:
                    intf_names[self.intf_name][1] = False
        finally:
            lock.release()
--- /dev/null
class Resource:
    """A generic heat template resource: a name, a type and its properties."""

    def __init__(self, name, type=None, properties=None):
        self.name = name
        # NOTE: `type` shadows the builtin; kept for API compatibility.
        self.type = type
        self.properties = properties
--- /dev/null
+import uuid
+
+
class Router:
    """A logical router connecting a set of subnets (tracked by name)."""

    def __init__(self, name, id=None):
        self.name = name
        # Reuse a caller supplied id, otherwise generate a fresh UUID.
        self.id = str(uuid.uuid4()) if id is None else id
        self.subnet_names = list()

    def add_subnet(self, subnet_name):
        """Attach a subnet (by name) to this router."""
        self.subnet_names.append(subnet_name)

    def __eq__(self, other):
        """Routers are equal when name and the set of attached subnets match."""
        return (self.name == other.name and
                len(self.subnet_names) == len(other.subnet_names) and
                set(self.subnet_names) == set(other.subnet_names))
--- /dev/null
class Server(object):
    """Represents one compute instance (server / VNF container) of a stack."""

    def __init__(self, name, id=None, flavor=None, image=None, command=None, nw_list=None):
        # NOTE(review): nw_list is accepted but currently not stored.
        self.name = name
        self.full_name = None
        self.template_name = None
        self.id = id
        self.image = image
        self.command = command
        self.port_names = list()
        self.flavor = flavor
        # Emulator specific command; when set it overrides `command`
        # in the server description dictionary.
        self.son_emu_command = None
        # Reference to the emulated compute node once deployed.
        self.emulator_compute = None

    def compare_attributes(self, other):
        """
        Compares only class attributes like name and flavor but not the list of ports with the other server.

        :param other: The second server to compare with.
        :type other: :class:`heat.resources.server`
        :return: * *True*: If all attributes are alike.
            * *False*: Else
        :rtype: ``bool``
        """
        return (self.name == other.name and
                self.full_name == other.full_name and
                self.flavor == other.flavor and
                self.image == other.image and
                self.command == other.command)

    def __eq__(self, other):
        """Like compare_attributes, but also requires equal port name sets."""
        return (self.compare_attributes(other) and
                len(self.port_names) == len(other.port_names) and
                set(self.port_names) == set(other.port_names))

    def create_server_dict(self, compute=None):
        """
        Creates the server description dictionary.

        :param compute: The compute resource for further status information.
        :type compute: :class:`heat.compute`
        :return: Server description dictionary.
        :rtype: ``dict``
        """
        server_dict = {
            'name': self.name,
            'full_name': self.full_name,
            'id': self.id,
            'template_name': self.template_name,
            'flavor': self.flavor,
            'image': self.image,
        }
        # the emulator specific command wins over the plain command
        server_dict['command'] = self.command if self.son_emu_command is None else self.son_emu_command

        if compute is not None:
            server_dict['status'] = 'ACTIVE'
            server_dict['OS-EXT-STS:power_state'] = 1
            server_dict["OS-EXT-STS:task_state"] = None
        return server_dict
--- /dev/null
+import uuid
+
+
class Stack:
    """Bundles all resources (servers, nets, ports, routers) of one heat stack."""

    def __init__(self, id=None):
        # All resource maps are keyed by the resource's name.
        self.servers = dict()
        self.nets = dict()
        self.ports = dict()
        self.routers = dict()
        self.stack_name = None
        self.creation_time = None
        self.update_time = None
        self.status = None
        # Reuse a caller supplied id, otherwise generate a fresh UUID.
        self.id = str(uuid.uuid4()) if id is None else id

    def add_server(self, server):
        """
        Adds one server to the server dictionary.

        :param server: The server to add.
        :type server: :class:`heat.resources.server`
        """
        self.servers[server.name] = server

    def add_net(self, net):
        """
        Adds one network to the network dictionary.

        :param net: Network to add.
        :type net: :class:`heat.resources.net`
        """
        self.nets[net.name] = net

    def add_port(self, port):
        """
        Adds one port to the port dictionary.

        :param port: Port to add.
        :type port: :class:`heat.resources.port`
        """
        self.ports[port.name] = port

    def add_router(self, router):
        """
        Adds one router to the router dictionary.

        :param router: Router to add.
        :type router: :class:`heat.resources.router`
        """
        self.routers[router.name] = router
--- /dev/null
class Template:
    """A heat template: a fixed format version plus its resources."""

    def __init__(self, resources=None):
        # Heat template format version supported by the emulator.
        self.version = '2015-04-30'
        self.resources = resources