class OF_conn():
    '''OpenDayLight connector. No MAC learning is used.

    Static openflow rules (matching on ingress port, destination MAC and
    vlan tag) are pushed/read/deleted on a single switch, identified by
    its DPID, through the ODL RESTCONF northbound API.

    All public methods follow the (result, content) convention:
    result >= 0 on success; -1 on failure, and then content is the error
    text. No exception escapes the public methods except from __init__.
    '''

    def __init__(self, params):
        '''Constructor.

        Params: dictionary with the following keys:
            of_dpid:     DPID to use for this controller (mandatory)
            of_ip:       controller IP address (mandatory)
            of_port:     controller TCP port (mandatory)
            of_user:     user credentials, can be missing or None
            of_password: password credentials
            of_debug:    debug level for logging. Default to ERROR
            other keys are ignored
        Raise ValueError if a mandatory parameter is missing or wrong
        '''
        # check params
        if "of_ip" not in params or params["of_ip"] is None or "of_port" not in params or params["of_port"] is None:
            raise ValueError("IP address and port must be provided")
        # validate explicitly instead of raising a bare KeyError below,
        # as the docstring promises an exception on missing parameters
        if "of_dpid" not in params or params["of_dpid"] is None:
            raise ValueError("DPID must be provided")
        # internal variables
        self.name = "OpenDayLight"
        self.headers = {'content-type': 'application/json',
                        'Accept': 'application/json'
                        }
        self.auth = None
        self.pp2ofi = {}  # From Physical Port to OpenFlow Index
        self.ofi2pp = {}  # From OpenFlow Index to Physical Port

        self.dpid = str(params["of_dpid"])
        # ODL names the switch node 'openflow:' + decimal value of the DPID
        self.id = 'openflow:' + str(int(self.dpid.replace(':', ''), 16))
        self.url = "http://%s:%s" % (str(params["of_ip"]), str(params["of_port"]))
        if "of_user" in params and params["of_user"] is not None:
            if not params.get("of_password"):
                of_password = ""
            else:
                of_password = str(params["of_password"])
            # NOTE(review): b64encode on a str is Python-2 only; on
            # Python 3 this needs .encode()/.decode() -- confirm target
            self.auth = base64.b64encode(str(params["of_user"]) + ":" + of_password)
            self.headers['Authorization'] = 'Basic ' + self.auth

        self.logger = logging.getLogger('vim.OF.ODL')
        self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))

    def get_of_switches(self):
        '''Obtain a list of switches or DPID detected by this controller.

        Return:
            >=0, list: list length, and a list where each element is a tuple pair (DPID, IP address)
            -1, text_error: if fails
        '''
        try:
            of_response = requests.get(self.url + "/restconf/operational/opendaylight-inventory:nodes",
                                       headers=self.headers)
            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
            if of_response.status_code != 200:
                self.logger.warning("get_of_switches " + error_text)
                return -1, error_text
            self.logger.debug("get_of_switches " + error_text)
            info = of_response.json()

            if type(info) != dict:
                self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info))
                return -1, "Unexpected response, not a dict. Wrong version?"

            nodes = info.get('nodes')
            if type(nodes) is not dict:
                # log the offending value, not the outer dict
                self.logger.error("get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s",
                                  str(type(nodes)))
                return -1, "Unexpected response at 'nodes', not found or not a dict. Wrong version?"

            node_list = nodes.get('node')
            if type(node_list) is not list:
                self.logger.error("get_of_switches. Unexpected response, at 'nodes':'node', "
                                  "not found or not a list: %s", str(type(node_list)))
                return -1, "Unexpected response, at 'nodes':'node', not found or not a list. Wrong version?"

            switch_list = []
            for node in node_list:
                node_id = node.get('id')
                if node_id is None:
                    self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'id', "
                                      "not found: %s", str(node))
                    return -1, "Unexpected response at 'nodes':'node'[]:'id', not found . Wrong version?"

                if node_id == 'controller-config':
                    # internal ODL pseudo-node, not a real switch
                    continue

                node_ip_address = node.get('flow-node-inventory:ip-address')
                if node_ip_address is None:
                    self.logger.error("get_of_switches. Unexpected response at "
                                      "'nodes':'node'[]:'flow-node-inventory:ip-address', not found: %s", str(node))
                    return -1, "Unexpected response at 'nodes':'node'[]:'flow-node-inventory:ip-address', " \
                               "not found. Wrong version?"

                # node_id is 'openflow:<decimal dpid>'; convert back to the
                # canonical colon-separated hexadecimal DPID representation
                node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)
                switch_list.append((':'.join(a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])),
                                    node_ip_address))

            return len(switch_list), switch_list
        except (requests.exceptions.RequestException, ValueError) as e:
            # ValueError in the case that JSON can not be decoded
            error_text = type(e).__name__ + ": " + str(e)
            self.logger.error("get_of_switches " + error_text)
            return -1, error_text

    def obtain_port_correspondence(self):
        '''Obtain the correspondence between physical and openflow port names.

        Fills self.pp2ofi and self.ofi2pp for the switch self.id.
        return:
            0, dictionary: with physical name as key, openflow name as value
            -1, error_text: if fails
        '''
        try:
            of_response = requests.get(self.url + "/restconf/operational/opendaylight-inventory:nodes",
                                       headers=self.headers)
            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
            if of_response.status_code != 200:
                self.logger.warning("obtain_port_correspondence " + error_text)
                return -1, error_text
            self.logger.debug("obtain_port_correspondence " + error_text)
            info = of_response.json()

            if type(info) != dict:
                self.logger.error("obtain_port_correspondence. Unexpected response not a dict: %s", str(info))
                return -1, "Unexpected openflow response, not a dict. Wrong version?"

            nodes = info.get('nodes')
            if type(nodes) is not dict:
                self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes', "
                                  "not found or not a dict: %s", str(type(nodes)))
                return -1, "Unexpected response at 'nodes',not found or not a dict. Wrong version?"

            node_list = nodes.get('node')
            if type(node_list) is not list:
                self.logger.error("obtain_port_correspondence. Unexpected response, at 'nodes':'node', "
                                  "not found or not a list: %s", str(type(node_list)))
                return -1, "Unexpected response, at 'nodes':'node', not found or not a list. Wrong version?"

            for node in node_list:
                node_id = node.get('id')
                if node_id is None:
                    self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:'id', "
                                      "not found: %s", str(node))
                    return -1, "Unexpected response at 'nodes':'node'[]:'id', not found . Wrong version?"

                if node_id == 'controller-config':
                    continue

                # Figure out if this is the appropriate switch. The 'id' is 'openflow:' plus the decimal value
                # of the dpid
                # In case this is not the desired switch, continue
                if self.id != node_id:
                    continue

                node_connector_list = node.get('node-connector')
                if type(node_connector_list) is not list:
                    self.logger.error("obtain_port_correspondence. Unexpected response at "
                                      "'nodes':'node'[]:'node-connector', not found or not a list: %s", str(node))
                    return -1, "Unexpected response at 'nodes':'node'[]:'node-connector', " \
                               "not found or not a list. Wrong version?"

                for node_connector in node_connector_list:
                    self.pp2ofi[str(node_connector['flow-node-inventory:name'])] = str(node_connector['id'])
                    self.ofi2pp[node_connector['id']] = str(node_connector['flow-node-inventory:name'])

                node_ip_address = node.get('flow-node-inventory:ip-address')
                if node_ip_address is None:
                    self.logger.error("obtain_port_correspondence. Unexpected response at "
                                      "'nodes':'node'[]:'flow-node-inventory:ip-address', not found: %s", str(node))
                    return -1, "Unexpected response at 'nodes':'node'[]:'flow-node-inventory:ip-address', " \
                               "not found. Wrong version?"
                self.ip_address = node_ip_address

                # If we found the appropriate dpid no need to continue in the for loop
                break

            return 0, self.pp2ofi
        except (requests.exceptions.RequestException, ValueError) as e:
            # ValueError in the case that JSON can not be decoded
            error_text = type(e).__name__ + ": " + str(e)
            self.logger.error("obtain_port_correspondence " + error_text)
            return -1, error_text

    def get_of_rules(self, translate_of_ports=True):
        '''Obtain the rules inserted at openflow controller.

        Params:
            translate_of_ports: if True it translates ports from openflow index to physical switch name
        Return:
            0, dict if ok: with the rule name as key and value is another dictionary with the following content:
                priority:     rule priority
                name:         rule name (present also as the master dict key)
                ingress_port: match input port of the rule
                dst_mac:      match destination mac address of the rule, can be missing or None if not apply
                vlan_id:      match vlan tag of the rule, can be missing or None if not apply
                actions:      list of actions, composed by a pair tuples:
                    (vlan, None/int): for stripping/setting a vlan tag
                    (out, port):      send to this port
                switch:       DPID, all
            -1, text_error if fails
        '''
        # the openflow-index <-> physical-name tables are needed to
        # translate ports; load them lazily on first use
        if len(self.ofi2pp) == 0:
            r, c = self.obtain_port_correspondence()
            if r < 0:
                return r, c
        # get rules
        try:
            of_response = requests.get(self.url + "/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
                                       "/table/0", headers=self.headers)
            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)

            # The configured page does not exist if there are no rules installed.
            # In that case we return an empty dict
            if of_response.status_code == 404:
                return 0, {}
            elif of_response.status_code != 200:
                self.logger.warning("get_of_rules " + error_text)
                return -1, error_text
            self.logger.debug("get_of_rules " + error_text)

            info = of_response.json()

            if type(info) != dict:
                self.logger.error("get_of_rules. Unexpected response not a dict: %s", str(info))
                return -1, "Unexpected openflow response, not a dict. Wrong version?"

            table = info.get('flow-node-inventory:table')
            if type(table) is not list:
                self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table', "
                                  "not a list: %s", str(type(table)))
                return -1, "Unexpected response at 'flow-node-inventory:table', not a list. Wrong version?"

            flow_list = table[0].get('flow')
            if flow_list is None:
                return 0, {}

            if type(flow_list) is not list:
                self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table'[0]:'flow', "
                                  "not a list: %s", str(type(flow_list)))
                return -1, "Unexpected response at 'flow-node-inventory:table'[0]:'flow', not a list. Wrong version?"

            rules = dict()
            for flow in flow_list:
                # every flow must carry an id, a match and one apply-actions instruction
                if not ('id' in flow and 'match' in flow and 'instructions' in flow and
                        'instruction' in flow['instructions'] and
                        'apply-actions' in flow['instructions']['instruction'][0] and
                        'action' in flow['instructions']['instruction'][0]['apply-actions']):
                    return -1, "unexpected openflow response, one or more elements are missing. Wrong version?"

                rule = dict()
                rule['switch'] = self.dpid
                rule['priority'] = flow.get('priority')
                if 'in-port' in flow['match']:
                    in_port = flow['match']['in-port']
                    if in_port not in self.ofi2pp:
                        return -1, "Error: Ingress port " + in_port + " is not in switch port list"

                    if translate_of_ports:
                        in_port = self.ofi2pp[in_port]

                    rule['ingress_port'] = in_port

                    # NOTE: rules with vlan-id-present == False are not expected/handled
                    if 'vlan-match' in flow['match'] and 'vlan-id' in flow['match']['vlan-match'] and \
                            'vlan-id' in flow['match']['vlan-match']['vlan-id'] and \
                            'vlan-id-present' in flow['match']['vlan-match']['vlan-id'] and \
                            flow['match']['vlan-match']['vlan-id']['vlan-id-present'] == True:
                        rule['vlan_id'] = flow['match']['vlan-match']['vlan-id']['vlan-id']

                    if 'ethernet-match' in flow['match'] and 'ethernet-destination' in flow['match']['ethernet-match'] and \
                            'address' in flow['match']['ethernet-match']['ethernet-destination']:
                        rule['dst_mac'] = flow['match']['ethernet-match']['ethernet-destination']['address']

                instructions = flow['instructions']['instruction'][0]['apply-actions']['action']

                # actions come unordered; place them by their 'order' field
                max_index = 0
                for instruction in instructions:
                    if instruction['order'] > max_index:
                        max_index = instruction['order']

                actions = [None] * (max_index + 1)
                for instruction in instructions:
                    if 'output-action' in instruction:
                        if 'output-node-connector' not in instruction['output-action']:
                            return -1, "unexpected openflow response, one or more elements are missing. " \
                                       "Wrong version?"

                        out_port = instruction['output-action']['output-node-connector']
                        if out_port not in self.ofi2pp:
                            return -1, "Error: Output port " + out_port + " is not in switch port list"

                        if translate_of_ports:
                            out_port = self.ofi2pp[out_port]

                        actions[instruction['order']] = ('out', out_port)

                    elif 'strip-vlan-action' in instruction:
                        actions[instruction['order']] = ('vlan', None)

                    elif 'set-field' in instruction:
                        if not ('vlan-match' in instruction['set-field'] and
                                'vlan-id' in instruction['set-field']['vlan-match'] and
                                'vlan-id' in instruction['set-field']['vlan-match']['vlan-id']):
                            return -1, "unexpected openflow response, one or more elements are missing. " \
                                       "Wrong version?"

                        actions[instruction['order']] = ('vlan',
                                                         instruction['set-field']['vlan-match']['vlan-id']['vlan-id'])

                # drop the gaps left by unknown/absent order indexes
                actions = [x for x in actions if x is not None]

                rule['actions'] = list(actions)
                rules[flow['id']] = dict(rule)

            return 0, rules
        except (requests.exceptions.RequestException, ValueError) as e:
            # ValueError in the case that JSON can not be decoded
            error_text = type(e).__name__ + ": " + str(e)
            self.logger.error("get_of_rules " + error_text)
            return -1, error_text

    def del_flow(self, flow_name):
        '''Delete an existing rule.

        Params: flow_name, this is the rule name
        Return:
            0, None if ok
            -1, text_error if fails
        '''
        try:
            of_response = requests.delete(self.url + "/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
                                          "/table/0/flow/" + flow_name, headers=self.headers)
            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
            if of_response.status_code != 200:
                self.logger.warning("del_flow " + error_text)
                return -1, error_text
            self.logger.debug("del_flow OK " + error_text)
            return 0, None

        except requests.exceptions.RequestException as e:
            error_text = type(e).__name__ + ": " + str(e)
            self.logger.error("del_flow " + error_text)
            return -1, error_text

    def new_flow(self, data):
        '''Insert a new static rule.

        Params: data: dictionary with the following content:
            priority:     rule priority
            name:         rule name
            ingress_port: match input port of the rule
            dst_mac:      match destination mac address of the rule, missing or None if not apply
            vlan_id:      match vlan tag of the rule, missing or None if not apply
            actions:      list of actions, composed by a pair tuples with these possibilities:
                ('vlan', None/int): for stripping/setting a vlan tag
                ('out', port):      send to this port
        Return:
            0, None if ok
            -1, text_error if fails
        '''
        if len(self.pp2ofi) == 0:
            r, c = self.obtain_port_correspondence()
            if r < 0:
                return r, c
        try:
            # We have to build the data for the opendaylight call from the generic data
            sdata = dict()
            sdata['flow-node-inventory:flow'] = list()
            sdata['flow-node-inventory:flow'].append(dict())
            flow = sdata['flow-node-inventory:flow'][0]
            flow['id'] = data['name']
            flow['flow-name'] = data['name']
            flow['idle-timeout'] = 0  # 0: never expire
            flow['hard-timeout'] = 0
            flow['table_id'] = 0
            flow['priority'] = data.get('priority')
            flow['match'] = dict()
            if data['ingress_port'] not in self.pp2ofi:
                error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch'
                self.logger.warning("new_flow " + error_text)
                return -1, error_text
            flow['match']['in-port'] = self.pp2ofi[data['ingress_port']]
            # dst_mac may be present but None ("not apply"); skip it then
            if data.get('dst_mac'):
                flow['match']['ethernet-match'] = dict()
                flow['match']['ethernet-match']['ethernet-destination'] = dict()
                flow['match']['ethernet-match']['ethernet-destination']['address'] = data['dst_mac']
            if data.get('vlan_id'):
                flow['match']['vlan-match'] = dict()
                flow['match']['vlan-match']['vlan-id'] = dict()
                flow['match']['vlan-match']['vlan-id']['vlan-id-present'] = True
                flow['match']['vlan-match']['vlan-id']['vlan-id'] = int(data['vlan_id'])
            flow['instructions'] = dict()
            flow['instructions']['instruction'] = list()
            flow['instructions']['instruction'].append(dict())
            flow['instructions']['instruction'][0]['order'] = 1
            flow['instructions']['instruction'][0]['apply-actions'] = dict()
            flow['instructions']['instruction'][0]['apply-actions']['action'] = list()
            actions = flow['instructions']['instruction'][0]['apply-actions']['action']

            order = 0
            for action in data['actions']:
                new_action = {'order': order}
                if action[0] == "vlan":
                    if action[1] is None:
                        # strip vlan
                        new_action['strip-vlan-action'] = dict()
                    else:
                        new_action['set-field'] = dict()
                        new_action['set-field']['vlan-match'] = dict()
                        new_action['set-field']['vlan-match']['vlan-id'] = dict()
                        new_action['set-field']['vlan-match']['vlan-id']['vlan-id-present'] = True
                        new_action['set-field']['vlan-match']['vlan-id']['vlan-id'] = int(action[1])
                elif action[0] == 'out':
                    new_action['output-action'] = dict()
                    if action[1] not in self.pp2ofi:
                        # log before returning, as done for the ingress port above
                        error_text = 'Port ' + action[1] + ' is not present in the switch'
                        self.logger.warning("new_flow " + error_text)
                        return -1, error_text
                    new_action['output-action']['output-node-connector'] = self.pp2ofi[action[1]]
                else:
                    error_text = "Unknown item '%s' in action list" % action[0]
                    self.logger.error("new_flow " + error_text)
                    return -1, error_text

                actions.append(new_action)
                order += 1

            of_response = requests.put(self.url + "/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
                                       "/table/0/flow/" + data['name'],
                                       headers=self.headers, data=json.dumps(sdata))
            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
            if of_response.status_code != 200:
                self.logger.warning("new_flow " + error_text)
                return -1, error_text
            self.logger.debug("new_flow OK " + error_text)
            return 0, None

        except requests.exceptions.RequestException as e:
            error_text = type(e).__name__ + ": " + str(e)
            self.logger.error("new_flow " + error_text)
            return -1, error_text

    def clear_all_flows(self):
        '''Delete all existing rules.

        Return:
            0, None if ok
            -1, text_error if fails
        '''
        try:
            of_response = requests.delete(self.url + "/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
                                          "/table/0", headers=self.headers)
            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
            # 404 means the table was never configured: nothing to clear
            if of_response.status_code != 200 and of_response.status_code != 404:  # HTTP_Not_Found
                self.logger.warning("clear_all_flows " + error_text)
                return -1, error_text
            self.logger.debug("clear_all_flows OK " + error_text)
            return 0, None
        except requests.exceptions.RequestException as e:
            error_text = type(e).__name__ + ": " + str(e)
            self.logger.error("clear_all_flows " + error_text)
            return -1, error_text
You can get local copies +of those layers as well using `charm pull-source`: + + export LAYER_PATH=$HOME/layers + export INTERFACE_PATH=$HOME/interfaces + mkdir $HOME/{layers,interfaces} + + charm pull-source layer:openvim-compute + charm pull-source interface:openvim-compute + +You can then deploy the locally built charms individually: + + juju deploy local:exenial/openvim + +You can also use the local version of a bundle: + + juju deploy openvim/charms/bundles/openmano.yaml + +> Note: With Juju versions < 2.0, you will need to use [juju-deployer][] to +deploy the local bundle. + + +[charm-tools]: https://jujucharms.com/docs/stable/tools-charm-tools +[Getting Started]: https://jujucharms.com/docs/devel/developer-getting-started +[interfaces.juju.solutions]: http://interfaces.juju.solutions/ +[juju-deployer]: https://pypi.python.org/pypi/juju-deployer/ diff --git a/charm/bundles/openmano.yaml b/charm/bundles/openmano.yaml new file mode 100644 index 0000000..d0e09a6 --- /dev/null +++ b/charm/bundles/openmano.yaml @@ -0,0 +1,56 @@ +series: xenial +services: + mariadb: + charm: "cs:trusty/mariadb-3" + num_units: 1 + annotations: + "gui-x": "539" + "gui-y": "494.7050807568877" + to: + - "0" + openmano: + charm: "cs:~nfv/openmano-3" + num_units: 1 + annotations: + "gui-x": "939" + "gui-y": "494.7050807568877" + to: + - "1" + "openvim-controller": + charm: "cs:~nfv/openvim-controller-2" + num_units: 1 + annotations: + "gui-x": "739" + "gui-y": "148.29491924311225" + to: + - "2" + "openvim-compute": + charm: "cs:~nfv/openvim-compute-2" + num_units: 1 + annotations: + "gui-x": "339" + "gui-y": "148.29491924311225" + to: + - "3" +relations: + - - "openmano:db" + - "mariadb:db" + - - "openvim-controller:db" + - "mariadb:db" + - - "openmano:openvim-controller" + - "openvim-controller:openvim-controller" + - - "openvim-controller:compute" + - "openvim-compute:compute" +machines: + "0": + series: trusty + constraints: arch=amd64 + "1": + series: xenial + constraints: arch=amd64 
+ "2": + series: xenial + constraints: arch=amd64 + "3": + series: xenial + constraints: arch=amd64 diff --git a/charm/openvim/interface-openvim-compute/interface.yaml b/charm/openvim/interface-openvim-compute/interface.yaml new file mode 100644 index 0000000..72f7f8a --- /dev/null +++ b/charm/openvim/interface-openvim-compute/interface.yaml @@ -0,0 +1,3 @@ +name: openvim-compute +version: 0 +description: Connection to an OpenVIM compute host diff --git a/charm/openvim/interface-openvim-compute/provides.py b/charm/openvim/interface-openvim-compute/provides.py new file mode 100644 index 0000000..adb0829 --- /dev/null +++ b/charm/openvim/interface-openvim-compute/provides.py @@ -0,0 +1,28 @@ +from charms.reactive import hook +from charms.reactive import RelationBase +from charms.reactive import scopes + + +class ProvidesOpenVIMCompute(RelationBase): + scope = scopes.GLOBAL + + auto_accessors = ['ssh_key'] + + @hook('{provides:openvim-compute}-relation-{joined,changed}') + def changed(self): + self.set_state('{relation_name}.connected') + if self.ssh_key(): + self.set_state('{relation_name}.available') + + @hook('{provides:openvim-compute}-relation-{broken,departed}') + def departed(self): + self.remove_state('{relation_name}.connected') + self.remove_state('{relation_name}.available') + + def ssh_key_installed(self): + convo = self.conversation() + convo.set_remote('ssh_key_installed', True) + + def send_user(self, user): + convo = self.conversation() + convo.set_remote('user', user) \ No newline at end of file diff --git a/charm/openvim/interface-openvim-compute/requires.py b/charm/openvim/interface-openvim-compute/requires.py new file mode 100644 index 0000000..86dd6e1 --- /dev/null +++ b/charm/openvim/interface-openvim-compute/requires.py @@ -0,0 +1,31 @@ +from charms.reactive import hook +from charms.reactive import RelationBase +from charms.reactive import scopes + + +class RequiresOpenVIMCompute(RelationBase): + scope = scopes.UNIT + + 
@hook('{requires:openvim-compute}-relation-{joined,changed}') + def changed(self): + self.set_state('{relation_name}.connected') + if self.ready_to_ssh(): + self.set_state('{relation_name}.available') + + @hook('{requires:openvim-compute}-relation-{broken,departed}') + def departed(self): + self.remove_state('{relation_name}.connected') + self.remove_state('{relation_name}.available') + + def send_ssh_key(self, key): + for c in self.conversations(): + c.set_remote('ssh_key', key) + + def authorized_nodes(self): + return [{ + 'user': c.get_remote('user'), + 'address': c.get_remote('private-address'), + } for c in self.conversations() if c.get_remote('ssh_key_installed')] + + def ready_to_ssh(self): + return len(self.authorized_nodes()) > 0 diff --git a/charm/openvim/interface-openvim/README.md b/charm/openvim/interface-openvim/README.md new file mode 100644 index 0000000..e69de29 diff --git a/charm/openvim/interface-openvim/interface.yaml b/charm/openvim/interface-openvim/interface.yaml new file mode 100644 index 0000000..fce5577 --- /dev/null +++ b/charm/openvim/interface-openvim/interface.yaml @@ -0,0 +1,3 @@ +name: openvim +summary: Basic OpenVIM interface +version: 1 diff --git a/charm/openvim/interface-openvim/provides.py b/charm/openvim/interface-openvim/provides.py new file mode 100644 index 0000000..c4ade3f --- /dev/null +++ b/charm/openvim/interface-openvim/provides.py @@ -0,0 +1,24 @@ +from charmhelpers.core import hookenv +from charms.reactive import hook +from charms.reactive import RelationBase +from charms.reactive import scopes + + +class OpenVimProvides(RelationBase): + scope = scopes.GLOBAL + + @hook('{provides:openvim}-relation-{joined,changed}') + def changed(self): + self.set_state('{relation_name}.available') + + @hook('{provides:openvim}-relation-{broken,departed}') + def broken(self): + self.remove_state('{relation_name}.available') + + def configure(self, port, user): + relation_info = { + 'hostname': hookenv.unit_get('private-address'), + 
class OpenVimRequires(RelationBase):
    '''Requires side of the openvim interface; one conversation per remote unit.'''

    scope = scopes.UNIT

    @hook('{requires:openvim}-relation-{joined,changed}')
    def changed(self):
        '''Flag this unit available once it has published its port.'''
        conversation = self.conversation()
        # a port in the conversation means the unit is part of the
        # set of available units
        if conversation.get_remote('port'):
            conversation.set_state('{relation_name}.available')

    @hook('{requires:openvim}-relation-{departed,broken}')
    def broken(self):
        '''Withdraw availability when the unit leaves the relation.'''
        self.conversation().remove_state('{relation_name}.available')

    def services(self):
        """
        Returns a list of available openvim services and their associated hosts
        and ports.

        The return value is a list of dicts of the following form::

            [
                {
                    'service_name': name_of_service,
                    'hosts': [
                        {
                            'hostname': address_of_host,
                            'port': port_for_host,
                            'user': user_for_host,
                        },
                        # ...
                    ],
                },
                # ...
            ]
        """
        by_name = {}
        for conversation in self.conversations():
            # conversation scope is '<service>/<unit-number>'
            name = conversation.scope.split('/')[0]
            entry = by_name.setdefault(name, {
                'service_name': name,
                'hosts': [],
            })
            address = conversation.get_remote('hostname') or \
                conversation.get_remote('private-address')
            remote_port = conversation.get_remote('port')
            if address and remote_port:
                entry['hosts'].append({
                    'hostname': address,
                    'port': remote_port,
                    'user': conversation.get_remote('user'),
                })
        # only report services that have at least one reachable host
        return [entry for entry in by_name.values() if entry['hosts']]
+ +Then create your VM and get its uuid: + + /home/ubuntu/openmano/openvim/openvim vm-create /tmp/server.yaml + +And finally start it: + + /home/ubuntu/openmano/openvim/openvim vm-start + + +# Contact Information + +Rye Terrell rye.terrell@canonical.com +George Kraft george.kraft@canonical.com diff --git a/charm/openvim/layer-openvim-compute/icon.svg b/charm/openvim/layer-openvim-compute/icon.svg new file mode 100644 index 0000000..902a49f --- /dev/null +++ b/charm/openvim/layer-openvim-compute/icon.svg @@ -0,0 +1,1284 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + diff --git a/charm/openvim/layer-openvim-compute/layer.yaml b/charm/openvim/layer-openvim-compute/layer.yaml new file mode 100644 index 0000000..4746769 --- /dev/null +++ b/charm/openvim/layer-openvim-compute/layer.yaml @@ -0,0 +1,12 @@ +includes: + - 'layer:basic' + - 'interface:openvim-compute' +options: + basic: + packages: + - qemu-kvm + - libvirt-bin + - bridge-utils + - virt-viewer + - virt-manager + - hugepages diff --git a/charm/openvim/layer-openvim-compute/metadata.yaml b/charm/openvim/layer-openvim-compute/metadata.yaml new file mode 100644 index 0000000..e9344cf --- /dev/null +++ b/charm/openvim/layer-openvim-compute/metadata.yaml @@ -0,0 +1,19 @@ +name: openvim-compute +summary: Open Virtual Infrastructure Manager +maintainers: + - Rye Terrell + - George Kraft +description: | + Installs and configures a compute node for OpenVIM +tags: + - nfv + - telco + - osm +series: + - xenial +extra-bindings: + public: + internal: +provides: + compute: + interface: openvim-compute diff --git a/charm/openvim/layer-openvim-compute/reactive/openvim.py b/charm/openvim/layer-openvim-compute/reactive/openvim.py new file mode 100644 index 0000000..cf6937f --- /dev/null +++ b/charm/openvim/layer-openvim-compute/reactive/openvim.py @@ -0,0 +1,74 @@ +from os import chmod +from charms.reactive import when, when_not, set_state 
+from charmhelpers.core.hookenv import status_set +from charmhelpers.core.unitdata import kv +from charmhelpers.core.host import mkdir, symlink, chownr, add_user_to_group +from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler +from charmhelpers.contrib.unison import ensure_user + +def create_openvim_user(): + status_set("maintenance", "Creating OpenVIM user") + ensure_user('openvim') + +def group_openvim_user(): + status_set("maintenance", "Adding OpenVIM user to groups") + add_user_to_group('openvim', 'libvirtd') + add_user_to_group('openvim', 'sudo') + +def nopasswd_openvim_sudo(): + status_set("maintenance", "Allowing nopasswd sudo for OpenVIM user") + with open('/etc/sudoers', 'r+') as f: + data = f.read() + if 'openvim ALL=(ALL) NOPASSWD:ALL' not in data: + f.seek(0) + f.truncate() + data += '\nopenvim ALL=(ALL) NOPASSWD:ALL\n' + f.write(data) + +def setup_qemu_binary(): + status_set("maintenance", "Setting up qemu-kvm binary") + mkdir('/usr/libexec', owner='root', group='root', perms=0o775, force=False) + symlink('/usr/bin/kvm', '/usr/libexec/qemu-kvm') + +def setup_images_folder(): + status_set("maintenance", "Setting up VM images folder") + mkdir('/opt/VNF', owner='openvim', group='openvim', perms=0o775, force=False) + symlink('/var/lib/libvirt/images', '/opt/VNF/images') + chownr('/opt/VNF', owner='openvim', group='openvim', follow_links=False, chowntopdir=True) + chownr('/var/lib/libvirt/images', owner='root', group='openvim', follow_links=False, chowntopdir=True) + chmod('/var/lib/libvirt/images', 0o775) + +def download_default_image(): + status_set("maintenance", "Downloading default image") + fetcher = ArchiveUrlFetchHandler() + fetcher.download( + source="https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img", + dest="/opt/VNF/images/ubuntu-16.04-server-cloudimg-amd64-disk1.img" + # TODO: add checksum + ) + +@when_not('openvim-compute.installed') +def prepare_openvim_compute(): + 
create_openvim_user() + group_openvim_user() + nopasswd_openvim_sudo() + setup_qemu_binary() + setup_images_folder() + download_default_image() + status_set("active", "Ready") + set_state('openvim-compute.installed') + +@when('compute.available', 'openvim-compute.installed') +def install_ssh_key(compute): + cache = kv() + if cache.get("ssh_key:" + compute.ssh_key()): + return + mkdir('/home/openvim/.ssh', owner='openvim', group='openvim', perms=0o775) + with open("/home/openvim/.ssh/authorized_keys", 'a') as f: + f.write(compute.ssh_key() + '\n') + compute.ssh_key_installed() + cache.set("ssh_key:" + compute.ssh_key(), True) + +@when('compute.connected') +def send_user(compute): + compute.send_user('openvim') diff --git a/charm/openvim/layer-openvim/README.md b/charm/openvim/layer-openvim/README.md new file mode 100644 index 0000000..2216934 --- /dev/null +++ b/charm/openvim/layer-openvim/README.md @@ -0,0 +1,58 @@ +# Overview + +Launches an OpenVIM controller. + +# Preparation + +When running with an LXD cloud, the openvim-compute nodes needs to have some +devices added and be run with extra privileges. A quick-and-dirty way of +accomplishing this is to edit the juju-default LXD profile: + + lxc profile edit juju-default + +change it to: + + name: juju-default + config: + boot.autostart: "true" + security.nesting: "true" + security.privileged: "true" + description: "" + devices: + kvm: + path: /dev/kvm + type: unix-char + tun: + path: /dev/net/tun + type: unix-char + +# Usage + + juju deploy mysql + juju deploy openvim + juju deploy openvim-compute + juju relate mysql openvim + juju relate openvim-compute openvim + +# Creating and starting a VM + +The openvim charm will create a default tenant, image, flavor, +and networks, but you'll want to add your own VM when you're ready to deploy. +This charm generates a basic VM yaml definition for you if you'd like to launch +one quickly. 
First, ssh into your openvim box: + + juju ssh openvim-contrller/0 # may not be zero, find instance id with `juju status`. + +Then create your VM and get its uuid: + + /home/ubuntu/openmano/openvim/openvim vm-create /tmp/server.yaml + +And finally start it: + + /home/ubuntu/openmano/openvim/openvim vm-start + + +# Contact Information + +Rye Terrell rye.terrell@canonical.com +George Kraft george.kraft@canonical.com diff --git a/charm/openvim/layer-openvim/icon.svg b/charm/openvim/layer-openvim/icon.svg new file mode 100644 index 0000000..3503615 --- /dev/null +++ b/charm/openvim/layer-openvim/icon.svg @@ -0,0 +1,281 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + diff --git a/charm/openvim/layer-openvim/layer.yaml b/charm/openvim/layer-openvim/layer.yaml new file mode 100644 index 0000000..d9078d9 --- /dev/null +++ b/charm/openvim/layer-openvim/layer.yaml @@ -0,0 +1,25 @@ +includes: + - 'layer:basic' + - 'interface:mysql' + - 'interface:openvim-compute' + - 'interface:openvim' +options: + basic: + packages: + - git + - screen + - wget + - mysql-client + - python3-git + - python-yaml + - python-libvirt + - python-bottle + - python-mysqldb + - python-jsonschema + - python-paramiko + - python-argcomplete + - python-requests + - python-novaclient + - python-keystoneclient + - python-glanceclient + - python-neutronclient diff --git a/charm/openvim/layer-openvim/metadata.yaml b/charm/openvim/layer-openvim/metadata.yaml new file mode 100644 index 0000000..4444735 --- /dev/null +++ b/charm/openvim/layer-openvim/metadata.yaml @@ -0,0 +1,24 @@ +name: openvim +summary: Open Virtual Infrastructure Manager +maintainers: + - Rye Terrell + - George Kraft +description: | + Installs and configures the OpenVIM controller from OpenMANO. 
+tags: + - nfv + - telco + - osm +series: + - xenial +extra-bindings: + public: + internal: +requires: + db: + interface: mysql + compute: + interface: openvim-compute +provides: + openvim-controller: + interface: openvim diff --git a/charm/openvim/layer-openvim/reactive/openvim.py b/charm/openvim/layer-openvim/reactive/openvim.py new file mode 100644 index 0000000..d31a9f2 --- /dev/null +++ b/charm/openvim/layer-openvim/reactive/openvim.py @@ -0,0 +1,273 @@ +import os +import json +import time +import subprocess +from git import Repo as gitrepo +from shutil import rmtree + +from charms.reactive import when, when_not, set_state +from charmhelpers.core.templating import render +from charmhelpers.core.hookenv import ( + status_set, + leader_set, + leader_get, + unit_public_ip, +) +from charmhelpers.core.unitdata import kv +from charmhelpers.core.host import ( + symlink, + mkdir, + chownr, + service_start, +) +from charmhelpers.contrib.unison import ( + create_private_key, + create_public_key, + ensure_user, +) + + +def sh(cmd): + return subprocess.check_output(cmd, shell=True) + + +def sh_as_openvim(cmd): + return sh('sudo -iu openvim ' + cmd) + + +def create_openvim_user(): + status_set("maintenance", "Creating OpenVIM user") + ensure_user('openvim') + + +def initialize_openvim_database(db): + status_set("maintenance", "Initializing OpenVIM database") + sh_as_openvim("/opt/openmano/openvim/database_utils/init_vim_db.sh -u %s -p %s -d %s -h %s" % ( + db.user(), + db.password(), + db.database(), + db.host() + )) + + +def generate_ssh_key(): + status_set("maintenance", "Generating ssh key") + user = "openvim" + folder = "/home/%s/.ssh" % user + mkdir(folder, owner=user, group=user, perms=0o775) + private_path = "%s/id_rsa" % folder + public_path = "%s.pub" % private_path + create_private_key(user, private_path) + create_public_key(user, private_path, public_path) + + +def add_openvim_to_path(): + status_set("maintenance", "Adding OpenVIM to path") + symlink( + 
'/opt/openmano/scripts/service-openmano.sh', + '/usr/bin/service-openmano') + symlink('/opt/openmano/openvim/openvim', '/usr/bin/openvim') + + +def download_openvim(): + status_set("maintenance", "Downloading OpenVIM") + if os.path.isdir("/opt/openmano"): + rmtree("/opt/openmano") + gitrepo.clone_from('https://github.com/tvansteenburgh/openmano.git', '/opt/openmano') + chownr('/opt/openmano', owner='openvim', group='openvim', follow_links=False, chowntopdir=True) + + +def configure_openvim(db): + status_set("maintenance", "Configuring OpenVIM") + render( + source="openvimd.cfg", + target="/opt/openmano/openvim/openvimd.cfg", + owner="openvim", + perms=0o664, + context={"db": db} + ) + + +# TODO: possibly combine all of these create functions? +def create_tenant(): + status_set("maintenance", "Creating tenant") + render(source="tenant.yaml", target="/tmp/tenant.yaml", owner="openvim", perms=0o664, context={}) + cmd = 'openvim tenant-create /tmp/tenant.yaml' + tenant_uuid = sh_as_openvim(cmd).split()[0] + tenant_uuid = str(tenant_uuid, 'utf-8') + leader_set({'tenant': tenant_uuid}) + return tenant_uuid + + +def create_image(): + status_set("maintenance", "Creating image") + render(source="image.yaml", target="/tmp/image.yaml", owner="openvim", perms=0o664, context={}) + cmd = 'openvim image-create /tmp/image.yaml' + image_uuid = sh_as_openvim(cmd).split()[0] + image_uuid = str(image_uuid, 'utf-8') + return image_uuid + + +def create_flavor(): + status_set("maintenance", "Creating flavor") + render(source="flavor.yaml", target="/tmp/flavor.yaml", owner="openvim", perms=0o664, context={}) + cmd = 'openvim flavor-create /tmp/flavor.yaml' + flavor_uuid = sh_as_openvim(cmd).split()[0] + flavor_uuid = str(flavor_uuid, 'utf-8') + return flavor_uuid + + +# TODO: especially combine these stupid network functions +def create_default_network(): + status_set("maintenance", "Creating default network") + render(source="net-default.yaml", target="/tmp/net-default.yaml", 
owner="openvim", perms=0o664, context={}) + cmd = 'openvim net-create /tmp/net-default.yaml' + net_default_uuid = sh_as_openvim(cmd).split()[0] + net_default_uuid = str(net_default_uuid, 'utf-8') + return net_default_uuid + + +def create_virbr_network(): + status_set("maintenance", "Creating virbr0 network") + render(source="net-virbr0.yaml", target="/tmp/net-virbr0.yaml", owner="openvim", perms=0o664, context={}) + cmd = 'openvim net-create /tmp/net-virbr0.yaml' + net_virbr0_uuid = sh_as_openvim(cmd).split()[0] + net_virbr0_uuid = str(net_virbr0_uuid, 'utf-8') + return net_virbr0_uuid + + +def create_vm_yaml(image_uuid, flavor_uuid, net_default_uuid, net_virbr0_uuid): + status_set("maintenance", "Creating default VM yaml file") + render( + source="server.yaml", + target="/tmp/server.yaml", + owner="openvim", + perms=0o664, + context={ + "image_uuid": image_uuid, + "flavor_uuid": flavor_uuid, + "net_default_uuid": net_default_uuid, + "net_virbr0_uuid": net_virbr0_uuid + } + ) + + +def create_sane_defaults(): + tenant_uuid = create_tenant() + add_openvim_tenant_env_var(tenant_uuid) + image_uuid = create_image() + flavor_uuid = create_flavor() + net_default_uuid = create_default_network() + net_virbr0_uuid = create_virbr_network() + create_vm_yaml( + image_uuid=image_uuid, + flavor_uuid=flavor_uuid, + net_default_uuid=net_default_uuid, + net_virbr0_uuid=net_virbr0_uuid + ) + + +def install_openvim_service(): + status_set("maintenance", "Installing OpenVIM service") + if not os.path.exists('/etc/systemd/system'): + os.makedirs('/etc/systemd/system') + render( + source="openvim.service", + target="/etc/systemd/system/openvim.service", + owner="root", + perms=0o644, + context={} + ) + + +def add_openvim_tenant_env_var(tenant_uuid): + status_set("maintenance", "Adding OPENVIM_TENANT environment variable") + env_line = 'export OPENVIM_TENANT=%s\n' % tenant_uuid + with open('/home/openvim/.profile', 'w+') as f: + lines = f.readlines() + for line in lines: + if env_line == 
line: + return + f.seek(0) + f.truncate() + for line in lines: + f.write(line) + f.write(env_line) + + +def openvim_running(): + try: + sh_as_openvim('openvim tenant-list') + return True + except: + return False + + +def start_openvim(): + status_set("maintenance", "Starting OpenVIM") + service_start('openvim') + t0 = time.time() + while not openvim_running(): + if time.time() - t0 > 60: + raise Exception('Failed to start openvim.') + time.sleep(0.25) + + +@when_not('db.available') +def not_ready(): + status_set('waiting', 'MySQL database required') + + +@when('db.available') +@when_not('openvim-controller.installed') +def install_openvim_controller(mysql): + create_openvim_user() + download_openvim() + add_openvim_to_path() + configure_openvim(mysql) + initialize_openvim_database(mysql) + generate_ssh_key() + install_openvim_service() + start_openvim() + create_sane_defaults() + status_set( + 'active', + 'Up on {host}:{port}'.format( + host=unit_public_ip(), + port='9080')) + set_state('openvim-controller.installed') + + +@when('compute.connected', 'openvim-controller.installed') +def send_ssh_key(compute): + with open('/home/openvim/.ssh/id_rsa.pub', 'r') as f: + key = f.read().strip() + compute.send_ssh_key(key) + + +@when('compute.available', 'openvim-controller.installed') +def host_add(compute): + cache = kv() + for node in compute.authorized_nodes(): + if cache.get("compute:" + node['address']): + continue + cmd = "ssh -n -o 'StrictHostKeyChecking no' %s@%s" + sh_as_openvim(cmd % (node['user'], node['address'])) + data = { + 'host': { + 'name': 'compute-0', + 'user': node['user'], + 'ip_name': node['address'], + 'description': 'compute-0' + } + } + with open('/tmp/compute-0.json', 'w') as f: + json.dump(data, f, indent=4, sort_keys=True) + # TODO: openvim run function! 
+ sh_as_openvim('openvim host-add /tmp/compute-0.json') + cache.set('compute:' + node['address'], True) + + +@when('openvim-controller.available') +def openvim_available(openvim): + openvim.configure(port=9080, user=leader_get('tenant')) diff --git a/charm/openvim/layer-openvim/templates/flavor.yaml b/charm/openvim/layer-openvim/templates/flavor.yaml new file mode 100644 index 0000000..3e99ead --- /dev/null +++ b/charm/openvim/layer-openvim/templates/flavor.yaml @@ -0,0 +1,5 @@ +flavor: + name: xenial + description: xenial + ram: 1024 + vcpus: 1 diff --git a/charm/openvim/layer-openvim/templates/image.yaml b/charm/openvim/layer-openvim/templates/image.yaml new file mode 100644 index 0000000..73b87b3 --- /dev/null +++ b/charm/openvim/layer-openvim/templates/image.yaml @@ -0,0 +1,4 @@ +image: + name: xenial + description: xenial + path: /opt/VNF/images/ubuntu-16.04-server-cloudimg-amd64-disk1.img diff --git a/charm/openvim/layer-openvim/templates/net-default.yaml b/charm/openvim/layer-openvim/templates/net-default.yaml new file mode 100644 index 0000000..6e93f85 --- /dev/null +++ b/charm/openvim/layer-openvim/templates/net-default.yaml @@ -0,0 +1,5 @@ +network: + name: default + type: bridge_man + provider:physical: default + shared: true diff --git a/charm/openvim/layer-openvim/templates/net-virbr0.yaml b/charm/openvim/layer-openvim/templates/net-virbr0.yaml new file mode 100644 index 0000000..dae78ba --- /dev/null +++ b/charm/openvim/layer-openvim/templates/net-virbr0.yaml @@ -0,0 +1,5 @@ +network: + name: shared_bridge_net + type: bridge_data + provider:physical: bridge:virbr0 + shared: true diff --git a/charm/openvim/layer-openvim/templates/openvim.service b/charm/openvim/layer-openvim/templates/openvim.service new file mode 100644 index 0000000..a667a62 --- /dev/null +++ b/charm/openvim/layer-openvim/templates/openvim.service @@ -0,0 +1,10 @@ +[Unit] +Description=openvim + +[Service] +User=openvim +ExecStart=/opt/openmano/openvim/openvimd.py -c 
/opt/openmano/openvim/openvimd.cfg +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/charm/openvim/layer-openvim/templates/openvimd.cfg b/charm/openvim/layer-openvim/templates/openvimd.cfg new file mode 100644 index 0000000..c4175c5 --- /dev/null +++ b/charm/openvim/layer-openvim/templates/openvimd.cfg @@ -0,0 +1,127 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + + + +#Miscellaneous +#Option to test openvim without the needed infrastructure, possible values are +# "normal" by default, Openflow controller (OFC), switch and real host are needed +# "test" Used for testing http API and database without connecting to host or to OFC +# "host only" Used when neither OFC nor OF switch are provided. +# Dataplane network connection must be done manually. +# "OF only" Used for testing of new openflow controllers support. No real VM deployments will be done but +# OFC will be used as in real mode +# "development" Forced a cloud-type deployment, nomal memory instead of hugepages is used, +# without cpu pinning, and using a bridge network instead of a real OFC dataplane networks. 
+# The same 'development_bridge' (see below) is used for all dataplane networks +mode: host only + +#Openflow controller information +of_controller: floodlight # Type of controller to be used. + # Valid controllers are 'opendaylight', 'floodlight' or +#of_controller_module: module # Only needed for . Python module that implement + # this controller. By default a file with the name .py is used +#of_: value # Other parameters required by controller. Consumed by __init__ +of_user: user credentials # User credentials for the controller if needed +of_password: passwd credentials # Password credentials for the controller if needed +of_controller_ip: 127.0.0.1 # IP address where the Openflow controller is listening +of_controller_port: 7070 # TCP port where the Openflow controller is listening (REST API server) +of_controller_dpid: '00:01:02:03:04:05:06:07' # Openflow Switch identifier (put here the right number) + +#This option is used for those openflow switch that cannot deliver one packet to several output with different vlan tags +#When set to true, it fails when trying to attach different vlan tagged ports to the same net +of_controller_nets_with_same_vlan: false # (by default, true) + +#Server parameters +http_host: 0.0.0.0 # IP address where openvim is listening (by default, localhost) +http_port: 9080 # General port where openvim is listening (by default, 9080) +http_admin_port: 9085 # Admin port where openvim is listening (when missing, no administration server is launched) + +#database parameters +db_host: {{ db.host() }} +db_user: {{ db.user() }} +db_passwd: {{ db.password() }} +db_name: {{ db.database() }} + +#host paremeters +image_path: "/opt/VNF/images" # Folder, same for every host, where the VNF images will be copied + +#testing parameters (used by ./test/test_openvim.py) +tenant_id: fc7b43b6-6bfa-11e4-84d2-5254006d6777 # Default tenant identifier for testing + +#VLAN ranges used for the dataplane networks (ptp, data) +#When a network is created an 
unused value in this range is used +network_vlan_range_start: 3000 +network_vlan_range_end: 4000 + +#host bridge interfaces for networks +# Openvim cannot create bridge networks automatically, in the same way as other CMS do. +# Bridge networks need to be pre-provisioned on each host and Openvim uses those pre-provisioned bridge networks. +# Openvim assumes that the following bridge interfaces have been created on each host, appropriately associated to a physical port. +# The following information needs to be provided: +# - Name of the bridge (identical in all hosts) +# - VLAN tag associated to each bridge interface +# - The speed of the physical port in Gbps, where that bridge interface was created +# For instance, next example assumes that 10 bridges have been created on each host +# using vlans 2001 to 2010, associated to a 1Gbps physical port +bridge_ifaces: + #name: [vlan, speed in Gbps] + virbrMan1: [2001, 1] + virbrMan2: [2002, 1] + virbrMan3: [2003, 1] + virbrMan4: [2004, 1] + virbrMan5: [2005, 1] + virbrMan6: [2006, 1] + virbrMan7: [2007, 1] + virbrMan8: [2008, 1] + virbrMan9: [2009, 1] + virbrMan10: [2010, 1] + +#Used only when 'mode' is at development'. Indicates which 'bridge_ifaces' is used for dataplane networks +development_bridge: virbrMan10 + +#DHCP SERVER PARAMETERS. +#In case some of the previous 'bridge_ifaces' are connected to an EXTERNAL dhcp server, provide +# the server parameters to allow openvim getting the allocated IP addresses of virtual machines +# connected to the indicated 'bridge_ifaces' and or 'nets'. Openvim will connect to the dhcp server by ssh. +#DHCP server must contain a shell script "./get_dhcp_lease.sh" that accept a mac address as parameter +# and return empty or the allocated IP address. See an example at the end of the file ./openvim/dhcp_thread.py +#COMMENT all lines in case you do not have a DHCP server in 'normal', 'development' or 'host only' modes. 
+# For 'test' or 'OF only' modes you can leave then uncommented, because in these modes fake IP +# address are generated instead of connecting with a real DHCP server. +dhcp_server: + host: host-ip-or-name + #port: 22 #ssh port, by default 22 + provider: isc-dhcp-server #dhcp-server type + user: user + #provide password, or key if needed + password: passwd + #key: ssh-access-key + #list of the previous bridge interfaces attached to this dhcp server + bridge_ifaces: [ virbrMan1, virbrMan2 ] + #list of the networks attached to this dhcp server + nets: [default] + + +#logging parameters # DEBUG, INFO, WARNING, ERROR, CRITICAL +log_level: ERROR +log_level_db: DEBUG +log_level_of: DEBUG diff --git a/charm/openvim/layer-openvim/templates/server.yaml b/charm/openvim/layer-openvim/templates/server.yaml new file mode 100644 index 0000000..b21e72d --- /dev/null +++ b/charm/openvim/layer-openvim/templates/server.yaml @@ -0,0 +1,10 @@ +server: + name: VM + description: VM + imageRef: {{ image_uuid }} + flavorRef: {{ flavor_uuid }} + networks: + - name: default + uuid: {{ net_default_uuid }} + - name: virbr0 + uuid: {{ net_virbr0_uuid }} diff --git a/charm/openvim/layer-openvim/templates/tenant.yaml b/charm/openvim/layer-openvim/templates/tenant.yaml new file mode 100644 index 0000000..7c3d59f --- /dev/null +++ b/charm/openvim/layer-openvim/templates/tenant.yaml @@ -0,0 +1,3 @@ +tenant: + name: openvim-tenant + description: openvim-tenant diff --git a/charm/openvim/layer-openvim/tests/10-smoke b/charm/openvim/layer-openvim/tests/10-smoke new file mode 100755 index 0000000..33d7b43 --- /dev/null +++ b/charm/openvim/layer-openvim/tests/10-smoke @@ -0,0 +1,44 @@ +#!/usr/bin/python3 + +import amulet +import openvim + +def deploy_openvim(): + d = amulet.Deployment() + d.add("mysql", series="trusty") + d.add("openvim-controller", series="xenial") + d.add("openvim-compute", charm="local:xenial/openvim-compute", series="xenial") + d.relate("openvim-controller:db", "mysql:db") + 
d.relate("openvim-controller:compute", "openvim-compute:compute") + d.setup(timeout=900) + d.sentry.wait() + return d + +def get_openvim_connection(deployment): + address = deployment.sentry["openvim-controller"][0].info["public-address"] + return openvim.connect(address) + +def create_vm(deployment): + c = get_openvim_connection(deployment) + tenant = c.get_tenants()[0] + c.set_active_tenant(tenant) + networks = c.get_networks() + image = c.get_images()[0] + flavor = c.get_flavors()[0] + + server = c.create_server( + name="vm", + description="test vm", + image=image, + flavor=flavor, + networks=networks + ) + + return server + +def test_vm_creation(): + d = deploy_openvim() + create_vm(d) + +if __name__ == "__main__": + test_vm_creation() diff --git a/charm/openvim/layer-openvim/tests/20-deployment b/charm/openvim/layer-openvim/tests/20-deployment new file mode 100755 index 0000000..a0f8b0d --- /dev/null +++ b/charm/openvim/layer-openvim/tests/20-deployment @@ -0,0 +1,47 @@ +#!/usr/bin/python3 + +import amulet +import openvim + +deployment = None + +def deploy_openvim_without_relations(): + global deployment + deployment = amulet.Deployment() + deployment.add("mysql", series="trusty") + deployment.add("openvim-controller", series="xenial") + deployment.add("openvim-compute", charm="local:xenial/openvim-compute", series="xenial") + deployment.expose("openvim-controller") + deployment.setup(timeout=900) + deployment.sentry.wait() + +def add_relations(): + deployment.relate("openvim-controller:db", "mysql:db") + deployment.relate("openvim-controller:compute", "openvim-compute:compute") + +def get_openvim_connection(): + address = deployment.sentry["openvim-controller"][0].info["public-address"] + return openvim.connect(address) + +def get_first_unit_status(service): + service_status = deployment.sentry.get_status()["openvim-controller"] + unit_status = next(iter(service_status.values())) + return unit_status + +def test_controller_blocks_without_mysql(): + 
unit_status = get_first_unit_status("openvim-controller") + workload_status = unit_status["workload-status"] + assert workload_status["current"] == "blocked" + assert workload_status["message"] == "mysql database required" + +def test_adding_compute_unit(): + deployment.add_unit("openvim-compute") + deployment.sentry.wait(timeout=900) + c = get_openvim_connection() + assert len(c.get_hosts()) == 2 + +if __name__ == "__main__": + deploy_openvim_without_relations() + test_controller_blocks_without_mysql() + add_relations() + test_adding_compute_unit() diff --git a/charm/openvim/layer-openvim/tests/openvim.py b/charm/openvim/layer-openvim/tests/openvim.py new file mode 100644 index 0000000..ce5a32c --- /dev/null +++ b/charm/openvim/layer-openvim/tests/openvim.py @@ -0,0 +1,55 @@ +import requests +import json + +class Connection(object): + def __init__(self, base_url): + self.base_url = base_url + + def set_active_tenant(self, tenant): + self.tenant_id = tenant["id"] + + def get_tenants(self): + return self._http_get("tenants")["tenants"] + + def get_hosts(self): + return self._http_get("hosts")["hosts"] + + def get_networks(self): + return self._http_get("networks")["networks"] + + def get_images(self): + return self._http_get(self.tenant_id + "/images")["images"] + + def get_flavors(self): + return self._http_get(self.tenant_id + "/flavors")["flavors"] + + def create_server(self, name, description, image, flavor, networks): + request_data = {"server": { + "name": name, + "description": description, + "imageRef": image["id"], + "flavorRef": flavor["id"], + "networks": [ + {"name": n["name"], "uuid": n["id"]} + for n in networks + ] + }} + + path = self.tenant_id + "/servers" + return self._http_post(path, request_data) + + def _http_get(self, path): + response = requests.get(self.base_url + path) + assert response.status_code == 200 + return response.json() + + def _http_post(self, path, request_data): + data = json.dumps(request_data) + headers = {"content-type": 
"application/json"} + response = requests.post(self.base_url + path, data=data, headers=headers) + assert response.status_code == 200 + return response.json() + +def connect(host, port=9080): + base_url = "http://%s:%s/openvim/" % (host, port) + return Connection(base_url) diff --git a/charm/openvim/layer-openvim/tests/tests.yaml b/charm/openvim/layer-openvim/tests/tests.yaml new file mode 100644 index 0000000..7b10e42 --- /dev/null +++ b/charm/openvim/layer-openvim/tests/tests.yaml @@ -0,0 +1,3 @@ +packages: + - amulet + - python3-requests diff --git a/database_utils/dump_db.sh b/database_utils/dump_db.sh new file mode 100755 index 0000000..67d6823 --- /dev/null +++ b/database_utils/dump_db.sh @@ -0,0 +1,147 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +LICENSE_HEAD='/** +* Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +* This file is part of openmano +* All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); you may +* not use this file except in compliance with the License. 
You may obtain +* a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +* License for the specific language governing permissions and limitations +* under the License. +* +* For those usages not covered by the Apache License, Version 2.0 please +* contact with: nfvlabs@tid.es +**/ +' + + +DBUSER="vim" +DBPASS="" +DBHOST="localhost" +DBPORT="3306" +DBNAME="vim_db" + +# Detect paths +MYSQL=$(which mysql) +AWK=$(which awk) +GREP=$(which grep) +DIRNAME=`dirname $0` + +function usage(){ + echo -e "Usage: $0 OPTIONS" + echo -e " Dumps openvim database content" + echo -e " OPTIONS" + echo -e " -u USER database user. '$DBUSER' by default. Prompts if DB access fails" + echo -e " -p PASS database password. 'No password' by default. Prompts if DB access fails" + echo -e " -P PORT database port. '$DBPORT' by default" + echo -e " -h HOST database host. '$DBHOST' by default" + echo -e " -d NAME database name. '$DBNAME' by default. Prompts if DB access fails" + echo -e " --help shows this help" +} + +while getopts ":u:p:P:h:-:" o; do + case "${o}" in + u) + DBUSER="$OPTARG" + ;; + p) + DBPASS="$OPTARG" + ;; + P) + DBPORT="$OPTARG" + ;; + d) + DBNAME="$OPTARG" + ;; + h) + DBHOST="$OPTARG" + ;; + -) + [ "${OPTARG}" == "help" ] && usage && exit 0 + echo "Invalid option: --$OPTARG" >&2 && usage >&2 + exit 1 + ;; + \?) + echo "Invalid option: -$OPTARG" >&2 && usage >&2 + exit 1 + ;; + :) + echo "Option -$OPTARG requires an argument." >&2 && usage >&2 + exit 1 + ;; + *) + usage >&2 + exit -1 + ;; + esac +done +shift $((OPTIND-1)) + +#check and ask for database user password +DBUSER_="-u$DBUSER" +DBPASS_="" +[ -n "$DBPASS" ] && DBPASS_="-p$DBPASS" +DBHOST_="-h$DBHOST" +DBPORT_="-P$DBPORT" +while ! 
echo ";" | mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ $DBNAME >/dev/null 2>&1 +do + [ -n "$logintry" ] && echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)" + [ -z "$logintry" ] && echo -e "\nProvide database name and credentials" + read -e -p "mysql database name($DBNAME): " KK + [ -n "$KK" ] && DBNAME="$KK" + read -e -p "mysql user($DBUSER): " KK + [ -n "$KK" ] && DBUSER="$KK" && DBUSER_="-u$DBUSER" + read -e -s -p "mysql password: " DBPASS + [ -n "$DBPASS" ] && DBPASS_="-p$DBPASS" + [ -z "$DBPASS" ] && DBPASS_="" + logintry="yes" + echo +done + + +#echo structure, including the content of schema_version +echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_structure.sql +mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-data --add-drop-table --add-drop-database --routines --databases $DBNAME >> ${DIRNAME}/${DBNAME}_structure.sql +echo -e "\n\n\n\n" >> ${DIRNAME}/${DBNAME}_structure.sql +mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-create-info $DBNAME --tables schema_version 2>/dev/null >> ${DIRNAME}/${DBNAME}_structure.sql +echo " ${DIRNAME}/${DBNAME}_structure.sql" + +#echo only data +echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_data.sql #copy my own header +mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --no-create-info $DBNAME >> ${DIRNAME}/${DBNAME}_data.sql +echo " ${DIRNAME}/${DBNAME}_data.sql" + +#echo all +echo "$LICENSE_HEAD" > ${DIRNAME}/${DBNAME}_all.sql #copy my own header +mysqldump $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ --add-drop-table --add-drop-database --routines --databases $DBNAME >> ${DIRNAME}/${DBNAME}_all.sql +echo " ${DIRNAME}/${DBNAME}_all.sql" + diff --git a/database_utils/host_ranking.sql b/database_utils/host_ranking.sql new file mode 100644 index 0000000..5e229bf --- /dev/null +++ b/database_utils/host_ranking.sql @@ -0,0 +1,43 @@ +/* +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## +*/ + +/* This table contains a list of processor ranking + The larger ranking the better performance + All physical host models must be included in this table + before being adding to openvim + processor information is obtained with commnand cat /proc/cpuinfo + NOTE: Current version of openvim ignores the ranking +*/ + + +LOCK TABLES `host_ranking` WRITE; +/*!40000 ALTER TABLE `host_ranking` DISABLE KEYS */; +INSERT INTO `host_ranking` + (family, manufacturer, version, description, ranking) +VALUES + ('Xeon','Intel','Intel(R) Xeon(R) CPU E5-2680 0 @ 2.70GHz','sandy bridge',170), + ('Xeon','Intel','Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz','sandy bridge',200), + ('Xeon','Intel','Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz','ivy bridge',300), + ('Xeon','Intel','Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz','ivy bridge',310); /*last entry ends with ';' */ + +UNLOCK TABLES; diff --git a/database_utils/init_vim_db.sh b/database_utils/init_vim_db.sh new file mode 100755 index 0000000..a2dcdea --- /dev/null +++ b/database_utils/init_vim_db.sh @@ -0,0 +1,124 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +DBUSER="vim" +DBPASS="" +DBHOST="localhost" +DBPORT="3306" +DBNAME="vim_db" + +# Detect paths +MYSQL=$(which mysql) +AWK=$(which awk) +GREP=$(which grep) +DIRNAME=`dirname $0` + +function usage(){ + echo -e "Usage: $0 OPTIONS [{openvim_version}]" + echo -e " Inits openvim database; deletes previous one and loads from ${DBNAME}_structure.sql" + echo -e " and data from host_ranking.sql, nets.sql, of_ports_pci_correspondece*.sql" + echo -e " If openvim_version is not provided it tries to get from openvimd.py using relative path" + echo -e " OPTIONS" + echo -e " -u USER database user. '$DBUSER' by default. Prompts if DB access fails" + echo -e " -p PASS database password. 'No password' by default. Prompts if DB access fails" + echo -e " -P PORT database port. '$DBPORT' by default" + echo -e " -h HOST database host. '$DBHOST' by default" + echo -e " -d NAME database name. '$DBNAME' by default. Prompts if DB access fails" + echo -e " --help shows this help" +} + +while getopts ":u:p:P:h:d:-:" o; do + case "${o}" in + u) + DBUSER="$OPTARG" + ;; + p) + DBPASS="$OPTARG" + ;; + P) + DBPORT="$OPTARG" + ;; + d) + DBNAME="$OPTARG" + ;; + h) + DBHOST="$OPTARG" + ;; + -) + [ "${OPTARG}" == "help" ] && usage && exit 0 + echo "Invalid option: --$OPTARG" >&2 && usage >&2 + exit 1 + ;; + \?) 
+ echo "Invalid option: -$OPTARG" >&2 && usage >&2 + exit 1 + ;; + :) + echo "Option -$OPTARG requires an argument." >&2 && usage >&2 + exit 1 + ;; + *) + usage >&2 + exit -1 + ;; + esac +done +shift $((OPTIND-1)) + +#check and ask for database user password +DBUSER_="-u$DBUSER" +DBPASS_="" +[ -n "$DBPASS" ] && DBPASS_="-p$DBPASS" +DBHOST_="-h$DBHOST" +DBPORT_="-P$DBPORT" +while ! echo ";" | mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ $DBNAME >/dev/null 2>&1 +do + [ -n "$logintry" ] && echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)" + [ -z "$logintry" ] && echo -e "\nProvide database name and credentials" + read -e -p "mysql database name($DBNAME): " KK + [ -n "$KK" ] && DBNAME="$KK" + read -e -p "mysql user($DBUSER): " KK + [ -n "$KK" ] && DBUSER="$KK" && DBUSER_="-u$DBUSER" + read -e -s -p "mysql password: " DBPASS + [ -n "$DBPASS" ] && DBPASS_="-p$DBPASS" + [ -z "$DBPASS" ] && DBPASS_="" + logintry="yes": + echo +done + +echo " loading ${DIRNAME}/vim_db_structure.sql" +sed -e "s/vim_db/$DBNAME/" ${DIRNAME}/vim_db_structure.sql | mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ + +echo " migrage database version" +${DIRNAME}/migrate_vim_db.sh $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ -d$DBNAME $1 + +echo " loading ${DIRNAME}/host_ranking.sql" +mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ $DBNAME < ${DIRNAME}/host_ranking.sql + +echo " loading ${DIRNAME}/of_ports_pci_correspondence.sql" +mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ $DBNAME < ${DIRNAME}/of_ports_pci_correspondence.sql +#mysql -h $HOST -P $PORT -u $MUSER -p$MPASS $MDB < ${DIRNAME}/of_ports_pci_correspondence_centos.sql + +echo " loading ${DIRNAME}/nets.sql" +mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ $DBNAME < ${DIRNAME}/nets.sql + diff --git a/database_utils/migrate_vim_db.sh b/database_utils/migrate_vim_db.sh new file mode 100755 index 0000000..a8fec3e --- /dev/null +++ b/database_utils/migrate_vim_db.sh @@ -0,0 +1,450 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y 
Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# +#Upgrade/Downgrade openvim database preserving the content +# + +DBUSER="vim" +DBPASS="" +DBHOST="localhost" +DBPORT="3306" +DBNAME="vim_db" + +# Detect paths +MYSQL=$(which mysql) +AWK=$(which awk) +GREP=$(which grep) +DIRNAME=`dirname $0` + +function usage(){ + echo -e "Usage: $0 OPTIONS [{openvim_version}]" + echo -e " Upgrades/Downgrades openvim database preserving the content" + echo -e " if openvim_version is not provided it tries to get from openvimd.py using relative path" + echo -e " OPTIONS" + echo -e " -u USER database user. '$DBUSER' by default. Prompts if DB access fails" + echo -e " -p PASS database password. 'No password' by default. Prompts if DB access fails" + echo -e " -P PORT database port. '$DBPORT' by default" + echo -e " -h HOST database host. '$DBHOST' by default" + echo -e " -d NAME database name. '$DBNAME' by default. Prompts if DB access fails" + echo -e " --help shows this help" +} + +while getopts ":u:p:P:h:d:-:" o; do + case "${o}" in + u) + DBUSER="$OPTARG" + ;; + p) + DBPASS="$OPTARG" + ;; + P) + DBPORT="$OPTARG" + ;; + d) + DBNAME="$OPTARG" + ;; + h) + DBHOST="$OPTARG" + ;; + -) + [ "${OPTARG}" == "help" ] && usage && exit 0 + echo "Invalid option: --$OPTARG" >&2 && usage >&2 + exit 1 + ;; + \?) 
+ echo "Invalid option: -$OPTARG" >&2 && usage >&2 + exit 1 + ;; + :) + echo "Option -$OPTARG requires an argument." >&2 && usage >&2 + exit 1 + ;; + *) + usage >&2 + exit -1 + ;; + esac +done +shift $((OPTIND-1)) + + +#GET OPENVIM VERSION +OPENVIM_VER="$1" +if [ -z "$OPENVIM_VER" ] +then + OPENVIM_VER=`${DIRNAME}/../openvimd.py -v` + OPENVIM_VER=${OPENVIM_VER%%-r*} + OPENVIM_VER=${OPENVIM_VER##*version } + echo " Detected openvim version $OPENVIM_VER" +fi +VERSION_1=`echo $OPENVIM_VER | cut -f 1 -d"."` +VERSION_2=`echo $OPENVIM_VER | cut -f 2 -d"."` +VERSION_3=`echo $OPENVIM_VER | cut -f 3 -d"."` +if ! [ "$VERSION_1" -ge 0 -a "$VERSION_2" -ge 0 -a "$VERSION_3" -ge 0 ] 2>/dev/null +then + [ -n "$1" ] && echo "Invalid openvim version '$1', expected 'X.X.X'" >&2 + [ -z "$1" ] && echo "Can not get openvim version" >&2 + exit -1 +fi +OPENVIM_VER_NUM=`printf "%d%03d%03d" ${VERSION_1} ${VERSION_2} ${VERSION_3}` + +#check and ask for database user password +DBUSER_="-u$DBUSER" +[ -n "$DBPASS" ] && DBPASS_="-p$DBPASS" +DBHOST_="-h$DBHOST" +DBPORT_="-P$DBPORT" +while ! echo ";" | mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ $DBNAME >/dev/null 2>&1 +do + [ -n "$logintry" ] && echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)" + [ -z "$logintry" ] && echo -e "\nProvide database name and credentials" + read -e -p "mysql database name($DBNAME): " KK + [ -n "$KK" ] && DBNAME="$KK" + read -e -p "mysql user($DBUSER): " KK + [ -n "$KK" ] && DBUSER="$KK" && DBUSER_="-u$DBUSER" + read -e -s -p "mysql password: " DBPASS + [ -n "$DBPASS" ] && DBPASS_="-p$DBPASS" + [ -z "$DBPASS" ] && DBPASS_="" + logintry="yes" + echo +done + +DBCMD="mysql $DBHOST_ $DBPORT_ $DBUSER_ $DBPASS_ $DBNAME" +#echo DBCMD $DBCMD + +#GET DATABASE VERSION +#check that the database seems a openvim database +if ! 
echo -e "show create table instances;\nshow create table numas" | $DBCMD >/dev/null 2>&1 +then + echo " database $DBNAME does not seem to be an openvim database" >&2 + exit -1; +fi + +if ! echo 'show create table schema_version;' | $DBCMD >/dev/null 2>&1 +then + DATABASE_VER="0.0" + DATABASE_VER_NUM=0 +else + DATABASE_VER_NUM=`echo "select max(version_int) from schema_version;" | $DBCMD | tail -n+2` + DATABASE_VER=`echo "select version from schema_version where version_int='$DATABASE_VER_NUM';" | $DBCMD | tail -n+2` + [ "$DATABASE_VER_NUM" -lt 0 -o "$DATABASE_VER_NUM" -gt 100 ] && echo " Error can not get database version ($DATABASE_VER?)" >&2 && exit -1 + #echo "_${DATABASE_VER_NUM}_${DATABASE_VER}" +fi + + +#GET DATABASE TARGET VERSION +DATABASE_TARGET_VER_NUM=0 +[ $OPENVIM_VER_NUM -gt 1091 ] && DATABASE_TARGET_VER_NUM=1 #>0.1.91 => 1 +[ $OPENVIM_VER_NUM -ge 2003 ] && DATABASE_TARGET_VER_NUM=2 #0.2.03 => 2 +[ $OPENVIM_VER_NUM -ge 2005 ] && DATABASE_TARGET_VER_NUM=3 #0.2.5 => 3 +[ $OPENVIM_VER_NUM -ge 3001 ] && DATABASE_TARGET_VER_NUM=4 #0.3.1 => 4 +[ $OPENVIM_VER_NUM -ge 4001 ] && DATABASE_TARGET_VER_NUM=5 #0.4.1 => 5 +[ $OPENVIM_VER_NUM -ge 4002 ] && DATABASE_TARGET_VER_NUM=6 #0.4.2 => 6 +[ $OPENVIM_VER_NUM -ge 4005 ] && DATABASE_TARGET_VER_NUM=7 #0.4.5 => 7 +#TODO ... put next versions here + + +function upgrade_to_1(){ + echo " upgrade database from version 0.0 to version 0.1" + echo " CREATE TABLE \`schema_version\`" + echo "CREATE TABLE \`schema_version\` ( + \`version_int\` INT NOT NULL COMMENT 'version as a number. Must not contain gaps', + \`version\` VARCHAR(20) NOT NULL COMMENT 'version as a text', + \`openvim_ver\` VARCHAR(20) NOT NULL COMMENT 'openvim version', + \`comments\` VARCHAR(2000) NULL COMMENT 'changes to database', + \`date\` DATE NULL, + PRIMARY KEY (\`version_int\`) + ) + COMMENT='database schema control version' + COLLATE='utf8_general_ci' + ENGINE=InnoDB;" | $DBCMD || ! echo "ERROR. Aborted!" 
|| exit -1 + echo "INSERT INTO \`schema_version\` (\`version_int\`, \`version\`, \`openvim_ver\`, \`comments\`, \`date\`) + VALUES (1, '0.1', '0.2.00', 'insert schema_version; alter nets with last_error column', '2015-05-05');" | $DBCMD + echo " ALTER TABLE \`nets\`, ADD COLUMN \`last_error\`" + echo "ALTER TABLE \`nets\` + ADD COLUMN \`last_error\` VARCHAR(200) NULL AFTER \`status\`;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 +} +function downgrade_from_1(){ + echo " downgrade database from version 0.1 to version 0.0" + echo " ALTER TABLE \`nets\` DROP COLUMN \`last_error\`" + echo "ALTER TABLE \`nets\` DROP COLUMN \`last_error\`;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo " DROP TABLE \`schema_version\`" + echo "DROP TABLE \`schema_version\`;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 +} +function upgrade_to_2(){ + echo " upgrade database from version 0.1 to version 0.2" + echo " ALTER TABLE \`of_ports_pci_correspondence\` \`resources_port\` \`ports\` ADD COLUMN \`switch_dpid\`" + for table in of_ports_pci_correspondence resources_port ports + do + echo "ALTER TABLE \`${table}\` + ADD COLUMN \`switch_dpid\` CHAR(23) NULL DEFAULT NULL AFTER \`switch_port\`; " | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "ALTER TABLE ${table} CHANGE COLUMN switch_port switch_port VARCHAR(24) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + [ $table == of_ports_pci_correspondence ] || + echo "ALTER TABLE ${table} DROP INDEX vlan_switch_port, ADD UNIQUE INDEX vlan_switch_port (vlan, switch_port, switch_dpid);" | $DBCMD || ! echo "ERROR. Aborted!" 
|| exit -1 + done + echo " UPDATE procedure UpdateSwitchPort" + echo "DROP PROCEDURE IF EXISTS UpdateSwitchPort; + delimiter // + CREATE PROCEDURE UpdateSwitchPort() MODIFIES SQL DATA SQL SECURITY INVOKER + COMMENT 'Load the openflow switch ports from of_ports_pci_correspondece into resoureces_port and ports' + BEGIN + #DELETES switch_port entry before writing, because if not it fails for key constrains + UPDATE ports + RIGHT JOIN resources_port as RP on ports.uuid=RP.port_id + INNER JOIN resources_port as RP2 on RP2.id=RP.root_id + INNER JOIN numas on RP.numa_id=numas.id + INNER JOIN hosts on numas.host_id=hosts.uuid + INNER JOIN of_ports_pci_correspondence as PC on hosts.ip_name=PC.ip_name and RP2.pci=PC.pci + SET ports.switch_port=null, ports.switch_dpid=null, RP.switch_port=null, RP.switch_dpid=null; + #write switch_port into resources_port and ports + UPDATE ports + RIGHT JOIN resources_port as RP on ports.uuid=RP.port_id + INNER JOIN resources_port as RP2 on RP2.id=RP.root_id + INNER JOIN numas on RP.numa_id=numas.id + INNER JOIN hosts on numas.host_id=hosts.uuid + INNER JOIN of_ports_pci_correspondence as PC on hosts.ip_name=PC.ip_name and RP2.pci=PC.pci + SET ports.switch_port=PC.switch_port, ports.switch_dpid=PC.switch_dpid, RP.switch_port=PC.switch_port, RP.switch_dpid=PC.switch_dpid; + END// + delimiter ;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "INSERT INTO \`schema_version\` (\`version_int\`, \`version\`, \`openvim_ver\`, \`comments\`, \`date\`) + VALUES (2, '0.2', '0.2.03', 'update Procedure UpdateSwitchPort', '2015-05-06');" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 +} +function upgrade_to_3(){ + echo " upgrade database from version 0.2 to version 0.3" + echo " change size of source_name at table resources_port" + echo "ALTER TABLE resources_port CHANGE COLUMN source_name source_name VARCHAR(24) NULL DEFAULT NULL AFTER port_id;"| $DBCMD || ! echo "ERROR. Aborted!" 
|| exit -1
+    echo "      CREATE PROCEDURE GetAllAvailablePorts"
+    echo "delimiter //
+    CREATE PROCEDURE GetAllAvailablePorts(IN Numa INT) CONTAINS SQL SQL SECURITY INVOKER
+        COMMENT 'Obtain all -including those not connected to switch port- ports available for a numa'
+    BEGIN
+        SELECT port_id, pci, Mbps, Mbps - Mbps_consumed as Mbps_free, totalSRIOV - coalesce(usedSRIOV,0) as availableSRIOV, switch_port, mac
+        FROM
+        (
+           SELECT id as port_id, Mbps, pci, switch_port, mac
+           FROM resources_port
+           WHERE numa_id = Numa AND id=root_id AND status = 'ok' AND instance_id IS NULL
+        ) as A
+        INNER JOIN
+        (
+           SELECT root_id, sum(Mbps_used) as Mbps_consumed, COUNT(id)-1 as totalSRIOV
+           FROM resources_port
+           WHERE numa_id = Numa AND status = 'ok'
+           GROUP BY root_id
+        ) as B
+        ON A.port_id = B.root_id
+        LEFT JOIN
+        (
+           SELECT root_id, COUNT(id) as usedSRIOV
+           FROM resources_port
+           WHERE numa_id = Numa AND status = 'ok' AND instance_id IS NOT NULL
+           GROUP BY root_id
+        ) as C
+        ON A.port_id = C.root_id
+        ORDER BY Mbps_free, availableSRIOV, pci;
+    END//
+    delimiter ;"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    # NOTE(fix): the line above had '|| ! ! echo ...' — the double '!' left the
+    # pipeline status true after printing the error, so 'exit -1' never ran and
+    # a failed procedure creation was silently ignored. Every other statement
+    # in this script uses a single '!'.
+    echo "INSERT INTO schema_version (version_int, version, openvim_ver, comments, date) VALUES (3, '0.3', '0.2.5', 'New Procedure GetAllAvailablePorts', '2015-07-09');"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+
+function upgrade_to_4(){
+    echo "    upgrade database from version 0.3 to version 0.4"
+    echo "     remove unique VLAN index at 'resources_port', 'ports'"
+    echo "ALTER TABLE resources_port DROP INDEX vlan_switch_port;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE ports DROP INDEX vlan_switch_port;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "     change table 'ports'"
+    echo "ALTER TABLE ports CHANGE COLUMN model model VARCHAR(12) NULL DEFAULT NULL COMMENT 'driver model for bridge ifaces; PF,VF,VFnotShared for data ifaces' AFTER mac;"| $DBCMD || ! echo "ERROR. Aborted!"
|| exit -1 + echo "ALTER TABLE ports DROP COLUMN vlan_changed;"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "ALTER TABLE resources_port DROP COLUMN vlan;"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "INSERT INTO schema_version (version_int, version, openvim_ver, comments, date) VALUES (4, '0.4', '0.3.1', 'Remove unique index VLAN at resources_port', '2015-09-04');"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1 +} + +function upgrade_to_X(){ + #TODO, this change of foreign key does not work + echo " upgrade database from version 0.X to version 0.X" + echo "ALTER TABLE instances DROP FOREIGN KEY FK_instances_flavors, DROP INDEX FK_instances_flavors, + DROP FOREIGN KEY FK_instances_images, DROP INDEX FK_instances_flavors,;"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "ALTER TABLE instances + ADD CONSTRAINT FK_instances_flavors FOREIGN KEY (flavor_id, tenant_id) REFERENCES tenants_flavors (flavor_id, tenant_id), + ADD CONSTRAINT FK_instances_images FOREIGN KEY (image_id, tenant_id) REFERENCES tenants_images (image_id, tenant_id);" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 +} + +function downgrade_from_2(){ + echo " downgrade database from version 0.2 to version 0.1" + echo " UPDATE procedure UpdateSwitchPort" + echo "DROP PROCEDURE IF EXISTS UpdateSwitchPort; + delimiter // + CREATE PROCEDURE UpdateSwitchPort() MODIFIES SQL DATA SQL SECURITY INVOKER + BEGIN + UPDATE + resources_port INNER JOIN ( + SELECT resources_port.id,KK.switch_port + FROM resources_port INNER JOIN numas on resources_port.numa_id=numas.id + INNER JOIN hosts on numas.host_id=hosts.uuid + INNER JOIN of_ports_pci_correspondence as KK on hosts.ip_name=KK.ip_name and resources_port.pci=KK.pci + ) as TABLA + ON resources_port.root_id=TABLA.id + SET resources_port.switch_port=TABLA.switch_port + WHERE resources_port.root_id=TABLA.id; + END// + delimiter ;" | $DBCMD || ! echo "ERROR. Aborted!" 
|| exit -1 + echo " ALTER TABLE \`of_ports_pci_correspondence\` \`resources_port\` \`ports\` DROP COLUMN \`switch_dpid\`" + for table in of_ports_pci_correspondence resources_port ports + do + [ $table == of_ports_pci_correspondence ] || + echo "ALTER TABLE ${table} DROP INDEX vlan_switch_port, ADD UNIQUE INDEX vlan_switch_port (vlan, switch_port);" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "ALTER TABLE \`${table}\` DROP COLUMN \`switch_dpid\`;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + switch_port_size=12 + [ $table == of_ports_pci_correspondence ] && switch_port_size=50 + echo "ALTER TABLE ${table} CHANGE COLUMN switch_port switch_port VARCHAR(${switch_port_size}) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + done + echo "DELETE FROM \`schema_version\` WHERE \`version_int\` = '2';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 +} +function downgrade_from_3(){ + echo " downgrade database from version 0.3 to version 0.2" + echo " change back size of source_name at table resources_port" + echo "ALTER TABLE resources_port CHANGE COLUMN source_name source_name VARCHAR(20) NULL DEFAULT NULL AFTER port_id;"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo " DROP PROCEDURE GetAllAvailablePorts" + echo "DROP PROCEDURE GetAllAvailablePorts;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "DELETE FROM schema_version WHERE version_int = '3';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 +} +function downgrade_from_4(){ + echo " downgrade database from version 0.4 to version 0.3" + echo " adding back unique index VLAN at 'resources_port','ports'" + echo "ALTER TABLE resources_port ADD COLUMN vlan SMALLINT(5) UNSIGNED NULL DEFAULT NULL AFTER Mbps_used;"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "UPDATE resources_port SET vlan= 99+id-root_id WHERE id != root_id;"| $DBCMD || ! echo "ERROR. Aborted!" 
|| exit -1 + echo "ALTER TABLE resources_port ADD UNIQUE INDEX vlan_switch_port (vlan, switch_port, switch_dpid);" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "ALTER TABLE ports ADD UNIQUE INDEX vlan_switch_port (vlan, switch_port, switch_dpid);" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo " change back table 'ports'" + echo "ALTER TABLE ports CHANGE COLUMN model model VARCHAR(12) NULL DEFAULT NULL AFTER mac;"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "ALTER TABLE ports ADD COLUMN vlan_changed SMALLINT(5) NULL DEFAULT NULL COMMENT '!=NULL when original vlan have been changed to match a pmp net with all ports in the same vlan' AFTER switch_port;"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "DELETE FROM schema_version WHERE version_int = '4';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 +} + + +function upgrade_to_5(){ + echo " upgrade database from version 0.4 to version 0.5" + echo " add 'ip_address' to ports'" + echo "ALTER TABLE ports ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "INSERT INTO schema_version (version_int, version, openvim_ver, comments, date) VALUES (5, '0.5', '0.4.1', 'Add ip_address to ports', '2015-09-04');"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1 +} +function downgrade_from_5(){ + echo " downgrade database from version 0.5 to version 0.4" + echo " removing 'ip_address' from 'ports'" + echo "ALTER TABLE ports DROP COLUMN ip_address;"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "DELETE FROM schema_version WHERE version_int = '5';" | $DBCMD || ! echo "ERROR. Aborted!" 
|| exit -1 +} + +function upgrade_to_6(){ + echo " upgrade database from version 0.5 to version 0.6" + echo " Change enalarge name, description to 255 at all database" + for table in flavors images instances tenants + do + name_length=255 + [[ $table == tenants ]] || name_length=64 + echo -en " $table \r" + echo "ALTER TABLE $table CHANGE COLUMN name name VARCHAR($name_length) NOT NULL, CHANGE COLUMN description description VARCHAR(255) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + done + echo -en " hosts \r" + echo "ALTER TABLE hosts CHANGE COLUMN name name VARCHAR(255) NOT NULL, CHANGE COLUMN ip_name ip_name VARCHAR(64) NOT NULL, CHANGE COLUMN user user VARCHAR(64) NOT NULL, CHANGE COLUMN password password VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN description description VARCHAR(255) NULL DEFAULT NULL, CHANGE COLUMN features features VARCHAR(255) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " nets \r" + echo "ALTER TABLE nets CHANGE COLUMN name name VARCHAR(255) NOT NULL, CHANGE COLUMN last_error last_error VARCHAR(255) NULL DEFAULT NULL, CHANGE COLUMN bind bind VARCHAR(36) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " instances \r" + echo "ALTER TABLE instances CHANGE COLUMN last_error last_error VARCHAR(255) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " ports \r" + echo "ALTER TABLE ports CHANGE COLUMN name name VARCHAR(64) NOT NULL, CHANGE COLUMN switch_port switch_port VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN switch_dpid switch_dpid VARCHAR(64) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " of_flows \r" + echo "ALTER TABLE of_flows CHANGE COLUMN name name VARCHAR(64) NOT NULL, CHANGE COLUMN net_id net_id VARCHAR(36) NULL DEFAULT NULL, CHANGE COLUMN actions actions VARCHAR(255) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " of_ports_pci_cor... 
\r" + echo "ALTER TABLE of_ports_pci_correspondence CHANGE COLUMN ip_name ip_name VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN switch_port switch_port VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN switch_dpid switch_dpid VARCHAR(64) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " resources_port \r" + echo "ALTER TABLE resources_port CHANGE COLUMN source_name source_name VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN switch_port switch_port VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN switch_dpid switch_dpid VARCHAR(64) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "INSERT INTO schema_version (version_int, version, openvim_ver, comments, date) VALUES (6, '0.6', '0.4.2', 'Enlarging name at database', '2016-02-01');" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 +} +function downgrade_from_6(){ + echo " downgrade database from version 0.6 to version 0.5" + echo " Change back name,description to shorter length at all database" + for table in flavors images instances tenants + do + name_length=50 + [[ $table == flavors ]] || [[ $table == images ]] || name_length=36 + echo -en " $table \r" + echo "ALTER TABLE $table CHANGE COLUMN name name VARCHAR($name_length) NOT NULL, CHANGE COLUMN description description VARCHAR(100) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + done + echo -en " hosts \r" + echo "ALTER TABLE hosts CHANGE COLUMN name name VARCHAR(36) NOT NULL, CHANGE COLUMN ip_name ip_name VARCHAR(36) NOT NULL, CHANGE COLUMN user user VARCHAR(36) NOT NULL, CHANGE COLUMN password password VARCHAR(36) NULL DEFAULT NULL, CHANGE COLUMN description description VARCHAR(100) NULL DEFAULT NULL, CHANGE COLUMN features features VARCHAR(50) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" 
|| exit -1 + echo -en " nets \r" + echo "ALTER TABLE nets CHANGE COLUMN name name VARCHAR(50) NOT NULL, CHANGE COLUMN last_error last_error VARCHAR(200) NULL DEFAULT NULL, CHANGE COLUMN bind bind VARCHAR(36) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " instances \r" + echo "ALTER TABLE instances CHANGE COLUMN last_error last_error VARCHAR(200) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " ports \r" + echo "ALTER TABLE ports CHANGE COLUMN name name VARCHAR(25) NULL DEFAULT NULL, CHANGE COLUMN switch_port switch_port VARCHAR(24) NULL DEFAULT NULL, CHANGE COLUMN switch_dpid switch_dpid VARCHAR(23) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " of_flows \r" + echo "ALTER TABLE of_flows CHANGE COLUMN name name VARCHAR(50) NULL DEFAULT NULL, CHANGE COLUMN net_id net_id VARCHAR(50) NULL DEFAULT NULL, CHANGE COLUMN actions actions VARCHAR(100) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " of_ports_pci_cor... \r" + echo "ALTER TABLE of_ports_pci_correspondence CHANGE COLUMN ip_name ip_name VARCHAR(50) NULL DEFAULT NULL, CHANGE COLUMN switch_port switch_port VARCHAR(24) NULL DEFAULT NULL, CHANGE COLUMN switch_dpid switch_dpid VARCHAR(23) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo -en " resources_port \r" + echo "ALTER TABLE resources_port CHANGE COLUMN source_name source_name VARCHAR(24) NULL DEFAULT NULL, CHANGE COLUMN switch_port switch_port VARCHAR(24) NULL DEFAULT NULL, CHANGE COLUMN switch_dpid switch_dpid VARCHAR(23) NULL DEFAULT NULL;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1 + echo "DELETE FROM schema_version WHERE version_int='6';" | $DBCMD || ! echo "ERROR. Aborted!" 
|| exit -1
+}
+function upgrade_to_7(){
+    echo "    upgrade database from version 0.6 to version 0.7"
+    echo "     add 'bind_net','bind_type','cidr','enable_dhcp' to 'nets'"
+    echo "ALTER TABLE nets ADD COLUMN cidr VARCHAR(64) NULL DEFAULT NULL AFTER bind, ADD COLUMN enable_dhcp ENUM('true','false') NOT NULL DEFAULT 'false' after cidr, ADD COLUMN dhcp_first_ip VARCHAR(64) NULL DEFAULT NULL AFTER enable_dhcp, ADD COLUMN dhcp_last_ip VARCHAR(64) NULL DEFAULT NULL AFTER dhcp_first_ip;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE nets CHANGE COLUMN bind provider VARCHAR(36) NULL DEFAULT NULL AFTER vlan;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE nets ADD COLUMN bind_net VARCHAR(36) NULL DEFAULT NULL COMMENT 'To connect with other net' AFTER provider, ADD COLUMN bind_type VARCHAR(36) NULL DEFAULT NULL COMMENT 'VLAN: to insert/remove' after bind_net;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "INSERT INTO schema_version (version_int, version, openvim_ver, comments, date) VALUES (7, '0.7', '0.4.4', 'Add bind_net to net table', '2016-02-12');"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+function downgrade_from_7(){
+    echo "    downgrade database from version 0.7 to version 0.6"
+    echo "     removing 'bind_net','bind_type','cidr','enable_dhcp' from 'nets'"
+    # NOTE(fix): MySQL 'CHANGE COLUMN old new <definition>' requires the full
+    # column definition; the original omitted the type ('... provider bind NULL
+    # DEFAULT NULL'), which is a syntax error, so this downgrade always failed.
+    # VARCHAR(36) matches the definition used by upgrade_to_7 above.
+    echo "ALTER TABLE nets CHANGE COLUMN provider bind VARCHAR(36) NULL DEFAULT NULL AFTER vlan;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE nets DROP COLUMN cidr, DROP COLUMN enable_dhcp, DROP COLUMN bind_net, DROP COLUMN bind_type, DROP COLUMN dhcp_first_ip, DROP COLUMN dhcp_last_ip;"| $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "DELETE FROM schema_version WHERE version_int = '7';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+#TODO ...
put funtions here + + +[ $DATABASE_TARGET_VER_NUM -eq $DATABASE_VER_NUM ] && echo " current database version $DATABASE_VER is ok" +#UPGRADE DATABASE step by step +while [ $DATABASE_TARGET_VER_NUM -gt $DATABASE_VER_NUM ] +do + DATABASE_VER_NUM=$((DATABASE_VER_NUM+1)) + upgrade_to_${DATABASE_VER_NUM} + #FILE_="${DIRNAME}/upgrade_to_${DATABASE_VER_NUM}.sh" + #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to upgrade" >&2 && exit -1 + #$FILE_ || exit -1 # if fail return +done + +#DOWNGRADE DATABASE step by step +while [ $DATABASE_TARGET_VER_NUM -lt $DATABASE_VER_NUM ] +do + #FILE_="${DIRNAME}/downgrade_from_${DATABASE_VER_NUM}.sh" + #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to downgrade" >&2 && exit -1 + #$FILE_ || exit -1 # if fail return + downgrade_from_${DATABASE_VER_NUM} + DATABASE_VER_NUM=$((DATABASE_VER_NUM-1)) +done + +#echo done + diff --git a/database_utils/nets.sql b/database_utils/nets.sql new file mode 100644 index 0000000..654d0d1 --- /dev/null +++ b/database_utils/nets.sql @@ -0,0 +1,74 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +/* + This table contains a list of networks created from the begining + The following fields are needed + uuid: provide a valid uuid format + type: ptp, data (point to point, or point to multipoint) are openflow dadaplane nets + bridge_man, bridge_data are virtio/bridge controlplane nets + name: useful human readable name + shared: by default true + vlan: default vlan of the dataplane net + bind: for control plane: + default: default network + macvtap:host_iface. Connect to a direct macvtap host interface + bridge:bridge_name. Connect to this host bridge_name interface + for dataplane: NULL, because the binding is done with a external port +*/ + + +LOCK TABLES `nets` WRITE; +/* +INSERT INTO `nets` + (uuid, `type`, name, shared, vlan, bind) +VALUES + ('00000000-0000-0000-0000-000000000000','bridge_man', 'default', 'true', NULL, 'default'), + ('11111111-1111-1111-1111-111111111111','bridge_man', 'direct:em1','true', NULL, 'macvtap:em1'), + ('aaaaaaaa-1111-aaaa-aaaa-aaaaaaaaaaaa','data', 'coreIPv4', 'true', 702, NULL), + ('aaaaaaaa-aaaa-0000-1111-aaaaaaaaaaaa','bridge_data','virbrMan2', 'true', 2002, 'bridge:virbrMan2') # last row without ',' +; +*/ + +UNLOCK TABLES; + +/* External PORTS are necessary to connect a dataplane network to an external switch port + The following fields are needed + uuid: provide a valid uuid format + name: useful human readable name + net_id: uuid of the net where this port must be connected + Mbps: only informative, indicates the expected bandwidth in megabits/s + type: only external has meaning here + vlan: if the traffic at that port must be vlan tagged + switch_port: port name at switch: +*/ + +LOCK TABLES `ports` WRITE; +/* +INSERT INTO `ports` + (uuid, name, net_id, Mbps, type, vlan, switch_port) +VALUES + ('6d536a80-52e9-11e4-9e31-5254006d6777','CoreIPv4', 'aaaaaaaa-1111-aaaa-aaaa-aaaaaaaaaaaa',10000,'external',702, 
'Te0/47') # last row without ',' +; +*/ + +UNLOCK TABLES; + diff --git a/database_utils/of_ports_pci_correspondence.sql b/database_utils/of_ports_pci_correspondence.sql new file mode 100644 index 0000000..b9aa846 --- /dev/null +++ b/database_utils/of_ports_pci_correspondence.sql @@ -0,0 +1,76 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +/* + READ THIS please: + This table contains the matching between dataplane host ports + and openflow switch ports. 
+ The two first column identifies the host and the pci bus + command ethtool -i provides the pci bus port (at host) + command ethtool -p makes this port blinking (at host) + Last column identifies the switch port name + openvim prints at starting the openflow ports naming + NOTE: if a host has already been inserted, you must execute + UpdateSwitchPort database procedure to associate ports with + the switch connection +*/ + +LOCK TABLES `of_ports_pci_correspondence` WRITE; + +/* DATA for fakehost examples*/ +INSERT INTO `of_ports_pci_correspondence` + (ip_name, pci, switch_port) +VALUES + ('fake-host-0', '0000:06:00.0', 'port0/0'), + ('fake-host-0', '0000:06:00.1', 'port0/1'), + ('fake-host-0', '0000:08:00.0', 'port0/2'), + ('fake-host-0', '0000:08:00.1', 'port0/3'), + + ('fake-host-1', '0000:44:00.0', 'port0/4'), + ('fake-host-1', '0000:44:00.1', 'port0/5'), + ('fake-host-1', '0000:43:00.0', 'port0/6'), + ('fake-host-1', '0000:43:00.1', 'port0/7'), + ('fake-host-1', '0000:04:00.0', 'port0/8'), + ('fake-host-1', '0000:04:00.1', 'port0/9'), + ('fake-host-1', '0000:06:00.0', 'port0/10'), + ('fake-host-1', '0000:06:00.1', 'port0/11'), + + ('fake-host-2', '0000:44:00.0', 'port0/12'), + ('fake-host-2', '0000:44:00.1', 'port0/13'), + ('fake-host-2', '0000:43:00.0', 'port0/14'), + ('fake-host-2', '0000:43:00.1', 'port0/15'), + ('fake-host-2', '0000:04:00.0', 'port0/16'), + ('fake-host-2', '0000:04:00.1', 'port0/17'), + ('fake-host-2', '0000:06:00.0', 'port0/18'), + ('fake-host-2', '0000:06:00.1', 'port0/19'), + + ('fake-host-3', '0000:44:00.0', 'port1/0'), + ('fake-host-3', '0000:44:00.1', 'port1/1'), + ('fake-host-3', '0000:43:00.0', 'port1/2'), + ('fake-host-3', '0000:43:00.1', 'port1/3'), + ('fake-host-3', '0000:04:00.0', 'port1/4'), + ('fake-host-3', '0000:04:00.1', 'port1/5'), + ('fake-host-3', '0000:06:00.0', 'port1/6'), + ('fake-host-3', '0000:06:00.1', 'port1/7') +; + + +UNLOCK TABLES; diff --git a/database_utils/vim_db_structure.sql 
b/database_utils/vim_db_structure.sql new file mode 100644 index 0000000..1499171 --- /dev/null +++ b/database_utils/vim_db_structure.sql @@ -0,0 +1,962 @@ +/** +* Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +* This file is part of openmano +* All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); you may +* not use this file except in compliance with the License. You may obtain +* a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +* License for the specific language governing permissions and limitations +* under the License. +* +* For those usages not covered by the Apache License, Version 2.0 please +* contact with: nfvlabs@tid.es +**/ + +-- MySQL dump 10.13 Distrib 5.5.43, for debian-linux-gnu (x86_64) +-- +-- Host: localhost Database: vim_db +-- ------------------------------------------------------ +-- Server version 5.5.43-0ubuntu0.14.04.1 + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +-- +-- Current Database: `vim_db` +-- + +/*!40000 DROP DATABASE IF EXISTS `vim_db`*/; + +CREATE DATABASE /*!32312 IF NOT EXISTS*/ `vim_db` /*!40100 DEFAULT CHARACTER SET utf8 */; + +USE `vim_db`; + +-- +-- Table structure for 
table `flavors` +-- + +DROP TABLE IF EXISTS `flavors`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `flavors` ( + `uuid` varchar(36) NOT NULL, + `name` varchar(64) NOT NULL, + `description` varchar(255) DEFAULT NULL, + `disk` smallint(5) unsigned DEFAULT NULL, + `ram` smallint(5) unsigned DEFAULT NULL, + `vcpus` smallint(5) unsigned DEFAULT NULL, + `extended` varchar(2000) DEFAULT NULL COMMENT 'Extra description yaml format of needed resources and pining, orginized in sets per numa', + `public` enum('yes','no') NOT NULL DEFAULT 'no', + `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='flavors with extra vnfcd info'; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `host_ranking` +-- + +DROP TABLE IF EXISTS `host_ranking`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `host_ranking` ( + `id` int(10) NOT NULL AUTO_INCREMENT, + `family` varchar(50) NOT NULL, + `manufacturer` varchar(50) NOT NULL, + `version` varchar(50) NOT NULL, + `description` varchar(50) DEFAULT NULL, + `ranking` smallint(4) unsigned NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `family_manufacturer_version` (`family`,`manufacturer`,`version`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `hosts` +-- + +DROP TABLE IF EXISTS `hosts`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `hosts` ( + `uuid` varchar(36) NOT NULL, + `name` varchar(255) NOT NULL, + `ip_name` varchar(64) NOT NULL, + `description` varchar(255) DEFAULT NULL, + `status` enum('ok','error','notused') NOT NULL DEFAULT 'ok', + `ranking` smallint(6) NOT NULL DEFAULT '0', + `created_at` timestamp NOT NULL DEFAULT 
CURRENT_TIMESTAMP, + `features` varchar(255) DEFAULT NULL, + `user` varchar(64) NOT NULL, + `password` varchar(64) DEFAULT NULL, + `admin_state_up` enum('true','false') NOT NULL DEFAULT 'true', + `RAM` mediumint(8) unsigned NOT NULL DEFAULT '0' COMMENT 'Host memory in MB not used as hugepages', + `cpus` smallint(5) unsigned NOT NULL DEFAULT '0' COMMENT 'Host threads(or cores) not isolated from OS', + PRIMARY KEY (`uuid`), + UNIQUE KEY `ip_name` (`ip_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='hosts information'; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `images` +-- + +DROP TABLE IF EXISTS `images`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `images` ( + `uuid` varchar(36) NOT NULL, + `path` varchar(100) NOT NULL, + `name` varchar(64) NOT NULL, + `description` varchar(255) DEFAULT NULL, + `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + `modified_at` timestamp NULL DEFAULT NULL, + `public` enum('yes','no') NOT NULL DEFAULT 'no', + `progress` tinyint(3) unsigned NOT NULL DEFAULT '100', + `status` enum('ACTIVE','DOWN','BUILD','ERROR') NOT NULL DEFAULT 'ACTIVE', + `metadata` varchar(2000) DEFAULT NULL COMMENT 'Metatdata in json text format', + PRIMARY KEY (`uuid`), + UNIQUE KEY `path` (`path`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `instance_devices` +-- + +DROP TABLE IF EXISTS `instance_devices`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `instance_devices` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `type` enum('usb','disk','cdrom','xml') NOT NULL, + `xml` varchar(1000) DEFAULT NULL COMMENT 'libvirt XML format for aditional device', + `instance_id` varchar(36) NOT NULL, + `image_id` varchar(36) DEFAULT NULL COMMENT 'Used in case type is disk', + 
`vpci` char(12) DEFAULT NULL COMMENT 'format XXXX:XX:XX.X', + `dev` varchar(12) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `FK_instance_devices_instances` (`instance_id`), + KEY `FK_instance_devices_images` (`image_id`), + CONSTRAINT `FK_instance_devices_images` FOREIGN KEY (`image_id`) REFERENCES `tenants_images` (`image_id`), + CONSTRAINT `FK_instance_devices_instances` FOREIGN KEY (`instance_id`) REFERENCES `instances` (`uuid`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `instances` +-- + +DROP TABLE IF EXISTS `instances`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `instances` ( + `uuid` varchar(36) NOT NULL, + `flavor_id` varchar(36) NOT NULL, + `image_id` varchar(36) NOT NULL, + `name` varchar(64) NOT NULL, + `description` varchar(255) DEFAULT NULL, + `last_error` varchar(255) DEFAULT NULL, + `progress` tinyint(3) unsigned NOT NULL DEFAULT '0', + `tenant_id` varchar(36) NOT NULL, + `status` enum('ACTIVE','PAUSED','INACTIVE','CREATING','ERROR','DELETING') NOT NULL DEFAULT 'ACTIVE', + `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + `modified_at` timestamp NULL DEFAULT NULL, + `host_id` varchar(36) NOT NULL COMMENT 'HOST where is allocated', + `ram` mediumint(8) unsigned NOT NULL DEFAULT '0' COMMENT 'used non-hugepages memory in MB', + `vcpus` smallint(5) unsigned NOT NULL DEFAULT '0' COMMENT 'used non-isolated CPUs', + PRIMARY KEY (`uuid`), + KEY `FK_instances_tenants` (`tenant_id`), + KEY `FK_instances_flavors` (`flavor_id`), + KEY `FK_instances_images` (`image_id`), + KEY `FK_instances_hosts` (`host_id`), + CONSTRAINT `FK_instances_flavors` FOREIGN KEY (`flavor_id`) REFERENCES `tenants_flavors` (`flavor_id`), + CONSTRAINT `FK_instances_hosts` FOREIGN KEY (`host_id`) REFERENCES `hosts` (`uuid`), + CONSTRAINT `FK_instances_images` FOREIGN KEY (`image_id`) REFERENCES 
`tenants_images` (`image_id`), + CONSTRAINT `FK_instances_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `tenants` (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='VM instances'; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `logs` +-- + +DROP TABLE IF EXISTS `logs`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `logs` ( + `id` int(10) unsigned NOT NULL AUTO_INCREMENT, + `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + `tenant_id` varchar(36) DEFAULT NULL, + `related` enum('hosts','images','flavors','tenants','ports','instances','nets') DEFAULT NULL, + `uuid` varchar(36) DEFAULT NULL COMMENT 'uuid of host, image, etc that log relates to', + `level` enum('panic','error','info','debug','verbose') NOT NULL, + `description` varchar(200) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `nets` +-- + +DROP TABLE IF EXISTS `nets`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `nets` ( + `uuid` varchar(36) NOT NULL, + `tenant_id` varchar(36) DEFAULT NULL, + `type` enum('ptp','data','bridge_data','bridge_man') NOT NULL DEFAULT 'bridge_man', + `status` enum('ACTIVE','DOWN','BUILD','ERROR') NOT NULL DEFAULT 'ACTIVE', + `last_error` varchar(255) DEFAULT NULL, + `name` varchar(255) NOT NULL, + `shared` enum('true','false') NOT NULL DEFAULT 'false', + `admin_state_up` enum('true','false') NOT NULL DEFAULT 'true', + `vlan` smallint(6) DEFAULT NULL, + `provider` varchar(36) DEFAULT NULL, + `bind_net` varchar(36) DEFAULT NULL COMMENT 'To connect with other net', + `bind_type` varchar(36) DEFAULT NULL COMMENT 'VLAN: to insert/remove', + `cidr` varchar(64) DEFAULT NULL, + `enable_dhcp` enum('true','false') NOT NULL DEFAULT 'false', + `dhcp_first_ip` varchar(64) 
DEFAULT NULL, + `dhcp_last_ip` varchar(64) DEFAULT NULL, + PRIMARY KEY (`uuid`), + UNIQUE KEY `type_vlan` (`type`,`vlan`), + UNIQUE KEY `physical` (`provider`), + KEY `FK_nets_tenants` (`tenant_id`), + CONSTRAINT `FK_nets_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `tenants` (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `numas` +-- + +DROP TABLE IF EXISTS `numas`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `numas` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `host_id` varchar(36) NOT NULL, + `numa_socket` tinyint(3) unsigned NOT NULL DEFAULT '0', + `hugepages` smallint(5) unsigned NOT NULL DEFAULT '0' COMMENT 'Available memory for guest in GB', + `status` enum('ok','error','notused') NOT NULL DEFAULT 'ok', + `memory` smallint(5) unsigned NOT NULL DEFAULT '0' COMMENT 'total memry in GB, not all available for guests', + `admin_state_up` enum('true','false') NOT NULL DEFAULT 'true', + PRIMARY KEY (`id`), + KEY `FK_numas_hosts` (`host_id`), + CONSTRAINT `FK_numas_hosts` FOREIGN KEY (`host_id`) REFERENCES `hosts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `of_flows` +-- + +DROP TABLE IF EXISTS `of_flows`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `of_flows` ( + `id` int(10) unsigned NOT NULL AUTO_INCREMENT, + `name` varchar(64) NOT NULL, + `net_id` varchar(36) DEFAULT NULL, + `priority` int(10) unsigned DEFAULT NULL, + `vlan_id` smallint(5) unsigned DEFAULT NULL, + `ingress_port` varchar(10) DEFAULT NULL, + `src_mac` varchar(50) DEFAULT NULL, + `dst_mac` varchar(50) DEFAULT NULL, + `actions` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`), + KEY 
`FK_of_flows_nets` (`net_id`), + CONSTRAINT `FK_of_flows_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`) ON DELETE SET NULL ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `of_ports_pci_correspondence` +-- + +DROP TABLE IF EXISTS `of_ports_pci_correspondence`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `of_ports_pci_correspondence` ( + `id` int(10) NOT NULL AUTO_INCREMENT, + `ip_name` varchar(64) DEFAULT NULL, + `pci` varchar(50) DEFAULT NULL, + `switch_port` varchar(64) DEFAULT NULL, + `switch_dpid` varchar(64) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `ports` +-- + +DROP TABLE IF EXISTS `ports`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `ports` ( + `uuid` varchar(36) NOT NULL, + `name` varchar(64) NOT NULL, + `instance_id` varchar(36) DEFAULT NULL, + `tenant_id` varchar(36) DEFAULT NULL, + `net_id` varchar(36) DEFAULT NULL, + `vpci` char(12) DEFAULT NULL, + `Mbps` mediumint(8) unsigned DEFAULT NULL COMMENT 'In Mbits/s', + `admin_state_up` enum('true','false') NOT NULL DEFAULT 'true', + `status` enum('ACTIVE','DOWN','BUILD','ERROR') NOT NULL DEFAULT 'ACTIVE', + `type` enum('instance:bridge','instance:data','external') NOT NULL DEFAULT 'instance:bridge', + `vlan` smallint(5) DEFAULT NULL COMMENT 'vlan of this SRIOV, or external port', + `switch_port` varchar(64) DEFAULT NULL, + `switch_dpid` varchar(64) DEFAULT NULL, + `mac` char(18) DEFAULT NULL COMMENT 'mac address format XX:XX:XX:XX:XX:XX', + `ip_address` varchar(64) DEFAULT NULL, + `model` varchar(12) DEFAULT NULL COMMENT 'driver model for bridge ifaces; PF,VF,VFnotShared for data ifaces', + PRIMARY KEY (`uuid`), + UNIQUE KEY 
`mac` (`mac`), + KEY `FK_instance_ifaces_instances` (`instance_id`), + KEY `FK_instance_ifaces_nets` (`net_id`), + KEY `FK_ports_tenants` (`tenant_id`), + CONSTRAINT `FK_instance_ifaces_nets` FOREIGN KEY (`net_id`) REFERENCES `nets` (`uuid`), + CONSTRAINT `FK_ports_instances` FOREIGN KEY (`instance_id`) REFERENCES `instances` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE, + CONSTRAINT `FK_ports_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `tenants` (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Bridge interfaces used by instances'; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `resources_core` +-- + +DROP TABLE IF EXISTS `resources_core`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `resources_core` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `numa_id` int(11) DEFAULT NULL, + `core_id` smallint(5) unsigned NOT NULL, + `thread_id` smallint(5) unsigned NOT NULL, + `instance_id` varchar(36) DEFAULT NULL COMMENT 'instance that consume this resource', + `v_thread_id` smallint(6) DEFAULT NULL COMMENT 'name used by virtual machine; -1 if this thread is not used because core is asigned completely', + `status` enum('ok','error','notused','noteligible') NOT NULL DEFAULT 'ok' COMMENT '''error'': resource not available becasue an error at deployment; ''notused'': admin marked as not available, ''noteligible'': used by host and not available for guests', + `paired` enum('Y','N') NOT NULL DEFAULT 'N', + PRIMARY KEY (`id`), + KEY `FK_resources_core_instances` (`instance_id`), + KEY `FK_resources_core_numas` (`numa_id`), + CONSTRAINT `FK_resources_core_instances` FOREIGN KEY (`instance_id`) REFERENCES `instances` (`uuid`), + CONSTRAINT `FK_resources_core_numas` FOREIGN KEY (`numa_id`) REFERENCES `numas` (`id`) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Contain an entry by thread (two entries per core) of all 
available cores. Threy will be free if instance_id is NULL'; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `resources_mem` +-- + +DROP TABLE IF EXISTS `resources_mem`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `resources_mem` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `numa_id` int(11) NOT NULL DEFAULT '0', + `instance_id` varchar(36) DEFAULT '0' COMMENT 'NULL is allowed in order to allow some memory not used', + `consumed` int(3) unsigned NOT NULL DEFAULT '0' COMMENT 'In GB', + PRIMARY KEY (`id`), + KEY `FK_resources_mem_instances` (`instance_id`), + KEY `FK_resources_mem_numas` (`numa_id`), + CONSTRAINT `FK_resources_mem_instances` FOREIGN KEY (`instance_id`) REFERENCES `instances` (`uuid`) ON DELETE CASCADE, + CONSTRAINT `FK_resources_mem_numas` FOREIGN KEY (`numa_id`) REFERENCES `numas` (`id`) ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Include the hugepages memory used by one instance (VM) in one host NUMA.'; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `resources_port` +-- + +DROP TABLE IF EXISTS `resources_port`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `resources_port` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `numa_id` int(11) NOT NULL DEFAULT '0', + `instance_id` varchar(36) DEFAULT NULL COMMENT 'Contain instance that use this resource completely. NULL if this resource is free or partially used (resources_port_SRIOV)', + `port_id` varchar(36) DEFAULT NULL COMMENT 'When resource is used, this point to the ports table', + `source_name` varchar(64) DEFAULT NULL, + `pci` char(12) NOT NULL DEFAULT '0' COMMENT 'Host physical pci bus. 
Format XXXX:XX:XX.X', + `Mbps` smallint(5) unsigned DEFAULT '10' COMMENT 'Nominal Port speed ', + `root_id` int(11) DEFAULT NULL COMMENT 'NULL for physical port entries; =id for SRIOV port', + `status` enum('ok','error','notused') NOT NULL DEFAULT 'ok', + `Mbps_used` smallint(5) unsigned NOT NULL DEFAULT '0' COMMENT 'Speed bandwidth used when asigned', + `switch_port` varchar(64) DEFAULT NULL, + `switch_dpid` varchar(64) DEFAULT NULL, + `mac` char(18) DEFAULT NULL COMMENT 'mac address format XX:XX:XX:XX:XX:XX', + PRIMARY KEY (`id`), + UNIQUE KEY `mac` (`mac`), + UNIQUE KEY `port_id` (`port_id`), + KEY `FK_resources_port_numas` (`numa_id`), + KEY `FK_resources_port_instances` (`instance_id`), + CONSTRAINT `FK_resources_port_instances` FOREIGN KEY (`instance_id`) REFERENCES `instances` (`uuid`), + CONSTRAINT `FK_resources_port_numas` FOREIGN KEY (`numa_id`) REFERENCES `numas` (`id`) ON DELETE CASCADE ON UPDATE CASCADE, + CONSTRAINT `FK_resources_port_ports` FOREIGN KEY (`port_id`) REFERENCES `ports` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Contain NIC ports SRIOV and availabes, and current use. Every port contain several entries, one per port (root_id=NULL) and all posible SRIOV (root_id=id of port)'; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `schema_version` +-- + +DROP TABLE IF EXISTS `schema_version`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `schema_version` ( + `version_int` int(11) NOT NULL COMMENT 'version as a number. 
Must not contain gaps', + `version` varchar(20) NOT NULL COMMENT 'version as a text', + `openvim_ver` varchar(20) NOT NULL COMMENT 'openvim version', + `comments` varchar(2000) DEFAULT NULL COMMENT 'changes to database', + `date` date DEFAULT NULL, + PRIMARY KEY (`version_int`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='database schema control version'; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `tenants` +-- + +DROP TABLE IF EXISTS `tenants`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `tenants` ( + `uuid` varchar(36) NOT NULL, + `name` varchar(255) NOT NULL, + `description` varchar(255) DEFAULT NULL, + `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + `enabled` enum('true','false') NOT NULL DEFAULT 'true', + PRIMARY KEY (`uuid`), + UNIQUE KEY `name` (`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='tenants information'; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `tenants_flavors` +-- + +DROP TABLE IF EXISTS `tenants_flavors`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `tenants_flavors` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `flavor_id` varchar(36) NOT NULL, + `tenant_id` varchar(36) NOT NULL, + PRIMARY KEY (`id`), + KEY `FK__tenants` (`tenant_id`), + KEY `FK__flavors` (`flavor_id`), + CONSTRAINT `FK__flavors` FOREIGN KEY (`flavor_id`) REFERENCES `flavors` (`uuid`), + CONSTRAINT `FK__tenants` FOREIGN KEY (`tenant_id`) REFERENCES `tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `tenants_images` +-- + +DROP TABLE IF EXISTS `tenants_images`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE 
TABLE `tenants_images` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `image_id` varchar(36) NOT NULL, + `tenant_id` varchar(36) NOT NULL, + PRIMARY KEY (`id`), + KEY `FK_tenants_images_tenants` (`tenant_id`), + KEY `FK_tenants_images_images` (`image_id`), + CONSTRAINT `FK_tenants_images_images` FOREIGN KEY (`image_id`) REFERENCES `images` (`uuid`), + CONSTRAINT `FK_tenants_images_tenants` FOREIGN KEY (`tenant_id`) REFERENCES `tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `uuids` +-- + +DROP TABLE IF EXISTS `uuids`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `uuids` ( + `uuid` varchar(36) NOT NULL, + `root_uuid` varchar(36) DEFAULT NULL COMMENT 'Some related UUIDs can be grouped by this field, so that they can be deleted at once', + `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + `used_at` enum('flavors','hosts','images','instances','nets','ports','tenants') DEFAULT NULL COMMENT 'Table that uses this UUID', + PRIMARY KEY (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Used to avoid UUID repetitions'; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Dumping routines for database 'vim_db' +-- +/*!50003 DROP PROCEDURE IF EXISTS `GetAllAvailablePorts` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE `GetAllAvailablePorts`(IN Numa INT) + SQL SECURITY INVOKER + COMMENT 'Obtain all -including those not connected to 
switch port- ports available for a numa' +BEGIN + SELECT port_id, pci, Mbps, Mbps - Mbps_consumed as Mbps_free, totalSRIOV - coalesce(usedSRIOV,0) as availableSRIOV, switch_port, mac + FROM + ( + SELECT id as port_id, Mbps, pci, switch_port, mac + FROM resources_port + WHERE numa_id = Numa AND id=root_id AND status = 'ok' AND instance_id IS NULL + ) as A + INNER JOIN + ( + SELECT root_id, sum(Mbps_used) as Mbps_consumed, COUNT(id)-1 as totalSRIOV + FROM resources_port + WHERE numa_id = Numa AND status = 'ok' + GROUP BY root_id + ) as B + ON A.port_id = B.root_id + LEFT JOIN + ( + SELECT root_id, COUNT(id) as usedSRIOV + FROM resources_port + WHERE numa_id = Numa AND status = 'ok' AND instance_id IS NOT NULL + GROUP BY root_id + ) as C + ON A.port_id = C.root_id + ORDER BY Mbps_free, availableSRIOV, pci; + END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = @saved_col_connection */ ; +/*!50003 DROP PROCEDURE IF EXISTS `GetAvailablePorts` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE `GetAvailablePorts`(IN `Numa` INT) + DETERMINISTIC + SQL SECURITY INVOKER +BEGIN +SELECT port_id, pci, Mbps, Mbps - Mbps_consumed as Mbps_free, totalSRIOV - coalesce(usedSRIOV,0) as availableSRIOV, switch_port, mac +FROM + ( + SELECT id as port_id, Mbps, pci, switch_port, mac + FROM resources_port + WHERE numa_id = Numa AND id=root_id AND status = 'ok' AND switch_port is not Null AND instance_id IS NULL + ) as A + 
INNER JOIN + ( + SELECT root_id, sum(Mbps_used) as Mbps_consumed, COUNT(id)-1 as totalSRIOV + FROM resources_port + WHERE numa_id = Numa AND status = 'ok' + GROUP BY root_id + ) as B + ON A.port_id = B.root_id + LEFT JOIN + ( + SELECT root_id, COUNT(id) as usedSRIOV + FROM resources_port + WHERE numa_id = Numa AND status = 'ok' AND instance_id IS NOT NULL AND switch_port is not Null + GROUP BY root_id + ) as C + ON A.port_id = C.root_id + +ORDER BY Mbps_free, availableSRIOV, pci +; +END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = @saved_col_connection */ ; +/*!50003 DROP PROCEDURE IF EXISTS `GetHostByMemCpu` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE `GetHostByMemCpu`(IN `Needed_mem` INT, IN `Needed_cpus` INT) + SQL SECURITY INVOKER + COMMENT 'Obtain those hosts with the available free Memory(Non HugePages) and CPUS (Non isolated)' +BEGIN + +SELECT * +FROM hosts as H +LEFT JOIN ( + SELECT sum(ram) as used_ram, sum(vcpus) as used_cpus, host_id + FROM instances + GROUP BY host_id +) as U ON U.host_id = H.uuid +WHERE Needed_mem<=H.RAM-coalesce(U.used_ram,0) AND Needed_cpus<=H.cpus-coalesce(U.used_cpus,0) AND H.admin_state_up = 'true' +ORDER BY RAM-coalesce(U.used_ram,0), cpus-coalesce(U.used_cpus,0) + +; +END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; 
+/*!50003 SET collation_connection = @saved_col_connection */ ; +/*!50003 DROP PROCEDURE IF EXISTS `GetIfaces` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE `GetIfaces`() + SQL SECURITY INVOKER + COMMENT 'Used for the http get ports' +BEGIN + +SELECT *, 'ACTIVE' as status,'true' as admin_state_up FROM +( + ( + SELECT ifa.uuid as id, ifa.name as name, instance_id as device_id, net_id, tenant_id + FROM instance_ifaces AS ifa JOIN instances AS i on ifa.instance_id=i.uuid + ) + UNION + ( + SELECT iface_uuid as id, ifa.name as name, instance_id as device_id, net_id,tenant_id + FROM resources_port AS ifa JOIN instances AS i on ifa.instance_id=i.uuid + WHERE iface_uuid is not NULL + ) + UNION + ( + SELECT uuid as id, name, Null as device_id, net_id, Null as tenant_id + FROM external_ports + ) +) as B +; +END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = @saved_col_connection */ ; +/*!50003 DROP PROCEDURE IF EXISTS `GetNextAutoIncrement` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE 
`GetNextAutoIncrement`() + SQL SECURITY INVOKER +BEGIN +SELECT table_name, AUTO_INCREMENT +FROM information_schema.tables +WHERE table_name = 'resources_port' +AND table_schema = DATABASE( ) ; +END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = @saved_col_connection */ ; +/*!50003 DROP PROCEDURE IF EXISTS `GetNumaByCore` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE `GetNumaByCore`(IN `Needed_cores` SMALLINT) + SQL SECURITY INVOKER + COMMENT 'Obtain Numas with a concrete number of available cores, with bot' +BEGIN + +SELECT numa_id, host_id, numa_socket, freecores FROM +( + SELECT numa_id, COUNT(core_id) as freecores FROM + ( + SELECT numa_id, core_id, COUNT(thread_id) AS freethreads + FROM resources_core + WHERE instance_id IS NULL AND status = 'ok' + GROUP BY numa_id, core_id + ) AS FREECORES_TABLE + WHERE FREECORES_TABLE.freethreads = 2 + GROUP BY numa_id +) AS NBCORES_TABLE +INNER JOIN numas ON numas.id = NBCORES_TABLE.numa_id +INNER JOIN hosts ON numas.host_id = hosts.uuid + +WHERE NBCORES_TABLE.freecores >= Needed_cores AND numas.status = 'ok' AND numas.admin_state_up = 'true' AND hosts.admin_state_up = 'true' +ORDER BY NBCORES_TABLE.freecores +; + +END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = 
@saved_col_connection */ ; +/*!50003 DROP PROCEDURE IF EXISTS `GetNumaByMemory` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE `GetNumaByMemory`(IN `Needed_mem` SMALLINT) + DETERMINISTIC + SQL SECURITY INVOKER + COMMENT 'Obtain numas with a free quantity of memory, passed by parameter' +BEGIN +SELECT * FROM +( SELECT numas.id as numa_id, numas.host_id, numas.numa_socket, numas.hugepages, numas.hugepages - sum(coalesce(resources_mem.consumed,0)) AS freemem + FROM numas + LEFT JOIN resources_mem ON numas.id = resources_mem.numa_id + JOIN hosts ON numas.host_id = hosts.uuid + WHERE numas.status = 'ok' AND numas.admin_state_up = 'true' AND hosts.admin_state_up = 'true' + GROUP BY numas.id +) AS COMBINED + +WHERE COMBINED.freemem >= Needed_mem +ORDER BY COMBINED.freemem +; +END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = @saved_col_connection */ ; +/*!50003 DROP PROCEDURE IF EXISTS `GetNumaByPort` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE `GetNumaByPort`(IN `Needed_speed` SMALLINT, 
IN `Needed_ports` SMALLINT) + SQL SECURITY INVOKER + COMMENT 'Busca Numas con N puertos fisicos LIBRES de X velocidad' +BEGIN + +SELECT numa_id, COUNT(id) AS number_ports +FROM +( + SELECT root_id AS id, status, numa_id, Mbps, SUM(Mbps_used) AS Consumed + FROM resources_port + GROUP BY root_id +) AS P +WHERE status = 'ok' AND switch_port is not Null AND Consumed = 0 AND Mbps >= Needed_speed +GROUP BY numa_id +HAVING number_ports >= Needed_ports +; + +END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = @saved_col_connection */ ; +/*!50003 DROP PROCEDURE IF EXISTS `GetNumaByThread` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE `GetNumaByThread`(IN `Needed_threads` SMALLINT) + SQL SECURITY INVOKER +BEGIN + +SELECT numa_id, host_id, numa_socket, freethreads +FROM +( + SELECT numa_id, COUNT(thread_id) AS freethreads + FROM resources_core + WHERE instance_id IS NULL AND status = 'ok' + GROUP BY numa_id +) AS NBCORES_TABLE +INNER JOIN numas ON numas.id = NBCORES_TABLE.numa_id +INNER JOIN hosts ON numas.host_id = hosts.uuid + +WHERE NBCORES_TABLE.freethreads >= Needed_threads AND numas.status = 'ok' AND numas.admin_state_up = 'true' AND hosts.admin_state_up = 'true' +ORDER BY NBCORES_TABLE.freethreads +; + +END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 
SET collation_connection = @saved_col_connection */ ; +/*!50003 DROP PROCEDURE IF EXISTS `GetPortsFromNuma` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE `GetPortsFromNuma`(IN `Numa` INT) + NO SQL + SQL SECURITY INVOKER +BEGIN +SELECT Mbps, pci, status, Mbps_consumed +FROM +( + SELECT id, Mbps, pci, status + FROM resources_port + WHERE numa_id = Numa AND id=root_id AND status='ok' AND switch_port is not Null +) as A +INNER JOIN +( + SELECT root_id, sum(Mbps_used) as Mbps_consumed + FROM resources_port + WHERE numa_id = Numa + GROUP BY root_id +) as B +ON A.id = B.root_id +; +END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = @saved_col_connection */ ; +/*!50003 DROP PROCEDURE IF EXISTS `UpdateSwitchPort` */; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = utf8 */ ; +/*!50003 SET character_set_results = utf8 */ ; +/*!50003 SET collation_connection = utf8_general_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE PROCEDURE `UpdateSwitchPort`() + MODIFIES SQL DATA + SQL SECURITY INVOKER + COMMENT 'Load the openflow switch ports from of_ports_pci_correspondece into resoureces_port and ports' +BEGIN + + UPDATE ports + RIGHT JOIN resources_port as RP on 
ports.uuid=RP.port_id + INNER JOIN resources_port as RP2 on RP2.id=RP.root_id + INNER JOIN numas on RP.numa_id=numas.id + INNER JOIN hosts on numas.host_id=hosts.uuid + INNER JOIN of_ports_pci_correspondence as PC on hosts.ip_name=PC.ip_name and RP2.pci=PC.pci + SET ports.switch_port=null, ports.switch_dpid=null, RP.switch_port=null, RP.switch_dpid=null; + + UPDATE ports + RIGHT JOIN resources_port as RP on ports.uuid=RP.port_id + INNER JOIN resources_port as RP2 on RP2.id=RP.root_id + INNER JOIN numas on RP.numa_id=numas.id + INNER JOIN hosts on numas.host_id=hosts.uuid + INNER JOIN of_ports_pci_correspondence as PC on hosts.ip_name=PC.ip_name and RP2.pci=PC.pci + SET ports.switch_port=PC.switch_port, ports.switch_dpid=PC.switch_dpid, RP.switch_port=PC.switch_port, RP.switch_dpid=PC.switch_dpid; + END ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = @saved_col_connection */ ; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +-- Dump completed on 2016-05-13 12:52:19 + + + + + +-- MySQL dump 10.13 Distrib 5.5.43, for debian-linux-gnu (x86_64) +-- +-- Host: localhost Database: vim_db +-- ------------------------------------------------------ +-- Server version 5.5.43-0ubuntu0.14.04.1 + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; 
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +-- +-- Dumping data for table `schema_version` +-- + +LOCK TABLES `schema_version` WRITE; +/*!40000 ALTER TABLE `schema_version` DISABLE KEYS */; +INSERT INTO `schema_version` VALUES (1,'0.1','0.2.00','insert schema_version; alter nets with last_error column','2015-05-05'),(2,'0.2','0.2.03','update Procedure UpdateSwitchPort','2015-05-06'),(3,'0.3','0.2.5','New Procedure GetAllAvailablePorts','2015-07-09'),(4,'0.4','0.3.1','Remove unique index VLAN at resources_port','2015-09-04'),(5,'0.5','0.4.1','Add ip_address to ports','2015-09-04'),(6,'0.6','0.4.2','Enlarging name at database','2016-02-01'),(7,'0.7','0.4.4','Add bind_net to net table','2016-02-12'); +/*!40000 ALTER TABLE `schema_version` ENABLE KEYS */; +UNLOCK TABLES; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +-- Dump completed on 2016-05-13 12:52:19 diff --git a/dhcp_thread.py b/dhcp_thread.py new file mode 100644 index 0000000..a24585e --- /dev/null +++ b/dhcp_thread.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +This is thread that interact with the dhcp server to get the IP addresses +''' +__author__="Pablo Montes, Alfonso Tierno" +__date__ ="$4-Jan-2016 12:07:15$" + + + +import threading +import time +import Queue +import paramiko +import random +import subprocess + +#TODO: insert a logging system + +class dhcp_thread(threading.Thread): + def __init__(self, dhcp_params, db, db_lock, test, dhcp_nets, debug=None): + '''Init a thread. 
+ Arguments: thread_info must be a dictionary with: + 'dhcp_params' dhcp server parameters with the following keys: + mandatory : user, host, port, key, ifaces(interface name list of the one managed by the dhcp) + optional: password, key, port(22) + 'db' 'db_lock': database class and lock for accessing it + 'test': in test mode no acces to a server is done, and ip is invented + ''' + threading.Thread.__init__(self) + self.name = "dhcp_thread" + self.dhcp_params = dhcp_params + self.debug = debug + self.db = db + self.db_lock = db_lock + self.test = test + self.dhcp_nets = dhcp_nets + self.ssh_conn = None + + self.mac_status ={} #dictionary of mac_address to retrieve information + #ip: None + #retries: + #next_reading: time for the next trying to check ACTIVE status or IP + #created: time when it was added + #active: time when the VM becomes into ACTIVE status + + + self.queueLock = threading.Lock() + self.taskQueue = Queue.Queue(2000) + + def ssh_connect(self): + try: + #Connect SSH + self.ssh_conn = paramiko.SSHClient() + self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.ssh_conn.load_system_host_keys() + self.ssh_conn.connect(self.dhcp_params["host"], port=self.dhcp_params.get("port",22), + username=self.dhcp_params["user"], password=self.dhcp_params.get("password"), pkey=self.dhcp_params.get("key"), + timeout=2) + except paramiko.ssh_exception.SSHException as e: + text = e.args[0] + print self.name, ": ssh_connect ssh Exception:", text + + def load_mac_from_db(self): + #TODO get macs to follow from the database + print self.name, " load macs from db" + self.db_lock.acquire() + r,c = self.db.get_table(SELECT=('mac','ip_address','nets.uuid as net_id', ), + FROM='ports join nets on ports.net_id=nets.uuid', + WHERE_NOT={'ports.instance_id': None, 'nets.provider': None}) + self.db_lock.release() + now = time.time() + self.mac_status ={} + if r<0: + print self.name, ": Error getting data from database:", c + return + for port in c: + if 
port["net_id"] in self.dhcp_nets: + self.mac_status[ port["mac"] ] = {"ip": port["ip_address"], "next_reading": now, "created": now, "retries":0} + + def insert_task(self, task, *aditional): + try: + self.queueLock.acquire() + task = self.taskQueue.put( (task,) + aditional, timeout=5) + self.queueLock.release() + return 1, None + except Queue.Full: + return -1, "timeout inserting a task over host " + self.name + + def run(self): + print self.name, " starting, nets", self.dhcp_nets + next_iteration = time.time() + 10 + while True: + self.load_mac_from_db() + while True: + self.queueLock.acquire() + if not self.taskQueue.empty(): + task = self.taskQueue.get() + else: + task = None + self.queueLock.release() + + if task is None: + now=time.time() + if now >= next_iteration: + next_iteration = self.get_ip_from_dhcp() + else: + time.sleep(1) + continue + + if task[0] == 'add': + print self.name, ": processing task add mac", task[1] + now=time.time() + self.mac_status[task[1] ] = {"ip": None, "next_reading": now, "created": now, "retries":0} + next_iteration = now + elif task[0] == 'del': + print self.name, ": processing task del mac", task[1] + if task[1] in self.mac_status: + del self.mac_status[task[1] ] + elif task[0] == 'exit': + print self.name, ": processing task exit" + self.terminate() + return 0 + else: + print self.name, ": unknown task", task + + def terminate(self): + try: + if self.ssh_conn: + self.ssh_conn.close() + except Exception as e: + text = str(e) + print self.name, ": terminate Exception:", text + print self.name, ": exit from host_thread" + + def get_ip_from_dhcp(self): + + now = time.time() + next_iteration= now + 40000 # >10 hores + + #print self.name, "Iteration" + for mac_address in self.mac_status: + if now < self.mac_status[mac_address]["next_reading"]: + if self.mac_status[mac_address]["next_reading"] < next_iteration: + next_iteration = self.mac_status[mac_address]["next_reading"] + continue + + if 
self.mac_status[mac_address].get("active") == None: + #check from db if already active + self.db_lock.acquire() + r,c = self.db.get_table(FROM="ports as p join instances as i on p.instance_id=i.uuid", + WHERE={"p.mac": mac_address, "i.status": "ACTIVE"}) + self.db_lock.release() + if r>0: + self.mac_status[mac_address]["active"] = now + self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2 + print self.name, "mac %s VM ACTIVE" % (mac_address) + self.mac_status[mac_address]["retries"] = 0 + else: + #print self.name, "mac %s VM INACTIVE" % (mac_address) + if now - self.mac_status[mac_address]["created"] > 300: + #modify Database to tell openmano that we can not get dhcp from the machine + if not self.mac_status[mac_address].get("ip"): + self.db_lock.acquire() + r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address}) + self.db_lock.release() + self.mac_status[mac_address]["ip"] = "0.0.0.0" + print self.name, "mac %s >> set to 0.0.0.0 because of timeout" % (mac_address) + self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60 + else: + self.mac_status[mac_address]["next_reading"] = (int(now)/6 +1)* 6 + if self.mac_status[mac_address]["next_reading"] < next_iteration: + next_iteration = self.mac_status[mac_address]["next_reading"] + continue + + + if self.test: + if self.mac_status[mac_address]["retries"]>random.randint(10,100): #wait between 10 and 100 seconds to produce a fake IP + content = self.get_fake_ip() + else: + content = None + elif self.dhcp_params["host"]=="localhost": + try: + command = ['get_dhcp_lease.sh', mac_address] + content = subprocess.check_output(command) + except Exception as e: + text = str(e) + print self.name, ": get_ip_from_dhcp subprocess Exception", text + content = None + else: + try: + if not self.ssh_conn: + self.ssh_connect() + command = 'get_dhcp_lease.sh ' + mac_address + (_, stdout, _) = self.ssh_conn.exec_command(command) + content = stdout.read() + except 
paramiko.ssh_exception.SSHException as e: + text = e.args[0] + print self.name, ": get_ip_from_dhcp: ssh_Exception:", text + content = None + self.ssh_conn = None + except Exception as e: + text = str(e) + print self.name, ": get_ip_from_dhcp: Exception:", text + content = None + self.ssh_conn = None + + if content: + self.mac_status[mac_address]["ip"] = content + #modify Database + self.db_lock.acquire() + r,c = self.db.update_rows("ports", {"ip_address": content}, {"mac": mac_address}) + self.db_lock.release() + if r<0: + print self.name, ": Database update error:", c + else: + self.mac_status[mac_address]["retries"] = 0 + self.mac_status[mac_address]["next_reading"] = (int(now)/3600 +1)* 36000 # 10 hores + if self.mac_status[mac_address]["next_reading"] < next_iteration: + next_iteration = self.mac_status[mac_address]["next_reading"] + print self.name, "mac %s >> %s" % (mac_address, content) + continue + #a fail has happen + self.mac_status[mac_address]["retries"] +=1 + #next iteration is every 2sec at the beginning; every 5sec after a minute, every 1min after a 5min + if now - self.mac_status[mac_address]["active"] > 120: + #modify Database to tell openmano that we can not get dhcp from the machine + if not self.mac_status[mac_address].get("ip"): + self.db_lock.acquire() + r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address}) + self.db_lock.release() + self.mac_status[mac_address]["ip"] = "0.0.0.0" + print self.name, "mac %s >> set to 0.0.0.0 because of timeout" % (mac_address) + + if now - self.mac_status[mac_address]["active"] > 60: + self.mac_status[mac_address]["next_reading"] = (int(now)/6 +1)* 6 + elif now - self.mac_status[mac_address]["active"] > 300: + self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60 + else: + self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2 + + if self.mac_status[mac_address]["next_reading"] < next_iteration: + next_iteration = 
self.mac_status[mac_address]["next_reading"] + return next_iteration + + def get_fake_ip(self): + fake_ip= "192.168.%d.%d" % (random.randint(1,254), random.randint(1,254) ) + while True: + #check not already provided + already_used = False + for mac_address in self.mac_status: + if self.mac_status[mac_address]["ip"] == fake_ip: + already_used = True + break + if not already_used: + return fake_ip + + +#EXAMPLE of bash script that must be available at the DHCP server for "isc-dhcp-server" type +# $ cat ./get_dhcp_lease.sh +# #!/bin/bash +# awk ' +# ($1=="lease" && $3=="{"){ lease=$2; active="no"; found="no" } +# ($1=="binding" && $2=="state" && $3=="active;"){ active="yes" } +# ($1=="hardware" && $2=="ethernet" && $3==tolower("'$1';")){ found="yes" } +# ($1=="client-hostname"){ name=$2 } +# ($1=="}"){ if (active=="yes" && found=="yes"){ target_lease=lease; target_name=name}} +# END{printf("%s", target_lease)} #print target_name +# ' /var/lib/dhcp/dhcpd.leases + + diff --git a/floodlight.py b/floodlight.py new file mode 100644 index 0000000..eb6b6fb --- /dev/null +++ b/floodlight.py @@ -0,0 +1,441 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +Implement the plugging for floodligth openflow controller +It creates the class OF_conn to create dataplane connections +with static rules based on packet destination MAC address +''' + +__author__="Pablo Montes, Alfonso Tierno" +__date__ ="$28-oct-2014 12:07:15$" + + +import json +import requests +import logging + +class OF_conn(): + ''' Openflow Connector for Floodlight. + No MAC learning is used + version 0.9 or 1.X is autodetected + version 1.X is in progress, not finished!!! + ''' + def __init__(self, params): + ''' Constructor. + params is a dictionay with the following keys: + of_dpid: DPID to use for this controller + of_ip: controller IP address + of_port: controller TCP port + of_version: version, can be "0.9" or "1.X". By default it is autodetected + of_debug: debug level for logging. Default to ERROR + other keys are ignored + Raise an exception if same parameter is missing or wrong + ''' + #check params + if "of_ip" not in params or params["of_ip"]==None or "of_port" not in params or params["of_port"]==None: + raise ValueError("IP address and port must be provided") + + self.name = "Floodlight" + self.dpid = str(params["of_dpid"]) + self.url = "http://%s:%s" %( str(params["of_ip"]), str(params["of_port"]) ) + + self.pp2ofi={} # From Physical Port to OpenFlow Index + self.ofi2pp={} # From OpenFlow Index to Physical Port + self.headers = {'content-type':'application/json', 'Accept':'application/json'} + self.version= None + self.logger = logging.getLogger('vim.OF.FL') + self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR") ) ) + self._set_version(params.get("of_version") ) + + def _set_version(self, version): + '''set up a version of the controller. 
+ Depending on the version it fills the self.ver_names with the naming used in this version + ''' + #static version names + if version==None: + self.version= None + elif version=="0.9": + self.version= version + self.name = "Floodlightv0.9" + self.ver_names={ + "dpid": "dpid", + "URLmodifier": "staticflowentrypusher", + "destmac": "dst-mac", + "vlanid": "vlan-id", + "inport": "ingress-port", + "setvlan": "set-vlan-id", + "stripvlan": "strip-vlan", + } + elif version[0]=="1" : #version 1.X + self.version= version + self.name = "Floodlightv1.X" + self.ver_names={ + "dpid": "switchDPID", + "URLmodifier": "staticflowpusher", + "destmac": "eth_dst", + "vlanid": "eth_vlan_vid", + "inport": "in_port", + "setvlan": "set_vlan_vid", + "stripvlan": "strip_vlan", + } + else: + raise ValueError("Invalid version for floodlight controller") + + def get_of_switches(self): + ''' Obtain a a list of switches or DPID detected by this controller + Return + >=0, list: list length, and a list where each element a tuple pair (DPID, IP address) + <0, text_error: if fails + ''' + try: + of_response = requests.get(self.url+"/wm/core/controller/switches/json", headers=self.headers) + error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + if of_response.status_code != 200: + self.logger.warning("get_of_switches " + error_text) + return -1 , error_text + self.logger.debug("get_of_switches " + error_text) + info = of_response.json() + if type(info) != list and type(info) != tuple: + self.logger.error("get_of_switches. Unexpected response not a list %s", str(type(info))) + return -1, "Unexpected response, not a list. Wrong version?" + if len(info)==0: + return 0, info + #autodiscover version + if self.version == None: + if 'dpid' in info[0] and 'inetAddress' in info[0]: + self._set_version("0.9") + elif 'switchDPID' in info[0] and 'inetAddress' in info[0]: + self._set_version("1.X") + else: + self.logger.error("get_of_switches. 
Unexpected response, not found 'dpid' or 'switchDPID' field: %s", str(info[0])) + return -1, "Unexpected response, not found 'dpid' or 'switchDPID' field. Wrong version?" + + switch_list=[] + for switch in info: + switch_list.append( (switch[ self.ver_names["dpid"] ], switch['inetAddress']) ) + return len(switch_list), switch_list + except (requests.exceptions.RequestException, ValueError) as e: + #ValueError in the case that JSON can not be decoded + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("get_of_switches " + error_text) + return -1, error_text + + def get_of_rules(self, translate_of_ports=True): + ''' Obtain the rules inserted at openflow controller + Params: + translate_of_ports: if True it translates ports from openflow index to physical switch name + Return: + 0, dict if ok: with the rule name as key and value is another dictionary with the following content: + priority: rule priority + name: rule name (present also as the master dict key) + ingress_port: match input port of the rule + dst_mac: match destination mac address of the rule, can be missing or None if not apply + vlan_id: match vlan tag of the rule, can be missing or None if not apply + actions: list of actions, composed by a pair tuples: + (vlan, None/int): for stripping/setting a vlan tag + (out, port): send to this port + switch: DPID, all + -1, text_error if fails + ''' + + #get translation, autodiscover version + if len(self.ofi2pp) == 0: + r,c = self.obtain_port_correspondence() + if r<0: + return r,c + #get rules + try: + of_response = requests.get(self.url+"/wm/%s/list/%s/json" %(self.ver_names["URLmodifier"], self.dpid), + headers=self.headers) + error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + if of_response.status_code != 200: + self.logger.warning("get_of_rules " + error_text) + return -1 , error_text + self.logger.debug("get_of_rules " + error_text) + info = of_response.json() + if type(info) != dict: + 
self.logger.error("get_of_rules. Unexpected response not a dict %s", str(type(info))) + return -1, "Unexpected response, not a dict. Wrong version?" + rule_dict={} + for switch,switch_info in info.iteritems(): + if switch_info == None: + continue + if str(switch) != self.dpid: + continue + for name,details in switch_info.iteritems(): + rule = {} + rule["switch"] = str(switch) + #rule["active"] = "true" + rule["priority"] = int(details["priority"]) + if self.version[0]=="0": + if translate_of_ports: + rule["ingress_port"] = self.ofi2pp[ details["match"]["inputPort"] ] + else: + rule["ingress_port"] = str(details["match"]["inputPort"]) + dst_mac = details["match"]["dataLayerDestination"] + if dst_mac != "00:00:00:00:00:00": + rule["dst_mac"] = dst_mac + vlan = details["match"]["dataLayerVirtualLan"] + if vlan != -1: + rule["vlan_id"] = vlan + actionlist=[] + for action in details["actions"]: + if action["type"]=="OUTPUT": + if translate_of_ports: + port = self.ofi2pp[ action["port"] ] + else: + port = action["port"] + actionlist.append( ("out", port) ) + elif action["type"]=="STRIP_VLAN": + actionlist.append( ("vlan",None) ) + elif action["type"]=="SET_VLAN_ID": + actionlist.append( ("vlan", action["virtualLanIdentifier"]) ) + else: + actionlist.append( (action["type"], str(action) )) + self.logger.warning("get_of_rules() Unknown action in rule %s: %s", rule["name"], str(action)) + rule["actions"] = actionlist + elif self.version[0]=="1": + if translate_of_ports: + rule["ingress_port"] = self.ofi2pp[ details["match"]["in_port"] ] + else: + rule["ingress_port"] = details["match"]["in_port"] + if "eth_dst" in details["match"]: + dst_mac = details["match"]["eth_dst"] + if dst_mac != "00:00:00:00:00:00": + rule["dst_mac"] = dst_mac + if "eth_vlan_vid" in details["match"]: + vlan = int(details["match"]["eth_vlan_vid"],16) & 0xFFF + rule["vlan_id"] = str(vlan) + actionlist=[] + for action in details["instructions"]["instruction_apply_actions"]: + if action=="output": + if 
translate_of_ports: + port = self.ofi2pp[ details["instructions"]["instruction_apply_actions"]["output"] ] + else: + port = details["instructions"]["instruction_apply_actions"]["output"] + actionlist.append( ("out",port) ) + elif action=="strip_vlan": + actionlist.append( ("vlan",None) ) + elif action=="set_vlan_vid": + actionlist.append( ("vlan", details["instructions"]["instruction_apply_actions"]["set_vlan_vid"]) ) + else: + self.logger.error("get_of_rules Unknown action in rule %s: %s", rule["name"], str(action)) + #actionlist.append( (action, str(details["instructions"]["instruction_apply_actions"]) )) + rule_dict[str(name)] = rule + return 0, rule_dict + except (requests.exceptions.RequestException, ValueError) as e: + #ValueError in the case that JSON can not be decoded + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("get_of_rules " + error_text) + return -1, error_text + + def obtain_port_correspondence(self): + '''Obtain the correspondence between physical and openflow port names + return: + 0, dictionary: with physical name as key, openflow name as value + -1, error_text: if fails + ''' + try: + of_response = requests.get(self.url+"/wm/core/controller/switches/json", headers=self.headers) + #print vim_response.status_code + error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + if of_response.status_code != 200: + self.logger.warning("obtain_port_correspondence " + error_text) + return -1 , error_text + self.logger.debug("obtain_port_correspondence " + error_text) + info = of_response.json() + + if type(info) != list and type(info) != tuple: + return -1, "unexpected openflow response, not a list. Wrong version?" + + index = -1 + if len(info)>0: + #autodiscover version + if self.version == None: + if 'dpid' in info[0] and 'ports' in info[0]: + self._set_version("0.9") + elif 'switchDPID' in info[0]: + self._set_version("1.X") + else: + return -1, "unexpected openflow response, Wrong version?" 
+ + for i in range(0,len(info)): + if info[i][ self.ver_names["dpid"] ] == self.dpid: + index = i + break + if index == -1: + text = "DPID '"+self.dpid+"' not present in controller "+self.url + #print self.name, ": get_of_controller_info ERROR", text + return -1, text + else: + if self.version[0]=="0": + ports = info[index]["ports"] + else: #version 1.X + of_response = requests.get(self.url+"/wm/core/switch/%s/port-desc/json" %self.dpid, headers=self.headers) + #print vim_response.status_code + error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + if of_response.status_code != 200: + self.logger.warning("obtain_port_correspondence " + error_text) + return -1 , error_text + self.logger.debug("obtain_port_correspondence " + error_text) + info = of_response.json() + if type(info) != dict: + return -1, "unexpected openflow port-desc response, not a dict. Wrong version?" + if "portDesc" not in info: + return -1, "unexpected openflow port-desc response, 'portDesc' not found. Wrong version?" + if type(info["portDesc"]) != list and type(info["portDesc"]) != tuple: + return -1, "unexpected openflow port-desc response at 'portDesc', not a list. Wrong version?" 
def del_flow(self, flow_name):
    """Delete an existing static rule by name.

    :param flow_name: name of the rule to remove
    :return: (0, None) on success, (-1, error_text) on failure
    """
    # Rules are keyed by (dpid, name); force switch auto-discovery if the
    # controller version was never probed so self.dpid/ver_names are valid.
    if self.version == None:
        r, c = self.get_of_switches()
        if r < 0:
            return r, c
        elif r == 0:
            return -1, "No dpid found "
    try:
        of_response = requests.delete(self.url + "/wm/%s/json" % self.ver_names["URLmodifier"],
                                      headers=self.headers,
                                      data='{"switch":"%s","name":"%s"}' % (self.dpid, flow_name))
        error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
        if of_response.status_code != 200:
            self.logger.warning("del_flow " + error_text)
            return -1, error_text
        self.logger.debug("del_flow OK " + error_text)
        return 0, None
    except requests.exceptions.RequestException as e:
        error_text = type(e).__name__ + ": " + str(e)
        self.logger.error("del_flow " + error_text)
        return -1, error_text

def new_flow(self, data):
    """Insert a new static rule.

    :param data: dict with:
        priority:     rule priority (optional)
        name:         rule name
        ingress_port: match input port of the rule
        dst_mac:      match destination MAC, missing/None if not applied
        vlan_id:      match VLAN tag, missing/None if not applied
        actions:      list of pair tuples:
                      ('vlan', None/int) strip/set a VLAN tag
                      ('out', port)      send to this port
    :return: (0, None) on success, (-1, error_text) on failure
    """
    # Port-name translation table is lazily populated (also autodiscovers version)
    if len(self.pp2ofi) == 0:
        r, c = self.obtain_port_correspondence()
        if r < 0:
            return r, c
    try:
        # Build the controller-specific payload from the generic request
        sdata = {'active': "true", "name": data["name"]}
        if data.get("priority"):
            sdata["priority"] = str(data["priority"])
        if data.get("vlan_id"):
            sdata[self.ver_names["vlanid"]] = data["vlan_id"]
        if data.get("dst_mac"):
            sdata[self.ver_names["destmac"]] = data["dst_mac"]
        sdata['switch'] = self.dpid
        if not data['ingress_port'] in self.pp2ofi:
            error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch'
            self.logger.warning("new_flow " + error_text)
            return -1, error_text
        sdata[self.ver_names["inport"]] = self.pp2ofi[data['ingress_port']]

        actions = []
        for action in data['actions']:
            if action[0] == "vlan":
                if action[1] == None:
                    actions.append(self.ver_names["stripvlan"])
                else:
                    actions.append(self.ver_names["setvlan"] + "=" + str(action[1]))
            elif action[0] == 'out':
                # FIX: an unknown output port used to raise an uncaught KeyError
                # (only the ingress port was validated); fail cleanly instead.
                if action[1] not in self.pp2ofi:
                    error_text = 'Error. Port ' + str(action[1]) + ' is not present in the switch'
                    self.logger.warning("new_flow " + error_text)
                    return -1, error_text
                actions.append("output=" + self.pp2ofi[action[1]])
        sdata['actions'] = ",".join(actions)

        of_response = requests.post(self.url + "/wm/%s/json" % self.ver_names["URLmodifier"],
                                    headers=self.headers, data=json.dumps(sdata))
        error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
        if of_response.status_code != 200:
            self.logger.warning("new_flow " + error_text)
            return -1, error_text
        self.logger.debug("new_flow OK" + error_text)
        return 0, None

    except requests.exceptions.RequestException as e:
        error_text = type(e).__name__ + ": " + str(e)
        self.logger.error("new_flow " + error_text)
        return -1, error_text
return r,c + elif r==0: #empty + return 0, None + try: + url = self.url+"/wm/%s/clear/%s/json" % (self.ver_names["URLmodifier"], self.dpid) + of_response = requests.get(url ) + error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + if of_response.status_code < 200 or of_response.status_code >= 300: + self.logger.warning("clear_all_flows " + error_text) + return -1 , error_text + self.logger.debug("clear_all_flows OK " + error_text) + return 0, None + except requests.exceptions.RequestException as e: + error_text = type(e).__name__ + ": " + str(e) + self.logger.error("clear_all_flows " + error_text) + return -1, error_text diff --git a/host_thread.py b/host_thread.py new file mode 100644 index 0000000..0aba077 --- /dev/null +++ b/host_thread.py @@ -0,0 +1,1692 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +This is thread that interact with the host and the libvirt to manage VM +One thread will be launched per host +''' +__author__="Pablo Montes, Alfonso Tierno" +__date__ ="$10-jul-2014 12:07:15$" + + +import json +import yaml +import threading +import time +import Queue +import paramiko +from jsonschema import validate as js_v, exceptions as js_e +import libvirt +from vim_schema import localinfo_schema, hostinfo_schema +import random +#from logging import Logger +#import utils.auxiliary_functions as af + +#TODO: insert a logging system + +class host_thread(threading.Thread): + def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode, develop_bridge_iface): + '''Init a thread. + Arguments: + 'id' number of thead + 'name' name of thread + 'host','user': host ip or name to manage and user + 'db', 'db_lock': database class and lock to use it in exclusion + ''' + threading.Thread.__init__(self) + self.name = name + self.host = host + self.user = user + self.db = db + self.db_lock = db_lock + self.test = test + self.develop_mode = develop_mode + self.develop_bridge_iface = develop_bridge_iface + self.image_path = image_path + self.host_id = host_id + self.version = version + + self.xml_level = 0 + #self.pending ={} + + self.server_status = {} #dictionary with pairs server_uuid:server_status + self.pending_terminate_server =[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed + self.next_update_server_status = 0 #time when must be check servers status + + self.hostinfo = None + + self.queueLock = threading.Lock() + self.taskQueue = Queue.Queue(2000) + + def ssh_connect(self): + try: + #Connect SSH + self.ssh_conn = paramiko.SSHClient() + self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.ssh_conn.load_system_host_keys() + 
self.ssh_conn.connect(self.host, username=self.user, timeout=10) #, None) + except paramiko.ssh_exception.SSHException as e: + text = e.args[0] + print self.name, ": ssh_connect ssh Exception:", text + + def load_localinfo(self): + if not self.test: + try: + #Connect SSH + self.ssh_connect() + + command = 'mkdir -p ' + self.image_path + #print self.name, ': command:', command + (_, stdout, stderr) = self.ssh_conn.exec_command(command) + content = stderr.read() + if len(content) > 0: + print self.name, ': command:', command, "stderr:", content + + command = 'cat ' + self.image_path + '/.openvim.yaml' + #print self.name, ': command:', command + (_, stdout, stderr) = self.ssh_conn.exec_command(command) + content = stdout.read() + if len(content) == 0: + print self.name, ': command:', command, "stderr:", stderr.read() + raise paramiko.ssh_exception.SSHException("Error empty file ") + self.localinfo = yaml.load(content) + js_v(self.localinfo, localinfo_schema) + self.localinfo_dirty=False + if 'server_files' not in self.localinfo: + self.localinfo['server_files'] = {} + print self.name, ': localinfo load from host' + return + + except paramiko.ssh_exception.SSHException as e: + text = e.args[0] + print self.name, ": load_localinfo ssh Exception:", text + except libvirt.libvirtError as e: + text = e.get_error_message() + print self.name, ": load_localinfo libvirt Exception:", text + except yaml.YAMLError as exc: + text = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + text = " at position: (%s:%s)" % (mark.line+1, mark.column+1) + print self.name, ": load_localinfo yaml format Exception", text + except js_e.ValidationError as e: + text = "" + if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'" + print self.name, ": load_localinfo format Exception:", text, e.message + except Exception as e: + text = str(e) + print self.name, ": load_localinfo Exception:", text + + #not loaded, insert a default data and force saving by activating dirty flag + 
def load_hostinfo(self):
    """Load <image_path>/hostinfo.yaml from the remote host into self.hostinfo.

    Any failure (ssh, yaml, schema validation) is printed and leaves
    self.hostinfo as None. Does nothing in test mode.
    """
    if self.test:
        return
    try:
        self.ssh_connect()
        command = 'cat ' + self.image_path + '/hostinfo.yaml'
        (_, stdout, stderr) = self.ssh_conn.exec_command(command)
        content = stdout.read()
        if len(content) == 0:
            print("%s %s %s %s %s" % (self.name, ': command:', command, "stderr:", stderr.read()))
            raise paramiko.ssh_exception.SSHException("Error empty file ")
        self.hostinfo = yaml.load(content)
        js_v(self.hostinfo, hostinfo_schema)
        print("%s %s %s" % (self.name, ': hostlinfo load from host', self.hostinfo))
        return
    except paramiko.ssh_exception.SSHException as e:
        print("%s %s %s" % (self.name, ": load_hostinfo ssh Exception:", e.args[0]))
    except libvirt.libvirtError as e:
        print("%s %s %s" % (self.name, ": load_hostinfo libvirt Exception:", e.get_error_message()))
    except yaml.YAMLError as exc:
        where = ""
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            where = " at position: (%s:%s)" % (mark.line + 1, mark.column + 1)
        print("%s %s %s" % (self.name, ": load_hostinfo yaml format Exception", where))
    except js_e.ValidationError as e:
        where = ""
        if len(e.path) > 0:
            where = " at '" + ":".join(map(str, e.path)) + "'"
        print("%s %s %s %s" % (self.name, ": load_hostinfo format Exception:", where, e.message))
    except Exception as e:
        print("%s %s %s" % (self.name, ": load_hostinfo Exception:", str(e)))

    # not loaded: fall back to an empty default
    self.hostinfo = None
def load_servers_from_db(self):
    """Refresh self.server_status from the 'instances' table for this host.

    Also migrates the legacy 'inc_files' layout of self.localinfo into the
    current 'server_files' layout, marking localinfo dirty when it does.
    """
    self.db_lock.acquire()
    result, rows = self.db.get_table(SELECT=('uuid', 'status', 'image_id'),
                                     FROM='instances', WHERE={'host_id': self.host_id})
    self.db_lock.release()

    self.server_status = {}
    if result < 0:
        print("%s %s %s" % (self.name, ": Error getting data from database:", rows))
        return
    for server in rows:
        self.server_status[server['uuid']] = server['status']

        # convert from old version to new one
        if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
            entry = {'source file': self.localinfo['inc_files'][server['uuid']][0],
                     'file format': 'raw'}
            # legacy entries carried no format; infer qcow2 from the extension
            if entry['source file'][-5:] == 'qcow2':
                entry['file format'] = 'qcow2'
            self.localinfo['server_files'][server['uuid']] = {server['image_id']: entry}
    if 'inc_files' in self.localinfo:
        del self.localinfo['inc_files']
        self.localinfo_dirty = True
def insert_task(self, task, *aditional):
    """Enqueue a (task, *aditional) tuple for the worker loop.

    :param task: task keyword ('edit-iface', 'exit', 'reload', ...)
    :param aditional: extra positional arguments for the task
    :return: (1, None) on success, (-1, text) if the queue stays full for 5 s
    """
    try:
        self.queueLock.acquire()
        try:
            # put() returns None; the original pointlessly rebound it to 'task'.
            # NOTE(review): the lock is held during the (up to 5 s) blocking
            # put(), as in the original — confirm that is intended.
            self.taskQueue.put((task,) + aditional, timeout=5)
        finally:
            # FIX: the lock used to leak when put() raised Queue.Full,
            # leaving queueLock held forever and deadlocking the worker.
            self.queueLock.release()
        return 1, None
    except Queue.Full:
        return -1, "timeout inserting a task over host " + self.name
def server_forceoff(self, wait_until_finished=False):
    """Send a forced terminate for every queued deferred-destroy server.

    Entries whose scheduled time has not arrived are either waited for
    (wait_until_finished=True, polling every second) or left queued.
    """
    while len(self.pending_terminate_server) > 0:
        now = time.time()
        deadline = self.pending_terminate_server[0][0]
        if deadline > now:
            if not wait_until_finished:
                return
            time.sleep(1)
            continue
        req = {'uuid': self.pending_terminate_server[0][1],
               'action': {'terminate': 'force'},
               'status': None
               }
        self.action_on_server(req)
        self.pending_terminate_server.pop(0)

def terminate(self):
    """Flush pending force-offs and dirty localinfo, then close the ssh link."""
    try:
        self.server_forceoff(True)
        if self.localinfo_dirty:
            self.save_localinfo()
        if not self.test:
            self.ssh_conn.close()
    except Exception as e:
        print("%s %s %s" % (self.name, ": terminate Exception:", str(e)))
    print("%s %s" % (self.name, ": exit from host_thread"))

def get_local_iface_name(self, generic_name):
    """Map a generic interface name through hostinfo's 'iface_names' table.

    Falls back to the generic name when no mapping is available.
    """
    if self.hostinfo != None and "iface_names" in self.hostinfo:
        mapping = self.hostinfo["iface_names"]
        if generic_name in mapping:
            return mapping[generic_name]
    return generic_name
+ Additional devices are in dev_list list + The main disk is upon dev_list[0]""" + + #get if operating system is Windows + windows_os = False + os_type = server_metadata.get('os_type', None) + if os_type == None and 'metadata' in dev_list[0]: + os_type = dev_list[0]['metadata'].get('os_type', None) + if os_type != None and os_type.lower() == "windows": + windows_os = True + #get type of hard disk bus + bus_ide = True if windows_os else False + bus = server_metadata.get('bus', None) + if bus == None and 'metadata' in dev_list[0]: + bus = dev_list[0]['metadata'].get('bus', None) + if bus != None: + bus_ide = True if bus=='ide' else False + + self.xml_level = 0 + + text = "" + #get topology + topo = server_metadata.get('topology', None) + if topo == None and 'metadata' in dev_list[0]: + topo = dev_list[0]['metadata'].get('topology', None) + #name + name = server.get('name','') + "_" + server['uuid'] + name = name[:58] #qemu impose a length limit of 59 chars or not start. Using 58 + text += self.inc_tab() + "" + name+ "" + #uuid + text += self.tab() + "" + server['uuid'] + "" + + numa={} + if 'extended' in server and server['extended']!=None and 'numas' in server['extended']: + numa = server['extended']['numas'][0] + #memory + use_huge = False + memory = int(numa.get('memory',0))*1024*1024 #in KiB + if memory==0: + memory = int(server['ram'])*1024; + else: + if not self.develop_mode: + use_huge = True + if memory==0: + return -1, 'No memory assigned to instance' + memory = str(memory) + text += self.tab() + "" +memory+"" + text += self.tab() + "" +memory+ "" + if use_huge: + text += self.tab()+''+ \ + self.inc_tab() + ''+ \ + self.dec_tab()+ '' + + #cpu + use_cpu_pinning=False + vcpus = int(server.get("vcpus",0)) + cpu_pinning = [] + if 'cores-source' in numa: + use_cpu_pinning=True + for index in range(0, len(numa['cores-source'])): + cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] ) + vcpus += 1 + if 'threads-source' in numa: + 
use_cpu_pinning=True + for index in range(0, len(numa['threads-source'])): + cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] ) + vcpus += 1 + if 'paired-threads-source' in numa: + use_cpu_pinning=True + for index in range(0, len(numa['paired-threads-source'])): + cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] ) + cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] ) + vcpus += 2 + + if use_cpu_pinning and not self.develop_mode: + text += self.tab()+"" +str(len(cpu_pinning)) +"" + \ + self.tab()+'' + self.xml_level += 1 + for i in range(0, len(cpu_pinning)): + text += self.tab() + "" + text += self.dec_tab()+''+ \ + self.tab() + '' +\ + self.inc_tab() + "" +\ + self.dec_tab() + '' + else: + if vcpus==0: + return -1, "Instance without number of cpus" + text += self.tab()+"" + str(vcpus) + "" + + #boot + boot_cdrom = False + for dev in dev_list: + if dev['type']=='cdrom' : + boot_cdrom = True + break + text += self.tab()+ '' + \ + self.inc_tab() + "hvm" + if boot_cdrom: + text += self.tab() + "" + text += self.tab() + "" + \ + self.dec_tab()+'' + #features + text += self.tab()+''+\ + self.inc_tab()+'' +\ + self.tab()+'' +\ + self.tab()+''+ \ + self.dec_tab() +'' + if windows_os or topo=="oneSocket": + text += self.tab() + " "% vcpus + else: + text += self.tab() + "" + text += self.tab() + "" +\ + self.tab() + "preserve" + \ + self.tab() + "restart" + \ + self.tab() + "restart" + text += self.tab() + "" + \ + self.inc_tab() + "/usr/libexec/qemu-kvm" + \ + self.tab() + "" +\ + self.inc_tab() + "" + \ + self.dec_tab() + "" +\ + self.tab() + "" + \ + self.inc_tab()+ "" + \ + self.dec_tab()+'' + if windows_os: + text += self.tab() + "" + \ + self.tab() + "" + \ + self.tab() + "" + \ + self.tab() + "" + \ + self.tab() + "" + \ + self.tab() + "" + \ + self.tab() + "" #TODO revisar + +#> self.tab()+'\n' +\ +#> self.dec_tab()+'\n' +\ +#> 
self.tab()+'\n' + if windows_os: + text += self.tab() + "" + else: + #If image contains 'GRAPH' include graphics + #if 'GRAPH' in image: + text += self.tab() + "" +\ + self.inc_tab() + "" +\ + self.dec_tab() + "" + + vd_index = 'a' + for dev in dev_list: + bus_ide_dev = bus_ide + if dev['type']=='cdrom' or dev['type']=='disk': + if dev['type']=='cdrom': + bus_ide_dev = True + text += self.tab() + "" + if 'file format' in dev: + text += self.inc_tab() + "" + if 'source file' in dev: + text += self.tab() + "" + #elif v['type'] == 'block': + # text += self.tab() + "" + #else: + # return -1, 'Unknown disk type ' + v['type'] + vpci = dev.get('vpci',None) + if vpci == None: + vpci = dev['metadata'].get('vpci',None) + text += self.pci2xml(vpci) + + if bus_ide_dev: + text += self.tab() + "" #TODO allows several type of disks + else: + text += self.tab() + "" + text += self.dec_tab() + '' + vd_index = chr(ord(vd_index)+1) + elif dev['type']=='xml': + dev_text = dev['xml'] + if 'vpci' in dev: + dev_text = dev_text.replace('__vpci__', dev['vpci']) + if 'source file' in dev: + dev_text = dev_text.replace('__file__', dev['source file']) + if 'file format' in dev: + dev_text = dev_text.replace('__format__', dev['source file']) + if '__dev__' in dev_text: + dev_text = dev_text.replace('__dev__', vd_index) + vd_index = chr(ord(vd_index)+1) + text += dev_text + else: + return -1, 'Unknown device type ' + dev['type'] + + net_nb=0 + bridge_interfaces = server.get('networks', []) + for v in bridge_interfaces: + #Get the brifge name + self.db_lock.acquire() + result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} ) + self.db_lock.release() + if result <= 0: + print "create_xml_server ERROR getting nets",result, content + return -1, content + #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM + #I know it is not secure + #for v in sorted(desc['network interfaces'].itervalues()): + model = 
v.get("model", None) + if content[0]['provider']=='default': + text += self.tab() + "" + \ + self.inc_tab() + "" + elif content[0]['provider'][0:7]=='macvtap': + text += self.tab()+"" + \ + self.inc_tab() + "" + \ + self.tab() + "" + if windows_os: + text += self.tab() + "" + elif model==None: + model = "virtio" + elif content[0]['provider'][0:6]=='bridge': + text += self.tab() + "" + \ + self.inc_tab()+"" + if windows_os: + text += self.tab() + "" +\ + self.tab() + "" + elif model==None: + model = "virtio" + else: + return -1, 'Unknown Bridge net provider ' + content[0]['provider'] + if model!=None: + text += self.tab() + "" + if v.get('mac_address', None) != None: + text+= self.tab() +"" + text += self.pci2xml(v.get('vpci',None)) + text += self.dec_tab()+'' + + net_nb += 1 + + interfaces = numa.get('interfaces', []) + + net_nb=0 + for v in interfaces: + if self.develop_mode: #map these interfaces to bridges + text += self.tab() + "" + \ + self.inc_tab()+"" + if windows_os: + text += self.tab() + "" +\ + self.tab() + "" + else: + text += self.tab() + "" #e1000 is more probable to be supported than 'virtio' + if v.get('mac_address', None) != None: + text+= self.tab() +"" + text += self.pci2xml(v.get('vpci',None)) + text += self.dec_tab()+'' + continue + + if v['dedicated'] == 'yes': #passthrought + text += self.tab() + "" + \ + self.inc_tab() + "" + self.inc_tab() + text += self.pci2xml(v['source']) + text += self.dec_tab()+'' + text += self.pci2xml(v.get('vpci',None)) + if windows_os: + text += self.tab() + "" + text += self.dec_tab()+'' + net_nb += 1 + else: #sriov_interfaces + #skip not connected interfaces + if v.get("net_id") == None: + continue + text += self.tab() + "" + self.inc_tab() + if v.get('mac_address', None) != None: + text+= self.tab() + "" + text+= self.tab()+'' + self.inc_tab() + text += self.pci2xml(v['source']) + text += self.dec_tab()+'' + if v.get('vlan',None) != None: + text += self.tab() + " " + text += self.pci2xml(v.get('vpci',None)) + if 
windows_os: + text += self.tab() + "" + text += self.dec_tab()+'' + + + text += self.dec_tab()+''+\ + self.dec_tab()+'' + return 0, text + + def pci2xml(self, pci): + '''from a pci format text XXXX:XX:XX.X generates the xml content of
+ alows an empty pci text''' + if pci is None: + return "" + first_part = pci.split(':') + second_part = first_part[2].split('.') + return self.tab() + "
def tab(self):
    """Return a newline plus the current XML indentation."""
    # NOTE(review): the indent unit shows as one space per level in this
    # chunk; whitespace may have been collapsed — confirm against the file.
    return "\n" + (' ' * self.xml_level)

def inc_tab(self):
    """Increase the XML indentation level and return the new indentation."""
    self.xml_level += 1
    return self.tab()

def dec_tab(self):
    """Decrease the XML indentation level and return the new indentation."""
    self.xml_level -= 1
    return self.tab()

def get_file_info(self, path):
    """ls the remote path; None if missing, else the space-split ls fields."""
    command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
    print("%s %s %s" % (self.name, ': command:', command))
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    listing = stdout.read()
    if len(listing) == 0:
        return None  # file does not exist
    return listing.split(" ")  # (permission, 1, owner, group, size, date, file)

def qemu_get_info(self, path):
    """Run 'qemu-img info' remotely and return its output parsed as YAML.

    :raises paramiko.ssh_exception.SSHException: on command error or bad YAML
    """
    command = 'qemu-img info ' + path
    print("%s %s %s" % (self.name, ': command:', command))
    (_, stdout, stderr) = self.ssh_conn.exec_command(command)
    info = stdout.read()
    if len(info) == 0:
        error = stderr.read()
        print("%s %s %s" % (self.name, ": get_qemu_info error ", error))
        raise paramiko.ssh_exception.SSHException("Error getting qemu_info: " + error)
    try:
        return yaml.load(info)
    except yaml.YAMLError as exc:
        where = ""
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            where = " at position: (%s:%s)" % (mark.line + 1, mark.column + 1)
        print("%s %s %s" % (self.name, ": get_qemu_info yaml format Exception", where))
        raise paramiko.ssh_exception.SSHException("Error getting qemu_info yaml format" + where)

def qemu_change_backing(self, inc_file, new_backing_file):
    """Rebase (unsafe mode) an incremental image onto a new backing file.

    :return: 0 on success, -1 if qemu-img reported anything on stderr
    """
    command = 'qemu-img rebase -u -b ' + new_backing_file + ' ' + inc_file
    print("%s %s %s" % (self.name, ': command:', command))
    (_, _, stderr) = self.ssh_conn.exec_command(command)
    err = stderr.read()
    if len(err) == 0:
        return 0
    print("%s %s %s" % (self.name, ": qemu_change_backing error: ", err))
    return -1
def get_notused_path(self, proposed_path, suffix=''):
    """Look for an image path not yet present in the 'images' table.

    :param proposed_path: proposed file name, includes path
    :param suffix: text added before the extension; None is treated as ''
    :return: first free path, appending '-<n>' before the extension if needed
    """
    extension = proposed_path.rfind(".")
    if extension < 0:  # no extension: append at the end
        extension = len(proposed_path)
    # FIX: suffix=None used to leave target_path unbound (NameError in the
    # loop below); normalize it to the empty string instead.
    if suffix is None:
        suffix = ''
    target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
    index = 0
    while True:
        # NOTE(review): unlike every other DB access in this class, this one
        # does not take self.db_lock — confirm callers serialize access.
        r, _ = self.db.get_table(FROM="images", WHERE={"path": target_path})
        if r <= 0:
            return target_path
        target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
        index += 1

def delete_file(self, file_name):
    """Remove a remote file over ssh.

    :raises paramiko.ssh_exception.SSHException: if rm reports an error
    """
    command = 'rm -f ' + file_name
    print("%s %s %s" % (self.name, ': command:', command))
    (_, _, stderr) = self.ssh_conn.exec_command(command)
    error_msg = stderr.read()
    if len(error_msg) > 0:
        raise paramiko.ssh_exception.SSHException("Error deleting file: " + error_msg)
host: " + error_msg) + + def copy_remote_file(self, remote_file, use_incremental): + ''' Copy a file from the repository to local folder and recursively + copy the backing files in case the remote file is incremental + Read and/or modified self.localinfo['files'] that contain the + unmodified copies of images in the local path + params: + remote_file: path of remote file + use_incremental: None (leave the decision to this function), True, False + return: + local_file: name of local file + qemu_info: dict with quemu information of local file + use_incremental_out: True, False; same as use_incremental, but if None a decision is taken + ''' + + use_incremental_out = use_incremental + new_backing_file = None + local_file = None + + #in case incremental use is not decided, take the decision depending on the image + #avoid the use of incremental if this image is already incremental + qemu_remote_info = self.qemu_get_info(remote_file) + if use_incremental_out==None: + use_incremental_out = not 'backing file' in qemu_remote_info + #copy recursivelly the backing files + if 'backing file' in qemu_remote_info: + new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True) + + #check if remote file is present locally + if use_incremental_out and remote_file in self.localinfo['files']: + local_file = self.localinfo['files'][remote_file] + local_file_info = self.get_file_info(local_file) + remote_file_info = self.get_file_info(remote_file) + if local_file_info == None: + local_file = None + elif local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]: + #local copy of file not valid because date or size are different. 
+ #TODO DELETE local file if this file is not used by any active virtual machine + try: + self.delete_file(local_file) + del self.localinfo['files'][remote_file] + except Exception: + pass + local_file = None + else: #check that the local file has the same backing file, or there are not backing at all + qemu_info = self.qemu_get_info(local_file) + if new_backing_file != qemu_info.get('backing file'): + local_file = None + + + if local_file == None: #copy the file + img_name= remote_file.split('/') [-1] + img_local = self.image_path + '/' + img_name + local_file = self.get_notused_filename(img_local) + self.copy_file(remote_file, local_file, use_incremental_out) + + if use_incremental_out: + self.localinfo['files'][remote_file] = local_file + if new_backing_file: + self.qemu_change_backing(local_file, new_backing_file) + qemu_info = self.qemu_get_info(local_file) + + return local_file, qemu_info, use_incremental_out + + def launch_server(self, conn, server, rebuild=False, domain=None): + if self.test: + time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real + return 0, 'Success' + + server_id = server['uuid'] + paused = server.get('paused','no') + try: + if domain!=None and rebuild==False: + domain.resume() + #self.server_status[server_id] = 'ACTIVE' + return 0, 'Success' + + self.db_lock.acquire() + result, server_data = self.db.get_instance(server_id) + self.db_lock.release() + if result <= 0: + print self.name, ": launch_server ERROR getting server from DB",result, server_data + return result, server_data + + #0: get image metadata + server_metadata = server.get('metadata', {}) + use_incremental = None + + if "use_incremental" in server_metadata: + use_incremental = False if server_metadata["use_incremental"]=="no" else True + + server_host_files = self.localinfo['server_files'].get( server['uuid'], {}) + if rebuild: + #delete previous incremental files + for file_ in server_host_files.values(): + self.delete_file(file_['source file'] 
) + server_host_files={} + + #1: obtain aditional devices (disks) + #Put as first device the main disk + devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ] + if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']: + devices += server_data['extended']['devices'] + + for dev in devices: + if dev['image_id'] == None: + continue + + self.db_lock.acquire() + result, content = self.db.get_table(FROM='images', SELECT=('path','metadata'),WHERE={'uuid':dev['image_id']} ) + self.db_lock.release() + if result <= 0: + error_text = "ERROR", result, content, "when getting image", dev['image_id'] + print self.name, ": launch_server", error_text + return -1, error_text + if content[0]['metadata'] is not None: + dev['metadata'] = json.loads(content[0]['metadata']) + else: + dev['metadata'] = {} + + if dev['image_id'] in server_host_files: + dev['source file'] = server_host_files[ dev['image_id'] ] ['source file'] #local path + dev['file format'] = server_host_files[ dev['image_id'] ] ['file format'] # raw or qcow2 + continue + + #2: copy image to host + remote_file = content[0]['path'] + use_incremental_image = use_incremental + if dev['metadata'].get("use_incremental") == "no": + use_incremental_image = False + local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image) + + #create incremental image + if use_incremental_image: + local_file_inc = self.get_notused_filename(local_file, '.inc') + command = 'qemu-img create -f qcow2 '+local_file_inc+ ' -o backing_file='+ local_file + print 'command:', command + (_, _, stderr) = self.ssh_conn.exec_command(command) + error_msg = stderr.read() + if len(error_msg) > 0: + raise paramiko.ssh_exception.SSHException("Error creating incremental file: " + error_msg) + local_file = local_file_inc + qemu_info = {'file format':'qcow2'} + + server_host_files[ dev['image_id'] ] = {'source file': 
local_file, 'file format': qemu_info['file format']} + + dev['source file'] = local_file + dev['file format'] = qemu_info['file format'] + + self.localinfo['server_files'][ server['uuid'] ] = server_host_files + self.localinfo_dirty = True + + #3 Create XML + result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file + if result <0: + print self.name, ": create xml server error:", xml + return -2, xml + print self.name, ": create xml:", xml + atribute = libvirt.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0 + #4 Start the domain + if not rebuild: #ensures that any pending destroying server is done + self.server_forceoff(True) + #print self.name, ": launching instance" #, xml + conn.createXML(xml, atribute) + #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE' + + return 0, 'Success' + + except paramiko.ssh_exception.SSHException as e: + text = e.args[0] + print self.name, ": launch_server(%s) ssh Exception: %s" %(server_id, text) + if "SSH session not active" in text: + self.ssh_connect() + except libvirt.libvirtError as e: + text = e.get_error_message() + print self.name, ": launch_server(%s) libvirt Exception: %s" %(server_id, text) + except Exception as e: + text = str(e) + print self.name, ": launch_server(%s) Exception: %s" %(server_id, text) + return -1, text + + def update_servers_status(self): + # # virDomainState + # VIR_DOMAIN_NOSTATE = 0 + # VIR_DOMAIN_RUNNING = 1 + # VIR_DOMAIN_BLOCKED = 2 + # VIR_DOMAIN_PAUSED = 3 + # VIR_DOMAIN_SHUTDOWN = 4 + # VIR_DOMAIN_SHUTOFF = 5 + # VIR_DOMAIN_CRASHED = 6 + # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended + + if self.test or len(self.server_status)==0: + return + + try: + conn = libvirt.open("qemu+ssh://"+self.user+"@"+self.host+"/system") + domains= conn.listAllDomains() + domain_dict={} + for domain in domains: + uuid = domain.UUIDString() ; + libvirt_status = domain.state() + #print libvirt_status + if libvirt_status[0] == libvirt.VIR_DOMAIN_RUNNING or 
libvirt_status[0] == libvirt.VIR_DOMAIN_SHUTDOWN: + new_status = "ACTIVE" + elif libvirt_status[0] == libvirt.VIR_DOMAIN_PAUSED: + new_status = "PAUSED" + elif libvirt_status[0] == libvirt.VIR_DOMAIN_SHUTOFF: + new_status = "INACTIVE" + elif libvirt_status[0] == libvirt.VIR_DOMAIN_CRASHED: + new_status = "ERROR" + else: + new_status = None + domain_dict[uuid] = new_status + conn.close + except libvirt.libvirtError as e: + print self.name, ": get_state() Exception '", e.get_error_message() + return + + for server_id, current_status in self.server_status.iteritems(): + new_status = None + if server_id in domain_dict: + new_status = domain_dict[server_id] + else: + new_status = "INACTIVE" + + if new_status == None or new_status == current_status: + continue + if new_status == 'INACTIVE' and current_status == 'ERROR': + continue #keep ERROR status, because obviously this machine is not running + #change status + print self.name, ": server ", server_id, "status change from ", current_status, "to", new_status + STATUS={'progress':100, 'status':new_status} + if new_status == 'ERROR': + STATUS['last_error'] = 'machine has crashed' + self.db_lock.acquire() + r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False) + self.db_lock.release() + if r>=0: + self.server_status[server_id] = new_status + + def action_on_server(self, req, last_retry=True): + '''Perform an action on a req + Attributes: + req: dictionary that contain: + server properties: 'uuid','name','tenant_id','status' + action: 'action' + host properties: 'user', 'ip_name' + return (error, text) + 0: No error. VM is updated to new state, + -1: Invalid action, as trying to pause a PAUSED VM + -2: Error accessing host + -3: VM nor present + -4: Error at DB access + -5: Error while trying to perform action. 
VM is updated to ERROR + ''' + server_id = req['uuid'] + conn = None + new_status = None + old_status = req['status'] + last_error = None + + if self.test: + if 'terminate' in req['action']: + new_status = 'deleted' + elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']: + if req['status']!='ERROR': + time.sleep(5) + new_status = 'INACTIVE' + elif 'start' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE' + elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE' : new_status = 'ACTIVE' + elif 'pause' in req['action'] and req['status']!='ERROR': new_status = 'PAUSED' + elif 'reboot' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE' + elif 'rebuild' in req['action']: + time.sleep(random.randint(20,150)) + new_status = 'ACTIVE' + elif 'createImage' in req['action']: + time.sleep(5) + self.create_image(None, req) + else: + try: + conn = libvirt.open("qemu+ssh://"+self.user+"@"+self.host+"/system") + try: + dom = conn.lookupByUUIDString(server_id) + except libvirt.libvirtError as e: + text = e.get_error_message() + if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text: + dom = None + else: + print self.name, ": action_on_server(",server_id,") libvirt exception:", text + raise e + + if 'forceOff' in req['action']: + if dom == None: + print self.name, ": action_on_server(",server_id,") domain not running" + else: + try: + print self.name, ": sending DESTROY to server", server_id + dom.destroy() + except Exception as e: + if "domain is not running" not in e.get_error_message(): + print self.name, ": action_on_server(",server_id,") Exception while sending force off:", e.get_error_message() + last_error = 'action_on_server Exception while destroy: ' + e.get_error_message() + new_status = 'ERROR' + + elif 'terminate' in req['action']: + if dom == None: + print self.name, ": action_on_server(",server_id,") domain not 
running" + new_status = 'deleted' + else: + try: + if req['action']['terminate'] == 'force': + print self.name, ": sending DESTROY to server", server_id + dom.destroy() + new_status = 'deleted' + else: + print self.name, ": sending SHUTDOWN to server", server_id + dom.shutdown() + self.pending_terminate_server.append( (time.time()+10,server_id) ) + except Exception as e: + print self.name, ": action_on_server(",server_id,") Exception while destroy:", e.get_error_message() + last_error = 'action_on_server Exception while destroy: ' + e.get_error_message() + new_status = 'ERROR' + if "domain is not running" in e.get_error_message(): + try: + dom.undefine() + new_status = 'deleted' + except Exception: + print self.name, ": action_on_server(",server_id,") Exception while undefine:", e.get_error_message() + last_error = 'action_on_server Exception2 while undefine:', e.get_error_message() + #Exception: 'virDomainDetachDevice() failed' + if new_status=='deleted': + if server_id in self.server_status: + del self.server_status[server_id] + if req['uuid'] in self.localinfo['server_files']: + for file_ in self.localinfo['server_files'][ req['uuid'] ].values(): + try: + self.delete_file(file_['source file']) + except Exception: + pass + del self.localinfo['server_files'][ req['uuid'] ] + self.localinfo_dirty = True + + elif 'shutoff' in req['action'] or 'shutdown' in req['action']: + try: + if dom == None: + print self.name, ": action_on_server(",server_id,") domain not running" + else: + dom.shutdown() +# new_status = 'INACTIVE' + #TODO: check status for changing at database + except Exception as e: + new_status = 'ERROR' + print self.name, ": action_on_server(",server_id,") Exception while shutdown:", e.get_error_message() + last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message() + + elif 'rebuild' in req['action']: + if dom != None: + dom.destroy() + r = self.launch_server(conn, req, True, None) + if r[0] <0: + new_status = 'ERROR' + last_error = 
r[1] + else: + new_status = 'ACTIVE' + elif 'start' in req['action']: + #La instancia está sólo en la base de datos pero no en la libvirt. es necesario crearla + rebuild = True if req['action']['start']=='rebuild' else False + r = self.launch_server(conn, req, rebuild, dom) + if r[0] <0: + new_status = 'ERROR' + last_error = r[1] + else: + new_status = 'ACTIVE' + + elif 'resume' in req['action']: + try: + if dom == None: + pass + else: + dom.resume() +# new_status = 'ACTIVE' + except Exception as e: + print self.name, ": action_on_server(",server_id,") Exception while resume:", e.get_error_message() + + elif 'pause' in req['action']: + try: + if dom == None: + pass + else: + dom.suspend() +# new_status = 'PAUSED' + except Exception as e: + print self.name, ": action_on_server(",server_id,") Exception while pause:", e.get_error_message() + + elif 'reboot' in req['action']: + try: + if dom == None: + pass + else: + dom.reboot() + print self.name, ": action_on_server(",server_id,") reboot:" + #new_status = 'ACTIVE' + except Exception as e: + print self.name, ": action_on_server(",server_id,") Exception while reboot:", e.get_error_message() + elif 'createImage' in req['action']: + self.create_image(dom, req) + + + conn.close() + except libvirt.libvirtError as e: + if conn is not None: conn.close + text = e.get_error_message() + new_status = "ERROR" + last_error = text + print self.name, ": action_on_server(",server_id,") Exception '", text + if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text: + print self.name, ": action_on_server(",server_id,") Exception removed from host" + #end of if self.test + if new_status == None: + return 1 + + print self.name, ": action_on_server(",server_id,") new status", new_status, last_error + UPDATE = {'progress':100, 'status':new_status} + + if new_status=='ERROR': + if not last_retry: #if there will be another retry do not update database + return -1 + elif 'terminate' in 
req['action']: + #PUT a log in the database + print self.name, ": PANIC deleting server", server_id, last_error + self.db_lock.acquire() + self.db.new_row('logs', + {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic', + 'description':'PANIC deleting server from host '+self.name+': '+last_error} + ) + self.db_lock.release() + if server_id in self.server_status: + del self.server_status[server_id] + return -1 + else: + UPDATE['last_error'] = last_error + if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') : + self.db_lock.acquire() + self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True) + self.server_status[server_id] = new_status + self.db_lock.release() + if new_status == 'ERROR': + return -1 + return 1 + + + def restore_iface(self, name, mac, lib_conn=None): + ''' make an ifdown, ifup to restore default parameter of na interface + Params: + mac: mac address of the interface + lib_conn: connection to the libvirt, if None a new connection is created + Return 0,None if ok, -1,text if fails + ''' + conn=None + ret = 0 + error_text=None + if self.test: + print self.name, ": restore_iface '%s' %s" % (name, mac) + return 0, None + try: + if not lib_conn: + conn = libvirt.open("qemu+ssh://"+self.user+"@"+self.host+"/system") + else: + conn = lib_conn + + #wait to the pending VM deletion + #TODO.Revise self.server_forceoff(True) + + iface = conn.interfaceLookupByMACString(mac) + iface.destroy() + iface.create() + print self.name, ": restore_iface '%s' %s" % (name, mac) + except libvirt.libvirtError as e: + error_text = e.get_error_message() + print self.name, ": restore_iface '%s' '%s' libvirt exception: %s" %(name, mac, error_text) + ret=-1 + finally: + if lib_conn is None and conn is not None: + conn.close + return ret, error_text + + + def create_image(self,dom, req): + if self.test: + if 'path' in req['action']['createImage']: + file_dst = req['action']['createImage']['path'] + else: + 
createImage=req['action']['createImage'] + img_name= createImage['source']['path'] + index=img_name.rfind('/') + file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2') + image_status='ACTIVE' + else: + for retry in (0,1): + try: + server_id = req['uuid'] + createImage=req['action']['createImage'] + file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file'] + if 'path' in req['action']['createImage']: + file_dst = req['action']['createImage']['path'] + else: + img_name= createImage['source']['path'] + index=img_name.rfind('/') + file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2') + + self.copy_file(file_orig, file_dst) + qemu_info = self.qemu_get_info(file_orig) + if 'backing file' in qemu_info: + for k,v in self.localinfo['files'].items(): + if v==qemu_info['backing file']: + self.qemu_change_backing(file_dst, k) + break + image_status='ACTIVE' + break + except paramiko.ssh_exception.SSHException as e: + image_status='ERROR' + error_text = e.args[0] + print self.name, "': create_image(",server_id,") ssh Exception:", error_text + if "SSH session not active" in error_text and retry==0: + self.ssh_connect() + except Exception as e: + image_status='ERROR' + error_text = str(e) + print self.name, "': create_image(",server_id,") Exception:", error_text + + #TODO insert a last_error at database + self.db_lock.acquire() + self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst}, + {'uuid':req['new_image']['uuid']}, log=True) + self.db_lock.release() + + def edit_iface(self, port_id, old_net, new_net): + #This action imply remove and insert interface to put proper parameters + if self.test: + time.sleep(1) + else: + #get iface details + self.db_lock.acquire() + r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id', + WHERE={'port_id': port_id}) + self.db_lock.release() + if r<0: + print 
self.name, ": edit_iface(",port_id,") DDBB error:", c + return + elif r==0: + print self.name, ": edit_iface(",port_id,") por not found" + return + port=c[0] + if port["model"]!="VF": + print self.name, ": edit_iface(",port_id,") ERROR model must be VF" + return + #create xml detach file + xml=[] + self.xml_level = 2 + xml.append("") + xml.append(" ") + xml.append(" "+ self.pci2xml(port['pci'])+"\n ") + xml.append('') + + + try: + conn=None + conn = libvirt.open("qemu+ssh://"+self.user+"@"+self.host+"/system") + dom = conn.lookupByUUIDString(port["instance_id"]) + if old_net: + text="\n".join(xml) + print self.name, ": edit_iface detaching SRIOV interface", text + dom.detachDeviceFlags(text, flags=libvirt.VIR_DOMAIN_AFFECT_LIVE) + if new_net: + xml[-1] =" " + self.xml_level = 1 + xml.append(self.pci2xml(port.get('vpci',None)) ) + xml.append('') + text="\n".join(xml) + print self.name, ": edit_iface attaching SRIOV interface", text + dom.attachDeviceFlags(text, flags=libvirt.VIR_DOMAIN_AFFECT_LIVE) + + except libvirt.libvirtError as e: + text = e.get_error_message() + print self.name, ": edit_iface(",port["instance_id"],") libvirt exception:", text + + finally: + if conn is not None: conn.close + + +def create_server(server, db, db_lock, only_of_ports): + #print "server" + #print "server" + #print server + #print "server" + #print "server" + #try: +# host_id = server.get('host_id', None) + extended = server.get('extended', None) + +# print '----------------------' +# print json.dumps(extended, indent=4) + + requirements={} + requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]} + requirements['ram'] = server['flavor'].get('ram', 0) + if requirements['ram']== None: + requirements['ram'] = 0 + requirements['vcpus'] = server['flavor'].get('vcpus', 0) + if requirements['vcpus']== None: + requirements['vcpus'] = 0 + #If extended is not defined get requirements from flavor + if extended is None: + #If extended is 
defined in flavor convert to dictionary and use it + if 'extended' in server['flavor'] and server['flavor']['extended'] != None: + json_acceptable_string = server['flavor']['extended'].replace("'", "\"") + extended = json.loads(json_acceptable_string) + else: + extended = None + #print json.dumps(extended, indent=4) + + #For simplicity only one numa VM are supported in the initial implementation + if extended != None: + numas = extended.get('numas', []) + if len(numas)>1: + return (-2, "Multi-NUMA VMs are not supported yet") + #elif len(numas)<1: + # return (-1, "At least one numa must be specified") + + #a for loop is used in order to be ready to multi-NUMA VMs + request = [] + for numa in numas: + numa_req = {} + numa_req['memory'] = numa.get('memory', 0) + if 'cores' in numa: + numa_req['proc_req_nb'] = numa['cores'] #number of cores or threads to be reserved + numa_req['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved + numa_req['proc_req_list'] = numa.get('cores-id', None) #list of ids to be assigned to the cores or threads + elif 'paired-threads' in numa: + numa_req['proc_req_nb'] = numa['paired-threads'] + numa_req['proc_req_type'] = 'paired-threads' + numa_req['proc_req_list'] = numa.get('paired-threads-id', None) + elif 'threads' in numa: + numa_req['proc_req_nb'] = numa['threads'] + numa_req['proc_req_type'] = 'threads' + numa_req['proc_req_list'] = numa.get('threads-id', None) + else: + numa_req['proc_req_nb'] = 0 # by default + numa_req['proc_req_type'] = 'threads' + + + + #Generate a list of sriov and another for physical interfaces + interfaces = numa.get('interfaces', []) + sriov_list = [] + port_list = [] + for iface in interfaces: + iface['bandwidth'] = int(iface['bandwidth']) + if iface['dedicated'][:3]=='yes': + port_list.append(iface) + else: + sriov_list.append(iface) + + #Save lists ordered from more restrictive to less bw requirements + numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], 
reverse=True) + numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True) + + + request.append(numa_req) + + # print "----------\n"+json.dumps(request[0], indent=4) + # print '----------\n\n' + + #Search in db for an appropriate numa for each requested numa + #at the moment multi-NUMA VMs are not supported + if len(request)>0: + requirements['numa'].update(request[0]) + if requirements['numa']['memory']>0: + requirements['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory + elif requirements['ram']==0: + return (-1, "Memory information not set neither at extended field not at ram") + if requirements['numa']['proc_req_nb']>0: + requirements['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus + elif requirements['vcpus']==0: + return (-1, "Processor information not set neither at extended field not at vcpus") + + + db_lock.acquire() + result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports) + db_lock.release() + + if result == -1: + return (-1, content) + + numa_id = content['numa_id'] + host_id = content['host_id'] + + #obtain threads_id and calculate pinning + cpu_pinning = [] + reserved_threads=[] + if requirements['numa']['proc_req_nb']>0: + db_lock.acquire() + result, content = db.get_table(FROM='resources_core', + SELECT=('id','core_id','thread_id'), + WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} ) + db_lock.release() + if result <= 0: + print content + return -1, content + + #convert rows to a dictionary indexed by core_id + cores_dict = {} + for row in content: + if not row['core_id'] in cores_dict: + cores_dict[row['core_id']] = [] + cores_dict[row['core_id']].append([row['thread_id'],row['id']]) + + #In case full cores are requested + paired = 'N' + if requirements['numa']['proc_req_type'] == 'cores': + #Get/create the list of the vcpu_ids + vcpu_id_list = requirements['numa']['proc_req_list'] + if 
vcpu_id_list == None: + vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb'])) + + for threads in cores_dict.itervalues(): + #we need full cores + if len(threads) != 2: + continue + + #set pinning for the first thread + cpu_pinning.append( [ vcpu_id_list.pop(0), threads[0][0], threads[0][1] ] ) + + #reserve so it is not used the second thread + reserved_threads.append(threads[1][1]) + + if len(vcpu_id_list) == 0: + break + + #In case paired threads are requested + elif requirements['numa']['proc_req_type'] == 'paired-threads': + paired = 'Y' + #Get/create the list of the vcpu_ids + if requirements['numa']['proc_req_list'] != None: + vcpu_id_list = [] + for pair in requirements['numa']['proc_req_list']: + if len(pair)!=2: + return -1, "Field paired-threads-id not properly specified" + return + vcpu_id_list.append(pair[0]) + vcpu_id_list.append(pair[1]) + else: + vcpu_id_list = range(0,2*int(requirements['numa']['proc_req_nb'])) + + for threads in cores_dict.itervalues(): + #we need full cores + if len(threads) != 2: + continue + #set pinning for the first thread + cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]]) + + #set pinning for the second thread + cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]]) + + if len(vcpu_id_list) == 0: + break + + #In case normal threads are requested + elif requirements['numa']['proc_req_type'] == 'threads': + #Get/create the list of the vcpu_ids + vcpu_id_list = requirements['numa']['proc_req_list'] + if vcpu_id_list == None: + vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb'])) + + for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])): + threads = cores_dict[threads_index] + #set pinning for the first thread + cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]]) + + #if exists, set pinning for the second thread + if len(threads) == 2 and len(vcpu_id_list) != 0: + cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], 
threads[1][1]]) + + if len(vcpu_id_list) == 0: + break + + #Get the source pci addresses for the selected numa + used_sriov_ports = [] + for port in requirements['numa']['sriov_list']: + db_lock.acquire() + result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} ) + db_lock.release() + if result <= 0: + print content + return -1, content + for row in content: + if row['id'] in used_sriov_ports or row['id']==port['port_id']: + continue + port['pci'] = row['pci'] + if 'mac_address' not in port: + port['mac_address'] = row['mac'] + del port['mac'] + port['port_id']=row['id'] + port['Mbps_used'] = port['bandwidth'] + used_sriov_ports.append(row['id']) + break + + for port in requirements['numa']['port_list']: + port['Mbps_used'] = None + if port['dedicated'] != "yes:sriov": + port['mac_address'] = port['mac'] + del port['mac'] + continue + db_lock.acquire() + result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} ) + db_lock.release() + if result <= 0: + print content + return -1, content + port['Mbps_used'] = content[0]['Mbps'] + for row in content: + if row['id'] in used_sriov_ports or row['id']==port['port_id']: + continue + port['pci'] = row['pci'] + if 'mac_address' not in port: + port['mac_address'] = row['mac'] # mac cannot be set to passthrough ports + del port['mac'] + port['port_id']=row['id'] + used_sriov_ports.append(row['id']) + break + + # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4) + # print '2. 
SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4) + + server['host_id'] = host_id + + + #Generate dictionary for saving in db the instance resources + resources = {} + resources['bridged-ifaces'] = [] + + numa_dict = {} + numa_dict['interfaces'] = [] + + numa_dict['interfaces'] += requirements['numa']['port_list'] + numa_dict['interfaces'] += requirements['numa']['sriov_list'] + + #Check bridge information + unified_dataplane_iface=[] + unified_dataplane_iface += requirements['numa']['port_list'] + unified_dataplane_iface += requirements['numa']['sriov_list'] + + for control_iface in server.get('networks', []): + control_iface['net_id']=control_iface.pop('uuid') + #Get the brifge name + db_lock.acquire() + result, content = db.get_table(FROM='nets', SELECT=('name','type', 'vlan'),WHERE={'uuid':control_iface['net_id']} ) + db_lock.release() + if result < 0: + pass + elif result==0: + return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface['net_id'] + else: + network=content[0] + if control_iface.get("type", 'virtual') == 'virtual': + if network['type']!='bridge_data' and network['type']!='bridge_man': + return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id'] + resources['bridged-ifaces'].append(control_iface) + else: + if network['type']!='data' and network['type']!='ptp': + return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id'] + #dataplane interface, look for it in the numa tree and asign this network + iface_found=False + for dataplane_iface in numa_dict['interfaces']: + if dataplane_iface['name'] == control_iface.get("name"): + if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \ + (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \ + (dataplane_iface['dedicated'] == "yes:sriov" and 
control_iface["type"] != "VFnotShared") : + return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \ + (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"]) + dataplane_iface['uuid'] = control_iface['net_id'] + if dataplane_iface['dedicated'] == "no": + dataplane_iface['vlan'] = network['vlan'] + if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"): + dataplane_iface['mac_address'] = control_iface.get("mac_address") + if control_iface.get("vpci"): + dataplane_iface['vpci'] = control_iface.get("vpci") + iface_found=True + break + if not iface_found: + return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface.get("name") + + resources['host_id'] = host_id + resources['image_id'] = server['image_id'] + resources['flavor_id'] = server['flavor_id'] + resources['tenant_id'] = server['tenant_id'] + resources['ram'] = requirements['ram'] + resources['vcpus'] = requirements['vcpus'] + resources['status'] = 'CREATING' + + if 'description' in server: resources['description'] = server['description'] + if 'name' in server: resources['name'] = server['name'] + + resources['extended'] = {} #optional + resources['extended']['numas'] = [] + numa_dict['numa_id'] = numa_id + numa_dict['memory'] = requirements['numa']['memory'] + numa_dict['cores'] = [] + + for core in cpu_pinning: + numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired}) + for core in reserved_threads: + numa_dict['cores'].append({'id': core}) + resources['extended']['numas'].append(numa_dict) + if extended!=None and 'devices' in extended: #TODO allow extra devices without numa + resources['extended']['devices'] = extended['devices'] + + + print '===================================={' + print json.dumps(resources, indent=4) + print '====================================}' + + return 0, resources + diff --git a/httpserver.py 
b/httpserver.py new file mode 100644 index 0000000..2be5164 --- /dev/null +++ b/httpserver.py @@ -0,0 +1,2156 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +This is the thread for the http server North API. +Two thread will be launched, with normal and administrative permissions. 
+''' + +__author__="Alfonso Tierno" +__date__ ="$10-jul-2014 12:07:15$" + +import bottle +import yaml +import json +import threading +import datetime +from utils import RADclass +from jsonschema import validate as js_v, exceptions as js_e +import host_thread as ht +from vim_schema import host_new_schema, host_edit_schema, tenant_new_schema, \ + tenant_edit_schema, \ + flavor_new_schema, flavor_update_schema, \ + image_new_schema, image_update_schema, \ + server_new_schema, server_action_schema, network_new_schema, network_update_schema, \ + port_new_schema, port_update_schema + +global my +global url_base +global config_dic + +url_base="/openvim" + +HTTP_Bad_Request = 400 +HTTP_Unauthorized = 401 +HTTP_Not_Found = 404 +HTTP_Forbidden = 403 +HTTP_Method_Not_Allowed = 405 +HTTP_Not_Acceptable = 406 +HTTP_Request_Timeout = 408 +HTTP_Conflict = 409 +HTTP_Service_Unavailable = 503 +HTTP_Internal_Server_Error= 500 + + +def check_extended(extended, allow_net_attach=False): + '''Makes and extra checking of extended input that cannot be done using jsonschema + Attributes: + allow_net_attach: for allowing or not the uuid field at interfaces + that are allowed for instance, but not for flavors + Return: (<0, error_text) if error; (0,None) if not error ''' + if "numas" not in extended: return 0, None + id_s=[] + numaid=0 + for numa in extended["numas"]: + nb_formats = 0 + if "cores" in numa: + nb_formats += 1 + if "cores-id" in numa: + if len(numa["cores-id"]) != numa["cores"]: + return -HTTP_Bad_Request, "different number of cores-id (%d) than cores (%d) at numa %d" % (len(numa["cores-id"]), numa["cores"],numaid) + id_s.extend(numa["cores-id"]) + if "threads" in numa: + nb_formats += 1 + if "threads-id" in numa: + if len(numa["threads-id"]) != numa["threads"]: + return -HTTP_Bad_Request, "different number of threads-id (%d) than threads (%d) at numa %d" % (len(numa["threads-id"]), numa["threads"],numaid) + id_s.extend(numa["threads-id"]) + if "paired-threads" in numa: + 
nb_formats += 1 + if "paired-threads-id" in numa: + if len(numa["paired-threads-id"]) != numa["paired-threads"]: + return -HTTP_Bad_Request, "different number of paired-threads-id (%d) than paired-threads (%d) at numa %d" % (len(numa["paired-threads-id"]), numa["paired-threads"],numaid) + for pair in numa["paired-threads-id"]: + if len(pair) != 2: + return -HTTP_Bad_Request, "paired-threads-id must contain a list of two elements list at numa %d" % (numaid) + id_s.extend(pair) + if nb_formats > 1: + return -HTTP_Service_Unavailable, "only one of cores, threads, paired-threads are allowed in this version at numa %d" % numaid + #check interfaces + if "interfaces" in numa: + ifaceid=0 + names=[] + vpcis=[] + for interface in numa["interfaces"]: + if "uuid" in interface and not allow_net_attach: + return -HTTP_Bad_Request, "uuid field is not allowed at numa %d interface %s position %d" % (numaid, interface.get("name",""), ifaceid ) + if "mac_address" in interface and interface["dedicated"]=="yes": + return -HTTP_Bad_Request, "mac_address can not be set for dedicated (passthrough) at numa %d, interface %s position %d" % (numaid, interface.get("name",""), ifaceid ) + if "name" in interface: + if interface["name"] in names: + return -HTTP_Bad_Request, "name repeated at numa %d, interface %s position %d" % (numaid, interface.get("name",""), ifaceid ) + names.append(interface["name"]) + if "vpci" in interface: + if interface["vpci"] in vpcis: + return -HTTP_Bad_Request, "vpci %s repeated at numa %d, interface %s position %d" % (interface["vpci"], numaid, interface.get("name",""), ifaceid ) + vpcis.append(interface["vpci"]) + ifaceid+=1 + numaid+=1 + if numaid > 1: + return -HTTP_Service_Unavailable, "only one numa can be defined in this version " + for a in range(0,len(id_s)): + if a not in id_s: + return -HTTP_Bad_Request, "core/thread identifiers must start at 0 and gaps are not alloed. 
#
# dictionaries that change from HTTP API to database naming
#
http2db_host = {'id': 'uuid'}
http2db_tenant = {'id': 'uuid'}
http2db_flavor = {'id': 'uuid', 'imageRef': 'image_id'}
http2db_image = {'id': 'uuid', 'created': 'created_at', 'updated': 'modified_at', 'public': 'public'}
http2db_server = {'id': 'uuid', 'hostId': 'host_id', 'flavorRef': 'flavor_id', 'imageRef': 'image_id', 'created': 'created_at'}
http2db_network = {'id': 'uuid', 'provider:vlan': 'vlan', 'provider:physical': 'provider'}
http2db_port = {'id': 'uuid', 'network_id': 'net_id', 'mac_address': 'mac', 'device_owner': 'type',
                'device_id': 'instance_id', 'binding:switch_port': 'switch_port', 'binding:vlan': 'vlan',
                'bandwidth': 'Mbps'}


def remove_extra_items(data, schema):
    '''Recursively delete in place from 'data' the dict keys not declared in 'schema'.
    Return: None if nothing was removed; the removed key (or {key: nested}) if one;
    a list of them if several.
    '''
    deleted = []
    if type(data) is tuple or type(data) is list:
        for d in data:
            a = remove_extra_items(d, schema['items'])
            if a is not None:
                deleted.append(a)
    elif type(data) is dict:
        # iterate over a snapshot of the keys: deleting while iterating the
        # live view raises RuntimeError under python3
        for k in list(data.keys()):
            if 'properties' not in schema or k not in schema['properties'].keys():
                del data[k]
                deleted.append(k)
            else:
                a = remove_extra_items(data[k], schema['properties'][k])
                if a is not None:
                    deleted.append({k: a})
    if len(deleted) == 0:
        return None
    elif len(deleted) == 1:
        return deleted[0]
    else:
        return deleted


def delete_nulls(var):
    '''Recursively delete in place the dict entries whose value is None, and the
    nested containers that end up empty. Return True when 'var' itself becomes empty.
    '''
    if type(var) is dict:
        # snapshot the keys: we delete entries while walking the dict
        for k in list(var.keys()):
            if var[k] is None:
                del var[k]
            elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple:
                if delete_nulls(var[k]):
                    del var[k]
        if len(var) == 0:
            return True
    elif type(var) is list or type(var) is tuple:
        for k in var:
            if type(k) is dict:
                delete_nulls(k)
        if len(var) == 0:
            return True
    return False
class httpserver(threading.Thread):
    # One listener thread per (host, port); every instance registers itself in
    # config_dic['http_threads'] under its (de-duplicated) name.
    def __init__(self, db_conn, name="http", host='localhost', port=8080, admin=False, config_=None):
        '''
        Creates a new thread to attend the http connections
        Attributes:
            db_conn: database connection
            name: name of this thread
            host: ip or name where to listen
            port: port where to listen
            admin: if this has privileges of administrator or not
            config_: unless the first thread must be provided. It is a global dictionary where to allocate the self variable
        '''
        global url_base
        global config_dic

        #initialization
        if config_ is not None:
            config_dic = config_
        if 'http_threads' not in config_dic:
            config_dic['http_threads'] = {}
        threading.Thread.__init__(self)
        self.host = host
        self.port = port
        self.db = db_conn
        self.admin = admin
        if name in config_dic:
            # typo fixed: "Onether" -> "Another"
            print("httpserver Warning!!! Another thread with the same name %s" % name)
            n = 0
            while name + str(n) in config_dic:
                n += 1
            name += str(n)
        self.name = name
        self.url_preffix = 'http://' + self.host + ':' + str(self.port) + url_base
        config_dic['http_threads'][name] = self

        #Ensure that when the main program exits the thread will also exit
        self.daemon = True  # setDaemon(True) is the deprecated duplicate of this

    def run(self):
        # blocks serving requests until the process exits
        bottle.run(host=self.host, port=self.port, debug=True)  # quiet=True

    def gethost(self, host_id):
        '''Fetch one host row from the database and return it formatted;
        aborts the bottle request on error or when not found.'''
        result, content = self.db.get_host(host_id)
        if result < 0:
            print("httpserver.gethost error %d %s" % (result, content))
            bottle.abort(-result, content)
        elif result == 0:
            print("httpserver.gethost host '%s' not found" % host_id)
            bottle.abort(HTTP_Not_Found, content)
        else:
            data = {'host': content}
            convert_boolean(content, ('admin_state_up',))
            change_keys_http2db(content, http2db_host, reverse=True)
            print(data['host'])
            return format_out(data)


@bottle.route(url_base + '/', method='GET')
def http_get():
    # NOTE(review): route wildcards in this file were lost by the HTML dump
    # and have been reconstructed from the handler signatures
    return 'works'  # TODO: put links or redirection to /openvim???
def change_keys_http2db(data, http_db, reverse=False):
    '''Change keys of dictionary data according to the key_dict values
    This allow change from http interface names to database names.
    When reverse is True, the change is otherwise
    Attributes:
        data: can be a dictionary or a list
        http_db: is a dictionary with http names as keys and database names as value
        reverse: by default change is done from http API to database. If True change is done otherwise
    Return: None, but data is modified'''
    if type(data) is tuple or type(data) is list:
        for d in data:
            change_keys_http2db(d, http_db, reverse)
    elif type(data) is dict or type(data) is bottle.FormsDict:
        if reverse:
            for k, v in http_db.items():
                if v in data:
                    data[k] = data.pop(v)
        else:
            for k, v in http_db.items():
                if k in data:
                    data[v] = data.pop(k)


def format_out(data):
    '''return string of dictionary data according to requested json, yaml, xml. By default json'''
    # default '' avoids a TypeError ('in None') when the client sends no Accept header
    if 'application/yaml' in bottle.request.headers.get('Accept', ''):
        bottle.response.content_type = 'application/yaml'
        return yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False,
                              encoding='utf-8', allow_unicode=True)  # , canonical=True, default_style='"'
    else:  # by default json
        bottle.response.content_type = 'application/json'
        return json.dumps(data, indent=4) + "\n"


def format_in(schema):
    '''Parse the bottle request body as json or yaml, validate it against 'schema'
    and return the python object; aborts the bottle request on malformed input.'''
    try:
        error_text = "Invalid header format "
        format_type = bottle.request.headers.get('Content-Type', 'application/json')
        if 'application/json' in format_type:
            error_text = "Invalid json format "
            #Use the json decoder instead of bottle decoder because it informs about the location of error formats with a ValueError exception
            client_data = json.load(bottle.request.body)
        elif 'application/yaml' in format_type:
            error_text = "Invalid yaml format "
            # SECURITY NOTE(review): yaml.load on client-supplied input can build
            # arbitrary python objects; yaml.safe_load should be used instead
            client_data = yaml.load(bottle.request.body)
        elif format_type == 'application/xml':
            bottle.abort(501, "Content-Type: application/xml not supported yet.")
        else:
            print("HTTP HEADERS: " + str(bottle.request.headers.items()))
            bottle.abort(HTTP_Not_Acceptable, 'Content-Type ' + str(format_type) + ' not supported.')
            return
        #check needed_items
        error_text = "Invalid content "
        js_v(client_data, schema)
        return client_data
    except (ValueError, yaml.YAMLError) as exc:
        error_text += str(exc)
        print(error_text)
        bottle.abort(HTTP_Bad_Request, error_text)
    except js_e.ValidationError as exc:
        print("HTTP validate_in error, jsonschema exception %s at %s" % (exc.message, exc.path))
        print(" CONTENT: " + str(bottle.request.body.readlines()))
        error_pos = ""
        if len(exc.path) > 0:
            error_pos = " at '" + ":".join(map(str, exc.path)) + "'"
        bottle.abort(HTTP_Bad_Request, error_text + error_pos + ": " + exc.message)
def filter_query_string(qs, http2db, allowed):
    '''Process query string (qs) checking that contains only valid tokens for avoiding SQL injection
    Attributes:
        'qs': bottle.FormsDict variable to be processed. None or empty is considered valid
        'http2db': dictionary with change from http API naming (dictionary key) to database naming(dictionary value)
        'allowed': list of allowed string tokens (API http naming). All the keys of 'qs' must be one of 'allowed'
    Return: A tuple with the (select,where,limit) to be use in a database query. All of then transformed to the database naming
        select: list of items to retrieve, filtered by query string 'field=token'. If no 'field' is present, allowed list is returned
        where: dictionary with key, value, taken from the query string token=value. Empty if nothing is provided
        limit: limit dictated by user with the query string 'limit'. 100 by default
    abort if not permitted, using bottle.abort
    '''
    where = {}
    limit = 100
    select = []
    if type(qs) is not bottle.FormsDict:
        print('!!!!!!!!!!!!!!invalid query string not a dictionary')
        #bottle.abort(HTTP_Internal_Server_Error, "call programmer")
    else:
        for k in qs:
            if k == 'field':
                select += qs.getall(k)
                for v in select:
                    if v not in allowed:
                        bottle.abort(HTTP_Bad_Request, "Invalid query string at 'field=" + v + "'")
            elif k == 'limit':
                try:
                    limit = int(qs[k])
                except Exception:  # int() failure: malformed limit value
                    bottle.abort(HTTP_Bad_Request, "Invalid query string at 'limit=" + qs[k] + "'")
            else:
                if k not in allowed:
                    bottle.abort(HTTP_Bad_Request, "Invalid query string at '" + k + "=" + qs[k] + "'")
                # the literal token "null" selects SQL NULL
                if qs[k] != "null":
                    where[k] = qs[k]
                else:
                    where[k] = None
    if len(select) == 0:
        select += allowed
    #change from http api to database naming
    for i in range(len(select)):
        k = select[i]
        if k in http2db:
            select[i] = http2db[k]
    change_keys_http2db(where, http2db)
    return select, where, limit
def convert_bandwidth(data, reverse=False):
    '''Check the field bandwidth recursively and when found, it removes units and convert to number
    It assumes that bandwidth is well formed
    Attributes:
        'data': dictionary or list to be checked. None or empty is considered valid
        'reverse': by default convert from str ("N Gbps"/"N Mbps"/"N kbps") to int (Mbps);
            if True it converts from number of Mbps to a string with units
    Return:
        None (data is modified in place)
    '''
    if type(data) is dict:
        for k in data.keys():
            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
                convert_bandwidth(data[k], reverse)
        if "bandwidth" in data:
            try:
                value = str(data["bandwidth"])
                if not reverse:
                    pos = value.find("bps")
                    if pos > 0:
                        if value[pos-1] == "G":
                            data["bandwidth"] = int(data["bandwidth"][:pos-1]) * 1000
                        elif value[pos-1] == "k":
                            # '//' keeps the python2 integer-division semantics under python3
                            data["bandwidth"] = int(data["bandwidth"][:pos-1]) // 1000
                        else:
                            data["bandwidth"] = int(data["bandwidth"][:pos-1])
                else:
                    value = int(data["bandwidth"])
                    if value % 1000 == 0:
                        data["bandwidth"] = str(value // 1000) + " Gbps"
                    else:
                        data["bandwidth"] = str(value) + " Mbps"
            except Exception:
                print("convert_bandwidth exception for type %s data %s"
                      % (type(data["bandwidth"]), data["bandwidth"]))
        return
    if type(data) is tuple or type(data) is list:
        for k in data:
            if type(k) is dict or type(k) is tuple or type(k) is list:
                convert_bandwidth(k, reverse)


def convert_boolean(data, items):
    '''Check recursively the content of data, and if there is a key contained in items, convert value from string to boolean
    Attributes:
        'data': dictionary or list to be checked. None or empty is considered valid
        'items': tuple of keys to convert
    Return:
        None (data is modified in place)
    '''
    if type(data) is dict:
        for k in data.keys():
            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
                convert_boolean(data[k], items)
            if k in items:
                if type(data[k]) is str:
                    if data[k] == "false":
                        data[k] = False
                    elif data[k] == "true":
                        data[k] = True
    if type(data) is tuple or type(data) is list:
        for k in data:
            if type(k) is dict or type(k) is tuple or type(k) is list:
                convert_boolean(k, items)
None or empty is consideted valid + 'items': tuple of keys to convert + Return: + None + ''' + if type(data) is dict: + for k in data.keys(): + if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list: + convert_boolean(data[k], items) + if k in items: + if type(data[k]) is str: + if data[k]=="false": data[k]=False + elif data[k]=="true": data[k]=True + if type(data) is tuple or type(data) is list: + for k in data: + if type(k) is dict or type(k) is tuple or type(k) is list: + convert_boolean(k, items) + +def convert_datetime2str(var): + '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%i:%s' + It enters recursively in the dict var finding this kind of variables + ''' + if type(var) is dict: + for k,v in var.items(): + if type(v) is datetime.datetime: + var[k]= v.strftime('%Y-%m-%dT%H:%M:%S') + elif type(v) is dict or type(v) is list or type(v) is tuple: + convert_datetime2str(v) + if len(var) == 0: return True + elif type(var) is list or type(var) is tuple: + for v in var: + convert_datetime2str(v) + +def check_valid_tenant(my, tenant_id): + if tenant_id=='any': + if not my.admin: + return HTTP_Unauthorized, "Needed admin privileges" + else: + result, _ = my.db.get_table(FROM='tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id}) + if result<=0: + return HTTP_Not_Found, "tenant '%s' not found" % tenant_id + return 0, None + +def check_valid_uuid(uuid): + id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"} + try: + js_v(uuid, id_schema) + return True + except js_e.ValidationError: + return False + +@bottle.error(400) +@bottle.error(401) +@bottle.error(404) +@bottle.error(403) +@bottle.error(405) +@bottle.error(406) +@bottle.error(408) +@bottle.error(409) +@bottle.error(503) +@bottle.error(500) +def error400(error): + e={"error":{"code":error.status_code, "type":error.status, "description":error.body}} + return format_out(e) + +@bottle.hook('after_request') +def enable_cors(): 
@bottle.error(400)
@bottle.error(401)
@bottle.error(404)
@bottle.error(403)
@bottle.error(405)
@bottle.error(406)
@bottle.error(408)
@bottle.error(409)
@bottle.error(503)
@bottle.error(500)
def error400(error):
    '''Common error handler: wrap every bottle HTTP error into the API json/yaml envelope.'''
    e = {"error": {"code": error.status_code, "type": error.status, "description": error.body}}
    return format_out(e)


@bottle.hook('after_request')
def enable_cors():
    #TODO: Alf: Is it needed??
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'

#
# HOSTS
#

@bottle.route(url_base + '/hosts', method='GET')
def http_get_hosts():
    '''Get all hosts, filtered/projected by the query string.'''
    select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_host,
                                                  ('id', 'name', 'description', 'status', 'admin_state_up'))
    myself = config_dic['http_threads'][threading.current_thread().name]
    result, content = myself.db.get_table(FROM='hosts', SELECT=select_, WHERE=where_, LIMIT=limit_)
    if result < 0:
        print("http_get_hosts Error %s" % content)
        bottle.abort(-result, content)
    else:
        convert_boolean(content, ('admin_state_up',))
        change_keys_http2db(content, http2db_host, reverse=True)
        for row in content:
            row['links'] = ({'href': myself.url_preffix + '/hosts/' + str(row['id']), 'rel': 'bookmark'},)
        data = {'hosts': content}
        return format_out(data)


# NOTE(review): the '<host_id>' (and similar) route wildcards were stripped by
# the HTML dump of this file; reconstructed from the handler signatures
@bottle.route(url_base + '/hosts/<host_id>', method='GET')
def http_get_host_id(host_id):
    '''Get one host; the formatting is done by the thread's gethost().'''
    my = config_dic['http_threads'][threading.current_thread().name]
    return my.gethost(host_id)
@bottle.route(url_base + '/hosts', method='POST')
def http_post_hosts():
    '''insert a host into the database. All resources are got and inserted'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #check permissions
    if not my.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")

    #parse input data
    http_content = format_in(host_new_schema)
    r = remove_extra_items(http_content, host_new_schema)
    if r is not None:
        print("http_post_hosts: Warning: remove extra items %s" % r)
    change_keys_http2db(http_content['host'], http2db_host)

    host = http_content['host']
    warning_text = ""
    if 'host-data' in http_content:
        host.update(http_content['host-data'])
        ip_name = http_content['host-data']['ip_name']
        user = http_content['host-data']['user']
        password = http_content['host-data'].get('password', None)
    else:
        ip_name = host['ip_name']
        user = host['user']
        password = host.get('password', None)

    #fill rad info (hardware resources discovered over ssh)
    rad = RADclass.RADclass()
    (return_status, code) = rad.obtain_RAD(user, password, ip_name)

    if not return_status:
        print('http_post_hosts ERROR obtaining RAD %s' % code)
        bottle.abort(HTTP_Bad_Request, code)
        return
    warning_text = code
    # NOTE(review): rad.to_text() is generated locally, but yaml.safe_load would
    # still be the safer call here
    rad_structure = yaml.load(rad.to_text())
    print('rad_structure\n---------------------')
    print(json.dumps(rad_structure, indent=4))
    print('---------------------')
    WHERE_ = {"family": rad_structure['processor']['family'],
              'manufacturer': rad_structure['processor']['manufacturer'],
              'version': rad_structure['processor']['version']}
    result, content = my.db.get_table(FROM='host_ranking',
                                      SELECT=('ranking',),
                                      WHERE=WHERE_)
    if result > 0:
        host['ranking'] = content[0]['ranking']
    else:
        warning_text += "Host " + str(WHERE_) + " not found in ranking table. Assuming lowest value 100\n"
        host['ranking'] = 100  #TODO: as not used in this version, set the lowest value

    features = rad_structure['processor'].get('features', ())
    host['features'] = ",".join(features)
    host['numas'] = []

    # .values()/.items() instead of itervalues()/iteritems(): works on python2 and 3
    for node in (rad_structure['resource topology']['nodes'] or {}).values():
        interfaces = []
        cores = []
        eligible_cores = []
        count = 0
        for core in node['cpu']['eligible_cores']:
            eligible_cores.extend(core)
        for core in node['cpu']['cores']:
            for thread_id in core:
                c = {'core_id': count, 'thread_id': thread_id}
                if thread_id not in eligible_cores:
                    c['status'] = 'noteligible'
                cores.append(c)
            count = count + 1

        if 'nics' in node:
            for port_k, port_v in node['nics']['nic 0']['ports'].items():
                if port_v['virtual']:
                    continue
                else:
                    # collect the VFs (virtual functions) belonging to this physical port
                    sriovs = []
                    for port_k2, port_v2 in node['nics']['nic 0']['ports'].items():
                        if port_v2['virtual'] and port_v2['PF_pci_id'] == port_k:
                            sriovs.append({'pci': port_k2, 'mac': port_v2['mac'],
                                           'source_name': port_v2['source_name']})
                    # BUGFIX: new_sriovs was only assigned inside the 'if' below, so a
                    # PF without VFs reused the previous port's list (or raised
                    # NameError on the first port)
                    new_sriovs = []
                    if len(sriovs) > 0:
                        #sort sriov according to pci and rename them to the vf number
                        new_sriovs = sorted(sriovs, key=lambda k: k['pci'])
                        index = 0
                        for sriov in new_sriovs:
                            sriov['source_name'] = index
                            index += 1
                    interfaces.append({'pci': str(port_k), 'Mbps': port_v['speed'] // 1000000,
                                       'sriovs': new_sriovs, 'mac': port_v['mac'],
                                       'source_name': port_v['source_name']})
        #@TODO the memory reported by the RAD is incorrect, at least for IVY1, NFV100
        memory = node['memory']['node_size'] // (1024 * 1024 * 1024)
        host['numas'].append({'numa_socket': node['id'], 'hugepages': node['memory']['hugepage_nr'],
                              'memory': memory, 'interfaces': interfaces, 'cores': cores})
    print(json.dumps(host, indent=4))

    #insert in data base
    result, content = my.db.new_host(host)
    if result >= 0:
        if content['admin_state_up']:
            #create thread
            host_test_mode = True if config_dic['mode'] == 'test' or config_dic['mode'] == "OF only" else False
            host_develop_mode = True if config_dic['mode'] == 'development' else False
            host_develop_bridge_iface = config_dic.get('development_bridge', None)
            thread = ht.host_thread(name=host.get('name', ip_name), user=user, host=ip_name,
                                    db=config_dic['db'], db_lock=config_dic['db_lock'],
                                    test=host_test_mode, image_path=config_dic['image_path'],
                                    version=config_dic['version'], host_id=content['uuid'],
                                    develop_mode=host_develop_mode,
                                    develop_bridge_iface=host_develop_bridge_iface)
            thread.start()
            config_dic['host_threads'][content['uuid']] = thread

        #return host data
        change_keys_http2db(content, http2db_host, reverse=True)
        if len(warning_text) > 0:
            content["warning"] = warning_text
        data = {'host': content}
        return format_out(data)
    else:
        bottle.abort(HTTP_Bad_Request, content)
        return


@bottle.route(url_base + '/hosts/<host_id>', method='PUT')
def http_put_host_id(host_id):
    '''modify a host into the database. All resources are got and inserted'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #check permissions
    if not my.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")

    #parse input data
    http_content = format_in(host_edit_schema)
    r = remove_extra_items(http_content, host_edit_schema)
    if r is not None:
        # log message fixed: it said "http_post_host_id"
        print("http_put_host_id: Warning: remove extra items %s" % r)
    change_keys_http2db(http_content['host'], http2db_host)

    #insert in data base
    result, content = my.db.edit_host(host_id, http_content['host'])
    if result >= 0:
        convert_boolean(content, ('admin_state_up',))
        change_keys_http2db(content, http2db_host, reverse=True)
        data = {'host': content}

        #reload thread
        config_dic['host_threads'][host_id].name = content.get('name', content['ip_name'])
        config_dic['host_threads'][host_id].user = content['user']
        config_dic['host_threads'][host_id].host = content['ip_name']
        config_dic['host_threads'][host_id].insert_task("reload")

        return format_out(data)
    else:
        bottle.abort(HTTP_Bad_Request, content)
        return
@bottle.route(url_base + '/hosts/<host_id>', method='DELETE')
def http_delete_host_id(host_id):
    '''delete a host from the database; asks its thread to exit if it is running'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #check permissions
    if not my.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
    result, content = my.db.delete_row('hosts', host_id)
    if result == 0:
        bottle.abort(HTTP_Not_Found, content)
    elif result > 0:
        #terminate thread
        if host_id in config_dic['host_threads']:
            config_dic['host_threads'][host_id].insert_task("exit")
        #return data
        data = {'result': content}
        return format_out(data)
    else:
        print("http_delete_host_id error %d %s" % (result, content))
        bottle.abort(-result, content)
        return

#
# TENANTS
#

@bottle.route(url_base + '/tenants', method='GET')
def http_get_tenants():
    '''get all tenants, filtered/projected by the query string'''
    my = config_dic['http_threads'][threading.current_thread().name]
    select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_tenant,
                                                  ('id', 'name', 'description', 'enabled'))
    result, content = my.db.get_table(FROM='tenants', SELECT=select_, WHERE=where_, LIMIT=limit_)
    if result < 0:
        print("http_get_tenants Error %s" % content)
        bottle.abort(-result, content)
    else:
        change_keys_http2db(content, http2db_tenant, reverse=True)
        convert_boolean(content, ('enabled',))
        data = {'tenants': content}
        #data['tenants_links'] = dict([('tenant', row['id']) for row in content])
        return format_out(data)


@bottle.route(url_base + '/tenants/<tenant_id>', method='GET')
def http_get_tenant_id(tenant_id):
    '''get one tenant by uuid'''
    my = config_dic['http_threads'][threading.current_thread().name]
    result, content = my.db.get_table(FROM='tenants', SELECT=('uuid', 'name', 'description', 'enabled'),
                                      WHERE={'uuid': tenant_id})
    if result < 0:
        print("http_get_tenant_id error %d %s" % (result, content))
        bottle.abort(-result, content)
    elif result == 0:
        print("http_get_tenant_id tenant '%s' not found" % tenant_id)
        bottle.abort(HTTP_Not_Found, "tenant %s not found" % tenant_id)
    else:
        change_keys_http2db(content, http2db_tenant, reverse=True)
        convert_boolean(content, ('enabled',))
        data = {'tenant': content[0]}
        return format_out(data)


@bottle.route(url_base + '/tenants', method='POST')
def http_post_tenants():
    '''insert a tenant into the database.'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #parse input data
    http_content = format_in(tenant_new_schema)
    r = remove_extra_items(http_content, tenant_new_schema)
    if r is not None:
        print("http_post_tenants: Warning: remove extra items %s" % r)
    change_keys_http2db(http_content['tenant'], http2db_tenant)

    #insert in data base
    result, content = my.db.new_tenant(http_content['tenant'])

    if result >= 0:
        return http_get_tenant_id(content)
    else:
        bottle.abort(-result, content)
        return


@bottle.route(url_base + '/tenants/<tenant_id>', method='PUT')
def http_put_tenant_id(tenant_id):
    '''update a tenant into the database.'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #parse input data
    http_content = format_in(tenant_edit_schema)
    r = remove_extra_items(http_content, tenant_edit_schema)
    if r is not None:
        print("http_put_tenant_id: Warning: remove extra items %s" % r)
    change_keys_http2db(http_content['tenant'], http2db_tenant)

    #insert in data base
    result, content = my.db.update_rows('tenants', http_content['tenant'], WHERE={'uuid': tenant_id}, log=True)
    if result >= 0:
        return http_get_tenant_id(tenant_id)
    else:
        bottle.abort(-result, content)
        return
@bottle.route(url_base + '/tenants/<tenant_id>', method='DELETE')
def http_delete_tenant_id(tenant_id):
    '''delete a tenant; also deletes the flavors/images that were attached to it'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #get the attached flavors/images BEFORE deleting the tenant row
    r, tenants_flavors = my.db.get_table(FROM='tenants_flavors', SELECT=('flavor_id', 'tenant_id'),
                                         WHERE={'tenant_id': tenant_id})
    if r <= 0:
        tenants_flavors = ()
    r, tenants_images = my.db.get_table(FROM='tenants_images', SELECT=('image_id', 'tenant_id'),
                                        WHERE={'tenant_id': tenant_id})
    if r <= 0:
        tenants_images = ()
    result, content = my.db.delete_row('tenants', tenant_id)
    if result == 0:
        bottle.abort(HTTP_Not_Found, content)
    elif result > 0:
        # (leftover debug print removed)
        for flavor in tenants_flavors:
            my.db.delete_row_by_key("flavors", "uuid", flavor['flavor_id'])
        for image in tenants_images:
            my.db.delete_row_by_key("images", "uuid", image['image_id'])
        data = {'result': content}
        return format_out(data)
    else:
        print("http_delete_tenant_id error %d %s" % (result, content))
        bottle.abort(-result, content)
        return

#
# FLAVORS
#

@bottle.route(url_base + '/<tenant_id>/flavors', method='GET')
def http_get_flavors(tenant_id):
    '''get all the flavors of a tenant ('any' selects every tenant, admin only)'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #check valid tenant_id
    result, content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #obtain data
    select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_flavor,
                                                  ('id', 'name', 'description', 'public'))
    if tenant_id == 'any':
        from_ = 'flavors'
    else:
        from_ = 'tenants_flavors inner join flavors on tenants_flavors.flavor_id=flavors.uuid'
        where_['tenant_id'] = tenant_id
    result, content = my.db.get_table(FROM=from_, SELECT=select_, WHERE=where_, LIMIT=limit_)
    if result < 0:
        print("http_get_flavors Error %s" % content)
        bottle.abort(-result, content)
    else:
        change_keys_http2db(content, http2db_flavor, reverse=True)
        for row in content:
            row['links'] = [{'href': "/".join((my.url_preffix, tenant_id, 'flavors', str(row['id']))),
                             'rel': 'bookmark'}]
        data = {'flavors': content}
        return format_out(data)


@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='GET')
def http_get_flavor_id(tenant_id, flavor_id):
    '''get one flavor, expanding its json 'extended' column'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #check valid tenant_id
    result, content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #obtain data
    select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_flavor,
                                                  ('id', 'name', 'description', 'ram', 'vcpus', 'extended',
                                                   'disk', 'public'))
    if tenant_id == 'any':
        from_ = 'flavors'
    else:
        from_ = 'tenants_flavors as tf inner join flavors as f on tf.flavor_id=f.uuid'
        where_['tenant_id'] = tenant_id
    where_['uuid'] = flavor_id
    result, content = my.db.get_table(SELECT=select_, FROM=from_, WHERE=where_, LIMIT=limit_)

    if result < 0:
        print("http_get_flavor_id error %d %s" % (result, content))
        bottle.abort(-result, content)
    elif result == 0:
        print("http_get_flavors_id flavor '%s' not found" % str(flavor_id))
        bottle.abort(HTTP_Not_Found, 'flavor %s not found' % flavor_id)
    else:
        change_keys_http2db(content, http2db_flavor, reverse=True)
        if 'extended' in content[0] and content[0]['extended'] is not None:
            extended = json.loads(content[0]['extended'])
            if 'devices' in extended:
                change_keys_http2db(extended['devices'], http2db_flavor, reverse=True)
            content[0]['extended'] = extended
        convert_bandwidth(content[0], reverse=True)
        content[0]['links'] = [{'href': "/".join((my.url_preffix, tenant_id, 'flavors',
                                                  str(content[0]['id']))), 'rel': 'bookmark'}]
        data = {'flavor': content[0]}
        return format_out(data)
@bottle.route(url_base + '/<tenant_id>/flavors', method='POST')
def http_post_flavors(tenant_id):
    '''insert a flavor into the database, and attach to tenant.'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #check valid tenant_id
    result, content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    http_content = format_in(flavor_new_schema)
    r = remove_extra_items(http_content, flavor_new_schema)
    if r is not None:
        print("http_post_flavors: Warning: remove extra items %s" % r)
    change_keys_http2db(http_content['flavor'], http2db_flavor)
    extended_dict = http_content['flavor'].pop('extended', None)
    if extended_dict is not None:
        result, content = check_extended(extended_dict)
        if result < 0:
            print("http_post_flavors wrong input extended error %d %s" % (result, content))
            bottle.abort(-result, content)
            return
        convert_bandwidth(extended_dict)
        if 'devices' in extended_dict:
            change_keys_http2db(extended_dict['devices'], http2db_flavor)
        http_content['flavor']['extended'] = json.dumps(extended_dict)
    #insert in data base
    result, content = my.db.new_flavor(http_content['flavor'], tenant_id)
    if result >= 0:
        return http_get_flavor_id(tenant_id, content)
    else:
        # 'psot' typo fixed in the log message
        print("http_post_flavors error %d %s" % (result, content))
        bottle.abort(-result, content)
        return


@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='DELETE')
def http_delete_flavor_id(tenant_id, flavor_id):
    '''Deletes the flavor_id of a tenant. IT removes from tenants_flavors table.'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #check valid tenant_id
    result, content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
        return
    result, content = my.db.delete_image_flavor('flavor', flavor_id, tenant_id)
    if result == 0:
        bottle.abort(HTTP_Not_Found, content)
    elif result > 0:
        data = {'result': content}
        return format_out(data)
    else:
        print("http_delete_flavor_id error %d %s" % (result, content))
        bottle.abort(-result, content)
        return


@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>/<action>', method='POST')
def http_attach_detach_flavors(tenant_id, flavor_id, action):
    '''attach/detach an existing flavor in this tenant. That is insert/remove at tenants_flavors table.'''
    #TODO alf: not tested at all!!!
    my = config_dic['http_threads'][threading.current_thread().name]
    #check valid tenant_id
    result, content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    if tenant_id == 'any':
        bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command")
    #check valid action
    if action != 'attach' and action != 'detach':
        bottle.abort(HTTP_Method_Not_Allowed, "actions can be attach or detach")
        return

    #Ensure that flavor exist
    from_ = 'tenants_flavors as tf right join flavors as f on tf.flavor_id=f.uuid'
    where_ = {'uuid': flavor_id}
    # NOTE(review): a flavor attached to several tenants returns several rows and
    # content[0] is arbitrary -- confirm intended semantics
    result, content = my.db.get_table(SELECT=('public', 'tenant_id'), FROM=from_, WHERE=where_)
    if result == 0:
        if action == 'attach':
            text_error = "Flavor '%s' not found" % flavor_id
        else:
            text_error = "Flavor '%s' not found for tenant '%s'" % (flavor_id, tenant_id)
        bottle.abort(HTTP_Not_Found, text_error)
        return
    elif result > 0:
        flavor = content[0]
        if action == 'attach':
            if flavor['tenant_id'] != None:
                bottle.abort(HTTP_Conflict, "Flavor '%s' already attached to tenant '%s'" % (flavor_id, tenant_id))
            if flavor['public'] == 'no' and not my.admin:
                #allow only attaching public flavors
                bottle.abort(HTTP_Unauthorized, "Needed admin rights to attach a private flavor")
                return
            #insert in data base
            result, content = my.db.new_row('tenants_flavors', {'flavor_id': flavor_id, 'tenant_id': tenant_id})
            if result >= 0:
                return http_get_flavor_id(tenant_id, flavor_id)
        else:  #detach
            if flavor['tenant_id'] == None:
                bottle.abort(HTTP_Not_Found, "Flavor '%s' not attached to tenant '%s'" % (flavor_id, tenant_id))
            result, content = my.db.delete_row_by_dict(FROM='tenants_flavors',
                                                       WHERE={'flavor_id': flavor_id, 'tenant_id': tenant_id})
            if result >= 0:
                if flavor['public'] == 'no':
                    #try to delete the flavor completely to avoid orphan flavors, IGNORE error
                    my.db.delete_row_by_dict(FROM='flavors', WHERE={'uuid': flavor_id})
                data = {'result': "flavor detached"}
                return format_out(data)

    #if get here is because an error
    print("http_attach_detach_flavors error %d %s" % (result, content))
    bottle.abort(-result, content)
    return


@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='PUT')
def http_put_flavor_id(tenant_id, flavor_id):
    '''update a flavor_id into the database.'''
    my = config_dic['http_threads'][threading.current_thread().name]
    #check valid tenant_id
    result, content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #parse input data
    http_content = format_in(flavor_update_schema)
    r = remove_extra_items(http_content, flavor_update_schema)
    if r is not None:
        print("http_put_flavor_id: Warning: remove extra items %s" % r)
    change_keys_http2db(http_content['flavor'], http2db_flavor)
    extended_dict = http_content['flavor'].pop('extended', None)
    if extended_dict is not None:
        result, content = check_extended(extended_dict)
        if result < 0:
            print("http_put_flavor_id wrong input extended error %d %s" % (result, content))
            bottle.abort(-result, content)
            return
        convert_bandwidth(extended_dict)
        if 'devices' in extended_dict:
            change_keys_http2db(extended_dict['devices'], http2db_flavor)
        http_content['flavor']['extended'] = json.dumps(extended_dict)
    #Ensure that flavor exist
    where_ = {'uuid': flavor_id}
    if tenant_id == 'any':
        from_ = 'flavors'
    else:
        from_ = 'tenants_flavors as ti inner join flavors as i on ti.flavor_id=i.uuid'
        where_['tenant_id'] = tenant_id
    result, content = my.db.get_table(SELECT=('public',), FROM=from_, WHERE=where_)
    if result == 0:
        text_error = "Flavor '%s' not found" % flavor_id
        if tenant_id != 'any':
            # BUGFIX: the message interpolated flavor_id here instead of tenant_id
            text_error += " for tenant '%s'" % tenant_id
        bottle.abort(HTTP_Not_Found, text_error)
        return
    elif result > 0:
        if content[0]['public'] == 'yes' and not my.admin:
            #allow only modifications over private flavors
            bottle.abort(HTTP_Unauthorized, "Needed admin rights to edit a public flavor")
            return
        #insert in data base
        result, content = my.db.update_rows('flavors', http_content['flavor'], {'uuid': flavor_id})

    if result < 0:
        print("http_put_flavor_id error %d %s" % (result, content))
        bottle.abort(-result, content)
        return
    else:
        return http_get_flavor_id(tenant_id, flavor_id)
my.db.update_rows('flavors', http_content['flavor'], {'uuid': flavor_id}) + + if result < 0: + print "http_put_flavor_id error %d %s" % (result, content) + bottle.abort(-result, content) + return + else: + return http_get_flavor_id(tenant_id, flavor_id) + + + +# +# IMAGES +# + +@bottle.route(url_base + '//images', method='GET') +def http_get_images(tenant_id): + my = config_dic['http_threads'][ threading.current_thread().name ] + #check valid tenant_id + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + bottle.abort(result, content) + #obtain data + select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_image, + ('id','name','description','path','public') ) + if tenant_id=='any': + from_ ='images' + else: + from_ ='tenants_images inner join images on tenants_images.image_id=images.uuid' + where_['tenant_id'] = tenant_id + result, content = my.db.get_table(SELECT=select_, FROM=from_, WHERE=where_, LIMIT=limit_) + if result < 0: + print "http_get_images Error", content + bottle.abort(-result, content) + else: + change_keys_http2db(content, http2db_image, reverse=True) + #for row in content: row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'images', str(row['id']) ) ), 'rel':'bookmark' } ] + data={'images' : content} + return format_out(data) + +@bottle.route(url_base + '//images/', method='GET') +def http_get_image_id(tenant_id, image_id): + my = config_dic['http_threads'][ threading.current_thread().name ] + #check valid tenant_id + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + bottle.abort(result, content) + #obtain data + select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_image, + ('id','name','description','progress', 'status','path', 'created', 'updated','public') ) + if tenant_id=='any': + from_ ='images' + else: + from_ ='tenants_images as ti inner join images as i on ti.image_id=i.uuid' + where_['tenant_id'] = tenant_id + where_['uuid'] = image_id + result, 
content = my.db.get_table(SELECT=select_, FROM=from_, WHERE=where_, LIMIT=limit_) + + if result < 0: + print "http_get_images error %d %s" % (result, content) + bottle.abort(-result, content) + elif result==0: + print "http_get_images image '%s' not found" % str(image_id) + bottle.abort(HTTP_Not_Found, 'image %s not found' % image_id) + else: + convert_datetime2str(content) + change_keys_http2db(content, http2db_image, reverse=True) + if 'metadata' in content[0] and content[0]['metadata'] is not None: + metadata = json.loads(content[0]['metadata']) + content[0]['metadata']=metadata + content[0]['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'images', str(content[0]['id']) ) ), 'rel':'bookmark' } ] + data={'image' : content[0]} + #data['tenants_links'] = dict([('tenant', row['id']) for row in content]) + return format_out(data) + +@bottle.route(url_base + '//images', method='POST') +def http_post_images(tenant_id): + '''insert a image into the database, and attach to tenant.''' + my = config_dic['http_threads'][ threading.current_thread().name ] + #check valid tenant_id + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + bottle.abort(result, content) + http_content = format_in(image_new_schema) + r = remove_extra_items(http_content, image_new_schema) + if r is not None: print "http_post_images: Warning: remove extra items ", r + change_keys_http2db(http_content['image'], http2db_image) + metadata_dict = http_content['image'].pop('metadata', None) + if metadata_dict is not None: + http_content['image']['metadata'] = json.dumps(metadata_dict) + #insert in data base + result, content = my.db.new_image(http_content['image'], tenant_id) + if result >= 0: + return http_get_image_id(tenant_id, content) + else: + print "http_post_images error %d %s" % (result, content) + bottle.abort(-result, content) + return + +@bottle.route(url_base + '//images/', method='DELETE') +def http_delete_image_id(tenant_id, image_id): + '''Deletes the image_id of 
a tenant. IT removes from tenants_images table.''' + my = config_dic['http_threads'][ threading.current_thread().name ] + #check valid tenant_id + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + bottle.abort(result, content) + result, content = my.db.delete_image_flavor('image', image_id, tenant_id) + if result == 0: + bottle.abort(HTTP_Not_Found, content) + elif result >0: + data={'result' : content} + return format_out(data) + else: + print "http_delete_image_id error",result, content + bottle.abort(-result, content) + return + +@bottle.route(url_base + '//images//', method='POST') +def http_attach_detach_images(tenant_id, image_id, action): + '''attach/detach an existing image in this tenant. That is insert/remove at tenants_images table.''' + #TODO alf: not tested at all!!! + my = config_dic['http_threads'][ threading.current_thread().name ] + #check valid tenant_id + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + bottle.abort(result, content) + if tenant_id=='any': + bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command") + #check valid action + if action!='attach' and action != 'detach': + bottle.abort(HTTP_Method_Not_Allowed, "actions can be attach or detach") + return + + #Ensure that image exist + from_ ='tenants_images as ti right join images as i on ti.image_id=i.uuid' + where_={'uuid': image_id} + result, content = my.db.get_table(SELECT=('public','tenant_id'), FROM=from_, WHERE=where_) + if result==0: + if action=='attach': + text_error="Image '%s' not found" % image_id + else: + text_error="Image '%s' not found for tenant '%s'" % (image_id, tenant_id) + bottle.abort(HTTP_Not_Found, text_error) + return + elif result>0: + image=content[0] + if action=='attach': + if image['tenant_id']!=None: + bottle.abort(HTTP_Conflict, "Image '%s' already attached to tenant '%s'" % (image_id, tenant_id)) + if image['public']=='no' and not my.admin: + #allow only attaching public images + 
bottle.abort(HTTP_Unauthorized, "Needed admin rights to attach a private image") + return + #insert in data base + result, content = my.db.new_row('tenants_images', {'image_id':image_id, 'tenant_id': tenant_id}) + if result >= 0: + return http_get_image_id(tenant_id, image_id) + else: #detach + if image['tenant_id']==None: + bottle.abort(HTTP_Not_Found, "Image '%s' not attached to tenant '%s'" % (image_id, tenant_id)) + result, content = my.db.delete_row_by_dict(FROM='tenants_images', WHERE={'image_id':image_id, 'tenant_id':tenant_id}) + if result>=0: + if image['public']=='no': + #try to delete the image completely to avoid orphan images, IGNORE error + my.db.delete_row_by_dict(FROM='images', WHERE={'uuid':image_id}) + data={'result' : "image detached"} + return format_out(data) + + #if get here is because an error + print "http_attach_detach_images error %d %s" % (result, content) + bottle.abort(-result, content) + return + +@bottle.route(url_base + '//images/', method='PUT') +def http_put_image_id(tenant_id, image_id): + '''update a image_id into the database.''' + my = config_dic['http_threads'][ threading.current_thread().name ] + #check valid tenant_id + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + bottle.abort(result, content) + #parse input data + http_content = format_in( image_update_schema ) + r = remove_extra_items(http_content, image_update_schema) + if r is not None: print "http_put_image_id: Warning: remove extra items ", r + change_keys_http2db(http_content['image'], http2db_image) + metadata_dict = http_content['image'].pop('metadata', None) + if metadata_dict is not None: + http_content['image']['metadata'] = json.dumps(metadata_dict) + #Ensure that image exist + where_={'uuid': image_id} + if tenant_id=='any': + from_ ='images' + else: + from_ ='tenants_images as ti inner join images as i on ti.image_id=i.uuid' + where_['tenant_id'] = tenant_id + result, content = my.db.get_table(SELECT=('public',), FROM=from_, 
WHERE=where_) + if result==0: + text_error="Image '%s' not found" % image_id + if tenant_id!='any': + text_error +=" for tenant '%s'" % image_id + bottle.abort(HTTP_Not_Found, text_error) + return + elif result>0: + if content[0]['public']=='yes' and not my.admin: + #allow only modifications over private images + bottle.abort(HTTP_Unauthorized, "Needed admin rights to edit a public image") + return + #insert in data base + result, content = my.db.update_rows('images', http_content['image'], {'uuid': image_id}) + + if result < 0: + print "http_put_image_id error %d %s" % (result, content) + bottle.abort(-result, content) + return + else: + return http_get_image_id(tenant_id, image_id) + + +# +# SERVERS +# + +@bottle.route(url_base + '//servers', method='GET') +def http_get_servers(tenant_id): + my = config_dic['http_threads'][ threading.current_thread().name ] + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + bottle.abort(result, content) + return + #obtain data + select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_server, + ('id','name','description','hostId','imageRef','flavorRef','status', 'tenant_id') ) + if tenant_id!='any': + where_['tenant_id'] = tenant_id + result, content = my.db.get_table(SELECT=select_, FROM='instances', WHERE=where_, LIMIT=limit_) + if result < 0: + print "http_get_servers Error", content + bottle.abort(-result, content) + else: + change_keys_http2db(content, http2db_server, reverse=True) + for row in content: + tenant_id = row.pop('tenant_id') + row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'servers', str(row['id']) ) ), 'rel':'bookmark' } ] + data={'servers' : content} + return format_out(data) + +@bottle.route(url_base + '//servers/', method='GET') +def http_get_server_id(tenant_id, server_id): + my = config_dic['http_threads'][ threading.current_thread().name ] + #check valid tenant_id + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + 
bottle.abort(result, content) + return + #obtain data + result, content = my.db.get_instance(server_id) + if result == 0: + bottle.abort(HTTP_Not_Found, content) + elif result >0: + #change image/flavor-id to id and link + convert_bandwidth(content, reverse=True) + convert_datetime2str(content) + if content["ram"]==0 : del content["ram"] + if content["vcpus"]==0 : del content["vcpus"] + if 'flavor_id' in content: + if content['flavor_id'] is not None: + content['flavor'] = {'id':content['flavor_id'], + 'links':[{'href': "/".join( (my.url_preffix, content['tenant_id'], 'flavors', str(content['flavor_id']) ) ), 'rel':'bookmark'}] + } + del content['flavor_id'] + if 'image_id' in content: + if content['image_id'] is not None: + content['image'] = {'id':content['image_id'], + 'links':[{'href': "/".join( (my.url_preffix, content['tenant_id'], 'images', str(content['image_id']) ) ), 'rel':'bookmark'}] + } + del content['image_id'] + change_keys_http2db(content, http2db_server, reverse=True) + if 'extended' in content: + if 'devices' in content['extended']: change_keys_http2db(content['extended']['devices'], http2db_server, reverse=True) + + data={'server' : content} + return format_out(data) + else: + bottle.abort(-result, content) + return + +@bottle.route(url_base + '//servers', method='POST') +def http_post_server_id(tenant_id): + '''deploys a new server''' + my = config_dic['http_threads'][ threading.current_thread().name ] + #check valid tenant_id + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + bottle.abort(result, content) + return + if tenant_id=='any': + bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command") + #chek input + http_content = format_in( server_new_schema ) + r = remove_extra_items(http_content, server_new_schema) + if r is not None: print "http_post_serves: Warning: remove extra items ", r + change_keys_http2db(http_content['server'], http2db_server) + extended_dict = http_content['server'].get('extended', 
None) + if extended_dict is not None: + result, content = check_extended(extended_dict, True) + if result<0: + print "http_post_servers wrong input extended error %d %s" % (result, content) + bottle.abort(-result, content) + return + convert_bandwidth(extended_dict) + if 'devices' in extended_dict: change_keys_http2db(extended_dict['devices'], http2db_server) + + server = http_content['server'] + server_start = server.get('start', 'yes') + server['tenant_id'] = tenant_id + #check flavor valid and take info + result, content = my.db.get_table(FROM='tenants_flavors as tf join flavors as f on tf.flavor_id=f.uuid', + SELECT=('ram','vcpus','extended'), WHERE={'uuid':server['flavor_id'], 'tenant_id':tenant_id}) + if result<=0: + bottle.abort(HTTP_Not_Found, 'flavor_id %s not found' % server['flavor_id']) + return + server['flavor']=content[0] + #check image valid and take info + result, content = my.db.get_table(FROM='tenants_images as ti join images as i on ti.image_id=i.uuid', + SELECT=('path','metadata'), WHERE={'uuid':server['image_id'], 'tenant_id':tenant_id, "status":"ACTIVE"}) + if result<=0: + bottle.abort(HTTP_Not_Found, 'image_id %s not found or not ACTIVE' % server['image_id']) + return + server['image']=content[0] + if "hosts_id" in server: + result, content = my.db.get_table(FROM='hosts', SELECT=('uuid',), WHERE={'uuid': server['host_id']}) + if result<=0: + bottle.abort(HTTP_Not_Found, 'hostId %s not found' % server['host_id']) + return + #print json.dumps(server, indent=4) + + result, content = ht.create_server(server, config_dic['db'], config_dic['db_lock'], config_dic['mode']=='normal') + + if result >= 0: + #Insert instance to database + nets=[] + print + print "inserting at DB" + print + if server_start == 'no': + content['status'] = 'INACTIVE' + ports_to_free=[] + new_instance_result, new_instance = my.db.new_instance(content, nets, ports_to_free) + if new_instance_result < 0: + print "Error http_post_servers() :", new_instance_result, new_instance + 
bottle.abort(-new_instance_result, new_instance) + return + print + print "inserted at DB" + print + for port in ports_to_free: + r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'restore-iface',*port ) + if r < 0: + print ' http_post_servers ERROR RESTORE IFACE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c + #updata nets + for net in nets: + r,c = config_dic['of_thread'].insert_task("update-net", net) + if r < 0: + print ':http_post_servers ERROR UPDATING NETS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c + + + + #look for dhcp ip address + r2, c2 = my.db.get_table(FROM="ports", SELECT=["mac", "net_id"], WHERE={"instance_id": new_instance}) + if r2 >0 and config_dic.get("dhcp_server"): + for iface in c2: + if iface["net_id"] in config_dic["dhcp_nets"]: + #print "dhcp insert add task" + r,c = config_dic['dhcp_thread'].insert_task("add", iface["mac"]) + if r < 0: + print ':http_post_servers ERROR UPDATING dhcp_server !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c + + #Start server + + server['uuid'] = new_instance + #server_start = server.get('start', 'yes') + if server_start != 'no': + server['paused'] = True if server_start == 'paused' else False + server['action'] = {"start":None} + server['status'] = "CREATING" + #Program task + r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'instance',server ) + if r<0: + my.db.update_rows('instances', {'status':"ERROR"}, {'uuid':server['uuid'], 'last_error':c}, log=True) + + return http_get_server_id(tenant_id, new_instance) + else: + bottle.abort(HTTP_Bad_Request, content) + return + +def http_server_action(server_id, tenant_id, action): + '''Perform actions over a server as resume, reboot, terminate, ...''' + my = config_dic['http_threads'][ threading.current_thread().name ] + server={"uuid": server_id, "action":action} + where={'uuid': server_id} + if tenant_id!='any': + where['tenant_id']= tenant_id + result, content = my.db.get_table(FROM='instances', WHERE=where) + if result == 0: + 
bottle.abort(HTTP_Not_Found, "server %s not found" % server_id) + return + if result < 0: + print "http_post_server_action error getting data %d %s" % (result, content) + bottle.abort(HTTP_Internal_Server_Error, content) + return + server.update(content[0]) + tenant_id = server["tenant_id"] + + #TODO check a right content + new_status = None + if 'terminate' in action: + new_status='DELETING' + elif server['status'] == 'ERROR': #or server['status'] == 'CREATING': + if 'terminate' not in action and 'rebuild' not in action: + bottle.abort(HTTP_Method_Not_Allowed, "Server is in ERROR status, must be rebuit or deleted ") + return +# elif server['status'] == 'INACTIVE': +# if 'start' not in action and 'createImage' not in action: +# bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'INACTIVE' status is 'start'") +# return +# if 'start' in action: +# new_status='CREATING' +# server['paused']='no' +# elif server['status'] == 'PAUSED': +# if 'resume' not in action: +# bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'PAUSED' status is 'resume'") +# return +# elif server['status'] == 'ACTIVE': +# if 'pause' not in action and 'reboot'not in action and 'shutoff'not in action: +# bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'ACTIVE' status is 'pause','reboot' or 'shutoff'") +# return + + if 'start' in action or 'createImage' in action or 'rebuild' in action: + #check image valid and take info + image_id = server['image_id'] + if 'createImage' in action: + if 'imageRef' in action['createImage']: + image_id = action['createImage']['imageRef'] + elif 'disk' in action['createImage']: + result, content = my.db.get_table(FROM='instance_devices', + SELECT=('image_id','dev'), WHERE={'instance_id':server['uuid'],"type":"disk"}) + if result<=0: + bottle.abort(HTTP_Not_Found, 'disk not found for server') + return + elif result>1: + disk_id=None + if 
action['createImage']['imageRef']['disk'] != None: + for disk in content: + if disk['dev'] == action['createImage']['imageRef']['disk']: + disk_id = disk['image_id'] + break + if disk_id == None: + bottle.abort(HTTP_Not_Found, 'disk %s not found for server' % action['createImage']['imageRef']['disk']) + return + else: + bottle.abort(HTTP_Not_Found, 'more than one disk found for server' ) + return + image_id = disk_id + else: #result==1 + image_id = content[0]['image_id'] + + result, content = my.db.get_table(FROM='tenants_images as ti join images as i on ti.image_id=i.uuid', + SELECT=('path','metadata'), WHERE={'uuid':image_id, 'tenant_id':tenant_id, "status":"ACTIVE"}) + if result<=0: + bottle.abort(HTTP_Not_Found, 'image_id %s not found or not ACTIVE' % image_id) + return + if content[0]['metadata'] is not None: + try: + metadata = json.loads(content[0]['metadata']) + except: + return -HTTP_Internal_Server_Error, "Can not decode image metadata" + content[0]['metadata']=metadata + else: + content[0]['metadata'] = {} + server['image']=content[0] + if 'createImage' in action: + action['createImage']['source'] = {'image_id': image_id, 'path': content[0]['path']} + if 'createImage' in action: + #Create an entry in Database for the new image + new_image={'status':'BUILD', 'progress': 0 } + new_image_metadata=content[0] + if 'metadata' in server['image'] and server['image']['metadata'] != None: + new_image_metadata.update(server['image']['metadata']) + new_image_metadata = {"use_incremental":"no"} + if 'metadata' in action['createImage']: + new_image_metadata.update(action['createImage']['metadata']) + new_image['metadata'] = json.dumps(new_image_metadata) + new_image['name'] = action['createImage'].get('name', None) + new_image['description'] = action['createImage'].get('description', None) + new_image['uuid']=my.db.new_uuid() + if 'path' in action['createImage']: + new_image['path'] = action['createImage']['path'] + else: + new_image['path']="/provisional/path/" + 
new_image['uuid'] + result, image_uuid = my.db.new_image(new_image, tenant_id) + if result<=0: + bottle.abort(HTTP_Bad_Request, 'Error: ' + image_uuid) + return + server['new_image'] = new_image + + + #Program task + r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'instance',server ) + if r<0: + print "Task queue full at host ", server['host_id'] + bottle.abort(HTTP_Request_Timeout, c) + if 'createImage' in action and result >= 0: + return http_get_image_id(tenant_id, image_uuid) + + #Update DB only for CREATING or DELETING status + data={'result' : 'in process'} + if new_status != None and new_status == 'DELETING': + nets=[] + ports_to_free=[] + #look for dhcp ip address + r2, c2 = my.db.get_table(FROM="ports", SELECT=["mac", "net_id"], WHERE={"instance_id": server_id}) + r,c = my.db.delete_instance(server_id, tenant_id, nets, ports_to_free, "requested by http") + for port in ports_to_free: + r1,c1 = config_dic['host_threads'][ server['host_id'] ].insert_task( 'restore-iface',*port ) + if r1 < 0: + print ' http_post_server_action error at server deletion ERROR resore-iface !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c1 + data={'result' : 'deleting in process, but ifaces cannot be restored!!!!!'} + for net in nets: + r1,c1 = config_dic['of_thread'].insert_task("update-net", net) + if r1 < 0: + print ' http_post_server_action error at server deletion ERROR UPDATING NETS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c1 + data={'result' : 'deleting in process, but openflow rules cannot be deleted!!!!!'} + #look for dhcp ip address + if r2 >0 and config_dic.get("dhcp_server"): + for iface in c2: + if iface["net_id"] in config_dic["dhcp_nets"]: + r,c = config_dic['dhcp_thread'].insert_task("del", iface["mac"]) + #print "dhcp insert del task" + if r < 0: + print ':http_post_servers ERROR UPDATING dhcp_server !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' 
+ c + + return format_out(data) + + + +@bottle.route(url_base + '//servers/', method='DELETE') +def http_delete_server_id(tenant_id, server_id): + '''delete a server''' + my = config_dic['http_threads'][ threading.current_thread().name ] + #check valid tenant_id + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + bottle.abort(result, content) + return + + return http_server_action(server_id, tenant_id, {"terminate":None} ) + + +@bottle.route(url_base + '//servers//action', method='POST') +def http_post_server_action(tenant_id, server_id): + '''take an action over a server''' + my = config_dic['http_threads'][ threading.current_thread().name ] + #check valid tenant_id + result,content = check_valid_tenant(my, tenant_id) + if result != 0: + bottle.abort(result, content) + return + http_content = format_in( server_action_schema ) + #r = remove_extra_items(http_content, server_action_schema) + #if r is not None: print "http_post_server_action: Warning: remove extra items ", r + + return http_server_action(server_id, tenant_id, http_content) + +# +# NETWORKS +# + + +@bottle.route(url_base + '/networks', method='GET') +def http_get_networks(): + my = config_dic['http_threads'][ threading.current_thread().name ] + #obtain data + select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_network, + ('id','name','tenant_id','type', + 'shared','provider:vlan','status','last_error','admin_state_up','provider:physical') ) + #TODO temporally remove tenant_id + if "tenant_id" in where_: + del where_["tenant_id"] + result, content = my.db.get_table(SELECT=select_, FROM='nets', WHERE=where_, LIMIT=limit_) + if result < 0: + print "http_get_networks error %d %s" % (result, content) + bottle.abort(-result, content) + else: + convert_boolean(content, ('shared', 'admin_state_up', 'enable_dhcp') ) + delete_nulls(content) + change_keys_http2db(content, http2db_network, reverse=True) + data={'networks' : content} + return format_out(data) + 
+@bottle.route(url_base + '/networks/', method='GET') +def http_get_network_id(network_id): + my = config_dic['http_threads'][ threading.current_thread().name ] + #obtain data + where_ = bottle.request.query + where_['uuid'] = network_id + result, content = my.db.get_table(FROM='nets', WHERE=where_, LIMIT=100) + + if result < 0: + print "http_get_networks_id error %d %s" % (result, content) + bottle.abort(-result, content) + elif result==0: + print "http_get_networks_id network '%s' not found" % network_id + bottle.abort(HTTP_Not_Found, 'network %s not found' % network_id) + else: + convert_boolean(content, ('shared', 'admin_state_up', 'enale_dhcp') ) + change_keys_http2db(content, http2db_network, reverse=True) + #get ports + result, ports = my.db.get_table(FROM='ports', SELECT=('uuid as port_id',), + WHERE={'net_id': network_id}, LIMIT=100) + if len(ports) > 0: + content[0]['ports'] = ports + delete_nulls(content[0]) + data={'network' : content[0]} + return format_out(data) + +@bottle.route(url_base + '/networks', method='POST') +def http_post_networks(): + '''insert a network into the database.''' + my = config_dic['http_threads'][ threading.current_thread().name ] + #parse input data + http_content = format_in( network_new_schema ) + r = remove_extra_items(http_content, network_new_schema) + if r is not None: print "http_post_networks: Warning: remove extra items ", r + change_keys_http2db(http_content['network'], http2db_network) + network=http_content['network'] + #check valid tenant_id + tenant_id= network.get('tenant_id') + if tenant_id!=None: + result, _ = my.db.get_table(FROM='tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id,"enabled":True}) + if result<=0: + bottle.abort(HTTP_Not_Found, 'tenant %s not found or not enabled' % tenant_id) + return + bridge_net = None + #check valid params + net_provider = network.get('provider') + net_type = network.get('type') + net_vlan = network.get("vlan") + net_bind_net = network.get("bind_net") + net_bind_type= 
network.get("bind_type") + name = network["name"] + + #check if network name ends with : and network exist in order to make and automated bindning + vlan_index =name.rfind(":") + if net_bind_net==None and net_bind_type==None and vlan_index > 1: + try: + vlan_tag = int(name[vlan_index+1:]) + if vlan_tag >0 and vlan_tag < 4096: + net_bind_net = name[:vlan_index] + net_bind_type = "vlan:" + name[vlan_index+1:] + except: + pass + + if net_bind_net != None: + #look for a valid net + if check_valid_uuid(net_bind_net): + net_bind_key = "uuid" + else: + net_bind_key = "name" + result, content = my.db.get_table(FROM='nets', WHERE={net_bind_key: net_bind_net} ) + if result<0: + bottle.abort(HTTP_Internal_Server_Error, 'getting nets from db ' + content) + return + elif result==0: + bottle.abort(HTTP_Bad_Request, "bind_net %s '%s'not found" % (net_bind_key, net_bind_net) ) + return + elif result>1: + bottle.abort(HTTP_Bad_Request, "more than one bind_net %s '%s' found, use uuid" % (net_bind_key, net_bind_net) ) + return + network["bind_net"] = content[0]["uuid"] + if net_bind_type != None: + if net_bind_type[0:5] != "vlan:": + bottle.abort(HTTP_Bad_Request, "bad format for 'bind_type', must be 'vlan:'") + return + if int(net_bind_type[5:]) > 4095 or int(net_bind_type[5:])<=0 : + bottle.abort(HTTP_Bad_Request, "bad format for 'bind_type', must be 'vlan:' with a tag between 1 and 4095") + return + network["bind_type"] = net_bind_type + + if net_provider!=None: + if net_provider[:9]=="openflow:": + if net_type!=None: + if net_type!="ptp" and net_type!="data": + bottle.abort(HTTP_Bad_Request, "Only 'ptp' or 'data' net types can be bound to 'openflow'") + else: + net_type='data' + else: + if net_type!=None: + if net_type!="bridge_man" and net_type!="bridge_data": + bottle.abort(HTTP_Bad_Request, "Only 'bridge_man' or 'bridge_data' net types can be bound to 'bridge', 'macvtap' or 'default") + else: + net_type='bridge_man' + + if net_type==None: + net_type='bridge_man' + + if 
net_provider != None: + if net_provider[:7]=='bridge:': + #check it is one of the pre-provisioned bridges + bridge_net_name = net_provider[7:] + for brnet in config_dic['bridge_nets']: + if brnet[0]==bridge_net_name: # free + if brnet[3] != None: + bottle.abort(HTTP_Conflict, "invalid 'provider:physical', bridge '%s' is already used" % bridge_net_name) + return + bridge_net=brnet + net_vlan = brnet[1] + break +# if bridge_net==None: +# bottle.abort(HTTP_Bad_Request, "invalid 'provider:physical', bridge '%s' is not one of the provisioned 'bridge_ifaces' in the configuration file" % bridge_net_name) +# return + elif net_type=='bridge_data' or net_type=='bridge_man': + #look for a free precreated nets + for brnet in config_dic['bridge_nets']: + if brnet[3]==None: # free + if bridge_net != None: + if net_type=='bridge_man': #look for the smaller speed + if brnet[2] < bridge_net[2]: bridge_net = brnet + else: #look for the larger speed + if brnet[2] > bridge_net[2]: bridge_net = brnet + else: + bridge_net = brnet + net_vlan = brnet[1] + if bridge_net==None: + bottle.abort(HTTP_Bad_Request, "Max limits of bridge networks reached. 
Future versions of VIM will overcome this limit") + return + else: + print "using net", bridge_net + net_provider = "bridge:"+bridge_net[0] + net_vlan = bridge_net[1] + if net_vlan==None and (net_type=="data" or net_type=="ptp"): + net_vlan = my.db.get_free_net_vlan() + if net_vlan < 0: + bottle.abort(HTTP_Internal_Server_Error, "Error getting an available vlan") + return + + network['provider'] = net_provider + network['type'] = net_type + network['vlan'] = net_vlan + result, content = my.db.new_row('nets', network, True, True) + + if result >= 0: + if bridge_net!=None: + bridge_net[3] = content + + if config_dic.get("dhcp_server"): + if network["name"] in config_dic["dhcp_server"].get("nets", () ): + config_dic["dhcp_nets"].append(content) + print "dhcp_server: add new net", content + elif bridge_net != None and bridge_net[0] in config_dic["dhcp_server"].get("bridge_ifaces", () ): + config_dic["dhcp_nets"].append(content) + print "dhcp_server: add new net", content + return http_get_network_id(content) + else: + print "http_post_networks error %d %s" % (result, content) + bottle.abort(-result, content) + return + + +@bottle.route(url_base + '/networks/', method='PUT') +def http_put_network_id(network_id): + '''update a network_id into the database.''' + my = config_dic['http_threads'][ threading.current_thread().name ] + #parse input data + http_content = format_in( network_update_schema ) + r = remove_extra_items(http_content, network_update_schema) + change_keys_http2db(http_content['network'], http2db_network) + network=http_content['network'] + + #Look for the previous data + where_ = {'uuid': network_id} + result, network_old = my.db.get_table(FROM='nets', WHERE=where_) + if result < 0: + print "http_put_network_id error %d %s" % (result, network_old) + bottle.abort(-result, network_old) + return + elif result==0: + print "http_put_network_id network '%s' not found" % network_id + bottle.abort(HTTP_Not_Found, 'network %s not found' % network_id) + return + 
#get ports + nbports, content = my.db.get_table(FROM='ports', SELECT=('uuid as port_id',), + WHERE={'net_id': network_id}, LIMIT=100) + if result < 0: + print "http_put_network_id error %d %s" % (result, network_old) + bottle.abort(-result, content) + return + if nbports>0: + if 'type' in network and network['type'] != network_old[0]['type']: + bottle.abort(HTTP_Method_Not_Allowed, "Can not change type of network while having ports attached") + if 'vlan' in network and network['vlan'] != network_old[0]['vlan']: + bottle.abort(HTTP_Method_Not_Allowed, "Can not change vlan of network while having ports attached") + + #check valid params + net_provider = network.get('provider', network_old[0]['provider']) + net_type = network.get('type', network_old[0]['type']) + net_bind_net = network.get("bind_net") + net_bind_type= network.get("bind_type") + if net_bind_net != None: + #look for a valid net + if check_valid_uuid(net_bind_net): + net_bind_key = "uuid" + else: + net_bind_key = "name" + result, content = my.db.get_table(FROM='nets', WHERE={net_bind_key: net_bind_net} ) + if result<0: + bottle.abort(HTTP_Internal_Server_Error, 'getting nets from db ' + content) + return + elif result==0: + bottle.abort(HTTP_Bad_Request, "bind_net %s '%s'not found" % (net_bind_key, net_bind_net) ) + return + elif result>1: + bottle.abort(HTTP_Bad_Request, "more than one bind_net %s '%s' found, use uuid" % (net_bind_key, net_bind_net) ) + return + network["bind_net"] = content[0]["uuid"] + if net_bind_type != None: + if net_bind_type[0:5] != "vlan:": + bottle.abort(HTTP_Bad_Request, "bad format for 'bind_type', must be 'vlan:'") + return + if int(net_bind_type[5:]) > 4095 or int(net_bind_type[5:])<=0 : + bottle.abort(HTTP_Bad_Request, "bad format for 'bind_type', must be 'vlan:' with a tag between 1 and 4095") + return + if net_provider!=None: + if net_provider[:9]=="openflow:": + if net_type!="ptp" and net_type!="data": + bottle.abort(HTTP_Bad_Request, "Only 'ptp' or 'data' net types can 
be bound to 'openflow'") + else: + if net_type!="bridge_man" and net_type!="bridge_data": + bottle.abort(HTTP_Bad_Request, "Only 'bridge_man' or 'bridge_data' net types can be bound to 'bridge', 'macvtap' or 'default") + + #insert in data base + result, content = my.db.update_rows('nets', network, WHERE={'uuid': network_id}, log=True ) + if result >= 0: + if result>0: # and nbports>0 and 'admin_state_up' in network and network['admin_state_up'] != network_old[0]['admin_state_up']: + r,c = config_dic['of_thread'].insert_task("update-net", network_id) + if r < 0: + print "http_put_network_id error while launching openflow rules" + bottle.abort(HTTP_Internal_Server_Error, c) + if config_dic.get("dhcp_server"): + if network_id in config_dic["dhcp_nets"]: + config_dic["dhcp_nets"].remove(network_id) + print "dhcp_server: delete net", network_id + if network.get("name", network_old["name"]) in config_dic["dhcp_server"].get("nets", () ): + config_dic["dhcp_nets"].append(network_id) + print "dhcp_server: add new net", network_id + else: + net_bind = network.get("bind", network_old["bind"] ) + if net_bind and net_bind[:7]=="bridge:" and net_bind[7:] in config_dic["dhcp_server"].get("bridge_ifaces", () ): + config_dic["dhcp_nets"].append(network_id) + print "dhcp_server: add new net", network_id + return http_get_network_id(network_id) + else: + bottle.abort(-result, content) + return + + +@bottle.route(url_base + '/networks/', method='DELETE') +def http_delete_network_id(network_id): + '''delete a network_id from the database.''' + my = config_dic['http_threads'][ threading.current_thread().name ] + + #delete from the data base + result, content = my.db.delete_row('nets', network_id ) + + if result == 0: + bottle.abort(HTTP_Not_Found, content) + elif result >0: + for brnet in config_dic['bridge_nets']: + if brnet[3]==network_id: + brnet[3]=None + break + if config_dic.get("dhcp_server") and network_id in config_dic["dhcp_nets"]: + config_dic["dhcp_nets"].remove(network_id) 
+ print "dhcp_server: delete net", network_id + data={'result' : content} + return format_out(data) + else: + print "http_delete_network_id error",result, content + bottle.abort(-result, content) + return +# +# OPENFLOW +# +@bottle.route(url_base + '/networks//openflow', method='GET') +def http_get_openflow_id(network_id): + '''To obtain the list of openflow rules of a network + ''' + my = config_dic['http_threads'][ threading.current_thread().name ] + #ignore input data + if network_id=='all': + where_={} + else: + where_={"net_id": network_id} + result, content = my.db.get_table(SELECT=("name","net_id","priority","vlan_id","ingress_port","src_mac","dst_mac","actions"), + WHERE=where_, FROM='of_flows') + if result < 0: + bottle.abort(-result, content) + return + data={'openflow-rules' : content} + return format_out(data) + +@bottle.route(url_base + '/networks//openflow', method='PUT') +def http_put_openflow_id(network_id): + '''To make actions over the net. The action is to reinstall the openflow rules + network_id can be 'all' + ''' + my = config_dic['http_threads'][ threading.current_thread().name ] + if not my.admin: + bottle.abort(HTTP_Unauthorized, "Needed admin privileges") + return + #ignore input data + if network_id=='all': + where_={} + else: + where_={"uuid": network_id} + result, content = my.db.get_table(SELECT=("uuid","type"), WHERE=where_, FROM='nets') + if result < 0: + bottle.abort(-result, content) + return + + for net in content: + if net["type"]!="ptp" and net["type"]!="data": + result-=1 + continue + r,c = config_dic['of_thread'].insert_task("update-net", net['uuid']) + if r < 0: + print "http_put_openflow_id error while launching openflow rules" + bottle.abort(HTTP_Internal_Server_Error, c) + data={'result' : str(result)+" nets updates"} + return format_out(data) + +@bottle.route(url_base + '/networks/openflow/clear', method='DELETE') +@bottle.route(url_base + '/networks/clear/openflow', method='DELETE') +def http_clear_openflow_rules(): + 
'''To make actions over the net. The action is to delete ALL openflow rules + ''' + my = config_dic['http_threads'][ threading.current_thread().name ] + if not my.admin: + bottle.abort(HTTP_Unauthorized, "Needed admin privileges") + return + #ignore input data + r,c = config_dic['of_thread'].insert_task("clear-all") + if r < 0: + print "http_delete_openflow_id error while launching openflow rules" + bottle.abort(HTTP_Internal_Server_Error, c) + return + + data={'result' : " Clearing openflow rules in process"} + return format_out(data) + +@bottle.route(url_base + '/networks/openflow/ports', method='GET') +def http_get_openflow_ports(): + '''Obtain switch ports names of openflow controller + ''' + data={'ports' : config_dic['of_thread'].OF_connector.pp2ofi} + return format_out(data) + + +# +# PORTS +# + +@bottle.route(url_base + '/ports', method='GET') +def http_get_ports(): + #obtain data + my = config_dic['http_threads'][ threading.current_thread().name ] + select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_port, + ('id','name','tenant_id','network_id','vpci','mac_address','device_owner','device_id', + 'binding:switch_port','binding:vlan','bandwidth','status','admin_state_up','ip_address') ) + #result, content = my.db.get_ports(where_) + result, content = my.db.get_table(SELECT=select_, WHERE=where_, FROM='ports',LIMIT=limit_) + if result < 0: + print "http_get_ports Error", result, content + bottle.abort(-result, content) + return + else: + convert_boolean(content, ('admin_state_up',) ) + delete_nulls(content) + change_keys_http2db(content, http2db_port, reverse=True) + data={'ports' : content} + return format_out(data) + +@bottle.route(url_base + '/ports/', method='GET') +def http_get_port_id(port_id): + my = config_dic['http_threads'][ threading.current_thread().name ] + #obtain data + result, content = my.db.get_table(WHERE={'uuid': port_id}, FROM='ports') + if result < 0: + print "http_get_ports error", result, content + 
bottle.abort(-result, content) + elif result==0: + print "http_get_ports port '%s' not found" % str(port_id) + bottle.abort(HTTP_Not_Found, 'port %s not found' % port_id) + else: + convert_boolean(content, ('admin_state_up',) ) + delete_nulls(content) + change_keys_http2db(content, http2db_port, reverse=True) + data={'port' : content[0]} + return format_out(data) + + +@bottle.route(url_base + '/ports', method='POST') +def http_post_ports(): + '''insert an external port into the database.''' + my = config_dic['http_threads'][ threading.current_thread().name ] + if not my.admin: + bottle.abort(HTTP_Unauthorized, "Needed admin privileges") + #parse input data + http_content = format_in( port_new_schema ) + r = remove_extra_items(http_content, port_new_schema) + if r is not None: print "http_post_ports: Warning: remove extra items ", r + change_keys_http2db(http_content['port'], http2db_port) + port=http_content['port'] + + port['type'] = 'external' + if 'net_id' in port and port['net_id'] == None: + del port['net_id'] + + if 'net_id' in port: + #check that new net has the correct type + result, new_net = my.db.check_target_net(port['net_id'], None, 'external' ) + if result < 0: + bottle.abort(HTTP_Bad_Request, new_net) + return + #insert in data base + result, uuid = my.db.new_row('ports', port, True, True) + if result > 0: + if 'net_id' in port: + r,c = config_dic['of_thread'].insert_task("update-net", port['net_id']) + if r < 0: + print "http_post_ports error while launching openflow rules" + bottle.abort(HTTP_Internal_Server_Error, c) + return http_get_port_id(uuid) + else: + bottle.abort(-result, uuid) + return + +@bottle.route(url_base + '/ports/', method='PUT') +def http_put_port_id(port_id): + '''update a port_id into the database.''' + + my = config_dic['http_threads'][ threading.current_thread().name ] + #parse input data + http_content = format_in( port_update_schema ) + change_keys_http2db(http_content['port'], http2db_port) + port_dict=http_content['port'] 
+ + #Look for the previous port data + where_ = {'uuid': port_id} + result, content = my.db.get_table(FROM="ports",WHERE=where_) + if result < 0: + print "http_put_port_id error", result, content + bottle.abort(-result, content) + return + elif result==0: + print "http_put_port_id port '%s' not found" % port_id + bottle.abort(HTTP_Not_Found, 'port %s not found' % port_id) + return + print port_dict + for k in ('vlan','switch_port','mac_address', 'tenant_id'): + if k in port_dict and not my.admin: + bottle.abort(HTTP_Unauthorized, "Needed admin privileges for changing " + k) + return + + port=content[0] + #change_keys_http2db(port, http2db_port, reverse=True) + nets = [] + host_id = None + result=1 + if 'net_id' in port_dict: + #change of net. + old_net = port.get('net_id', None) + new_net = port_dict['net_id'] + if old_net != new_net: + + if new_net is not None: nets.append(new_net) #put first the new net, so that new openflow rules are created before removing the old ones + if old_net is not None: nets.append(old_net) + if port['type'] == 'instance:bridge': + bottle.abort(HTTP_Forbidden, "bridge interfaces cannot be attached to a different net") + return + elif port['type'] == 'external': + if not my.admin: + bottle.abort(HTTP_Unauthorized, "Needed admin privileges") + return + else: + if new_net != None: + #check that new net has the correct type + result, new_net_dict = my.db.check_target_net(new_net, None, port['type'] ) + + #change VLAN for SR-IOV ports + if result>=0 and port["type"]=="instance:data" and port["model"]=="VF": #TODO consider also VFnotShared + if new_net == None: + port_dict["vlan"] = None + else: + port_dict["vlan"] = new_net_dict["vlan"] + #get host where this VM is allocated + result, content = my.db.get_table(FROM="instances",WHERE={"uuid":port["instance_id"]}) + if result<0: + print "http_put_port_id database error", content + elif result>0: + host_id = content[0]["host_id"] + + #insert in data base + if result >= 0: + result, content = 
my.db.update_rows('ports', port_dict, WHERE={'uuid': port_id}, log=False ) + + #Insert task to complete actions + if result > 0: + for net_id in nets: + r,v = config_dic['of_thread'].insert_task("update-net", net_id) + if r<0: print "Error ********* http_put_port_id update_of_flows: ", v + #TODO Do something if fails + if host_id != None: + config_dic['host_threads'][host_id].insert_task("edit-iface", port_id, old_net, new_net) + + if result >= 0: + return http_get_port_id(port_id) + else: + bottle.abort(HTTP_Bad_Request, content) + return + + +@bottle.route(url_base + '/ports/', method='DELETE') +def http_delete_port_id(port_id): + '''delete a port_id from the database.''' + my = config_dic['http_threads'][ threading.current_thread().name ] + if not my.admin: + bottle.abort(HTTP_Unauthorized, "Needed admin privileges") + return + + #Look for the previous port data + where_ = {'uuid': port_id, "type": "external"} + result, ports = my.db.get_table(WHERE=where_, FROM='ports',LIMIT=100) + + if result<=0: + print "http_delete_port_id port '%s' not found" % port_id + bottle.abort(HTTP_Not_Found, 'port %s not found or device_owner is not external' % port_id) + return + #delete from the data base + result, content = my.db.delete_row('ports', port_id ) + + if result == 0: + bottle.abort(HTTP_Not_Found, content) + elif result >0: + network = ports[0].get('net_id', None) + if network is not None: + #change of net. + r,c = config_dic['of_thread'].insert_task("update-net", network) + if r<0: print "!!!!!! http_delete_port_id update_of_flows error", r, c + data={'result' : content} + return format_out(data) + else: + print "http_delete_port_id error",result, content + bottle.abort(-result, content) + return + diff --git a/openflow b/openflow new file mode 100755 index 0000000..7ea59c4 --- /dev/null +++ b/openflow @@ -0,0 +1,388 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# PYTHON_ARGCOMPLETE_OK + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. 
+# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +This program is useful to interact directly with Openflow Controllers +to clear rules, add and delete rules, list rules, etc. +''' + +__author__="Gerardo Garcia, Alfonso Tierno, Pablo Montes" +__date__ ="$09-oct-2014 09:09:48$" + +#import time +import os +import sys +import argparse +import argcomplete +import imp +import yaml +import requests +import logging +from openflow_thread import change_db2of, FlowBadFormat + +def of_switches(args): + r,c = ofconnector.get_of_switches() + if r<0: + print c + return r + else: + for s in c: + print " %s %s" % (s[0], s[1]) + return 0 + +def of_list(args): + r,c = ofconnector.get_of_rules(not args.no_translate) + if r<0: + print c + return r + if args.verbose > 0: + print yaml.safe_dump(c, indent=4, default_flow_style=False) + return 0 + + print " switch priority name ingress_port dst_mac vlan_id actions" + for name,rule in c.iteritems(): + action_list=[] + for action in rule["actions"]: + action_list.append(action[0]+"="+str(action[1])) + if "vlan_id" in rule: + vlan=str(rule["vlan_id"]) + else: + vlan="any" + print "%s %s %s %s %s %s %s" % \ + (rule["switch"], str(rule["priority"]).ljust(6), name.ljust(40), rule["ingress_port"].ljust(8), \ + rule.get("dst_mac","any").ljust(18), vlan.ljust(4), 
",".join(action_list) ) + return 0 + +def of_clear(args): + if not args.force: + r = raw_input("Clear all Openflow rules (y/N)? ") + if not (len(r)>0 and r[0].lower()=="y"): + return 0 + r,c = ofconnector.clear_all_flows() + if r<0: + print c + return r + return 0 + +def of_port_list(args): + r,c = ofconnector.obtain_port_correspondence() + if r<0: + print c + return r + yaml.safe_dump({"ports": c}, sys.stdout, indent=2, default_flow_style=False) + +#def of_dump(args): +# args.verbose = 3 +# args.no_translate=False +# of_list(args) + return 0 + +def of_reinstall(args): + try: + URLrequest = "http://%s:%s/openvim/networks/all/openflow" %(vim_host, vim_admin_port) + print URLrequest + openvim_response = requests.put(URLrequest) + print openvim_response.text + return 0 + except requests.exceptions.RequestException as e: + print " Exception GET at '"+URLrequest+"' " + str(e) + return -1 + +def of_install(args): + line_number=1 + try: + f = open(args.file, "r") + text = f.read() + f.close() + lines=text.split("\n") + heads=lines[0].split() + + for line in lines[1:]: + line_number += 1 + rule={} + items= line.split() + if len(items)==0 or items[0][0]=="#": #empty line or commented + continue + for i in range(0,len(items)): + rule[ heads[i] ] = items[i] + if rule["vlan_id"] == "any": + del rule["vlan_id"] + if rule["dst_mac"] == "any": + del rule["dst_mac"] + if 'priority' in rule and (rule['priority']==None or rule['priority']=="None" ): + del rule['priority'] + try: + change_db2of(rule) + except FlowBadFormat as e: + print "Format error at line %d: %s" % (line_number, str(e)) + continue + r,c = ofconnector.new_flow(rule) + if r<0: + error="ERROR: "+c + else: + error="OK" + print "%s %s %s input=%s dst_mac=%s vlan_id=%s %s" % \ + (rule["switch"], str(rule.get("priority")).ljust(6), rule["name"].ljust(20), rule["ingress_port"].ljust(3), \ + rule.get("dst_mac","any").ljust(18), rule.get("vlan_id","any").ljust(4), error ) + return 0 + except IOError as e: + print " Error 
opening file '" + args.file + "': " + e.args[1] + return -1 + except yaml.YAMLError as exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1) + print " Error yaml/json format error at " + error_pos + return -1 + +def of_add(args): + if args.act==None and args.actions==None: + print "openflow add: error: one of the arguments --actions or [--setvlan,--stripvlan],--out is required" + return -1 + elif args.act!=None and args.actions!=None: + print "openflow add: error: Use either --actions option or [--setvlan,--stripvlan],--out options; but not both" + return -1 + + rule={"name":args.name, "priority":args.priority, + "ingress_port": args.inport + } + if args.matchvlan: + rule["vlan_id"] = args.matchvlan + if args.matchmac: + rule["dst_mac"] = args.matchmac + + if args.actions: + rule["actions"] = args.actions + try: + change_db2of(rule) + except FlowBadFormat as e: + print "Format error at --actions: '%s' Expected 'vlan=,out=,...'" % str(e) + return -1 + elif args.act: + rule["actions"]=[] + error_msj = "openflow add: error: --setvlan,--stripvlan options must be followed by an --out option" + previous_option_vlan=False # indicates if the previous option was a set or strip vlan to avoid consecutive ones and to force an out options afterwards + for action in args.act: + if action==None or type(action)==int: + if previous_option_vlan: #consecutive vlan options + print error_msj + return -1 + previous_option_vlan=True + rule["actions"].append( ("vlan", action) ) + else: + previous_option_vlan=False + rule["actions"].append( ("out", action) ) + if previous_option_vlan: + print error_msj + return -1 + #print rule + #return + + r,c = ofconnector.new_flow(rule) + if r<0: + print c + return -1 + return 0 + +def of_delete(args): + if not args.force: + r = raw_input("Clear rule %s (y/N)? 
" %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return 0 + r,c = ofconnector.del_flow(args.name) + if r<0: + print c + return -1 + return 0 + +def config(args): + print "OPENVIM_HOST: %s" %(vim_host) + print "OPENVIM_ADMIN_PORT: %s" %(vim_admin_port) + print "OF_CONTROLLER_TYPE: %s" %(of_controller_type) + if of_controller_module or (of_controller_type!="floodlight" and of_controller_type!="opendaylight"): + print "OF_CONTROLLER_MODULE: %s" %(of_controller_module) + print "OF_CONTROLLER_USER: %s" %(of_controller_user) + print "OF_CONTROLLER_PASSWORD: %s" %(of_controller_password) + #print "OF_CONTROLLER_VERSION: %s" %(of_controller_version) + print "OF_CONTROLLER_IP: %s" %(of_controller_ip) + print "OF_CONTROLLER_PORT: %s" %(of_controller_port) + print "OF_CONTROLLER_DPID: %s" %(of_controller_dpid) + return + +version="0.8" +global vim_host +global vim_admin_port +global of_controller_type +global of_controller_user +global of_controller_password +global of_controller_ip +global of_controller_port +global of_controller_dpid +global of_controller_module +global ofconnector + +if __name__=="__main__": + #print "test_ofconnector version", version, "Jul 2015" + #print "(c) Copyright Telefonica" + + vim_host = os.getenv('OPENVIM_HOST',"localhost") + vim_admin_port = os.getenv('OPENVIM_ADMIN_PORT',"8085") + of_controller_type = os.getenv('OF_CONTROLLER_TYPE',"floodlight") + of_controller_user = os.getenv('OF_CONTROLLER_USER',None) + of_controller_password = os.getenv('OF_CONTROLLER_PASSWORD',None) + #of_controller_version = os.getenv('OF_CONTROLLER_VERSION',"0.90") + of_controller_ip = os.getenv('OF_CONTROLLER_IP',"localhost") + of_controller_port = os.getenv('OF_CONTROLLER_PORT',"7070") + of_controller_dpid = os.getenv('OF_CONTROLLER_DPID','00:01:02:03:e4:05:e6:07') + of_controller_module = os.getenv('OF_CONTROLLER_MODULE',None) + + main_parser = argparse.ArgumentParser(description='User program to interact with Openflow controller') + 
main_parser.add_argument('--version', action='version', version='%(prog)s ' + version ) + + #main_parser = argparse.ArgumentParser() + subparsers = main_parser.add_subparsers(help='commands') + + config_parser = subparsers.add_parser('config', help="prints configuration values") + config_parser.set_defaults(func=config) + + add_parser = subparsers.add_parser('add', help="adds an openflow rule") + add_parser.add_argument('--verbose', '-v', action='count') + add_parser.add_argument("name", action="store", help="name of the rule") + add_parser.add_argument("--inport", required=True, action="store", type=str, help="match rule: ingress-port") + add_parser.add_argument("--actions", action="store", type=str, help="action with the format: vlan=,out=,...") + add_parser.add_argument("--priority", action="store", type=int, help="rule priority") + add_parser.add_argument("--matchmac", action="store", help="match rule: mac address") + add_parser.add_argument("--matchvlan", action="store", type=int, help="match rule: vlan id") + add_parser.add_argument("--stripvlan", action="append_const", dest="act", const=None, help="alternative to --actions. Use before --out to strip vlan") + add_parser.add_argument("--setvlan", action="append", dest="act", type=int, help="alternative to --actions. Use before --out to set vlan") + add_parser.add_argument("--out", action="append", dest="act", type=str, help="alternative to --actions. 
out= can be used several times") + add_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + add_parser.set_defaults(func=of_add) + + delete_parser = subparsers.add_parser('delete', help="delete an openflow rule") + delete_parser.add_argument('--verbose', '-v', action='count') + delete_parser.add_argument("-f", "--force", action="store_true", help="force deletion without asking") + delete_parser.add_argument("name", action="store", help="name of the rule to be deleted") + delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + delete_parser.set_defaults(func=of_delete) + + switches_parser = subparsers.add_parser('switches', help="list all switches controlled by the OFC") + switches_parser.add_argument('--verbose', '-v', action='count') + switches_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + switches_parser.set_defaults(func=of_switches) + + list_parser = subparsers.add_parser('list', help="list openflow rules") + list_parser.add_argument('--verbose', '-v', action='count') + list_parser.add_argument("--no-translate", "-n", action="store_true", help="Skip translation from openflow index to switch port name") + list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + list_parser.set_defaults(func=of_list) + + #dump_parser = subparsers.add_parser('dump', help="dump openflow rules") + #dump_parser.set_defaults(func=of_dump) + + clear_parser = subparsers.add_parser('clear', help="clear all openflow rules") + clear_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking") + clear_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + clear_parser.set_defaults(func=of_clear) + + install_parser = subparsers.add_parser('install', help="install openflow rules from file") + install_parser.add_argument("file", action="store", help="file with 
rules generated using 'openflow list > rules.txt'") + install_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + install_parser.set_defaults(func=of_install) + + reinstall_parser = subparsers.add_parser('reinstall', help="reinstall openflow rules from VIM rules") + reinstall_parser.set_defaults(func=of_reinstall) + reinstall_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + + portlist_parser = subparsers.add_parser('port-list', help="list the physical to openflow port correspondence") + portlist_parser.set_defaults(func=of_port_list) + portlist_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + + argcomplete.autocomplete(main_parser) + + args = main_parser.parse_args() + module_info=None + try: + if args.func is not config: + params={ "of_ip": of_controller_ip, + "of_port": of_controller_port, + "of_dpid": of_controller_dpid, + "of_user": of_controller_user, + "of_password": of_controller_password, + } + if "debug" in args and args.debug: + streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s" + logging.basicConfig(format=streamformat, level= logging.DEBUG) + logger = logging.getLogger('vim') + logger.setLevel(logging.DEBUG) + params["of_debug"]="DEBUG" + else: + #logger = logging.getLogger('vim').addHandler(logging.NullHandler()) + #logger.setLevel(logging.CRITICAL) + params["of_debug"]="CRITICAL" + + if of_controller_type=='opendaylight': + module = "ODL" + elif of_controller_module != None: + module = of_controller_module + else: + module = of_controller_type + module_info = imp.find_module(module) + + of_conn = imp.load_module("of_conn", *module_info) + try: + ofconnector = of_conn.OF_conn(params) + except Exception as e: + print "Cannot open the Openflow controller '%s': %s" % (type(e).__name__, str(e)) + result = -1 + exit() + result = args.func(args) + if result == None: + result = 0 + + #for some reason it fails if call exit 
inside try instance. Need to call exit at the end !? + except (IOError, ImportError) as e: + print "Cannot open openflow controller module '%s'; %s: %s" % (module, type(e).__name__, str(e)) + result = -1 + #except Exception as e: + # print "Cannot open the Openflow controller '%s': %s" % (type(e).__name__, str(e)) + # result = -1 + except requests.exceptions.ConnectionError as e: + print "Cannot connect to server; %s: %s" % (type(e).__name__, str(e)) + result = -2 + except (KeyboardInterrupt): + print 'Exiting openVIM' + result = -3 + except (SystemExit): + result = -4 + + #close open file + if module_info and module_info[0]: + file.close(module_info[0]) + exit(result) + + + diff --git a/openflow_thread.py b/openflow_thread.py new file mode 100644 index 0000000..a002b89 --- /dev/null +++ b/openflow_thread.py @@ -0,0 +1,576 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: nfvlabs@tid.es
##

'''
Openflow thread support module head.  Provides:
  - FlowBadFormat: exception raised on malformed flow dictionaries
  - change_of2db / change_db2of: in-place converters between the openflow
    in-memory representation of a flow's "actions" (list of (key, value)
    pairs) and the database representation (comma separated "key=value"
    string)
  - of_test_connector: a no-op openflow connector used to run openvim
    without a real openflow controller
'''

__author__ = "Pablo Montes, Alfonso Tierno"
__date__ = "17-jul-2015"

import threading
import time
# 'Queue' was renamed to 'queue' in Python 3; alias it so the module loads
# under both interpreters without changing any call site.
try:
    import Queue
except ImportError:
    import queue as Queue
# 'requests' is only used by code further down this module; tolerate its
# absence at import time so the pure helpers below remain usable.
try:
    import requests
except ImportError:
    requests = None
import logging


class FlowBadFormat(Exception):
    '''Raised when a malformed flow dictionary is found'''


def change_of2db(flow):
    '''Change 'flow' dict in place from openflow format to database format.

    'flow["actions"]' is converted from a list of (key, value) pairs to a
    comma separated string: [(A,B),(C,D),...] -> "A=B,C=D,..."
    Raises FlowBadFormat if 'flow' is not a dict with an 'actions' key or
    if the actions content cannot be serialized.
    '''
    if not isinstance(flow, dict) or "actions" not in flow:
        raise FlowBadFormat("Bad input parameters, expect dictionary with 'actions' as key")
    try:
        # each action is indexed, not unpacked, to keep the original tolerance
        # for tuples longer than two elements
        flow['actions'] = ",".join(action[0] + "=" + str(action[1])
                                   for action in flow['actions'])
    except Exception:
        # narrowed from a bare 'except:'; any shape problem is a format error
        raise FlowBadFormat("Unexpected format at 'actions'")


def change_db2of(flow):
    '''Change 'flow' dict in place from database format to openflow format.

    'flow["actions"]' is converted from a comma separated "key=value" string
    to a list of (key, value) pairs: "A=B,C=D,..." -> [(A,B),(C,D),...]
    Supported keys: "vlan" (integer value, or None for "none"/"strip") and
    "out" (string port name).
    Raises FlowBadFormat on any malformed input.
    '''
    if not isinstance(flow, dict) or not isinstance(flow.get("actions"), str):
        raise FlowBadFormat("Bad input parameters, expect dictionary with 'actions' as key")
    actions = []
    for action_item in flow['actions'].split(","):
        action_tuple = action_item.split("=")
        if len(action_tuple) != 2:
            raise FlowBadFormat("Expected key=value format at 'actions'")
        key = action_tuple[0].strip().lower()
        value = action_tuple[1]
        if key == "vlan":
            if value.strip().lower() in ("none", "strip"):
                actions.append(("vlan", None))   # None means strip the vlan tag
            else:
                try:
                    actions.append(("vlan", int(value)))
                except ValueError:
                    raise FlowBadFormat("Expected integer after vlan= at 'actions'")
        elif key == "out":
            actions.append(("out", str(value)))
        else:
            raise FlowBadFormat("Unexpected '%s' at 'actions'" % action_tuple[0])
    flow['actions'] = actions


class of_test_connector():
    '''Fake openflow connector for testing.

    It does nothing against a real controller and is used to run openvim
    without one; rules are only kept in a local dictionary.
    '''
    def __init__(self, params):
        # params: optional dict; only "of_debug" (logging level name) is read
        self.name = "ofc_test"
        self.rules = {}
        self.logger = logging.getLogger('vim.OF.TEST')
        self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))

    def get_of_switches(self):
        # nothing to report: (result code, empty switch list)
        return 0, ()

    def obtain_port_correspondence(self):
        return 0, ()

    def del_flow(self, flow_name):
        '''Delete rule 'flow_name'. Returns (0, None) or (-1, error text).'''
        if flow_name in self.rules:
            self.logger.debug("del_flow OK")
            del self.rules[flow_name]
            return 0, None
        self.logger.warning("del_flow not found")
        # BUGFIX: the original returned the literal "flow %s not found"
        # without interpolating the flow name into the message
        return -1, "flow %s not found" % flow_name

    def new_flow(self, data):
        '''Store rule 'data' (a dict with at least a "name" key).'''
        self.rules[data["name"]] = data
        self.logger.debug("new_flow OK")
        return 0, None

    def get_of_rules(self, translate_of_ports=True):
        # translate_of_ports is ignored: this connector has no real ports
        return 0, self.rules

    def clear_all_flows(self):
        self.logger.debug("clear_all_flows OK")
        self.rules = {}
        return 0, None
pmp_with_same_vlan, debug='ERROR'): + threading.Thread.__init__(self) + + self.db = db + self.pmp_with_same_vlan = pmp_with_same_vlan + self.name = "openflow" + self.test = of_test + self.db_lock = db_lock + self.OF_connector = OF_connector + self.logger = logging.getLogger('vim.OF') + self.logger.setLevel( getattr(logging, debug) ) + + self.queueLock = threading.Lock() + self.taskQueue = Queue.Queue(2000) + + def insert_task(self, task, *aditional): + try: + self.queueLock.acquire() + task = self.taskQueue.put( (task,) + aditional, timeout=5) + self.queueLock.release() + return 1, None + except Queue.Full: + return -1, "timeout inserting a task over openflow thread " + self.name + + def run(self): + while True: + self.queueLock.acquire() + if not self.taskQueue.empty(): + task = self.taskQueue.get() + else: + task = None + self.queueLock.release() + + if task is None: + time.sleep(1) + continue + + if task[0] == 'update-net': + r,c = self.update_of_flows(task[1]) + #update database status + self.db_lock.acquire() + if r<0: + UPDATE={'status':'ERROR', 'last_error': str(c)} + self.logger.error("processing task 'update-net' %s: %s", str(task[1]), c) + else: + UPDATE={'status':'ACTIVE', 'last_error': None} + self.logger.debug("processing task 'update-net' %s: OK", str(task[1])) + self.db.update_rows('nets', UPDATE, WHERE={'uuid':task[1]}) + self.db_lock.release() + + elif task[0] == 'clear-all': + r,c = self.clear_all_flows() + if r<0: + self.logger.error("processing task 'clear-all': %s", c) + else: + self.logger.debug("processing task 'clear-all': OK") + elif task[0] == 'exit': + self.logger.debug("exit from openflow_thread") + self.terminate() + return 0 + else: + self.logger.error("unknown task %s", str(task)) + + def terminate(self): + pass + #print self.name, ": exit from openflow_thread" + + def update_of_flows(self, net_id): + ports=() + self.db_lock.acquire() + select_= ('type','admin_state_up', 'vlan', 'provider', 'bind_net','bind_type','uuid') + result, 
nets = self.db.get_table(FROM='nets', SELECT=select_, WHERE={'uuid':net_id} ) + #get all the networks binding to this + if result > 0: + if nets[0]['bind_net']: + bind_id = nets[0]['bind_net'] + else: + bind_id = net_id + #get our net and all bind_nets + result, nets = self.db.get_table(FROM='nets', SELECT=select_, + WHERE_OR={'bind_net':bind_id, 'uuid':bind_id} ) + + self.db_lock.release() + if result < 0: + return -1, "DB error getting net: " + nets + #elif result==0: + #net has been deleted + ifaces_nb = 0 + database_flows = [] + for net in nets: + net_id = net["uuid"] + if net['admin_state_up'] == 'false': + net['ports'] = () + else: + self.db_lock.acquire() + nb_ports, net_ports = self.db.get_table( + FROM='ports', + SELECT=('switch_port','vlan','uuid','mac','type','model'), + WHERE={'net_id':net_id, 'admin_state_up':'true', 'status':'ACTIVE'} ) + self.db_lock.release() + if nb_ports < 0: + #print self.name, ": update_of_flows() ERROR getting ports", ports + return -1, "DB error getting ports from net '%s': %s" % (net_id, net_ports) + + #add the binding as an external port + if net['provider'] and net['provider'][:9]=="openflow:": + external_port={"type":"external","mac":None} + external_port['uuid'] = net_id + ".1" #fake uuid + if net['provider'][-5:]==":vlan": + external_port["vlan"] = net["vlan"] + external_port["switch_port"] = net['provider'][9:-5] + else: + external_port["vlan"] = None + external_port["switch_port"] = net['provider'][9:] + net_ports = net_ports + (external_port,) + nb_ports += 1 + net['ports'] = net_ports + ifaces_nb += nb_ports + + # Get the name of flows that will be affected by this NET + self.db_lock.acquire() + result, database_net_flows = self.db.get_table(FROM='of_flows', WHERE={'net_id':net_id}) + self.db_lock.release() + if result < 0: + #print self.name, ": update_of_flows() ERROR getting flows from database", database_flows + return -1, "DB error getting flows from net '%s': %s" %(net_id, database_net_flows) + database_flows 
+= database_net_flows + # Get the name of flows where net_id==NULL that means net deleted (At DB foreign key: On delete set null) + self.db_lock.acquire() + result, database_net_flows = self.db.get_table(FROM='of_flows', WHERE={'net_id':None}) + self.db_lock.release() + if result < 0: + #print self.name, ": update_of_flows() ERROR getting flows from database", database_flows + return -1, "DB error getting flows from net 'null': %s" %(database_net_flows) + database_flows += database_net_flows + + #Get the existing flows at openflow controller + result, of_flows = self.OF_connector.get_of_rules() + if result < 0: + #print self.name, ": update_of_flows() ERROR getting flows from controller", of_flows + return -1, "OF error getting flows: " + of_flows + + if ifaces_nb < 2: + pass + elif net['type'] == 'ptp': + if ifaces_nb > 2: + #print self.name, 'Error, network '+str(net_id)+' has been defined as ptp but it has '+\ + # str(ifaces_nb)+' interfaces.' + return -1, "'ptp' type network cannot connect %d interfaces, only 2" % ifaces_nb + elif net['type'] == 'data': + if ifaces_nb > 2 and self.pmp_with_same_vlan: + # check all ports are VLAN (tagged) or none + vlan_tag = None + for port in ports: + if port["type"]=="external": + if port["vlan"] != None: + if port["vlan"]!=net["vlan"]: + text="External port vlan-tag and net vlan-tag must be the same when flag 'of_controller_nets_with_same_vlan' is True" + #print self.name, "Error", text + return -1, text + if vlan_tag == None: + vlan_tag=True + elif vlan_tag==False: + text="Passthrough and external port vlan-tagged can not be connected when flag 'of_controller_nets_with_same_vlan' is True" + #print self.name, "Error", text + return -1, text + else: + if vlan_tag == None: + vlan_tag=False + elif vlan_tag == True: + text="SR-IOV and external port not vlan-tagged can not be connected when flag 'of_controller_nets_with_same_vlan' is True" + #print self.name, "Error", text + return -1, text + elif port["model"]=="PF" or 
port["model"]=="VFnotShared": + if vlan_tag == None: + vlan_tag=False + elif vlan_tag==True: + text="Passthrough and SR-IOV ports cannot be connected when flag 'of_controller_nets_with_same_vlan' is True" + #print self.name, "Error", text + return -1, text + elif port["model"] == "VF": + if vlan_tag == None: + vlan_tag=True + elif vlan_tag==False: + text="Passthrough and SR-IOV ports cannot be connected when flag 'of_controller_nets_with_same_vlan' is True" + #print self.name, "Error", text + return -1, text + else: + return -1, 'Only ptp and data networks are supported for openflow' + + # calculate new flows to be inserted + result, new_flows = self._compute_net_flows(nets) + if result < 0: + return result, new_flows + + #modify database flows format and get the used names + used_names=[] + for flow in database_flows: + try: + change_db2of(flow) + except FlowBadFormat as e: + self.logger.error("Exception FlowBadFormat: '%s', flow: '%s'",str(e), str(flow)) + continue + used_names.append(flow['name']) + name_index=0 + #insert at database the new flows, change actions to human text + for flow in new_flows: + #1 check if an equal flow is already present + index = self._check_flow_already_present(flow, database_flows) + if index>=0: + database_flows[index]["not delete"]=True + self.logger.debug("Skipping already present flow %s", str(flow)) + continue + #2 look for a non used name + flow_name=flow["net_id"]+"."+str(name_index) + while flow_name in used_names or flow_name in of_flows: + name_index += 1 + flow_name=flow["net_id"]+"."+str(name_index) + used_names.append(flow_name) + flow['name'] = flow_name + #3 insert at openflow + result, content = self.OF_connector.new_flow(flow) + if result < 0: + #print self.name, ": Error '%s' at flow insertion" % c, flow + return -1, content + #4 insert at database + try: + change_of2db(flow) + except FlowBadFormat as e: + #print self.name, ": Error Exception FlowBadFormat '%s'" % str(e), flow + return -1, str(e) + 
self.db_lock.acquire() + result, content = self.db.new_row('of_flows', flow) + self.db_lock.release() + if result < 0: + #print self.name, ": Error '%s' at database insertion" % content, flow + return -1, content + + #delete not needed old flows from openflow and from DDBB, + #check that the needed flows at DDBB are present in controller or insert them otherwise + for flow in database_flows: + if "not delete" in flow: + if flow["name"] not in of_flows: + #not in controller, insert it + result, content = self.OF_connector.new_flow(flow) + if result < 0: + #print self.name, ": Error '%s' at flow insertion" % c, flow + return -1, content + continue + #Delete flow + if flow["name"] in of_flows: + result, content = self.OF_connector.del_flow(flow['name']) + if result<0: + self.logger.error("cannot delete flow '%s' from OF: %s", flow['name'], content ) + continue #skip deletion from database + #delete from database + self.db_lock.acquire() + result, content = self.db.delete_row_by_key('of_flows', 'id', flow['id']) + self.db_lock.release() + if result<0: + self.logger.error("cannot delete flow '%s' from DB: %s", flow['name'], content ) + + return 0, 'Success' + + def clear_all_flows(self): + try: + if not self.test: + self.OF_connector.clear_all_flows() + #remove from database + self.db_lock.acquire() + self.db.delete_row_by_key('of_flows', None, None) #this will delete all lines + self.db_lock.release() + return 0, None + except requests.exceptions.RequestException as e: + #print self.name, ": clear_all_flows Exception:", str(e) + return -1, str(e) + + flow_fields=('priority', 'vlan', 'ingress_port', 'actions', 'dst_mac', 'src_mac', 'net_id') + def _check_flow_already_present(self, new_flow, flow_list): + '''check if the same flow is already present in the flow list + The flow is repeated if all the fields, apart from name, are equal + Return the index of matching flow, -1 if not match''' + index=0 + for flow in flow_list: + equal=True + for f in self.flow_fields: + if 
flow.get(f) != new_flow.get(f): + equal=False + break + if equal: + return index + index += 1 + return -1 + + def _compute_net_flows(self, nets): + new_flows=[] + new_broadcast_flows={} + nb_ports = 0 + + # Check switch_port information is right + self.logger.debug("_compute_net_flows nets: %s", str(nets)) + for net in nets: + for port in net['ports']: + nb_ports += 1 + if not self.test and str(port['switch_port']) not in self.OF_connector.pp2ofi: + error_text= "switch port name '%s' is not valid for the openflow controller" % str(port['switch_port']) + #print self.name, ": ERROR " + error_text + return -1, error_text + + for net_src in nets: + net_id = net_src["uuid"] + for net_dst in nets: + vlan_net_in = None + vlan_net_out = None + if net_src == net_dst: + #intra net rules + priority = 1000 + elif net_src['bind_net'] == net_dst['uuid']: + if net_src.get('bind_type') and net_src['bind_type'][0:5] == "vlan:": + vlan_net_out = int(net_src['bind_type'][5:]) + priority = 1100 + elif net_dst['bind_net'] == net_src['uuid']: + if net_dst.get('bind_type') and net_dst['bind_type'][0:5] == "vlan:": + vlan_net_in = int(net_dst['bind_type'][5:]) + priority = 1100 + else: + #nets not binding + continue + for src_port in net_src['ports']: + vlan_in = vlan_net_in + if vlan_in == None and src_port['vlan'] != None: + vlan_in = src_port['vlan'] + elif vlan_in != None and src_port['vlan'] != None: + #TODO this is something that we can not do. It requires a double VLAN check + #outer VLAN should be src_port['vlan'] and inner VLAN should be vlan_in + continue + + # BROADCAST: + broadcast_key = src_port['uuid'] + "." 
+ str(vlan_in) + if broadcast_key in new_broadcast_flows: + flow_broadcast = new_broadcast_flows[broadcast_key] + else: + flow_broadcast = {'priority': priority, + 'net_id': net_id, + 'dst_mac': 'ff:ff:ff:ff:ff:ff', + "ingress_port": str(src_port['switch_port']), + 'actions': [] + } + new_broadcast_flows[broadcast_key] = flow_broadcast + if vlan_in is not None: + flow_broadcast['vlan_id'] = str(vlan_in) + + for dst_port in net_dst['ports']: + vlan_out = vlan_net_out + if vlan_out == None and dst_port['vlan'] != None: + vlan_out = dst_port['vlan'] + elif vlan_out != None and dst_port['vlan'] != None: + #TODO this is something that we can not do. It requires a double VLAN set + #outer VLAN should be dst_port['vlan'] and inner VLAN should be vlan_out + continue + #if src_port == dst_port: + # continue + if src_port['switch_port'] == dst_port['switch_port'] and vlan_in == vlan_out: + continue + flow = { + "priority": priority, + 'net_id': net_id, + "ingress_port": str(src_port['switch_port']), + 'actions': [] + } + if vlan_in is not None: + flow['vlan_id'] = str(vlan_in) + # allow that one port have no mac + if dst_port['mac'] is None or nb_ports==2: # point to point or nets with 2 elements + flow['priority'] = priority-5 # less priority + else: + flow['dst_mac'] = str(dst_port['mac']) + + if vlan_out == None: + if vlan_in != None: + flow['actions'].append( ('vlan',None) ) + else: + flow['actions'].append( ('vlan', vlan_out ) ) + flow['actions'].append( ('out', str(dst_port['switch_port'])) ) + + if self._check_flow_already_present(flow, new_flows) >= 0: + self.logger.debug("Skipping repeated flow '%s'", str(flow)) + continue + + new_flows.append(flow) + + # BROADCAST: + if nb_ports <= 2: # point to multipoint or nets with more than 2 elements + continue + out = (vlan_out, str(dst_port['switch_port'])) + if out not in flow_broadcast['actions']: + flow_broadcast['actions'].append( out ) + + #BROADCAST + for flow_broadcast in new_broadcast_flows.values(): + if 
len(flow_broadcast['actions'])==0: + continue #nothing to do, skip + flow_broadcast['actions'].sort() + if 'vlan_id' in flow_broadcast: + previous_vlan = 0 # indicates that a packet contains a vlan, and the vlan + else: + previous_vlan = None + final_actions=[] + action_number = 0 + for action in flow_broadcast['actions']: + if action[0] != previous_vlan: + final_actions.append( ('vlan', action[0]) ) + previous_vlan = action[0] + if self.pmp_with_same_vlan and action_number: + return -1, "Can not interconnect different vlan tags in a network when flag 'of_controller_nets_with_same_vlan' is True." + action_number += 1 + final_actions.append( ('out', action[1]) ) + flow_broadcast['actions'] = final_actions + + if self._check_flow_already_present(flow_broadcast, new_flows) >= 0: + self.logger.debug("Skipping repeated flow '%s'", str(flow_broadcast)) + continue + + new_flows.append(flow_broadcast) + + #UNIFY openflow rules with the same input port and vlan and the same output actions + #These flows differ at the dst_mac; and they are unified by not filtering by dst_mac + #this can happen if there is only two ports. 
It is converted to a point to point connection + flow_dict={} # use as key vlan_id+ingress_port and as value the list of flows matching these values + for flow in new_flows: + key = str(flow.get("vlan_id"))+":"+flow["ingress_port"] + if key in flow_dict: + flow_dict[key].append(flow) + else: + flow_dict[key]=[ flow ] + new_flows2=[] + for flow_list in flow_dict.values(): + convert2ptp=False + if len (flow_list)>=2: + convert2ptp=True + for f in flow_list: + if f['actions'] != flow_list[0]['actions']: + convert2ptp=False + break + if convert2ptp: # add only one unified rule without dst_mac + self.logger.debug("Convert flow rules to NON mac dst_address " + str(flow_list) ) + flow_list[0].pop('dst_mac') + flow_list[0]["priority"] -= 5 + new_flows2.append(flow_list[0]) + else: # add all the rules + new_flows2 += flow_list + return 0, new_flows2 + diff --git a/openvim b/openvim new file mode 100755 index 0000000..ae49e29 --- /dev/null +++ b/openvim @@ -0,0 +1,1047 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# PYTHON_ARGCOMPLETE_OK + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +This is a client for managing the openvim server. 
+Useful for human CLI management of items at openvim +''' +__author__="Alfonso Tierno, Gerardo Garcia" +__date__ ="$26-nov-2014 19:09:29$" +__version__="0.4.0-r443" +version_date="Nov 2015" +name="openvim" + +from argcomplete.completers import FilesCompleter +import os +import argparse +import argcomplete +import requests +import json +import yaml +from jsonschema import validate as js_v, exceptions as js_e + +class ArgumentParserError(Exception): pass + +class ThrowingArgumentParser(argparse.ArgumentParser): + def error(self, message): + print "Error: %s" %message + print + self.print_usage() + #self.print_help() + print + print "Type 'openvim -h' for help" + raise ArgumentParserError + +global vim_config +config_items=("HOST", "PORT", "ADMIN_PORT", "TENANT") +show_at_verbose1=('status', 'created', 'path', 'last_error','description','hostId','progress', + 'ram','vcpus','type','shared','admin_state_up', 'enabled', 'ip_name') + +#template for creating new items +template={ + "port":{ + "${}":[ + "${name} provide a port name", + "${net_id} provide the network uuid (null):", + "${vlan} provide the vlan if any (null):", + "${port} provide the attached switch port (Te0/47)", + "${mac} provide the mac of external device if known (null):" + ], + "port":{ + "name": "${name}", + "network_id": "${net_id null}", + "type": "external", + "binding:vlan": "${vlan null-int}", + "binding:switch_port": "${port}", + "mac_address": "${mac null}" + }, + }, + "network":{ + "${}":[ + "${name} provide a network name", + "${type} provide a type: bridge_data,bridge_man,data,ptp (data)", + "${shared} external network: true,false (true)", + "${phy} conected to: bridge:name,macvtap:iface,default (null)" + ], + "network":{ + "name": "${name}", + "type": "${type}", + "provider:physical": "${phy null}", + "shared": "${shared bool}" + } + }, + "host": { + "${}":[ + "${name} host name", + "${user} host user (user)", + "${ip_name} host access IP or name (${name})", + "${description} host description 
(${name})" + ], + + "host":{ + "name": "${name}", + "user": "${user}", + "ip_name": "${ip_name}", + "description": "${description}" + } + }, + "flavor":{ + "${}":[ + "${name} flavor name", + "${description} flavor description (${name})", + "${processor_ranking} processor ranking (100)", + "${memory} memory in GB (2)", + "${threads} threads needed (2)" + ], + "flavor":{ + "name":"${name}", + "description":"${description}", + "extended":{ + "processor_ranking":"${processor_ranking int}", + "numas":[ + { + "memory":"${memory int}", + "threads":"${threads int}" + } + ] + } + } + }, + "tenant":{ + "${}":[ + "${name} tenant name", + "${description} tenant description (${name})" + ], + "tenant":{ + "name": "${name}", + "description": "${description}" + } + }, + "image":{ + "${}":[ + "${name} image name", + "${path} image path (/path/to/shared/compute/host/folder/image.qcow2)", + "${description} image description (${name})" + ], + "image":{ + "name":"${name}", + "description":"${description}", + "path":"${path}" + } + }, + "server":{ + "${}":[ + "${name} provide a name (VM)", + "${description} provide a description (${name})", + "${image_id} provide image_id uuid", + "${flavor_id} provide flavor_id uuid", + "${network0} provide a bridge network id; enter='default' (00000000-0000-0000-0000-000000000000)", + "${network1} provide a bridge network id; enter='virbrMan' (60f5227e-195f-11e4-836d-52540030594e)" + ], + "server":{ + "networks":[ + { + "name":"mgmt0", + "vpci": "0000:00:0a.0", + "uuid":"${network0}" + }, + { + "name":"ge0", + "vpci": "0000:00:0b.0", + "uuid":"${network1}" + } + ], + "name":"${name}", + "description":"${description}", + "imageRef": "${image_id}", + "flavorRef": "${flavor_id}" + } + } +} + +def check_configuration(*exception ): + ''' + Check that the configuration variables are present. 
+ exception can contain variables that are not tested, normally because is not used for the command + ''' + #print exception + for item in config_items: + if item not in exception and vim_config[item]==None: + print "OPENVIM_"+item+" variable not defined. Try '" + name + " config file_cfg' or 'export OPENVIM_"+item+"=something'" + exit(-401); #HTTP_Unauthorized + +def check_valid_uuid(uuid): + ''' + Determines if the param uuid is a well formed uuid. Otherwise it is consider a name + ''' + id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"} + try: + js_v(uuid, id_schema) + return True + except js_e.ValidationError: + return False + +def _print_verbose(row_data, element, verbose=0): + #print row_data + data = row_data[element] + if verbose==1 or verbose==2: + data2= dict((k,v) for (k,v) in data.iteritems() if k in show_at_verbose1 and v!=None) + if 'image' in data and 'id' in data['image']: + data2['image'] = data['image']['id'] + if 'flavor' in data and 'id' in data['flavor']: + data2['flavor'] = data['flavor']['id'] + if verbose==2: + #TODO add more intems in a digest mode, extended, numas ... + pass + #if "numas" in data + #if "extended" in data + else: + data2= data + #print json.dumps(c2, indent=4) +# keys = data.keys() +# for key in keys: +# if data[key]==None: del data[key] +# elif key=='id' or key=='name' or key=='links': del data[key] + + #print json.dumps(data2, indent=4) + data2={element: data2} + print yaml.safe_dump(data2, indent=4, default_flow_style=False) + +def vim_read(url): + ''' + Send a GET http to VIM + ''' + headers_req = {'content-type': 'application/json'} + try: + vim_response = requests.get(url, headers = headers_req) + if vim_response.status_code == 200: + #print vim_response.json() + #print json.dumps(vim_response.json(), indent=4) + content = vim_response.json() + return 1, content + #print http_content + else: + #print " Error. 
VIM response '%s': not possible to GET %s, error %s" % (vim_response.status_code, url, vim_response.text) + return -vim_response.status_code, vim_response.text + except requests.exceptions.RequestException, e: + return -1, " Exception GET at '"+url+"' " + str(e.message) + +def vim_delete(url): + ''' + Send a DELETE http to VIM + ''' + headers_req = {'content-type': 'application/json'} + try: + vim_response = requests.delete(url, headers = headers_req) + if vim_response.status_code != 200: + #print " Error. VIM response '%s': not possible to DELETE %s, error %s" % (vim_response.status_code, url, vim_response.text) + return -vim_response.status_code, vim_response.text + except requests.exceptions.RequestException, e: + return -1, " Exception DELETE at '"+url+"' " + str(e.message) + return 1, vim_response.json() + +def vim_action(url, payload): + ''' + Send a POST http to VIM + ''' + headers_req = {'content-type': 'application/json'} + try: + vim_response = requests.post(url, data=json.dumps(payload), headers = headers_req) + if vim_response.status_code != 200: + #print " Error. VIM response '%s': not possible to POST %s, error %s" % (vim_response.status_code, url, vim_response.text) + return -vim_response.status_code, vim_response.text + except requests.exceptions.RequestException, e: + return -1, " Exception POST at '"+url+"' " + str(e.message) + return 1, vim_response.json() + +def vim_edit(url, payload): + headers_req = {'content-type': 'application/json'} + try: + vim_response = requests.put(url, data=json.dumps(payload), headers = headers_req) + if vim_response.status_code != 200: + #print " Error. 
VIM response '%s': not possible to PUT %s, error %s" % (vim_response.status_code, url, vim_response.text) + return -vim_response.status_code, vim_response.text + except requests.exceptions.RequestException, e: + return -1, " Exception PUT at '"+url+"' " + str(e.message) + return 1, vim_response.json() + +def vim_create(url, payload): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + #print str(payload) + try: + vim_response = requests.post(url, data=json.dumps(payload), headers=headers_req) + if vim_response.status_code != 200: + #print " Error. VIM response '%s': not possible to POST %s, error %s" % (vim_response.status_code, url, vim_response.text) + return -vim_response.status_code, vim_response.text + except requests.exceptions.RequestException, e: + return -1, " Exception POST at '"+url+"' " + str(e.message) + return 1, vim_response.json() + +def parse_yaml_json(text, file_name=None): + parser_json=None + if file_name: + if file_name[-5:]=='.yaml' or file_name[-4:]=='.yml': + parser_json = False + elif file_name[-5:]=='.json': + parser_json = True + if parser_json==None: + parser_json=True if '\t' in text else False + + if parser_json: #try parse in json format, because yaml does not admit tabs + try: + data = json.loads(text) + return 0, data + except Exception as e: + return -1, "Error json format: " + str(e) + #try to parse in yaml format + try: + data = yaml.load(text) + return 0, data + except yaml.YAMLError as exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at line:%s column:%s" % (mark.line+1, mark.column+1) + return -1, " Error yaml format error" + error_pos + +def change_string(text, var_list): + ''' + See change_var_recursively help + Used for changed any '${var format}' content inside the 'text' string + into the corresponding value at 'var_list'[var] + 'format' is optional, string by default. 
Contain a - separated formats,ej, int-null + ''' + end=0 + type_=None + while True: + ini = text.find("${", end) + if ini<0: return text + end = text.find("}", ini) + if end<0: return text + end+=1 + + var = text[ini:end] + if ' ' in var: + kk=var.split(" ") + var=kk[0]+"}" + type_=kk[-1][:-1] + var = var_list.get(var, None) + if var==None: return text + + text = text[:ini] + var + text[end:] + if type_ != None: + if 'null' in type_ and text=="null": + return None + if 'int' in type_ : #and text.isnumeric(): + return int(text) + if 'bool' in type_ : #and text.isnumeric(): + if text.lower()=="true": return True + elif text.lower()=="false": return False + else: + print "input boolean paramter must be 'true' or 'false'" + exit(1) + return text + +def chage_var_recursively(data, var_list): + ''' + Check recursively the content of 'data', and look for "*${*}*" variables and changes + It assumes that this variables are not in the key of dictionary, + The overall target is having json/yaml templates with this variables in the text. + The user is asked for a value that is replaced wherever appears + Attributes: + 'data': dictionary, or list. 
None or empty is considered valid + 'var_list': dictionary (name:change) pairs + Return: + None, data is modified + ''' + + if type(data) is dict: + for k in data.keys(): + if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list: + chage_var_recursively(data[k], var_list) + elif type(data[k]) is str: + data[k] = change_string(data[k], var_list) + if type(data) is list: + for k in range(0,len(data)): + if type(data[k]) is dict or type(data[k]) is list: + chage_var_recursively(data[k], var_list) + elif type(data[k]) is str: + data[k] = change_string(data[k], var_list) + +def change_var(data, default_values={}): + ''' Look for a text "${}" key at 'data' that indicates that this json contains + variables that must be ask for values to the user and changes in the text of + the dictionary + 'default_values' contain a dictionary of values that must be used and not be asked + Useful for creating templates + "${}" entry contain a list with the text: + ${var-name} prompt text to show user (Default value to allocate if user type CR) + ''' + if type(data) is not dict: + return -1, "Format error, not a object (dictionary)" + if "${}" not in data: + return 0, data + + var_list={} + for var in data["${}"]: + r = var.find("}",) + 1 + if r<=2 or var[:2] != '${': + return -1, "Format error at '${}':" + var + #change variables inside description text + if "${" in var[r:]: + var = var[:r] + change_string(var[r:], var_list) + d_start = var.rfind("(",) + 1 + d_end = var.rfind(")",) + if d_start>0 and d_end>=d_start: + default = var[d_start:d_end] + else: default=None + if var[2:r-1] in default_values: + var_list[ var[:r] ] = default_values[ var[2:r-1] ] + continue + v = raw_input(var[r:] + "? ") + if v=="": + if default != None: + v = default + else: + v = raw_input(" empty string? 
try again: ") + var_list[ var[:r] ] = str(v) + del data["${}"] + chage_var_recursively(data, var_list) + return 0, data + +def load_file(file_, parse=False): + try: + f = open(file_, 'r') + read_data = f.read() + f.close() + if not parse: + return 0, read_data + except IOError, e: + return -1, " Error opening file '" + file_ + "': " + e.args[1] + return parse_yaml_json(read_data, file_) + +def load_file_or_yaml(content): + ''' + 'content' can be or a yaml/json file or a text containing a yaml/json text format + This function autodetect, trying to load and parse the filename 'content', + if fails trying to parse the 'content' as text + Returns the dictionary once parsed, or print an error and finish the program + ''' + r,payload = load_file(content, parse=True) + if r<0: + if r==-1 and "{" in content or ":" in content: + #try to parse directly + r,payload = parse_yaml_json(content) + if r<0: + print payload + exit (-1) + else: + print payload + exit (-1) + return payload + +def config(args): + #print "config-list",args + if args.file != None: + try: + f = open(args.file, 'r') + read_data = f.read() + f.close() + except IOError, e: + print " Error opening file '" + args.file + "': " + e.args[1] + return -1 + try: + data = yaml.load(read_data) + #print data + if "http_host" in data: + print " export OPENVIM_HOST="+ data["http_host"] + if "http_port" in data: + print " export OPENVIM_PORT="+ str(data["http_port"]) + #vim_config[item] = data["OPENVIM_"+item] #TODO find the way to change envioronment + #os.setenv('OPENVIM_'+item, vim_config[item]) + if "http_admin_port" in data: + print " export OPENVIM_ADMIN_PORT="+ str(data["http_admin_port"]) + if "tenant_id" in data: + print " export OPENVIM_TENANT="+ data["tenant_id"] + return 0 + except yaml.YAMLError, exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1) + print " Error yaml/json format error at '"+ args.file 
+"'"+error_pos + return -1 + print args.file + print "OPENVIM_HOST: %s" %vim_config["HOST"] + print "OPENVIM_PORT: %s" %vim_config["PORT"] + print "OPENVIM_ADMIN_PORT: %s" %vim_config["ADMIN_PORT"] + print "OPENVIM_TENANT: %s" %vim_config["TENANT"] + return 0 + +def element_new(args): + #print args + tenant="" + if args.element in ('flavors','images','servers'): + check_configuration( "ADMIN_PORT" ) + tenant="/"+vim_config["TENANT"] + else: + check_configuration("ADMIN_PORT", "TENANT") + tenant="" + + default_values={} + if args.name != None: + default_values["name"] = args.name + if "description" in args and args.description != None: + default_values["description"] = args.description + if "path" in args and args.path != None: + default_values["path"] = args.path + if args.file==None: + payload= template[args.element[:-1] ] + payload = yaml.load(str(payload)) #with this trick we make a completely copy of the data, so to not modified the original one + else: + payload=load_file_or_yaml(args.file) + r,c= change_var(payload, default_values) + if r<0: + print "Template error", c + return -1 + payload=c + #print payload + if args.element[:-1] not in payload: + payload = {args.element[:-1]: payload } + item = payload[args.element[:-1]] + + url = "http://%s:%s/openvim%s/%s" %(vim_config["HOST"], vim_config["PORT"], tenant, args.element) + if "ADMIN_PORT" in vim_config and vim_config["ADMIN_PORT"]!=None: + url_admin = "http://%s:%s/openvim%s/%s" %(vim_config["HOST"], vim_config["ADMIN_PORT"], tenant, args.element) + else: + url_admin = None + + if args.name != None: + item["name"] = args.name + if "description" in args and args.description != None: + item["description"] = args.description + if "path" in args and args.path != None: + item["path"] = args.path + + + r,c = vim_create(url, payload) + if r==-401 and url_admin!=None and url != url_admin: #Unauthorized, try with admin privileges + url=url_admin + r,c = vim_create(url, payload) + if r<0: + print c + return -r + 
else: + #print c + item=c[ args.element[:-1] ] + uuid=item.get('id', None) + if uuid is None: + uuid=item.get('uuid', 'uuid-not-found?') + name = item.get('name', '') + print " " + uuid +" "+ name.ljust(20) + " Created" + for e in ('warning', 'error', 'last_error'): + if e in item: print e + "\n" + item[e] + if args.verbose!=None: + _print_verbose(c, args.element[:-1], args.verbose) + return 0 + +def element_action(args): + filter_qs = "" + tenant="" + if args.element in ('flavors','images','servers'): + check_configuration( "ADMIN_PORT") + tenant="/"+vim_config["TENANT"] + else: + check_configuration("ADMIN_PORT", "TENANT") + tenant="" + url = "http://%s:%s/openvim%s/%s" %(vim_config["HOST"], vim_config["PORT"], tenant, args.element) + if "ADMIN_PORT" in vim_config and vim_config["ADMIN_PORT"]!=None: + url_admin = "http://%s:%s/openvim%s/%s" %(vim_config["HOST"], vim_config["ADMIN_PORT"], tenant, args.element) + else: + url_admin = None + + if args.filter: + filter_qs += "?" + args.filter + if args.name!=None: + if check_valid_uuid(args.name): + if len(filter_qs) > 0: filter_qs += "&" + "id=" + str(args.name) + else: filter_qs += "?" + "id=" + str(args.name) + else: + if len(filter_qs) > 0: filter_qs += "&" + "name=" + str(args.name) + else: filter_qs += "?" 
+ "name=" + str(args.name) + + + r,c = vim_read(url + filter_qs) + if r==-401 and url_admin!=None and url != url_admin: #Unauthorized, try with admin privileges + r,c = vim_read(url_admin + filter_qs) + if r<0: + print "Error:", c + return -r + if args.action=='createImage': + payload={ args.action: {"name":args.imageName} } + if args.description != None: + payload[args.action]["description"]=args.description + if args.path != None: + payload[args.action]["path"]=args.path + else: + payload={ args.action: None} + #print json.dumps(c, indent=4) + item_list = c[ args.element ] + if len(item_list)==0 and args.name != None: + print " Not found " + args.element + " " + args.name + return 404 #HTTP_Not_Found + result = 0 + for item in item_list: + uuid=item.get('id', None) + if uuid is None: + uuid=item.get('uuid', None) + if uuid is None: + print "Id not found" + continue + name = item.get('name', '') + if not args.force: + r = raw_input(" Action over " + args.element + " " + uuid + " " + name + " (y/N)? ") + if len(r)>0 and r[0].lower()=="y": + pass + else: + continue + r,c = vim_action(url + "/" + uuid + "/action", payload) + if r==-401 and url_admin!=None and url != url_admin: #Unauthorized, try with admin privileges + url=url_admin + r,c = vim_action(url + "/" + uuid + "/action", payload) + if r<0: + print " " + uuid +" "+ name.ljust(20) + " " + c + result = -r + else: + if args.action == "createImage": #response contain an {image: {...} }, not a {server: {...} }. 
+ print " " + c["image"]["id"] +" "+ c["image"]["name"].ljust(20) + args.element="images" + else: + print " " + uuid +" "+ name.ljust(20) + " "+ args.action + if "verbose" in args and args.verbose!=None: + _print_verbose(c, args.element[:-1], args.verbose) + return result + +def element_edit(args): + filter_qs = "" + tenant="" + if args.element in ('flavors','images','servers'): + check_configuration( "ADMIN_PORT") + tenant="/"+vim_config["TENANT"] + else: + check_configuration("ADMIN_PORT", "TENANT") + tenant="" + + url = "http://%s:%s/openvim%s/%s" %(vim_config["HOST"], vim_config["PORT"], tenant, args.element) + if "ADMIN_PORT" in vim_config and vim_config["ADMIN_PORT"]!=None: + url_admin = "http://%s:%s/openvim%s/%s" %(vim_config["HOST"], vim_config["ADMIN_PORT"], tenant, args.element) + else: + url_admin = None + + if args.filter: + filter_qs += "?" + args.filter + if args.name!=None: + if check_valid_uuid(args.name): + if len(filter_qs) > 0: filter_qs += "&" + "id=" + str(args.name) + else: filter_qs += "?" + "id=" + str(args.name) + else: + if len(filter_qs) > 0: filter_qs += "&" + "name=" + str(args.name) + else: filter_qs += "?" 
+ "name=" + str(args.name) + + + r,c = vim_read(url + filter_qs) + if r==-401 and url_admin!=None and url != url_admin: #Unauthorized, try with admin privileges + r,c = vim_read(url_admin + filter_qs) + if r<0: + print "Error:", c + return -r + + payload=load_file_or_yaml(args.file) + r2,c2= change_var(payload) + if r2<0: + print "Template error", c2 + return -1 + payload=c2 + if args.element[:-1] not in payload: + payload = {args.element[:-1]: payload } + + #print json.dumps(c, indent=4) + item_list = c[ args.element ] + if len(item_list)==0 and args.name != None: + print " Not found " + args.element + " " + args.name + return 404 #HTTP_Not_Found + result = 0 + for item in item_list: + uuid=item.get('id', None) + if uuid is None: + uuid=item.get('uuid', None) + if uuid is None: + print "Id not found" + continue + name = item.get('name', '') + if not args.force or (args.name==None and args.filer==None): + r = raw_input(" Edit " + args.element + " " + uuid + " " + name + " (y/N)? ") + if len(r)>0 and r[0].lower()=="y": + pass + else: + continue + r,c = vim_edit(url + "/" + uuid, payload) + if r==-401 and url_admin!=None and url != url_admin: #Unauthorized, try with admin privileges + url=url_admin + r,c = vim_edit(url + "/" + uuid, payload) + if r<0: + print " " + uuid +" "+ name.ljust(20) + " " + c + result = -r + else: + print " " + uuid +" "+ name.ljust(20) + " edited" + if "verbose" in args and args.verbose!=None: + _print_verbose(c, args.element[:-1], args.verbose) + return result + +def element_action_edit(args): + #print args + if args.element=='ports': + if args.action=='attach': + args.file='network_id: ' + args.network_id + else: #args.action=='detach' + args.file='network_id: null' + if args.action=='up': + args.file='admin_state_up: true' + if args.action=='down': + args.file='admin_state_up: false' + return element_edit(args) + +def element_delete(args): + filter_qs = "" + tenant="" + if args.element in ('flavors','images','servers'): + 
check_configuration("ADMIN_PORT" ) + tenant="/"+vim_config["TENANT"] + else: + check_configuration("ADMIN_PORT", "TENANT") + tenant="" + + url = "http://%s:%s/openvim%s/%s" %(vim_config["HOST"], vim_config["PORT"], tenant, args.element) + if "ADMIN_PORT" in vim_config and vim_config["ADMIN_PORT"]!=None: + url_admin = "http://%s:%s/openvim%s/%s" %(vim_config["HOST"], vim_config["ADMIN_PORT"], tenant, args.element) + else: + url_admin = None + + if args.filter: + filter_qs += "?" + args.filter + if args.name!=None: + if check_valid_uuid(args.name): + if len(filter_qs) > 0: filter_qs += "&" + "id=" + str(args.name) + else: filter_qs += "?" + "id=" + str(args.name) + else: + if len(filter_qs) > 0: filter_qs += "&" + "name=" + str(args.name) + else: filter_qs += "?" + "name=" + str(args.name) + + + r,c = vim_read(url + filter_qs) + if r==-401 and url_admin!=None and url != url_admin: #Unauthorized, try with admin privileges + r,c = vim_read(url_admin + filter_qs) + if r<0: + print "Error:", c + return -r + + #print json.dumps(c, indent=4) + item_list = c[ args.element ] + if len(item_list)==0 and args.name != None: + print " Not found " + args.element + " " + args.name + return 404 #HTTP_Not_Found + result = 0 + for item in item_list: + uuid=item.get('id', None) + if uuid is None: + uuid=item.get('uuid', None) + if uuid is None: + print "Id not found" + result = -500 + continue + name = item.get('name', '') + if not args.force: + r = raw_input(" Delete " + args.element + " " + uuid + " " + name + " (y/N)? 
") + if len(r)>0 and r[0].lower()=="y": + pass + else: + continue + r,c = vim_delete(url + "/" + uuid) + if r==-401 and url_admin!=None and url != url_admin: #Unauthorized, try with admin privileges + url=url_admin + r,c = vim_delete(url + "/" + uuid) + if r<0: + print " " + uuid +" "+ name.ljust(20) + " " + c + result = -r + else: + print " " + uuid +" "+ name.ljust(20) + " deleted" + return result + +def element_list(args): + #print "element_list", args + filter_qs = "" + tenant="" + if args.element in ('flavors','images','servers'): + check_configuration( "ADMIN_PORT" ) + tenant="/"+vim_config["TENANT"] + else: + check_configuration( "ADMIN_PORT", "TENANT" ) + tenant="" + #if args.name: + # what += "/" + args.name + url = "http://%s:%s/openvim%s/%s" %(vim_config["HOST"], vim_config["PORT"], tenant, args.element) + if "ADMIN_PORT" in vim_config and vim_config["ADMIN_PORT"]!=None: + url_admin = "http://%s:%s/openvim%s/%s" %(vim_config["HOST"], vim_config["ADMIN_PORT"], tenant, args.element) + else: + url_admin = None + #print " get", what, " >>>>>>>> ", + if args.filter: + filter_qs += "?" + args.filter + if args.name!=None: + if check_valid_uuid(args.name): + if len(filter_qs) > 0: filter_qs += "&" + "id=" + str(args.name) + else: filter_qs += "?" + "id=" + str(args.name) + else: + if len(filter_qs) > 0: filter_qs += "&" + "name=" + str(args.name) + else: filter_qs += "?" 
+ "name=" + str(args.name) + + + r,c = vim_read(url + filter_qs) + if r==-401 and url_admin!=None and url != url_admin: #Unauthorized, try with admin privileges + r,c = vim_read(url_admin + filter_qs) + if r<0: + print "Error:", c + return -r + #print json.dumps(c, indent=4) + result = 0 + item_list = c[ args.element ] + verbose=0 + #if args.name!=None and len(item_list)==1: + # verbose+=1 + if args.verbose!=None: + verbose += args.verbose + for item in item_list: + extra="" + if args.element=="servers" or args.element=="networks": extra = " "+item['status'] + if args.element in ("hosts","networks","ports") and not item['admin_state_up']: extra += " admin_state_up=false" + print item['id']+" "+item['name'].ljust(20) + extra + if verbose>0: + r2,c2 = vim_read(url + "/"+ item['id']) + if r2==-401 and url_admin!=None and url != url_admin: #Unauthorized, try with admin privileges + url=url_admin + r2,c2 = vim_read(url + "/"+ item['id']) + if r2>0: + _print_verbose(c2, args.element[:-1], verbose) + + return result + +def openflow_action(args): + if args.action=='port-list': + url = "http://%s:%s/openvim/networks/openflow/ports" %(vim_config["HOST"], vim_config["PORT"]) + r,c = vim_read(url) + elif args.action=='rules-list' or args.action=='reinstall': + PORT = vim_config["PORT"] + if args.action=='reinstall': + if "ADMIN_PORT" not in vim_config: + print "OPENVIM_ADMIN_PORT variable not defined" + return 404 #HTTP_Not_Found + PORT = vim_config["ADMIN_PORT"] + if args.name!=None: + url = "http://%s:%s/openvim/networks" %(vim_config["HOST"], PORT) + if check_valid_uuid(args.name): + url += "?id=" + str(args.name) + else: + url += "?name=" + str(args.name) + r,c = vim_read(url) + if r<0: + print "Error:", c + return -r + if len (c["networks"]) == 0: + print " Network not found" + return 404 #HTTP_Not_Found + if len (c["networks"]) > 1: + print " More than one net with this name found. 
Use uuid instead of name to concretize" + return 404 #HTTP_Not_Found + network = c["networks"][0]["id"] + else: + network="all" + url = "http://%s:%s/openvim/networks/%s/openflow" %(vim_config["HOST"], PORT, network) + if args.action=='reinstall': + r,c = vim_edit(url, None) + else: + r,c = vim_read(url) + elif args.action=='clear-all': + if "ADMIN_PORT" not in vim_config: + print "OPENVIM_ADMIN_PORT variable not defined" + return 401 # HTTP_Unauthorized + url = "http://%s:%s/openvim/networks/openflow/clear" %(vim_config["HOST"], vim_config["ADMIN_PORT"]) + r,c = vim_delete(url) + else: + return 400 #HTTP_Bad_Request + if r<0: + print "Error:", c + return -r + else: + print yaml.safe_dump(c, indent=4, default_flow_style=False) + return 0 + +if __name__=="__main__": + + item_dict={'vm':'servers','host':'hosts','tenant':'tenants','image':'images','flavor':'flavors','net':'networks','port':'ports'} + + vim_config={} + vim_config["HOST"] = os.getenv('OPENVIM_HOST', 'localhost') + vim_config["PORT"] = os.getenv('OPENVIM_PORT', '9080') + vim_config["ADMIN_PORT"] = os.getenv('OPENVIM_ADMIN_PORT', '9085') + vim_config["TENANT"] = os.getenv('OPENVIM_TENANT', None) + + + main_parser = ThrowingArgumentParser(description='User program to interact with OPENVIM-SERVER (openvimd)') + #main_parser = argparse.ArgumentParser(description='User program to interact with OPENVIM-SERVER (openvimd)') + main_parser.add_argument('--version', action='version', version='%(prog)s ' + __version__ + ' '+version_date) + + subparsers = main_parser.add_subparsers(help='commands') + + config_parser = subparsers.add_parser('config', help="prints configuration values") + config_parser.add_argument("file", nargs='?', help="configuration file to extract the configuration").completer = FilesCompleter + config_parser.set_defaults(func=config) + #HELP + for item in item_dict: + #LIST + element_list_parser = subparsers.add_parser(item+'-list', help="lists information about "+item_dict[item]) + 
element_list_parser.add_argument("name", nargs='?', help="name or ID of the " + item) + element_list_parser.add_argument("-F","--filter", action="store", help="filter query string") + element_list_parser.add_argument('--verbose', '-v', action='count', help="increment the verbosity") + element_list_parser.set_defaults(func=element_list, element=item_dict[item]) + #NEW + if item=='host': + element_new_parser = subparsers.add_parser(item+'-add', help="adds a new compute node") + else: + element_new_parser = subparsers.add_parser(item+'-create', help="creates a new "+item_dict[item][:-1]) + element_new_parser.add_argument("file", nargs='?', help="json/yaml text or file with content").completer = FilesCompleter + element_new_parser.add_argument("--name", action="store", help="Use this name") + if item!="network": + element_new_parser.add_argument("--description", action="store", help="Use this descrition") + if item=="image": + element_new_parser.add_argument("--path", action="store", help="Use this path") + element_new_parser.add_argument('--verbose', '-v', action='count', help="increment the verbosity") + element_new_parser.set_defaults(func=element_new, element=item_dict[item]) + #DELETE + if item=='host': + element_del_parser = subparsers.add_parser(item+'-remove', help="removes a compute node") + else: + element_del_parser = subparsers.add_parser(item+'-delete', help="deletes one or several "+item_dict[item]) + element_del_parser.add_argument("name", nargs='?', help="name or ID of the "+item+", if missing means all") + element_del_parser.add_argument("-F","--filter", action="store", help="filter query string") + element_del_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + element_del_parser.set_defaults(func=element_delete, element=item_dict[item]) + #EDIT + element_edit_parser = subparsers.add_parser(item+'-edit', help="edits one or several "+item_dict[item]) + element_edit_parser.add_argument("name", help="name or 
ID of the "+item+"") + element_edit_parser.add_argument("file", help="json/yaml text or file with the changes").completer = FilesCompleter + element_edit_parser.add_argument("-F","--filter", action="store", help="filter query string") + element_edit_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + element_edit_parser.add_argument('--verbose', '-v', action='count', help="increment the verbosity") + element_edit_parser.set_defaults(func=element_edit, element=item_dict[item]) + #ACTION + if item=='vm': + for item2 in ('shutdown', 'start', 'rebuild', 'reboot'): + vm_action_parser = subparsers.add_parser("vm-"+item2, help="performs this action over the virtual machine") + vm_action_parser.add_argument("name", nargs='?', help="name or ID of the server, if missing means all") + vm_action_parser.add_argument("-F","--filter", action="store", help="filter query string") + vm_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + vm_action_parser.set_defaults(func=element_action, element="servers", action=item2 ) + vm_action_image_parser = subparsers.add_parser("vm-createImage", help="creates a snapshot of the virtual machine disk into a new image") + vm_action_image_parser.add_argument("name", help="name or ID of the server") + vm_action_image_parser.add_argument("imageName", help="image name") + vm_action_image_parser.add_argument("--description", action="store", help="Provide a new image description") + vm_action_image_parser.add_argument("--path", action="store", help="Provide a new image complete path") + vm_action_image_parser.add_argument("-F","--filter", action="store", help="filter query string") + vm_action_image_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + vm_action_image_parser.add_argument('--verbose', '-v', action='count', help="increment the verbosity") + vm_action_image_parser.set_defaults(func=element_action, 
element="servers", action="createImage" ) + #ACTION that are implemented with EDITION + if item=='port': + port_action_attach_parser = subparsers.add_parser("port-attach", help="connects a port to a network") + port_action_attach_parser.add_argument("name", help="name or ID of the port") + port_action_attach_parser.add_argument("network_id", help="ID of the network") + port_action_attach_parser.add_argument("-F","--filter", action="store", help="filter query string") + port_action_attach_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + port_action_attach_parser.set_defaults(func=element_action_edit, element="ports", action="attach") + + port_action_detach_parser = subparsers.add_parser("port-detach", help="removes a port from a network") + port_action_detach_parser.add_argument("name", help="name or ID of the port") + port_action_detach_parser.add_argument("-F","--filter", action="store", help="filter query string") + port_action_detach_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + port_action_detach_parser.set_defaults(func=element_action_edit, element="ports", action="dettach") + + if item=='net' or item=='host': + nethost_action_up_parser = subparsers.add_parser(item+"-up", help="puts admin_state_up of "+item_dict[item][:-1]+" to true") + nethost_action_up_parser.add_argument("name", help="name or ID of the "+item_dict[item][:-1]) + nethost_action_up_parser.add_argument("-F","--filter", action="store", help="filter query string") + nethost_action_up_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + nethost_action_up_parser.set_defaults(func=element_action_edit, element=item_dict[item], action="up") + + nethost_action_down_parser = subparsers.add_parser(item+"-down", help="puts admin_state_up of "+item_dict[item][:-1]+" to false") + nethost_action_down_parser.add_argument("name", help="name or ID of the 
"+item_dict[item][:-1]) + nethost_action_down_parser.add_argument("-F","--filter", action="store", help="filter query string") + nethost_action_down_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + nethost_action_down_parser.set_defaults(func=element_action_edit, element=item_dict[item], action="down") + + #openflow rules + openflow_list_action = subparsers.add_parser("openflow-port-list", help="list openflow switch ports name") + openflow_list_action.set_defaults(func=openflow_action, action="port-list") + + openflow_list_action = subparsers.add_parser("openflow-clear-all", help="removes all openflow rules") + openflow_list_action.set_defaults(func=openflow_action, action="clear-all") + + openflow_list_action = subparsers.add_parser("openflow-net-reinstall", help="reinstall the openflow rules for a network") + openflow_list_action.add_argument("name", nargs='?', help="network name, if missing all networks") + openflow_list_action.set_defaults(func=openflow_action, action="reinstall") + + openflow_list_action = subparsers.add_parser("openflow-net-list", help="list installed openflow rules for a network") + openflow_list_action.add_argument("name", nargs='?', help="network name, if missing all networks") + openflow_list_action.set_defaults(func=openflow_action, action="rules-list") + + argcomplete.autocomplete(main_parser) + + try: + args = main_parser.parse_args() + result = args.func(args) + if result == None: + result = 0 + #for some reason it fails if call exit inside try instance. Need to call exit at the end !? 
+ except (requests.exceptions.ConnectionError): + print "Connection error: not possible to contact OPENVIM-SERVER (openvimd)" + result = -2 + except (KeyboardInterrupt): + print 'Exiting openVIM' + result = -3 + except (SystemExit, ArgumentParserError): + result = -4 + + #print result + exit(result) + diff --git a/openvimd.cfg b/openvimd.cfg new file mode 100644 index 0000000..022a5e3 --- /dev/null +++ b/openvimd.cfg @@ -0,0 +1,127 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + + + +#Miscellaneous +#Option to test openvim without the needed infrastructure, possible values are +# "normal" by default, Openflow controller (OFC), switch and real host are needed +# "test" Used for testing http API and database without connecting to host or to OFC +# "host only" Used when neither OFC nor OF switch are provided. +# Dataplane network connection must be done manually. +# "OF only" Used for testing of new openflow controllers support. No real VM deployments will be done but +# OFC will be used as in real mode +# "development" Forced a cloud-type deployment, nomal memory instead of hugepages is used, +# without cpu pinning, and using a bridge network instead of a real OFC dataplane networks. 
+# The same 'development_bridge' (see below) is used for all dataplane networks
+mode: test
+
+#Openflow controller information
+of_controller: floodlight # Type of controller to be used.
+ # Valid controllers are 'opendaylight', 'floodlight' or a custom one
+#of_controller_module: module # Only needed for a custom controller. Python module that implements
+ # this controller. By default a file with the name of the controller (.py) is used
+#of_<other_param>: value # Other parameters required by controller. Consumed by __init__
+of_user: user credentials # User credentials for the controller if needed
+of_password: passwd credentials # Password credentials for the controller if needed
+of_controller_ip: 127.0.0.1 # IP address where the Openflow controller is listening
+of_controller_port: 7070 # TCP port where the Openflow controller is listening (REST API server)
+of_controller_dpid: '00:01:02:03:04:05:06:07' # Openflow Switch identifier (put here the right number)
+
+#This option is used for those openflow switches that cannot deliver one packet to several outputs with different vlan tags
+#When set to true, it fails when trying to attach different vlan tagged ports to the same net
+of_controller_nets_with_same_vlan: false # (by default, true)
+
+#Server parameters
+http_host: localhost # IP address where openvim is listening (by default, localhost)
+http_port: 9080 # General port where openvim is listening (by default, 9080)
+http_admin_port: 9085 # Admin port where openvim is listening (when missing, no administration server is launched)
+
+#database parameters
+db_host: localhost # by default localhost
+db_user: vim # DB user
+db_passwd: vimpw # DB password
+db_name: vim_db # Name of the VIM DB
+
+#host parameters
+image_path: "/opt/VNF/images" # Folder, same for every host, where the VNF images will be copied
+
+#testing parameters (used by ./test/test_openvim.py)
+tenant_id: fc7b43b6-6bfa-11e4-84d2-5254006d6777 # Default tenant identifier for testing
+
+#VLAN ranges used for the dataplane networks (ptp, data)
+#When a network
is created an unused value in this range is used
+network_vlan_range_start: 3000
+network_vlan_range_end: 4000
+
+#host bridge interfaces for networks
+# Openvim cannot create bridge networks automatically, in the same way as other CMS do.
+# Bridge networks need to be pre-provisioned on each host and Openvim uses those pre-provisioned bridge networks.
+# Openvim assumes that the following bridge interfaces have been created on each host, appropriately associated to a physical port.
+# The following information needs to be provided:
+# - Name of the bridge (identical in all hosts)
+# - VLAN tag associated to each bridge interface
+# - The speed of the physical port in Gbps, where that bridge interface was created
+# For instance, the next example assumes that 10 bridges have been created on each host
+# using vlans 2001 to 2010, associated to a 1Gbps physical port
+bridge_ifaces:
+ #name: [vlan, speed in Gbps]
+ virbrMan1: [2001, 1]
+ virbrMan2: [2002, 1]
+ virbrMan3: [2003, 1]
+ virbrMan4: [2004, 1]
+ virbrMan5: [2005, 1]
+ virbrMan6: [2006, 1]
+ virbrMan7: [2007, 1]
+ virbrMan8: [2008, 1]
+ virbrMan9: [2009, 1]
+ virbrMan10: [2010, 1]
+
+#Used only when 'mode' is at 'development'. Indicates which of the 'bridge_ifaces' is used for dataplane networks
+development_bridge: virbrMan10
+
+#DHCP SERVER PARAMETERS.
+#In case some of the previous 'bridge_ifaces' are connected to an EXTERNAL dhcp server, provide
+# the server parameters to allow openvim to get the allocated IP addresses of virtual machines
+# connected to the indicated 'bridge_ifaces' and/or 'nets'. Openvim will connect to the dhcp server by ssh.
+#DHCP server must contain a shell script "./get_dhcp_lease.sh" that accepts a mac address as parameter
+# and returns empty or the allocated IP address. See an example at the end of the file ./openvim/dhcp_thread.py
+#COMMENT all lines in case you do not have a DHCP server in 'normal', 'development' or 'host only' modes.
+# For 'test' or 'OF only' modes you can leave them uncommented, because in these modes fake IP
+# addresses are generated instead of connecting with a real DHCP server.
+dhcp_server:
+ host: host-ip-or-name
+ #port: 22 #ssh port, by default 22
+ provider: isc-dhcp-server #dhcp-server type
+ user: user
+ #provide password, or key if needed
+ password: passwd
+ #key: ssh-access-key
+ #list of the previous bridge interfaces attached to this dhcp server
+ bridge_ifaces: [ virbrMan1, virbrMan2 ]
+ #list of the networks attached to this dhcp server
+ nets: [default]
+
+
+#logging parameters # DEBUG, INFO, WARNING, ERROR, CRITICAL
+log_level: ERROR
+log_level_db: DEBUG
+log_level_of: DEBUG
diff --git a/openvimd.py b/openvimd.py
new file mode 100755
index 0000000..220c90a
--- /dev/null
+++ b/openvimd.py
@@ -0,0 +1,357 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +This is the main program of openvim, it reads the configuration +and launches the rest of threads: http clients, openflow controller +and host controllers +''' + +__author__="Alfonso Tierno" +__date__ ="$10-jul-2014 12:07:15$" +__version__="0.4.6-r466" +version_date="Jul 2016" +database_version="0.7" #expected database schema version + +import httpserver +from utils import auxiliary_functions as af +import sys +import getopt +import time +import vim_db +import yaml +import os +from jsonschema import validate as js_v, exceptions as js_e +import host_thread as ht +import dhcp_thread as dt +import openflow_thread as oft +import threading +from vim_schema import config_schema +import logging +import imp + +global config_dic +global logger +logger = logging.getLogger('vim') + +def load_configuration(configuration_file): + default_tokens ={'http_port':9080, 'http_host':'localhost', + 'of_controller_nets_with_same_vlan':True, + 'image_path':'/opt/VNF/images', + 'network_vlan_range_start':1000, + 'network_vlan_range_end': 4096, + 'log_level': "DEBUG", + 'log_level_db': "ERROR", + 'log_level_of': 'ERROR', + } + try: + #First load configuration from configuration file + #Check config file exists + if not os.path.isfile(configuration_file): + return (False, "Configuration file '"+configuration_file+"' does not exists") + + #Read and parse file + (return_status, code) = af.read_file(configuration_file) + if not return_status: + return (return_status, "Error loading configuration file '"+configuration_file+"': "+code) + try: + config = yaml.load(code) + except yaml.YAMLError, exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1) + return (False, "Error loading configuration file '"+configuration_file+"'"+error_pos+": content format error: Failed to parse yaml 
format") + + + try: + js_v(config, config_schema) + except js_e.ValidationError, exc: + error_pos = "" + if len(exc.path)>0: error_pos=" at '" + ":".join(map(str, exc.path))+"'" + return False, "Error loading configuration file '"+configuration_file+"'"+error_pos+": "+exc.message + + + #Check default values tokens + for k,v in default_tokens.items(): + if k not in config: config[k]=v + #Check vlan ranges + if config["network_vlan_range_start"]+10 >= config["network_vlan_range_end"]: + return False, "Error invalid network_vlan_range less than 10 elements" + + except Exception,e: + return (False, "Error loading configuration file '"+configuration_file+"': "+str(e)) + return (True, config) + +def create_database_connection(config_dic): + db = vim_db.vim_db( (config_dic["network_vlan_range_start"],config_dic["network_vlan_range_end"]), config_dic['log_level_db'] ); + if db.connect(config_dic['db_host'], config_dic['db_user'], config_dic['db_passwd'], config_dic['db_name']) == -1: + logger.error("Cannot connect to database %s at %s@%s", config_dic['db_name'], config_dic['db_user'], config_dic['db_host']) + exit(-1) + return db + +def usage(): + print "Usage: ", sys.argv[0], "[options]" + print " -v|--version: prints current version" + print " -c|--config [configuration_file]: loads the configuration file (default: openvimd.cfg)" + print " -h|--help: shows this help" + print " -p|--port [port_number]: changes port number and overrides the port number in the configuration file (default: 9090)" + print " -P|--adminport [port_number]: changes admin port number and overrides the port number in the configuration file (default: 9095)" + return + + +if __name__=="__main__": + #streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s" + streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s" + logging.basicConfig(format=streamformat, level= logging.DEBUG) + logger.setLevel(logging.DEBUG) + try: + opts, args = getopt.getopt(sys.argv[1:], "hvc:p:P:", ["config", 
"help", "version", "port", "adminport"]) + except getopt.GetoptError, err: + # print help information and exit: + logger.error("%s. Type -h for help", err) # will print something like "option -a not recognized" + #usage() + sys.exit(-2) + + port=None + port_admin = None + config_file = 'openvimd.cfg' + + for o, a in opts: + if o in ("-v", "--version"): + print "openvimd version", __version__, version_date + print "(c) Copyright Telefonica" + sys.exit(0) + elif o in ("-h", "--help"): + usage() + sys.exit(0) + elif o in ("-c", "--config"): + config_file = a + elif o in ("-p", "--port"): + port = a + elif o in ("-P", "--adminport"): + port_admin = a + else: + assert False, "Unhandled option" + + + try: + #Load configuration file + r, config_dic = load_configuration(config_file) + #print config_dic + if not r: + logger.error(config_dic) + config_dic={} + exit(-1) + logging.basicConfig(level = getattr(logging, config_dic['log_level'])) + logger.setLevel(getattr(logging, config_dic['log_level'])) + #override parameters obtained by command line + if port is not None: config_dic['http_port'] = port + if port_admin is not None: config_dic['http_admin_port'] = port_admin + + #check mode + if 'mode' not in config_dic: + config_dic['mode'] = 'normal' + #allow backward compatibility of test_mode option + if 'test_mode' in config_dic and config_dic['test_mode']==True: + config_dic['mode'] = 'test' + if config_dic['mode'] == 'development' and ( 'development_bridge' not in config_dic or config_dic['development_bridge'] not in config_dic.get("bridge_ifaces",None) ): + logger.error("'%s' is not a valid 'development_bridge', not one of the 'bridge_ifaces'", config_file) + exit(-1) + + if config_dic['mode'] != 'normal': + print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + print "!! Warning, openvimd in TEST mode '%s'" % config_dic['mode'] + print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' 
+ config_dic['version'] = __version__ + + #Connect to database + db_http = create_database_connection(config_dic) + r = db_http.get_db_version() + if r[0]<0: + logger.error("DATABASE is not a VIM one or it is a '0.0' version. Try to upgrade to version '%s' with './database_utils/migrate_vim_db.sh'", database_version) + exit(-1) + elif r[1]!=database_version: + logger.error("DATABASE wrong version '%s'. Try to upgrade/downgrade to version '%s' with './database_utils/migrate_vim_db.sh'", r[1], database_version) + exit(-1) + db_of = create_database_connection(config_dic) + db_lock= threading.Lock() + config_dic['db'] = db_of + config_dic['db_lock'] = db_lock + + #precreate interfaces; [bridge:, VLAN used at Host, uuid of network camping in this bridge, speed in Gbit/s + config_dic['dhcp_nets']=[] + config_dic['bridge_nets']=[] + for bridge,vlan_speed in config_dic["bridge_ifaces"].items(): + #skip 'development_bridge' + if config_dic['mode'] == 'development' and config_dic['development_bridge'] == bridge: + continue + config_dic['bridge_nets'].append( [bridge, vlan_speed[0], vlan_speed[1], None] ) + del config_dic["bridge_ifaces"] + + #check if this bridge is already used (present at database) for a network) + used_bridge_nets=[] + for brnet in config_dic['bridge_nets']: + r,nets = db_of.get_table(SELECT=('uuid',), FROM='nets',WHERE={'provider': "bridge:"+brnet[0]}) + if r>0: + brnet[3] = nets[0]['uuid'] + used_bridge_nets.append(brnet[0]) + if config_dic.get("dhcp_server"): + if brnet[0] in config_dic["dhcp_server"]["bridge_ifaces"]: + config_dic['dhcp_nets'].append(nets[0]['uuid']) + if len(used_bridge_nets) > 0 : + logger.info("found used bridge nets: " + ",".join(used_bridge_nets)) + #get nets used by dhcp + if config_dic.get("dhcp_server"): + for net in config_dic["dhcp_server"].get("nets", () ): + r,nets = db_of.get_table(SELECT=('uuid',), FROM='nets',WHERE={'name': net}) + if r>0: + config_dic['dhcp_nets'].append(nets[0]['uuid']) + + # get host list from data 
base before starting threads + r,hosts = db_of.get_table(SELECT=('name','ip_name','user','uuid'), FROM='hosts', WHERE={'status':'ok'}) + if r<0: + logger.error("Cannot get hosts from database %s", hosts) + exit(-1) + # create connector to the openflow controller + of_test_mode = False if config_dic['mode']=='normal' or config_dic['mode']=="OF only" else True + + if of_test_mode: + OF_conn = oft.of_test_connector({"of_debug": config_dic['log_level_of']} ) + else: + #load other parameters starting by of_ from config dict in a temporal dict + temp_dict={ "of_ip": config_dic['of_controller_ip'], + "of_port": config_dic['of_controller_port'], + "of_dpid": config_dic['of_controller_dpid'], + "of_debug": config_dic['log_level_of'] + } + for k,v in config_dic.iteritems(): + if type(k) is str and k[0:3]=="of_" and k[0:13] != "of_controller": + temp_dict[k]=v + if config_dic['of_controller']=='opendaylight': + module = "ODL" + elif "of_controller_module" in config_dic: + module = config_dic["of_controller_module"] + else: + module = config_dic['of_controller'] + module_info=None + try: + module_info = imp.find_module(module) + + OF_conn = imp.load_module("OF_conn", *module_info) + try: + OF_conn = OF_conn.OF_conn(temp_dict) + except Exception as e: + logger.error("Cannot open the Openflow controller '%s': %s", type(e).__name__, str(e)) + if module_info and module_info[0]: + file.close(module_info[0]) + exit(-1) + except (IOError, ImportError) as e: + if module_info and module_info[0]: + file.close(module_info[0]) + logger.error("Cannot open openflow controller module '%s'; %s: %s; revise 'of_controller' field of configuration file.", module, type(e).__name__, str(e)) + exit(-1) + + + #create openflow thread + thread = oft.openflow_thread(OF_conn, of_test=of_test_mode, db=db_of, db_lock=db_lock, + pmp_with_same_vlan=config_dic['of_controller_nets_with_same_vlan'], + debug=config_dic['log_level_of']) + r,c = thread.OF_connector.obtain_port_correspondence() + if r<0: + 
logger.error("Cannot get openflow information %s", c) + exit() + thread.start() + config_dic['of_thread'] = thread + + #create dhcp_server thread + host_test_mode = True if config_dic['mode']=='test' or config_dic['mode']=="OF only" else False + dhcp_params = config_dic.get("dhcp_server") + if dhcp_params: + thread = dt.dhcp_thread(dhcp_params=dhcp_params, test=host_test_mode, dhcp_nets=config_dic["dhcp_nets"], db=db_of, db_lock=db_lock, debug=config_dic['log_level_of']) + thread.start() + config_dic['dhcp_thread'] = thread + + + #Create one thread for each host + host_test_mode = True if config_dic['mode']=='test' or config_dic['mode']=="OF only" else False + host_develop_mode = True if config_dic['mode']=='development' else False + host_develop_bridge_iface = config_dic.get('development_bridge', None) + config_dic['host_threads'] = {} + for host in hosts: + host['image_path'] = '/opt/VNF/images/openvim' + thread = ht.host_thread(name=host['name'], user=host['user'], host=host['ip_name'], db=db_of, db_lock=db_lock, + test=host_test_mode, image_path=config_dic['image_path'], version=config_dic['version'], + host_id=host['uuid'], develop_mode=host_develop_mode, develop_bridge_iface=host_develop_bridge_iface ) + thread.start() + config_dic['host_threads'][ host['uuid'] ] = thread + + + + #Create thread to listen to web requests + http_thread = httpserver.httpserver(db_http, 'http', config_dic['http_host'], config_dic['http_port'], False, config_dic) + http_thread.start() + + if 'http_admin_port' in config_dic: + db_http = create_database_connection(config_dic) + http_thread_admin = httpserver.httpserver(db_http, 'http-admin', config_dic['http_host'], config_dic['http_admin_port'], True) + http_thread_admin.start() + else: + http_thread_admin = None + time.sleep(1) + logger.info('Waiting for http clients') + print ('openvimd ready') + print ('====================') + sys.stdout.flush() + + #TODO: Interactive console would be nice here instead of join or sleep + + 
r="help" #force print help at the beginning + while True: + if r=='exit': + break + elif r!='': + print "type 'exit' for terminate" + r = raw_input('> ') + + except (KeyboardInterrupt, SystemExit): + pass + + logger.info('Exiting openvimd') + threads = config_dic.get('host_threads', {}) + if 'of_thread' in config_dic: + threads['of'] = (config_dic['of_thread']) + if 'dhcp_thread' in config_dic: + threads['dhcp'] = (config_dic['dhcp_thread']) + + for thread in threads.values(): + thread.insert_task("exit") + for thread in threads.values(): + thread.join() + #http_thread.join() + #if http_thread_admin is not None: + #http_thread_admin.join() + logger.debug( "bye!") + exit() + diff --git a/scripts/configure-compute-node-RHEL7.1.sh b/scripts/configure-compute-node-RHEL7.1.sh new file mode 100755 index 0000000..0fb4056 --- /dev/null +++ b/scripts/configure-compute-node-RHEL7.1.sh @@ -0,0 +1,528 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# Authors: Antonio Lopez, Pablo Montes, Alfonso Tierno +# July 2015 + +# Personalize RHEL7.1 on compute nodes +# Prepared to work with the following network card drivers: +# tg3, igb drivers for management interfaces +# ixgbe (Intel Niantic) and i40e (Intel Fortville) drivers for data plane interfaces + +# To download: +# wget https://raw.githubusercontent.com/nfvlabs/openmano/master/scripts/configure-compute-node-RHEL7.1.sh +# To execute: +# chmod +x ./configure-compute-node-RHEL7.1.sh +# sudo ./configure-compute-node-RHEL7.1.sh + +# Assumptions: +# All virtualization options activated on BIOS (vt-d, vt-x, SR-IOV, no power savings...) +# RHEL7.1 installed without /home partition and with the following packages selection: +# @base, @core, @development, @network-file-system-client, @virtualization-hypervisor, @virtualization-platform, @virtualization-tools + + +function usage(){ + echo -e "Usage: sudo $0 [-y] [ [|dhcp] ]" + echo -e " Configure compute host for VIM usage. (version 0.4). Params:" + echo -e " -y do not prompt for confirmation. If a new user is created, the user name is set as password" + echo -e " Create if not exist and configure this user for openvim to connect" + echo -e " if suplied creates bridge interfaces on this interface, needed for openvim" + echo -e " ip or dhcp if suplied, configure the interface with this ip address (/24) or 'dhcp' " +} + + +#1 CHECK input parameters +#1.1 root privileges +[ "$USER" != "root" ] && echo "Needed root privileges" && usage && exit -1 + +#1.2 input parameters +FORCE="" +while getopts "y" o; do + case "${o}" in + y) + FORCE="yes" + ;; + *) + usage + exit -1 + ;; + esac +done +shift $((OPTIND-1)) + + +if [ $# -lt 1 ] +then + usage + exit +fi + +user_name=$1 +interface=$2 +ip_iface=$3 + +if [ -n "$interface" ] && ! 
ifconfig $interface &> /dev/null +then + echo "Error: interface '$interface' is not present in the system" + usage + exit 1 +fi + +echo ' +################################################################# +##### INSTALL NEEDED PACKETS ##### +#################################################################' + +# Required packages +yum repolist +yum check-update +yum update -y +yum install -y screen virt-manager ethtool gcc gcc-c++ xorg-x11-xauth xorg-x11-xinit xorg-x11-deprecated-libs libXtst guestfish hwloc libhugetlbfs-utils libguestfs-tools numactl +# Selinux management +yum install -y policycoreutils-python + +echo ' +################################################################# +##### INSTALL USER ##### +#################################################################' + +# Add required groups +groupadd -f nfvgroup +groupadd -f libvirt #for other operating systems may be libvirtd + +# Adds user, default password same as name +if grep -q "^${user_name}:" /etc/passwd +then + #user exist, add to group + echo "adding user ${user_name} to groups libvirt,nfvgroup" + usermod -a -G libvirt,nfvgroup -g nfvgroup $user_name +else + #create user if it does not exist + [ -z "$FORCE" ] && read -p "user '${user_name}' does not exist, create (Y/n)" kk + if ! 
[ -z "$kk" -o "$kk"="y" -o "$kk"="Y" ] + then + exit + fi + echo "creating and configuring user ${user_name}" + useradd -m -G libvirt,nfvgroup -g nfvgroup $user_name + #Password + if [ -z "$FORCE" ] + then + echo "Provide a password for $user_name" + passwd $user_name + else + echo -e "$user_name\n$user_name" | passwd --stdin $user_name + fi +fi + +#Setting default libvirt URI for the user +echo "Setting default libvirt URI for the user" +echo "if test -x `which virsh`; then" >> /home/${user_name}/.bash_profile +echo " export LIBVIRT_DEFAULT_URI=qemu:///system" >> /home/${user_name}/.bash_profile +echo "fi" >> /home/${user_name}/.bash_profile + +echo ' +################################################################# +##### INSTALL HUGEPAGES ISOLCPUS GRUB ##### +#################################################################' + +# Huge pages 1G auto mount +mkdir -p /mnt/huge +if ! grep -q "Huge pages" /etc/fstab +then + echo "" >> /etc/fstab + echo "# Huge pages" >> /etc/fstab + echo "nodev /mnt/huge hugetlbfs pagesize=1GB 0 0" >> /etc/fstab + echo "" >> /etc/fstab +fi + +# Huge pages reservation service +if ! 
[ -f /usr/lib/systemd/system/hugetlb-gigantic-pages.service ] +then + echo "configuring huge pages service" + cat > /usr/lib/systemd/system/hugetlb-gigantic-pages.service << EOL +[Unit] +Description=HugeTLB Gigantic Pages Reservation +DefaultDependencies=no +Before=dev-hugepages.mount +ConditionPathExists=/sys/devices/system/node +ConditionKernelCommandLine=hugepagesz=1G + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/lib/systemd/hugetlb-reserve-pages + +[Install] +WantedBy=sysinit.target +EOL +fi +# Grub virtualization options: + +# Get isolcpus +isolcpus=`gawk 'BEGIN{pre=-2;} + ($1=="processor"){pro=$3;} + ($1=="core" && $4!=0){ + if (pre+1==pro){endrange="-" pro} + else{cpus=cpus endrange sep pro; sep=","; endrange="";}; + pre=pro;} + END{printf("%s",cpus endrange);}' /proc/cpuinfo` + + +# Huge pages reservation file: reserving all memory apart from 4GB per NUMA node +# Get the number of hugepages: all memory but 8GB reserved for the OS +#totalmem=`dmidecode --type 17|grep Size |grep MB |gawk '{suma+=$2} END {print suma/1024}'` +#hugepages=$(($totalmem-8)) + +if ! [ -f /usr/lib/systemd/hugetlb-reserve-pages ] +then + cat > /usr/lib/systemd/hugetlb-reserve-pages << EOL +#!/bin/bash +nodes_path=/sys/devices/system/node/ +if [ ! -d \$nodes_path ]; then + echo "ERROR: \$nodes_path does not exist" + exit 1 +fi + +reserve_pages() +{ + echo \$1 > \$nodes_path/\$2/hugepages/hugepages-1048576kB/nr_hugepages +} + +# This example reserves all available memory apart from 4 GB for linux +# using 1GB size. 
You can modify it to your needs or comment the lines +# to avoid reserve memory in a numa node +EOL + for f in /sys/devices/system/node/node?/meminfo + do + node=`head -n1 $f | gawk '($5=="kB"){print $2}'` + memory=`head -n1 $f | gawk '($5=="kB"){print $4}'` + memory=$((memory+1048576-1)) #memory must be ceiled + memory=$((memory/1048576)) #from `kB to GB + #if memory + [ $memory -gt 4 ] && echo "reserve_pages $((memory-4)) node$node" >> /usr/lib/systemd/hugetlb-reserve-pages + done + + # Run the following commands to enable huge pages early boot reservation: + chmod +x /usr/lib/systemd/hugetlb-reserve-pages + systemctl enable hugetlb-gigantic-pages +fi + +# Prepares the text to add at the end of the grub line, including blacklisting ixgbevf driver in the host +textokernel="intel_iommu=on default_hugepagesz=1G hugepagesz=1G isolcpus=$isolcpus modprobe.blacklist=ixgbevf modprobe.blacklist=i40evf" + +# Add text to the kernel line +if ! grep -q "intel_iommu=on default_hugepagesz=1G hugepagesz=1G" /etc/default/grub +then + echo "adding cmdline ${textokernel}" + sed -i "/^GRUB_CMDLINE_LINUX=/s/\"\$/ ${textokernel}\"/" /etc/default/grub + # grub2 upgrade + grub2-mkconfig -o /boot/grub2/grub.cfg +fi + +echo ' +################################################################# +##### OTHER CONFIGURATION ##### +#################################################################' + +# Disable requiretty +if ! 
grep -q "#openmano" /etc/sudoers +then + cat >> /home/${user_name}/script_visudo.sh << EOL +#!/bin/bash +cat \$1 | awk '(\$0~"requiretty"){print "#"\$0}(\$0!~"requiretty"){print \$0}' > tmp +cat tmp > \$1 +rm tmp +EOL + chmod +x /home/${user_name}/script_visudo.sh + echo "Disabling requitetty" + export EDITOR=/home/${user_name}/script_visudo.sh && sudo -E visudo + rm -f /home/${user_name}/script_visudo.sh +fi + +#Configure polkint to run virsh as a normal user +echo "Configuring polkint to run virsh as a normal user" +cat >> /etc/polkit-1/localauthority/50-local.d/50-org.libvirt-access.pkla << EOL +[libvirt Admin Access] +Identity=unix-group:libvirt +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOL + +# Links the OpenMANO required folder /opt/VNF/images to /var/lib/libvirt/images. The OS installation +# should have only a / partition with all possible space available + +echo " link /opt/VNF/images to /var/lib/libvirt/images" +if [ "$user_name" != "" ] +then + #mkdir -p /home/${user_name}/VNF_images + #chown -R ${user_name}:nfvgroup /home/${user_name}/VNF_images + #chmod go+x $HOME + + # The orchestator needs to link the images folder + rm -f /opt/VNF/images + mkdir -p /opt/VNF/ + ln -s /var/lib/libvirt/images /opt/VNF/images + chown -R ${user_name}:nfvgroup /opt/VNF + chown -R root:nfvgroup /var/lib/libvirt/images + chmod g+rwx /var/lib/libvirt/images + + # Selinux management + #echo "configure Selinux management" + #semanage fcontext -a -t virt_image_t "/home/${user_name}/VNF_images(/.*)?" + #cat /etc/selinux/targeted/contexts/files/file_contexts.local |grep virt_image + #restorecon -R -v /home/${user_name}/VNF_images +else + mkdir -p /opt/VNF/images + chmod o+rx /opt/VNF/images +fi + +echo "creating local information /opt/VNF/images/hostinfo.yaml" +echo "#By default openvim assumes control plane interface naming as em1,em2,em3,em4 " > /opt/VNF/images/hostinfo.yaml +echo "#and bridge ifaces as virbrMan1, virbrMan2, ..." 
>> /opt/VNF/images/hostinfo.yaml +echo "#if compute node contain a different name it must be indicated in this file" >> /opt/VNF/images/hostinfo.yaml +echo "#with the format extandard-name: compute-name" >> /opt/VNF/images/hostinfo.yaml +if [ "$interface" != "" -a "$interface" != "em1" ] +then + echo "iface_names:" >> /opt/VNF/images/hostinfo.yaml + echo " em1: ${interface}" >> /opt/VNF/images/hostinfo.yaml +fi +chmod o+r /opt/VNF/images/hostinfo.yaml + +# deactivate memory overcommit +echo "deactivate memory overcommit" +service ksmtuned stop +service ksm stop +chkconfig ksmtuned off +chkconfig ksm off + + +# Libvirt options (uncomment the following) +echo "configure Libvirt options" +sed -i 's/#unix_sock_group = "libvirt"/unix_sock_group = "libvirt"/' /etc/libvirt/libvirtd.conf +sed -i 's/#unix_sock_rw_perms = "0770"/unix_sock_rw_perms = "0770"/' /etc/libvirt/libvirtd.conf +sed -i 's/#unix_sock_dir = "\/var\/run\/libvirt"/unix_sock_dir = "\/var\/run\/libvirt"/' /etc/libvirt/libvirtd.conf +sed -i 's/#auth_unix_rw = "none"/auth_unix_rw = "none"/' /etc/libvirt/libvirtd.conf + +#creating the polkit grant access for libvirt user. +#This does not work !!!! so commented. No way to get running without uncomented the auth_unix_rw = "none" line +# +#cat > /etc/polkit-1/localauthority/50-local.d/50-org.example-libvirt-remote-access.pkla << EOL +#[libvirt Management Access] +# Identity=unix-user:n2;unix-user:kk +# Action=org.libvirt.unix.manage +# ResultAny=yes +# ResultInactive=yes +# ResultActive=yes +#EOL + +# Configuration change of qemu for the numatune bug issue +# RHEL7.1: for this version should not be necesary - to revise +#if ! 
grep -q "cgroup_controllers = [ \"cpu\", \"devices\", \"memory\", \"blkio\", \"cpuacct\" ]" /etc/libvirt/qemu.conf +#then +#cat /etc/libvirt/qemu.conf | awk '{print $0}($0~"#cgroup_controllers"){print "cgroup_controllers = [ \"cpu\", \"devices\", \"memory\", \"blkio\", \"cpuacct\" ]"}' > tmp +#mv tmp /etc/libvirt/qemu.conf +#fi + +echo ' +################################################################# +##### NETWORK CONFIGURATION ##### +#################################################################' +# Network config (if the second parameter is net) +if [ -n "$interface" ] +then + + # Deactivate network manager + systemctl stop NetworkManager + systemctl disable NetworkManager + + # For management and data interfaces + rm -f /etc/udev/rules.d/pci_config.rules # it will be created to define VFs + + pushd /etc/sysconfig/network-scripts/ + + # Set ONBOOT=on and MTU=9000 on the interface used for the bridges + echo "configuring iface $interface" + cat ifcfg-$interface | grep -e HWADDR -e UUID > $interface.tmp + echo "TYPE=Ethernet +NAME=$interface +DEVICE=$interface +TYPE=Ethernet +ONBOOT=yes +NM_CONTROLLED=no +MTU=9000 +BOOTPROTO=none +IPV6INIT=no" >> $interface.tmp + mv $interface.tmp ifcfg-$interface + + # Management interfaces +# integrated_interfaces="" +# nb_ifaces=0 +# for iface in `ifconfig -a | grep ":\ " | cut -f 1 -d":"| grep -v "_" | grep -v "\." | grep -v "lo" | sort` +# do +# driver=`ethtool -i $iface| awk '($0~"driver"){print $2}'` +# if [ $driver != "ixgbe" ] && [ $driver != "bridge" ] +# then +# integrated_interfaces="$integrated_interfaces $iface" +# nb_ifaces=$((nb_ifaces+1)) +# eval iface${nb_ifaces}=$iface +# fi +# done + + #Create infrastructure bridge, normally used for connecting to compute nodes, openflow controller, ... 
+ echo "DEVICE=virbrInf +TYPE=Bridge +ONBOOT=yes +DELAY=0 +NM_CONTROLLED=no +USERCTL=no" > ifcfg-virbrInf + + #Create VLAN for infrastructure bridge + echo "DEVICE=${interface}.1001 +ONBOOT=yes +NM_CONTROLLED=no +USERCTL=no +VLAN=yes +BOOTPROTO=none +BRIDGE=virbrInf" > ifcfg-${interface}.1001 + + + #Create bridge interfaces + echo "Creating bridge ifaces: " + for ((i=1;i<=20;i++)) + do + i2digits=$i + [ $i -lt 10 ] && i2digits="0$i" + echo " virbrMan$i vlan 20$i2digits" + echo "DEVICE=virbrMan$i +TYPE=Bridge +ONBOOT=yes +DELAY=0 +NM_CONTROLLED=no +USERCTL=no" > ifcfg-virbrMan$i + +#Without IP: +#BOOTPROTO=static +#IPADDR=10.10.10.$((i+209)) +#NETMASK=255.255.255.0" > ifcfg-virbrMan$i + + # create the required interfaces to connect the bridges + echo "DEVICE=${interface}.20$i2digits +ONBOOT=yes +NM_CONTROLLED=no +USERCTL=no +VLAN=yes +BOOTPROTO=none +BRIDGE=virbrMan$i" > ifcfg-${interface}.20$i2digits + done + + if [ -n "$ip_iface" ] + then + echo "configuring iface $iface interface with ip $ip_iface" + # Network interfaces + # 1Gbps interfaces are configured with ONBOOT=yes and static IP address + cat ifcfg-$iface | grep -e HWADDR -e UUID > $iface.tmp + echo "TYPE=Ethernet +NAME=$iface +DEVICE=$iface +TYPE=Ethernet +ONBOOT=yes +NM_CONTROLLED=no +IPV6INIT=no" >> $iface.tmp + [ $ip_iface = "dhcp" ] && echo -e "BOOTPROTO=dhcp\nDHCP_HOSTNAME=$HOSTNAME" >> $iface.tmp + [ $ip_iface != "dhcp" ] && echo -e "BOOTPROTO=static\nIPADDR=${ip_iface}\nNETMASK=255.255.255.0" >> $iface.tmp + mv $iface.tmp ifcfg-$iface + fi + + for iface in `ifconfig -a | grep ": " | cut -f 1 -d":" | grep -v -e "_" -e "\." 
-e "lo" -e "virbr" -e "tap"` + do + # 10/40 Gbps interfaces + # Intel X520 cards: driver ixgbe + # Intel XL710 Fortville cards: driver i40e + driver=`ethtool -i $iface| awk '($0~"driver"){print $2}'` + if [ "$driver" = "i40e" -o "$driver" = "ixgbe" ] + then + echo "configuring dataplane iface $iface" + + # Create 8 SR-IOV per PF by udev rules only for Fortville cards (i40e driver) + if [ "$driver" = "i40e" ] + then + pci=`ethtool -i $iface | awk '($0~"bus-info"){print $2}'` + echo "ACTION==\"add\", KERNEL==\"$pci\", SUBSYSTEM==\"pci\", RUN+=\"/usr/bin/bash -c 'echo 8 > /sys/bus/pci/devices/$pci/sriov_numvfs'\"" >> /etc/udev/rules.d/pci_config.rules + fi + + # Configure PF to boot automatically and to have a big MTU + # 10Gbps interfaces are configured with ONBOOT=yes and MTU=2000 + cat ifcfg-$iface | grep -e HWADDR -e UUID > $iface.tmp + echo "TYPE=Ethernet +NAME=$iface +DEVICE=$iface +ONBOOT=yes +MTU=9000 +NM_CONTROLLED=no +IPV6INIT=no +BOOTPROTO=none" >> $iface.tmp + mv $iface.tmp ifcfg-$iface + fi + done + popd +fi + + +# Activate 8 Virtual Functions per PF on Niantic cards (ixgbe driver) +if [[ `lsmod | cut -d" " -f1 | grep "ixgbe" | grep -v vf` ]] +then + if ! grep -q "ixgbe" /etc/modprobe.d/ixgbe.conf + then + echo "options ixgbe max_vfs=8" >> /etc/modprobe.d/ixgbe.conf + fi + +fi + +# Executes dracut to load drivers on boot +echo "Regenerating initramfs" +dracut --force + +# To define 8 VFs per PF we do it on rc.local, because the driver needs to be unloaded and loaded again +#if ! grep -q "NFV" /etc/rc.local +#then +# echo "" >> /etc/rc.local +# echo "# NFV" >> /etc/rc.local +# echo "modprobe -r ixgbe" >> /etc/rc.local +# echo "modprobe ixgbe max_vfs=8" >> /etc/rc.local +# echo "" >> /etc/rc.local + +# chmod +x /etc/rc.d/rc.local + +#fi + +echo +echo "Do not forget to create a shared (NFS, Samba, ...) 
where original virtual machine images are allocated" +echo +echo "Do not forget to copy the public ssh key into /home/${user_name}/.ssh/authorized_keys for authomatic login from openvim controller" +echo + +echo "Reboot the system to make the changes effective" + diff --git a/scripts/configure-compute-node-RHEL7.2.sh b/scripts/configure-compute-node-RHEL7.2.sh new file mode 100644 index 0000000..b68aebb --- /dev/null +++ b/scripts/configure-compute-node-RHEL7.2.sh @@ -0,0 +1,560 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# Authors: Antonio Lopez, Pablo Montes, Alfonso Tierno +# 2016 March 18 +# Modified to run on grub2 and efi boot + +# Personalize RHEL7.2 on compute nodes +# Prepared to work with the following network card drivers: +# tg3, igb drivers for management interfaces +# ixgbe (Intel Niantic) and i40e (Intel Fortville) drivers for data plane interfaces + +# To download: +# wget https://raw.githubusercontent.com/nfvlabs/openmano/master/scripts/configure-compute-node-RHEL7.1.sh +# To execute: +# chmod +x ./configure-compute-node-RHEL7.1.sh +# sudo ./configure-compute-node-RHEL7.1.sh + +# Assumptions: +# All virtualization options activated on BIOS (vt-d, vt-x, SR-IOV, no power savings...) 
+# RHEL7.2 installed without /home partition and with the following packages selection: +# @base, @core, @development, @network-file-system-client, @virtualization-hypervisor, @virtualization-platform, @virtualization-tools + +# 2016 Aug 17 Antonio López +# Changed virbrInf to virbrVIM, to reflect that this bridge is used to communicate with the VIM (OpenVIM) +# Changed the vlan tag used by virbrVIM from 2000 to 1100 + +function usage(){ + echo -e "Usage: sudo $0 [-y] [ [|dhcp] ]" + echo -e " Configure compute host for VIM usage. (version 0.4). Params:" + echo -e " -y do not prompt for confirmation. If a new user is created, the user name is set as password" + echo -e " Create if not exist and configure this user for openvim to connect" + echo -e " if suplied creates bridge interfaces on this interface, needed for openvim" + echo -e " ip or dhcp if suplied, configure the interface with this ip address (/24) or 'dhcp' " +} + + +#1 CHECK input parameters +#1.1 root privileges +[ "$USER" != "root" ] && echo "Needed root privileges" && usage && exit -1 + +#1.2 input parameters +FORCE="" +while getopts "y" o; do + case "${o}" in + y) + FORCE="yes" + ;; + *) + usage + exit -1 + ;; + esac +done +shift $((OPTIND-1)) + + +if [ $# -lt 1 ] +then + usage + exit +fi + +user_name=$1 +interface=$2 +ip_iface=$3 + +if [ -n "$interface" ] && ! 
ifconfig $interface &> /dev/null +then + echo "Error: interface '$interface' is not present in the system" + usage + exit 1 +fi + +echo ' +################################################################# +##### INSTALL NEEDED PACKETS ##### +#################################################################' + +# Required packages +yum repolist +yum check-update +yum update -y +yum install -y screen virt-manager ethtool gcc gcc-c++ xorg-x11-xauth xorg-x11-xinit xorg-x11-deprecated-libs libXtst guestfish hwloc libhugetlbfs-utils libguestfs-tools numactl +# Selinux management +yum install -y policycoreutils-python + +echo ' +################################################################# +##### INSTALL USER ##### +#################################################################' + +# Add required groups +groupadd -f nfvgroup +groupadd -f libvirt #for other operating systems may be libvirtd + +# Adds user, default password same as name +if grep -q "^${user_name}:" /etc/passwd +then + #user exist, add to group + echo "adding user ${user_name} to groups libvirt,nfvgroup" + usermod -a -G libvirt,nfvgroup -g nfvgroup $user_name +else + #create user if it does not exist + [ -z "$FORCE" ] && read -p "user '${user_name}' does not exist, create (Y/n)" kk + if ! 
[ -z "$kk" -o "$kk"="y" -o "$kk"="Y" ] + then + exit + fi + echo "creating and configuring user ${user_name}" + useradd -m -G libvirt,nfvgroup -g nfvgroup $user_name + #Password + if [ -z "$FORCE" ] + then + echo "Provide a password for $user_name" + passwd $user_name + else + echo -e "$user_name\n$user_name" | passwd --stdin $user_name + fi +fi + +#Setting default libvirt URI for the user +echo "Setting default libvirt URI for the user" +echo "if test -x `which virsh`; then" >> /home/${user_name}/.bash_profile +echo " export LIBVIRT_DEFAULT_URI=qemu:///system" >> /home/${user_name}/.bash_profile +echo "fi" >> /home/${user_name}/.bash_profile + +echo ' +################################################################# +##### INSTALL HUGEPAGES ISOLCPUS GRUB ##### +#################################################################' + +# Huge pages 1G auto mount +mkdir -p /mnt/huge +if ! grep -q "Huge pages" /etc/fstab +then + echo "" >> /etc/fstab + echo "# Huge pages" >> /etc/fstab + echo "nodev /mnt/huge hugetlbfs pagesize=1GB 0 0" >> /etc/fstab + echo "" >> /etc/fstab +fi + +# Huge pages reservation service +if ! 
[ -f /usr/lib/systemd/system/hugetlb-gigantic-pages.service ] +then + echo "configuring huge pages service" + cat > /usr/lib/systemd/system/hugetlb-gigantic-pages.service << EOL +[Unit] +Description=HugeTLB Gigantic Pages Reservation +DefaultDependencies=no +Before=dev-hugepages.mount +ConditionPathExists=/sys/devices/system/node +ConditionKernelCommandLine=hugepagesz=1G + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/lib/systemd/hugetlb-reserve-pages + +[Install] +WantedBy=sysinit.target +EOL +fi +# Grub virtualization options: + +# Get isolcpus +isolcpus=`gawk 'BEGIN{pre=-2;} + ($1=="processor"){pro=$3;} + ($1=="core" && $4!=0){ + if (pre+1==pro){endrange="-" pro} + else{cpus=cpus endrange sep pro; sep=","; endrange="";}; + pre=pro;} + END{printf("%s",cpus endrange);}' /proc/cpuinfo` + + +# Huge pages reservation file: reserving all memory apart from 4GB per NUMA node +# Get the number of hugepages: all memory but 8GB reserved for the OS +#totalmem=`dmidecode --type 17|grep Size |grep MB |gawk '{suma+=$2} END {print suma/1024}'` +#hugepages=$(($totalmem-8)) + +if ! [ -f /usr/lib/systemd/hugetlb-reserve-pages ] +then + cat > /usr/lib/systemd/hugetlb-reserve-pages << EOL +#!/bin/bash +nodes_path=/sys/devices/system/node/ +if [ ! -d \$nodes_path ]; then + echo "ERROR: \$nodes_path does not exist" + exit 1 +fi + +reserve_pages() +{ + echo \$1 > \$nodes_path/\$2/hugepages/hugepages-1048576kB/nr_hugepages +} + +# This example reserves all available memory apart from 4 GB for linux +# using 1GB size. 
You can modify it to your needs or comment the lines +# to avoid reserve memory in a numa node +EOL + for f in /sys/devices/system/node/node?/meminfo + do + node=`head -n1 $f | gawk '($5=="kB"){print $2}'` + memory=`head -n1 $f | gawk '($5=="kB"){print $4}'` + memory=$((memory+1048576-1)) #memory must be ceiled + memory=$((memory/1048576)) #from `kB to GB + #if memory + [ $memory -gt 4 ] && echo "reserve_pages $((memory-4)) node$node" >> /usr/lib/systemd/hugetlb-reserve-pages + done + + # Run the following commands to enable huge pages early boot reservation: + chmod +x /usr/lib/systemd/hugetlb-reserve-pages + systemctl enable hugetlb-gigantic-pages +fi + +# Prepares the text to add at the end of the grub line, including blacklisting ixgbevf driver in the host + +textokernel="intel_iommu=on default_hugepagesz=1G hugepagesz=1G isolcpus=$isolcpus modprobe.blacklist=ixgbevf modprobe.blacklist=i40evf" + +# Add text to the kernel line +if ! grep -q "intel_iommu=on default_hugepagesz=1G hugepagesz=1G" /etc/default/grub +then + echo "adding cmdline ${textokernel}" + sed -i "/^GRUB_CMDLINE_LINUX=/s/\"\$/ ${textokernel}\"/" /etc/default/grub + + # grub2 upgrade + + # BIOS based systems + grub2-mkconfig -o /boot/grub2/grub.cfg + + # UEFI based systems + grub2-mkconfig -o /boot/efi/EFI/redhat/grub.cfg + +fi + +echo ' +################################################################# +##### OTHER CONFIGURATION ##### +#################################################################' + +# Disable requiretty +if ! 
grep -q "#openmano" /etc/sudoers +then + cat >> /home/${user_name}/script_visudo.sh << EOL +#!/bin/bash +cat \$1 | awk '(\$0~"requiretty"){print "#"\$0}(\$0!~"requiretty"){print \$0}' > tmp +cat tmp > \$1 +rm tmp +EOL + chmod +x /home/${user_name}/script_visudo.sh + echo "Disabling requitetty" + export EDITOR=/home/${user_name}/script_visudo.sh && sudo -E visudo + rm -f /home/${user_name}/script_visudo.sh +fi + +#Configure polkint to run virsh as a normal user +echo "Configuring polkint to run virsh as a normal user" +cat >> /etc/polkit-1/localauthority/50-local.d/50-org.libvirt-access.pkla << EOL +[libvirt Admin Access] +Identity=unix-group:libvirt +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOL + +# Links the OpenMANO required folder /opt/VNF/images to /var/lib/libvirt/images. The OS installation +# should have only a / partition with all possible space available + +echo " link /opt/VNF/images to /var/lib/libvirt/images" +if [ "$user_name" != "" ] +then + #mkdir -p /home/${user_name}/VNF_images + #chown -R ${user_name}:nfvgroup /home/${user_name}/VNF_images + #chmod go+x $HOME + + # The orchestator needs to link the images folder + rm -f /opt/VNF/images + mkdir -p /opt/VNF/ + ln -s /var/lib/libvirt/images /opt/VNF/images + chown -R ${user_name}:nfvgroup /opt/VNF + chown -R root:nfvgroup /var/lib/libvirt/images + chmod g+rwx /var/lib/libvirt/images + + # Selinux management + #echo "configure Selinux management" + #semanage fcontext -a -t virt_image_t "/home/${user_name}/VNF_images(/.*)?" + #cat /etc/selinux/targeted/contexts/files/file_contexts.local |grep virt_image + #restorecon -R -v /home/${user_name}/VNF_images +else + mkdir -p /opt/VNF/images + chmod o+rx /opt/VNF/images +fi + +echo "creating local information /opt/VNF/images/hostinfo.yaml" +echo "#By default openvim assumes control plane interface naming as em1,em2,em3,em4 " > /opt/VNF/images/hostinfo.yaml +echo "#and bridge ifaces as virbrMan1, virbrMan2, ..." 
>> /opt/VNF/images/hostinfo.yaml +echo "#if compute node contain a different name it must be indicated in this file" >> /opt/VNF/images/hostinfo.yaml +echo "#with the format extandard-name: compute-name" >> /opt/VNF/images/hostinfo.yaml +if [ "$interface" != "" -a "$interface" != "em1" ] +then + echo "iface_names:" >> /opt/VNF/images/hostinfo.yaml + echo " em1: ${interface}" >> /opt/VNF/images/hostinfo.yaml +fi +chmod o+r /opt/VNF/images/hostinfo.yaml + +# deactivate memory overcommit +echo "deactivate memory overcommit" +service ksmtuned stop +service ksm stop +chkconfig ksmtuned off +chkconfig ksm off + + +# Libvirt options (uncomment the following) +echo "configure Libvirt options" +sed -i 's/#unix_sock_group = "libvirt"/unix_sock_group = "libvirt"/' /etc/libvirt/libvirtd.conf +sed -i 's/#unix_sock_rw_perms = "0770"/unix_sock_rw_perms = "0770"/' /etc/libvirt/libvirtd.conf +sed -i 's/#unix_sock_dir = "\/var\/run\/libvirt"/unix_sock_dir = "\/var\/run\/libvirt"/' /etc/libvirt/libvirtd.conf +sed -i 's/#auth_unix_rw = "none"/auth_unix_rw = "none"/' /etc/libvirt/libvirtd.conf + +#creating the polkit grant access for libvirt user. +#This does not work !!!! so commented. No way to get running without uncomented the auth_unix_rw = "none" line +# +#cat > /etc/polkit-1/localauthority/50-local.d/50-org.example-libvirt-remote-access.pkla << EOL +#[libvirt Management Access] +# Identity=unix-user:n2;unix-user:kk +# Action=org.libvirt.unix.manage +# ResultAny=yes +# ResultInactive=yes +# ResultActive=yes +#EOL + +# Configuration change of qemu for the numatune bug issue +# RHEL7.1: for this version should not be necesary - to revise +#if ! 
grep -q "cgroup_controllers = [ \"cpu\", \"devices\", \"memory\", \"blkio\", \"cpuacct\" ]" /etc/libvirt/qemu.conf +#then +#cat /etc/libvirt/qemu.conf | awk '{print $0}($0~"#cgroup_controllers"){print "cgroup_controllers = [ \"cpu\", \"devices\", \"memory\", \"blkio\", \"cpuacct\" ]"}' > tmp +#mv tmp /etc/libvirt/qemu.conf +#fi + +echo ' +################################################################# +##### NETWORK CONFIGURATION ##### +#################################################################' +# Network config (if the second parameter is net) +if [ -n "$interface" ] +then + + # Deactivate network manager + systemctl stop NetworkManager + systemctl disable NetworkManager + + # For management and data interfaces + #rm -f /etc/udev/rules.d/pci_config.rules # it will be created to define VFs + + pushd /etc/sysconfig/network-scripts/ + + # Set ONBOOT=on and MTU=9000 on the interface used for the bridges + echo "configuring iface $interface" + cat ifcfg-$interface | grep -e HWADDR -e UUID > $interface.tmp + echo "TYPE=Ethernet +NAME=$interface +DEVICE=$interface +TYPE=Ethernet +ONBOOT=yes +NM_CONTROLLED=no +MTU=9000 +BOOTPROTO=none +IPV6INIT=no" >> $interface.tmp + mv $interface.tmp ifcfg-$interface + + # Management interfaces +# integrated_interfaces="" +# nb_ifaces=0 +# for iface in `ifconfig -a | grep ":\ " | cut -f 1 -d":"| grep -v "_" | grep -v "\." | grep -v "lo" | sort` +# do +# driver=`ethtool -i $iface| awk '($0~"driver"){print $2}'` +# if [ $driver != "ixgbe" ] && [ $driver != "bridge" ] +# then +# integrated_interfaces="$integrated_interfaces $iface" +# nb_ifaces=$((nb_ifaces+1)) +# eval iface${nb_ifaces}=$iface +# fi +# done + + #Create infrastructure bridge, normally used for connecting to compute nodes, openflow controller, ... 
+ echo "DEVICE=virbrVIM +NAME=virbrVIM +TYPE=Bridge +ONBOOT=yes +DELAY=0 +NM_CONTROLLED=no +MTU=9000 +USERCTL=no" > ifcfg-virbrVIM +[[ $ip_iface != "dhcp" ]] && [[ $ip_iface != "" ]] && echo -e "BOOTPROTO=static\nIPADDR=${ip_iface}\nNETMASK=255.255.255.0" >> ifcfg-virbrVIM + + #Create VLAN for infrastructure bridge + echo "DEVICE=${interface}.1100 +NAME=${interface}.1100 +ONBOOT=yes +NM_CONTROLLED=no +USERCTL=no +VLAN=yes +MTU=9000 +BOOTPROTO=none +BRIDGE=virbrVIM" > ifcfg-${interface}.1100 + + + #Create bridge interfaces + echo "Creating bridge ifaces: " + for ((i=1;i<=20;i++)) + do + i2digits=$i + [ $i -lt 10 ] && i2digits="0$i" + echo " virbrMan$i vlan 20$i2digits" + echo "DEVICE=virbrMan$i +NAME=virbrMan$i +TYPE=Bridge +ONBOOT=yes +DELAY=0 +NM_CONTROLLED=no +MTU=9000 +USERCTL=no" > ifcfg-virbrMan$i + +#Without IP: +#BOOTPROTO=static +#IPADDR=10.10.10.$((i+209)) +#NETMASK=255.255.255.0" > ifcfg-virbrMan$i + + # create the required interfaces to connect the bridges + echo "DEVICE=${interface}.20$i2digits +NAME=${interface}.20$i2digits +ONBOOT=yes +NM_CONTROLLED=no +USERCTL=no +VLAN=yes +BOOTPROTO=none +MTU=9000 +BRIDGE=virbrMan$i" > ifcfg-${interface}.20$i2digits + done + + iface=$interface + if [ -n "$ip_iface" ] + then + echo "configuring iface $iface interface with ip $ip_iface" + # Network interfaces + # 1Gbps interfaces are configured with ONBOOT=yes and static IP address + cat ifcfg-$iface | grep -e HWADDR -e UUID > $iface.tmp + echo "TYPE=Ethernet +NAME=$iface +DEVICE=$iface +TYPE=Ethernet +ONBOOT=yes +NM_CONTROLLED=no +MTU=9000 +IPV6INIT=no" >> $iface.tmp + [ $ip_iface = "dhcp" ] && echo -e "BOOTPROTO=dhcp\nDHCP_HOSTNAME=$HOSTNAME" >> $iface.tmp + [ $ip_iface != "dhcp" ] && echo -e "BOOTPROTO=static\nIPADDR=${ip_iface}\nNETMASK=255.255.255.0" >> $iface.tmp + mv $iface.tmp ifcfg-$iface + fi + # Script to create vfs + echo "#!/bin/bash" > /root/activate-vfs.sh + chmod +x /root/activate-vfs.sh + for iface in `ifconfig -a | grep ": " | cut -f 1 -d":" | grep 
-v -e "_" -e "\." -e "lo" -e "virbr" -e "tap"` + do + # 10/40 Gbps interfaces + # Intel X520 cards: driver ixgbe + # Intel XL710 Fortville cards: driver i40e + driver=`ethtool -i $iface| awk '($0~"driver"){print $2}'` + if [ "$driver" = "i40e" -o "$driver" = "ixgbe" ] + then + echo "configuring dataplane iface $iface" + + # Create 8 SR-IOV per PF by udev rules only for Fortville cards (i40e driver) + if [ "$driver" = "i40e" ] + then + pci=`ethtool -i $iface | awk '($0~"bus-info"){print $2}'` + echo "echo 8 > /sys/bus/pci/devices/$pci/sriov_numvfs" >> /root/activate-vfs.sh + fi + + # Configure PF to boot automatically and to have a big MTU + # 10Gbps interfaces are configured with ONBOOT=yes and MTU=2000 + cat ifcfg-$iface | grep -e HWADDR -e UUID > $iface.tmp + echo "TYPE=Ethernet +NAME=$iface +DEVICE=$iface +ONBOOT=yes +MTU=9000 +NM_CONTROLLED=no +IPV6INIT=no +BOOTPROTO=none" >> $iface.tmp + mv $iface.tmp ifcfg-$iface + fi + done + popd +fi +# add entry in rc.local for activate-vfs +grep -q 'touch /var/lock/subsys/local' '/etc/rc.d/rc.local' +if [[ $? == 0 ]] +then + echo "/root/activate-vfs.sh" >> /etc/rc.local +fi + + + +# Activate 8 Virtual Functions per PF on Niantic cards (ixgbe driver) +if [[ `lsmod | cut -d" " -f1 | grep "ixgbe" | grep -v vf` ]] +then + if ! grep -q "ixgbe" /etc/modprobe.d/ixgbe.conf + then + echo "options ixgbe max_vfs=8" >> /etc/modprobe.d/ixgbe.conf + fi + +fi + +# Executes dracut to load drivers on boot +echo "Regenerating initramfs" +dracut --force + +# To define 8 VFs per PF we do it on rc.local, because the driver needs to be unloaded and loaded again +#if ! grep -q "NFV" /etc/rc.local +#then +# echo "" >> /etc/rc.local +# echo "# NFV" >> /etc/rc.local +# echo "modprobe -r ixgbe" >> /etc/rc.local +# echo "modprobe ixgbe max_vfs=8" >> /etc/rc.local +# echo "" >> /etc/rc.local + +# chmod +x /etc/rc.d/rc.local + +#fi + +echo +echo "Do not forget to create a shared (NFS, Samba, ...) 
where original virtual machine images are allocated" +echo +echo "Do not forget to copy the public ssh key of openvim user into /home/${user_name}/.ssh/authorized_keys for authomatic login from openvim controller" +echo + +echo "Reboot the system to make the changes effective" + diff --git a/scripts/configure-compute-node-UBUNTU14.04.sh b/scripts/configure-compute-node-UBUNTU14.04.sh new file mode 100755 index 0000000..9d7daa7 --- /dev/null +++ b/scripts/configure-compute-node-UBUNTU14.04.sh @@ -0,0 +1,478 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# Authors: Antonio Lopez, Pablo Montes, Alfonso Tierno +# June 2015 + +# Personalize RHEL7.1 on compute nodes +# Prepared to work with the following network card drivers: +# tg3, igb drivers for management interfaces +# ixgbe (Intel Niantic) and i40e (Intel Fortville) drivers for data plane interfaces + +# To download: +# wget https://raw.githubusercontent.com/nfvlabs/openmano/master/scripts/configure-compute-node-RHEL7.1.sh +# To execute: +# chmod +x ./configure-compute-node-RHEL7.1.sh +# sudo ./configure-compute-node-RHEL7.1.sh + +# Assumptions: +# All virtualization options activated on BIOS (vt-d, vt-x, SR-IOV, no power savings...) 
+# RHEL7.1 installed without /home partition and with the following packages selection: +# @base, @core, @development, @network-file-system-client, @virtualization-hypervisor, @virtualization-platform, @virtualization-tools + + +function usage(){ + echo -e "Usage: sudo $0 [-y] [ [|dhcp] ]" + echo -e " Configure compute host for VIM usage. (version 0.4). Params:" + echo -e " -y do not prompt for confirmation. If a new user is created, the user name is set as password" + echo -e " Create if not exist and configure this user for openvim to connect" + echo -e " if suplied creates bridge interfaces on this interface, needed for openvim" + echo -e " ip or dhcp if suplied, configure the interface with this ip address (/24) or 'dhcp' " +} + + +#1 CHECK input parameters +#1.1 root privileges +[ "$USER" != "root" ] && echo "Needed root privileges" && usage && exit -1 + +#1.2 input parameters +FORCE="" +while getopts "y" o; do + case "${o}" in + y) + FORCE="yes" + ;; + *) + usage + exit -1 + ;; + esac +done +shift $((OPTIND-1)) + + + +if [ $# -lt 1 ] +then + usage + exit +fi + + +user_name=$1 +interface=$2 +ip_iface=$3 + +if [ -n "$interface" ] && ! ifconfig $interface &> /dev/null +then + echo "Error: interface '$interface' is not present in the system" + usage + exit 1 +fi + +echo ' +################################################################# +##### INSTALL NEEDED PACKETS ##### +#################################################################' + +# Required packages +apt-get -y update +#apt-get -y install grub-common screen virt-manager ethtool build-essential x11-common x11-utils x11-apps libguestfs-tools hwloc libguestfs-tools numactl vlan nfs-common nfs-kernel-server +apt-get -y install grub-common screen virt-manager ethtool build-essential x11-common x11-utils libguestfs-tools hwloc libguestfs-tools numactl vlan nfs-common nfs-kernel-server + +echo "Remove unneeded packages....." 
+apt-get -y autoremove
+# Selinux management
+#yum install -y policycoreutils-python
+
+
+
+echo '
+#################################################################
+#####       INSTALL USER                                    #####
+#################################################################'
+
+# Add required groups
+groupadd -f admin
+groupadd -f libvirt   #for other operating systems may be libvirtd
+
+# Adds user, default password same as name
+if grep -q "^${user_name}:" /etc/passwd
+then
+  #user exist, add to group
+  echo "adding user ${user_name} to groups libvirt,admin"
+  usermod -a -G libvirt,admin -g admin $user_name
+else
+  #create user if it does not exist
+  [ -z "$FORCE" ] && read -p "user '${user_name}' does not exist, create (Y/n)" kk
+  # NOTE: '=' needs surrounding spaces inside [ ]; without them the test is a
+  # single non-empty string (always true) and a "n" answer was silently ignored
+  if ! [ -z "$kk" -o "$kk" = "y" -o "$kk" = "Y" ]
+  then
+    exit
+  fi
+  echo "creating and configuring user ${user_name}"
+  useradd -m -G libvirt,admin -g admin $user_name
+  #Password
+  if [ -z "$FORCE" ]
+  then
+    echo "Provide a password for $user_name"
+    passwd $user_name
+  else
+    # Ubuntu's passwd has no --stdin option (RHEL-only); use chpasswd instead
+    echo "${user_name}:${user_name}" | chpasswd
+  fi
+fi
+
+# Allow admin users to access without password
+if ! grep -q "#openmano" /etc/sudoers
+then
+    cat >> /home/${user_name}/script_visudo.sh << EOL
+#!/bin/bash
+cat \$1 | awk '(\$0~"requiretty"){print "#"\$0}(\$0!~"requiretty"){print \$0}' > tmp
+cat tmp > \$1
+rm tmp
+echo "" >> \$1
+echo "#openmano allow to group admin to grant root privileges without password" >> \$1
+echo "%admin ALL=(ALL) NOPASSWD: ALL" >> \$1
+EOL
+    chmod +x /home/${user_name}/script_visudo.sh
+    echo "allowing admin user to get root privileges without password"
+    export EDITOR=/home/${user_name}/script_visudo.sh && sudo -E visudo
+    rm -f /home/${user_name}/script_visudo.sh
+fi
+
+
+echo '
+#################################################################
+#####       INSTALL HUGEPAGES ISOLCPUS GRUB                 #####
+#################################################################'
+
+# Huge pages 1G auto mount
+mkdir -p /mnt/huge
+if !
grep -q "Huge pages" /etc/fstab +then + echo "" >> /etc/fstab + echo "# Huge pages" >> /etc/fstab + echo "nodev /mnt/huge hugetlbfs pagesize=1GB 0 0" >> /etc/fstab + echo "" >> /etc/fstab +fi + +# Grub virtualization options: + +# Get isolcpus +isolcpus=`gawk 'BEGIN{pre=-2;} + ($1=="processor"){pro=$3;} + ($1=="core" && $4!=0){ + if (pre+1==pro){endrange="-" pro} + else{cpus=cpus endrange sep pro; sep=","; endrange="";}; + pre=pro;} + END{printf("%s",cpus endrange);}' /proc/cpuinfo` + + +echo "CPUS: $isolcpus" + +# Huge pages reservation file: reserving all memory apart from 4GB per NUMA node +# Get the number of hugepages: all memory but 8GB reserved for the OS +#totalmem=`dmidecode --type 17|grep Size |grep MB |gawk '{suma+=$2} END {print suma/1024}'` +#hugepages=$(($totalmem-8)) + +if ! [ -f /usr/lib/systemd/hugetlb-reserve-pages ] +then + cat > /usr/lib/systemd/hugetlb-reserve-pages << EOL +#!/bin/bash +nodes_path=/sys/devices/system/node/ +if [ ! -d \$nodes_path ]; then + echo "ERROR: \$nodes_path does not exist" + exit 1 +fi + +reserve_pages() +{ + echo \$1 > \$nodes_path/\$2/hugepages/hugepages-1048576kB/nr_hugepages +} + +# This example reserves all available memory apart from 4 GB for linux +# using 1GB size. 
You can modify it to your needs or comment the lines +# to avoid reserve memory in a numa node +EOL + for f in /sys/devices/system/node/node?/meminfo + do + node=`head -n1 $f | gawk '($5=="kB"){print $2}'` + memory=`head -n1 $f | gawk '($5=="kB"){print $4}'` + memory=$((memory+1048576-1)) #memory must be ceiled + memory=$((memory/1048576)) #from `kB to GB + #if memory + [ $memory -gt 4 ] && echo "reserve_pages $((memory-4)) node$node" >> /usr/lib/systemd/hugetlb-reserve-pages + done + + # Run the following commands to enable huge pages early boot reservation: + chmod +x /usr/lib/systemd/hugetlb-reserve-pages + systemctl enable hugetlb-gigantic-pages +fi + +# Prepares the text to add at the end of the grub line, including blacklisting ixgbevf driver in the host +memtotal=`grep MemTotal /proc/meminfo | awk '{ print $2 }' ` +hpages=$(( ($memtotal/(1024*1024))-8 )) + +memtotal=$((memtotal+1048576-1)) #memory must be ceiled +memtotal=$((memtotal/1048576)) #from `kB to GBa +hpages=$((memtotal-8)) +[[ $hpages -lt 0 ]] $$ hpages=0 + + +echo "------> memtotal: $memtotal" + +textokernel="intel_iommu=on default_hugepagesz=1G hugepagesz=1G hugepages=$hpages isolcpus=$isolcpus modprobe.blacklist=ixgbevf modprobe.blacklist=i40evf" + +echo "Text to kernel: $textokernel" + + +# Add text to the kernel line +if ! grep -q "intel_iommu=on default_hugepagesz=1G hugepagesz=1G" /etc/default/grub +then + echo ">>>>>>> adding cmdline ${textokernel}" + sed -i "/^GRUB_CMDLINE_LINUX_DEFAULT=/s/\"\$/${textokernel}\"/" /etc/default/grub + # grub2 upgrade + #grub2-mkconfig -o /boot/grub2/grub.cfg + update-grub +fi + +echo ' +################################################################# +##### OTHER CONFIGURATION ##### +#################################################################' + +# Links the OpenMANO required folder /opt/VNF/images to /var/lib/libvirt/images. 
The OS installation +# should have only a / partition with all possible space available + +echo " link /opt/VNF/images to /var/lib/libvirt/images" +if [ "$user_name" != "" ] +then + #mkdir -p /home/${user_name}/VNF_images + #chown -R ${user_name}:admin /home/${user_name}/VNF_images + #chmod go+x $HOME + + # The orchestator needs to link the images folder + rm -f /opt/VNF/images + mkdir -p /opt/VNF/ + ln -s /var/lib/libvirt/images /opt/VNF/images + chown -R ${user_name}:admin /opt/VNF + chown -R root:admin /var/lib/libvirt/images + chmod g+rwx /var/lib/libvirt/images + + # Selinux management + #echo "configure Selinux management" + #semanage fcontext -a -t virt_image_t "/home/${user_name}/VNF_images(/.*)?" + #cat /etc/selinux/targeted/contexts/files/file_contexts.local |grep virt_image + #restorecon -R -v /home/${user_name}/VNF_images +else + mkdir -p /opt/VNF/images + chmod o+rx /opt/VNF/images +fi + +echo "creating local information /opt/VNF/images/hostinfo.yaml" +echo "#By default openvim assumes control plane interface naming as em1,em2,em3,em4 " > /opt/VNF/images/hostinfo.yaml +echo "#and bridge ifaces as virbrMan1, virbrMan2, ..." 
>> /opt/VNF/images/hostinfo.yaml +echo "#if compute node contain a different name it must be indicated in this file" >> /opt/VNF/images/hostinfo.yaml +echo "#with the format extandard-name: compute-name" >> /opt/VNF/images/hostinfo.yaml +if [ "$interface" != "" -a "$interface" != "em1" ] +then + echo "iface_names:" >> /opt/VNF/images/hostinfo.yaml + echo " em1: ${interface}" >> /opt/VNF/images/hostinfo.yaml +fi +chmod o+r /opt/VNF/images/hostinfo.yaml + +# deactivate memory overcommit +#echo "deactivate memory overcommit" +#service ksmtuned stop +#service ksm stop +#chkconfig ksmtuned off +#chkconfig ksm off + + +# Libvirt options (uncomment the following) +echo "configure Libvirt options" +sed -i 's/#unix_sock_group = "libvirt"/unix_sock_group = "libvirt"/' /etc/libvirt/libvirtd.conf +sed -i 's/#unix_sock_rw_perms = "0770"/unix_sock_rw_perms = "0770"/' /etc/libvirt/libvirtd.conf +sed -i 's/#unix_sock_dir = "\/var\/run\/libvirt"/unix_sock_dir = "\/var\/run\/libvirt"/' /etc/libvirt/libvirtd.conf +sed -i 's/#auth_unix_rw = "none"/auth_unix_rw = "none"/' /etc/libvirt/libvirtd.conf + + +echo ' +################################################################# +##### NETWORK CONFIGURATION ##### +#################################################################' +# Network config (if the second parameter is net) +echo "Interface ==> $interface" +if [ -n "$interface" ] +then + + + # For management and data interfaces + rm -f /etc/udev/rules.d/pci_config.rules # it will be created to define VFs + + + # Set ONBOOT=on and MTU=9000 on the interface used for the bridges + echo "configuring iface $interface" + +#MTU for interfaces and bridges +MTU=9000 + +cp /etc/network/interfaces interfaces.tmp + + + #Create infrastructure bridge, normally used for connecting to compute nodes, openflow controller, ... 
+ + + #Create VLAN for infrastructure bridge + + echo " +######### CUTLINE ######### + +auto ${interface} +iface ${interface} inet static + mtu $MTU + +auto ${interface}.1001 +iface ${interface}.1001 inet static + mtu $MTU +" >> interfaces.tmp + + echo "ifconfig ${interface} mtu $MTU + ifconfig ${interface} up +" > mtu.tmp + + + #Create bridge interfaces + echo "Creating bridge ifaces: " + for ((i=1;i<=20;i++)) + do + i2digits=$i + [ $i -lt 10 ] && i2digits="0$i" + echo " virbrMan$i vlan 20$i2digits" + + j=$i + + echo " +auto ${interface}.20$i2digits +iface ${interface}.20$i2digits inet static + mtu $MTU + +auto virbrMan$j +iface virbrMan$j inet static + bridge_ports ${interface}.20$i2digits + mtu $MTU +" >> interfaces.tmp + + echo "ifconfig ${interface}.20$i2digits mtu $MTU +ifconfig virbrMan$j mtu $MTU +ifconfig virbrMan$j up +" >> mtu.tmp + + done + + echo " +auto em2.1001 +iface em2.1001 inet static + +auto virbrInf +iface virbrInf inet static + bridge_ports em2.1001 +" >> interfaces.tmp + + echo "ifconfig em2.1001 mtu $MTU +ifconfig virbrInf mtu $MTU +ifconfig virbrInf up +" >> mtu.tmp + +if ! grep -q "#### CUTLINE ####" /etc/network/interfaces +then + echo "====== Copying interfaces.tmp to /etc/network/interfaces" + cp interfaces.tmp /etc/network/interfaces +fi + + + #popd +fi + + +# Activate 8 Virtual Functions per PF on Niantic cards (ixgbe driver) +if [[ `lsmod | cut -d" " -f1 | grep "ixgbe" | grep -v vf` ]] +then + if ! grep -q "ixgbe" /etc/modprobe.d/ixgbe.conf + then + echo "options ixgbe max_vfs=8" >> /etc/modprobe.d/ixgbe.conf + fi + +fi + +# Set dataplane MTU + +echo "sleep 10" >> mtu.tmp + +interfaces=`ifconfig -a | grep ^p | cut -d " " -f 1` +for ph in $interfaces +do + echo "ifconfig $ph mtu $MTU" >> mtu.tmp + echo "ifconfig $ph up" >> mtu.tmp +done + + + +cp mtu.tmp /etc/setmtu.sh +chmod +x /etc/setmtu.sh + +# To define 8 VFs per PF we do it on rc.local, because the driver needs to be unloaded and loaded again +#if ! 
grep -q "NFV" /etc/rc.local
+#then
+  # NOTE(review): this rewrites /etc/rc.local from scratch ('>' truncates);
+  # any previous local customization is discarded
+  echo "#!/bin/sh -e
+" > /etc/rc.local
+  echo "# NFV" >> /etc/rc.local
+  echo "modprobe -r ixgbe" >> /etc/rc.local
+  echo "modprobe ixgbe max_vfs=8" >> /etc/rc.local
+  echo "/etc/setmtu.sh" >> /etc/rc.local
+  echo "
+exit 0" >> /etc/rc.local
+  echo "" >> /etc/rc.local
+
+  # Ubuntu has no /etc/rc.d; make the file we just wrote executable
+  chmod +x /etc/rc.local
+
+#fi
+
+chmod a+rwx /var/lib/libvirt/images
+# libvirt domain XMLs reference /usr/libexec/qemu-kvm (RHEL path); emulate it.
+# -p so a re-run of this script does not abort on an existing directory
+mkdir -p /usr/libexec/
+pushd /usr/libexec/
+ln -s /usr/bin/qemu-system-x86_64 qemu-kvm
+popd
+
+#Deactivating apparmor while looking for a better solution
+/etc/init.d/apparmor stop
+update-rc.d -f apparmor remove
+
+echo
+echo "Do not forget to create a shared (NFS, Samba, ...) where original virtual machine images are allocated"
+echo
+echo "Do not forget to copy the public ssh key into /home/${user_name}/.ssh/authorized_keys for authomatic login from openvim controller"
+echo
+
+echo "Reboot the system to make the changes effective"
+
+
diff --git a/scripts/configure-compute-node-develop.sh b/scripts/configure-compute-node-develop.sh
new file mode 100755
index 0000000..c378459
--- /dev/null
+++ b/scripts/configure-compute-node-develop.sh
@@ -0,0 +1,252 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# v1.0: 2015 June +# Authors: Antonio Lopez, Pablo Montes, Alfonso Tierno + +# Personalize RHEL7/CENTOS compute nodes for using openvim in 'development' mode: +# not using huge pages neither isolcpus + +# To download: +# wget https://raw.githubusercontent.com/nfvlabs/openmano/master/scripts/configure-compute-node-develop.sh +# To execute: +# chmod +x ./configure-compute-node-develop.sh +# sudo ./configure-compute-node-develop.sh + +function usage(){ + echo -e "Usage: sudo $0 [-y] [ [|dhcp] ]" + echo -e " Configure compute host for VIM usage in mode 'development'. Params:" + echo -e " -y do not prompt for confirmation. If a new user is created, the user name is set as password" + echo -e " Create if not exist and configure this user for openvim to connect" + echo -e " if supplied creates bridge interfaces on this interface, needed for openvim" + echo -e " ip or dhcp if supplied, configure the interface with this ip address (/24) or 'dhcp' " +} + +#1 CHECK input parameters +#1.1 root privileges +[ "$USER" != "root" ] && echo "Needed root privileges" && usage && exit -1 + +#1.2 input parameters +FORCE="" +while getopts "y" o; do + case "${o}" in + y) + FORCE="yes" + ;; + *) + usage + exit -1 + ;; + esac +done +shift $((OPTIND-1)) + + +if [ $# -lt 1 ] +then + usage + exit +fi + +user_name=$1 +interface=$2 +ip_iface=$3 + +if [ -n "$interface" ] && ! 
ifconfig $interface &> /dev/null
+then
+  echo "Error: interface '$interface' is not present in the system"
+  usage
+  exit 1
+fi
+
+echo '
+#################################################################
+#####       INSTALL NEEDED PACKETS                          #####
+#################################################################'
+
+# Required packages
+yum repolist
+yum check-update
+yum update -y
+yum install -y screen virt-manager ethtool gcc gcc-c++ xorg-x11-xauth xorg-x11-xinit xorg-x11-deprecated-libs libXtst guestfish hwloc libhugetlbfs-utils libguestfs-tools
+# Selinux management
+yum install -y policycoreutils-python
+
+echo '
+#################################################################
+#####       INSTALL USER                                    #####
+#################################################################'
+
+# Add required groups
+groupadd -f admin
+groupadd -f libvirt   #for other operating systems may be libvirtd
+
+# Adds user, default password same as name
+if grep -q "^${user_name}:" /etc/passwd
+then
+  #user exist, add to group
+  echo "adding user ${user_name} to groups libvirt,admin"
+  usermod -a -G libvirt,admin -g admin $user_name
+else
+  #create user if it does not exist
+  [ -z "$FORCE" ] && read -p "user '${user_name}' does not exist, create (Y/n)" kk
+  # NOTE: '=' needs surrounding spaces inside [ ]; without them the test is a
+  # single non-empty string (always true) and a "n" answer was silently ignored
+  if ! [ -z "$kk" -o "$kk" = "y" -o "$kk" = "Y" ]
+  then
+    exit
+  fi
+  echo "creating and configuring user ${user_name}"
+  useradd -m -G libvirt,admin -g admin $user_name
+  #Password
+  if [ -z "$FORCE" ]
+  then
+    echo "Provide a password for $user_name"
+    passwd $user_name
+  else
+    # RHEL/CentOS passwd supports --stdin, so this is valid here
+    echo -e "$user_name\n$user_name" | passwd --stdin $user_name
+  fi
+fi
+
+# Allow admin users to access without password
+if !
grep -q "#openmano" /etc/sudoers +then + cat >> /home/${user_name}/script_visudo.sh << EOL +#!/bin/bash +cat \$1 | awk '(\$0~"requiretty"){print "#"\$0}(\$0!~"requiretty"){print \$0}' > tmp +cat tmp > \$1 +rm tmp +echo "" >> \$1 +echo "#openmano allow to group admin to grant root privileges without password" >> \$1 +echo "%admin ALL=(ALL) NOPASSWD: ALL" >> \$1 +EOL + chmod +x /home/${user_name}/script_visudo.sh + echo "allowing admin user to get root privileges withut password" + export EDITOR=/home/${user_name}/script_visudo.sh && sudo -E visudo + rm -f /home/${user_name}/script_visudo.sh +fi + +echo ' +################################################################# +##### OTHER CONFIGURATION ##### +#################################################################' +# Creates a folder to store images in the user home +#Creates a link to the /home folder because in RHEL this folder is larger +echo "creating compute node folder for local images /opt/VNF/images" +if [ "$user_name" != "" ] +then + mkdir -p /home/VNF_images + chown -R ${user_name}:admin /home/VNF_images + chmod go+x /home/VNF_images + + # The orchestator needs to link the images folder + rm -f /opt/VNF/images + mkdir -p /opt/VNF/ + ln -s /home/VNF_images /opt/VNF/images + chown -R ${user_name}:admin /opt/VNF + +else + mkdir -p /opt/VNF/images + chmod o+rx /opt/VNF/images +fi + +echo "creating local information /opt/VNF/images/hostinfo.yaml" +echo "#By default openvim assumes control plane interface naming as em1,em2,em3,em4 " > /opt/VNF/images/hostinfo.yaml +echo "#and bridge ifaces as virbrMan1, virbrMan2, ..." 
>> /opt/VNF/images/hostinfo.yaml +echo "#if compute node contain a different name it must be indicated in this file" >> /opt/VNF/images/hostinfo.yaml +echo "#with the format extandard-name: compute-name" >> /opt/VNF/images/hostinfo.yaml +if [ "$interface" != "" -a "$interface" != "em1" ] +then + echo "iface_names:" >> /opt/VNF/images/hostinfo.yaml + echo " em1: ${interface}" >> /opt/VNF/images/hostinfo.yaml +fi +chmod o+r /opt/VNF/images/hostinfo.yaml + +# deactivate memory overcommit +echo "deactivate memory overcommit" +service ksmtuned stop +service ksm stop +chkconfig ksmtuned off +chkconfig ksm off + +# Libvirt options (uncomment the following) +echo "configure Libvirt options" +sed -i 's/#unix_sock_group = "libvirt"/unix_sock_group = "libvirt"/' /etc/libvirt/libvirtd.conf +sed -i 's/#unix_sock_rw_perms = "0770"/unix_sock_rw_perms = "0770"/' /etc/libvirt/libvirtd.conf +sed -i 's/#unix_sock_dir = "\/var\/run\/libvirt"/unix_sock_dir = "\/var\/run\/libvirt"/' /etc/libvirt/libvirtd.conf +sed -i 's/#auth_unix_rw = "none"/auth_unix_rw = "none"/' /etc/libvirt/libvirtd.conf + +echo ' +################################################################# +##### NETWORK CONFIGURATION ##### +#################################################################' +# Network config (if the second parameter is net) +if [ -n "$interface" ] +then + + # Deactivate network manager + #systemctl stop NetworkManager + #systemctl disable NetworkManager + + pushd /etc/sysconfig/network-scripts/ + + #Create infrastructure bridge + echo "DEVICE=virbrInf +TYPE=Bridge +ONBOOT=yes +DELAY=0 +NM_CONTROLLED=no +IPADDR=10.10.0.1 +NETMASK=255.255.255.0 +USERCTL=no" > ifcfg-virbrInf + + #Create bridge interfaces + echo "Creating bridge ifaces: " + for ((i=1;i<=20;i++)) + do + i2digits=$i + [ $i -lt 10 ] && i2digits="0$i" + echo " virbrMan$i" + echo "DEVICE=virbrMan$i +TYPE=Bridge +ONBOOT=yes +DELAY=0 +NM_CONTROLLED=no +USERCTL=no" > ifcfg-virbrMan$i + + done + + popd +fi + +echo +echo "Do not forget to 
create a folder where original virtual machine images are allocated (ex. $HOME/static_storage)" +echo +echo "Do not forget to allow openvim machine accessing directly to the host with ssh. Can be done by:" +echo " Copy the public ssh key of the openvim user from $HOME/.ssh/id_dsa.pub (in openvim) into /home/${user_name}/.ssh/authorized_keys (in the host) for automatic login from openvim controller" +echo " Or running on openvim machine 'ssh-keygen' (generate ssh keys) and 'ssh-copy-id @'" +echo +echo "Do not forget to perform an initial ssh login from openmano VM into the host so the openmano ssh host key is added to /home/${user_name}/.ssh/known_hosts" +echo + +echo "Reboot the system to make the changes effective" + + diff --git a/scripts/flow-logback.xml b/scripts/flow-logback.xml new file mode 100644 index 0000000..91f000d --- /dev/null +++ b/scripts/flow-logback.xml @@ -0,0 +1,38 @@ + + + + + + %level [%logger:%thread] %msg%n + + + + + + + + + + + diff --git a/scripts/flow.properties b/scripts/flow.properties new file mode 100644 index 0000000..86f0bfa --- /dev/null +++ b/scripts/flow.properties @@ -0,0 +1,42 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +floodlight.modules = net.floodlightcontroller.storage.memory.MemoryStorageSource,\ +net.floodlightcontroller.core.FloodlightProvider,\ +net.floodlightcontroller.threadpool.ThreadPool,\ +net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher,\ +net.floodlightcontroller.firewall.Firewall,\ +net.floodlightcontroller.jython.JythonDebugInterface,\ +net.floodlightcontroller.counter.CounterStore,\ +net.floodlightcontroller.perfmon.PktInProcessingTime,\ +net.floodlightcontroller.ui.web.StaticWebRoutable + +#PORT API floodlight will listen to. Must match the 'of_controller_port' of openvimd.cfg +net.floodlightcontroller.restserver.RestApiServer.port = 7070 + +#PORT used by the switch to connect to floodlight +net.floodlightcontroller.core.FloodlightProvider.openflowport = 6633 +net.floodlightcontroller.jython.JythonDebugInterface.port = 6655 + +#timeout parameters +net.floodlightcontroller.forwarding.Forwarding.idletimeout = 5 +net.floodlightcontroller.forwarding.Forwarding.hardtimeout = 0 + diff --git a/scripts/flow.properties_v1.1 b/scripts/flow.properties_v1.1 new file mode 100644 index 0000000..eede49b --- /dev/null +++ b/scripts/flow.properties_v1.1 @@ -0,0 +1,36 @@ +floodlight.modules=\ +net.floodlightcontroller.jython.JythonDebugInterface,\ +net.floodlightcontroller.storage.memory.MemoryStorageSource,\ +net.floodlightcontroller.core.internal.FloodlightProvider,\ +net.floodlightcontroller.threadpool.ThreadPool,\ +net.floodlightcontroller.debugcounter.DebugCounterServiceImpl,\ +net.floodlightcontroller.perfmon.PktInProcessingTime,\ +net.floodlightcontroller.debugevent.DebugEventService,\ +net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher,\ +net.floodlightcontroller.restserver.RestApiServer,\ +net.floodlightcontroller.topology.TopologyManager,\ +net.floodlightcontroller.forwarding.Forwarding,\ 
+net.floodlightcontroller.linkdiscovery.internal.LinkDiscoveryManager,\ +net.floodlightcontroller.ui.web.StaticWebRoutable,\ +net.floodlightcontroller.loadbalancer.LoadBalancer,\ +net.floodlightcontroller.firewall.Firewall,\ +net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl +org.sdnplatform.sync.internal.SyncManager.authScheme=CHALLENGE_RESPONSE +org.sdnplatform.sync.internal.SyncManager.keyStorePath=/etc/floodlight/auth_credentials.jceks +org.sdnplatform.sync.internal.SyncManager.dbPath=/var/lib/floodlight/ +org.sdnplatform.sync.internal.SyncManager.port=6642 +net.floodlightcontroller.core.internal.FloodlightProvider.openflowPort=6653 +net.floodlightcontroller.core.internal.FloodlightProvider.role=ACTIVE +net.floodlightcontroller.core.internal.OFSwitchManager.clearTablesOnInitialHandshakeAsMaster=YES +net.floodlightcontroller.core.internal.OFSwitchManager.clearTablesOnEachTransitionToMaster=YES +net.floodlightcontroller.core.internal.OFSwitchManager.keyStorePath=/path/to/your/keystore-file.jks +net.floodlightcontroller.core.internal.OFSwitchManager.keyStorePassword=your-keystore-password +net.floodlightcontroller.core.internal.OFSwitchManager.useSsl=NO +net.floodlightcontroller.restserver.RestApiServer.keyStorePath=/path/to/your/keystore-file.jks +net.floodlightcontroller.restserver.RestApiServer.keyStorePassword=your-keystore-password +net.floodlightcontroller.restserver.RestApiServer.httpsNeedClientAuthentication=NO +net.floodlightcontroller.restserver.RestApiServer.useHttps=NO +net.floodlightcontroller.restserver.RestApiServer.useHttp=YES +net.floodlightcontroller.restserver.RestApiServer.httpsPort=8081 +net.floodlightcontroller.restserver.RestApiServer.httpPort=8080 + diff --git a/scripts/get_dhcp_lease.sh b/scripts/get_dhcp_lease.sh new file mode 100755 index 0000000..d2f04c3 --- /dev/null +++ b/scripts/get_dhcp_lease.sh @@ -0,0 +1,10 @@ +#!/bin/bash +awk ' +($1=="lease" && $3=="{"){ lease=$2; active="no"; found="no" } +($1=="binding" && 
$2=="state" && $3=="active;"){ active="yes" } +($1=="hardware" && $2=="ethernet" && $3==tolower("'$1';")){ found="yes" } +($1=="client-hostname"){ name=$2 } +($1=="}"){ if (active=="yes" && found=="yes"){ target_lease=lease; target_name=name}} +END{printf("%s", target_lease)} #print target_name +' /var/lib/dhcp/dhcpd.leases + diff --git a/scripts/host-add-develop.sh b/scripts/host-add-develop.sh new file mode 100755 index 0000000..dce4bde --- /dev/null +++ b/scripts/host-add-develop.sh @@ -0,0 +1,151 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +#Get configuration of a host for using it as a compute node + +function usage(){ + echo -e "usage: $0 user ip_name nb_cores GiB_memory nb_10GB_interfaces [hostname] [>> host.yaml]\n Get host parameters and generated a yaml file to be used for openvim host-add" + echo -e " - In case hostname is not specified it will be used the name of the machine where the script is run" + echo -e " - nb_cores must be an odd number and bigger or equal to 4." + echo -e " - GiB_memory must be an odd number and bigger or equal to 16. 4GiB of memory will be reserved for the host OS, the rest will be used by VM." + echo -e " - nb_10GB_interfaces must be an odd number and bigger or equal to 4." 
+ echo -e " - The output will be a server descriptor with two numas and resources (memory, cores and interfaces) equally distributed between them." + echo -e " - Each interface (physical funtion) will have defined 8 SR-IOV (virtual functions)." + exit 1 +} + +function get_hash_value() { echo `eval echo $\{\`echo $1[$2]\`\}`; } + +function get_mac(){ + seed=$1 + b1=$((seed%16)); seed=$((seed/16)) + b2=$((seed%16)); seed=$((seed/16)) + b3=$((seed%16)); seed=$((seed/16)) + b4=$((seed%16)); seed=$((seed/16)) + b5=$((seed%16)); seed=$((seed/16)) + mac=`printf "%02X:%02X:%02X:%02X:%02X:%02X" 2 $b5 $b4 $b3 $b2 $b1` + echo $mac +} + + +#check root privileges and non a root user behind + +[ "$#" -lt "5" ] && echo "Missing parameters" && usage +[ "$#" -gt "6" ] && echo "Too many parameters" && usage +HOST_NAME=`cat /etc/hostname` +[ "$#" -eq "6" ] && HOST_NAME=$6 +FEATURES_LIST="lps,dioc,hwsv,tlbps,ht,lps,64b,iommu" +NUMAS=2 +CORES=$3 +MEMORY=$4 +INTERFACES=$5 + +#Ensure the user input is big enough +([ $((CORES%2)) -ne 0 ] || [ $CORES -lt 4 ] ) && echo -e "ERROR: Wrong number of cores\n" && usage +([ $((MEMORY%2)) -ne 0 ] || [ $MEMORY -lt 16 ] ) && echo -e "ERROR: Wrong number of memory\n" && usage +([ $((INTERFACES%2)) -ne 0 ] || [ $INTERFACES -lt 4 ] ) && echo -e "ERROR: Wrong number of interfaces\n" && usage + +#Generate a cpu topology for 4 numas with hyperthreading +CPUS=`pairs_gap=$((CORES/NUMAS));numa=0;inc=0;sibling=0;for((thread=0;thread<=$((pairs_gap-1));thread++)); do printf " ${numa}-${sibling}-${thread} ${numa}-${sibling}-$((thread+pairs_gap))";numa=$(((numa+1)%$NUMAS)); sibling=$((sibling+inc)); inc=$(((inc+1)%2)); done` + +#in this developing/fake server all cores can be used + +echo "#This file was created by $0" +echo "#for adding this compute node to openvim" +echo "#copy this file to openvim controller and run" +echo "#openvim host-add " +echo +echo "host:" +echo " name: $HOST_NAME" +echo " user: $1" +echo " ip_name: $2" +echo "host-data:" +echo " name: 
$HOST_NAME" +echo " user: $1" +echo " ip_name: $2" +echo " ranking: 100" +echo " description: $HOST_NAME" +echo " features: $FEATURES_LIST" +echo " numas:" + +numa=0 +last_iface=0 +iface_counter=0 +while [ $numa -lt $NUMAS ] +do + echo " - numa_socket: $numa" +#MEMORY + echo " hugepages: $((MEMORY/2-2))" + echo " memory: $((MEMORY/2))" + +#CORES + echo " cores:" + for cpu in $CPUS + do + PHYSICAL=`echo $cpu | cut -f 1 -d"-"` + CORE=`echo $cpu | cut -f 2 -d"-"` + THREAD=`echo $cpu | cut -f 3 -d"-"` + [ $PHYSICAL != $numa ] && continue #skip non physical + echo " - core_id: $CORE" + echo " thread_id: $THREAD" + [ $CORE -eq 0 ] && echo " status: noteligible" + done + + + #GENERATE INTERFACES INFORMATION AND PRINT IT + seed=$RANDOM + echo " interfaces:" + for ((iface=0;iface<$INTERFACES;iface+=2)) + do + name="iface$iface_counter" + bus=$((iface+last_iface)) + pci=`printf "0000:%02X:00.0" $bus` + mac=`get_mac $seed` + seed=$((seed+1)) + + echo " - source_name: $name" + echo " Mbps: 10000" + echo " pci: \"$pci\"" + echo " mac: \"$mac\"" + echo " switch_dpid: \"01:02:03:04:05:06\"" + echo " switch_port: fake0/$iface_counter" + echo " sriovs:" + + for((nb_sriov=0;nb_sriov<8;nb_sriov++)) + do + pci=`printf "0000:%02X:10.%i" $bus $nb_sriov` + mac=`get_mac $seed` + seed=$((seed+1)) + echo " - mac: \"$mac\"" + echo " pci: \"$pci\"" + echo " source_name: $nb_sriov" + done + + iface_counter=$((iface_counter+1)) + done + last_iface=$(((numa+1)*127/NUMAS+5)) #made-up formula for more realistic pci numbers + + + numa=$((numa+1)) +done + diff --git a/scripts/host-add.sh b/scripts/host-add.sh new file mode 100755 index 0000000..cf2138d --- /dev/null +++ b/scripts/host-add.sh @@ -0,0 +1,406 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +#Get configuration of a host for using it as a compute node + +function usage(){ + echo -e "usage: $0 user ip_name [>> host.yaml]\n Get host parameters and generated a yaml file to be used for openvim host-add" + exit 1 +} + +function load_vf_driver(){ + local pf_driver=$1 + if [[ `lsmod | cut -d" " -f1 | grep $pf_driver | grep -v vf` ]] && [[ ! `lsmod | cut -d" " -f1 | grep ${pf_driver}vf` ]] + then + >&2 echo "$pf_driver is loaded but not ${pf_driver}vf. This is required in order to properly add SR-IOV." + read -p "Do you want to load ${pf_driver}vf [Y/n] " load_driver + case $load_driver in + [nN]* ) exit 1;; + * ) >&2 echo "Loading ${pf_driver}vf..." + modprobe ${pf_driver}vf; + >&2 echo "Reloading ${pf_driver}..." + modprobe -r $pf_driver; + modprobe $pf_driver;; + esac + fi +} + +function remove_vf_driver(){ + local pf_driver=$1 + if [[ `lsmod | cut -d" " -f1 | grep $pf_driver | grep -v vf` ]] && [[ `lsmod | cut -d" " -f1 | grep ${pf_driver}vf` ]] + then + >&2 echo "${pf_driver}vf is loaded. In order to ensure proper SR-IOV behavior the driver must be removed." + read -p "Do you want to remove ${pf_driver}vf now? [Y/n] " remove_driver + case $remove_driver in + [nN]* ) >&2 echo "OK. Remember to remove the driver prior start using the compute node executing:"; + >&2 echo "modprobe -r ${pf_driver}vf"; + >&2 echo "modprobe -r ${pf_driver}"; + >&2 echo "modprobe ${pf_driver}";; + * ) >&2 echo "Removing ${pf_driver}vf..." 
+ modprobe -r ${pf_driver}vf; + >&2 echo "Reloading ${pf_driver}..." + modprobe -r $pf_driver; + modprobe $pf_driver;; + esac + fi +} + +function get_hash_value() { echo `eval echo $\{\`echo $1[$2]\`\}`; } + +function xmlpath_args() +{ + local expr="${1//\// }" + local path=() + local chunk tag data + local exit_code=1 + local print_line=0 + local closing_tag=0 + + while IFS='' read -r -d '<' chunk; do + data=arguments="" + IFS='>' read -r tag_arg data <<< "$chunk" + IFS=' ' read -r tag arguments <<< "$tag_arg" + #If last tag was single level remove it from path + if [[ $closing_tag -eq 1 ]] + then + unset path[${#path[@]}-1] + closing_tag=0 + fi + #In case the tag is closed in the same line mark it + [[ $arguments = ?*'/' ]] && closing_tag=1 + arguments="${arguments//\//}" + case "$tag" in + '?'*) ;; + '!--'*) ;; + ?*'/') ;; + '/'?*) unset path[${#path[@]}-1] ;; + ?*) path+=("$tag");; + esac + + #echo "\"${path[@]}\" \"$expr\" \"$data\" \"$arguments\" $exit_code $print_line" + + if [[ "${path[@]}" == "$expr" ]] + then + #If there is data print it and append arguments if any + if [ "$data" != "" ] + then + echo "$data $arguments" + #return code 0 means data was found + exit_code=0 + continue + #if there is no data but there are arguments print arguments + elif [ "$arguments" != "" ] + then + echo "$arguments" + #return code 2 means no data but arguments were found + exit_code=2 + continue + #otherwise switch flag to start/stop echoing each line until the tag is closed + elif [[ $exit_code -eq 1 ]] + then + print_line=$(((print_line+1)%2)) + #return code 3 means that the whole xml segment is returned + exit_code=3 + fi + fi + [[ $print_line == "1" ]] && echo "<"$chunk + done + return $exit_code +} + + +#check root privileges and non a root user behind + +[[ "$#" -lt "2" ]] && echo "Missing parameters" && usage +load_vf_driver ixgbe +load_vf_driver i40e + +HOST_NAME=`cat /etc/hostname` +FEATURES=`grep "^flags" /proc/cpuinfo` +FEATURES_LIST="" +if echo $FEATURES | 
grep -q pdpe1gb ; then FEATURES_LIST="${FEATURES_LIST},lps"; fi +if echo $FEATURES | grep -q dca ; then FEATURES_LIST="${FEATURES_LIST},dioc"; fi +if echo $FEATURES | egrep -q "(vmx|svm)" ; then FEATURES_LIST="${FEATURES_LIST},hwsv"; fi +if echo $FEATURES | egrep -q "(ept|npt)" ; then FEATURES_LIST="${FEATURES_LIST},tlbps"; fi +if echo $FEATURES | grep -q ht ; then FEATURES_LIST="${FEATURES_LIST},ht"; fi +if uname -m | grep -q x86_64 ; then FEATURES_LIST="${FEATURES_LIST},64b"; fi +if cat /var/log/dmesg | grep -q -e Intel-IOMMU ; then FEATURES_LIST="${FEATURES_LIST},iommu"; fi +FEATURES_LIST=${FEATURES_LIST#,} + +NUMAS=`gawk 'BEGIN{numas=0;} + ($1=="physical" && $2=="id" ){ if ($4+1>numas){numas=$4+1} }; + END{printf("%d",numas);}' /proc/cpuinfo` + +CPUS=`gawk '($1=="processor"){pro=$3;} + ($1=="physical" && $2=="id"){ phy=$4;} + ($1=="core" && $2=="id"){printf " %d-%d-%d", phy,$4,pro;}' /proc/cpuinfo` + +if grep -q isolcpus /proc/cmdline +then + isolcpus=`cat /proc/cmdline` + isolcpus=${isolcpus##*isolcpus=} + isolcpus=${isolcpus%% *} + isolcpus=${isolcpus//,/ } +else + isolcpus="" +fi + + +#obtain interfaces information +unset dpid +read -p "Do you want to provide the interfaces connectivity information (datapathid/dpid of the switch and switch port id)? [Y/n] " conn_info +case $conn_info in + [Nn]* ) prov_conn=false;; + * ) prov_conn=true; + read -p "What is the switch dapapathid/dpdi? 
(01:02:03:04:05:06:07:08) " dpid; + [[ -z $dpid ]] && dpid="01:02:03:04:05:06:07:08"; + PORT_RANDOM=$RANDOM + iface_counter=0;; +esac +OLDIFS=$IFS +IFS=$'\n' +unset PF_list +unset VF_list +for device in `virsh nodedev-list --cap net | grep -v net_lo_00_00_00_00_00_00` +do +virsh nodedev-dumpxml $device > device_xml +name=`xmlpath_args "device/capability/interface" < device_xml` +name="${name// /}" +address=`xmlpath_args "device/capability/address" < device_xml` +address="${address// /}" +parent=`xmlpath_args "device/parent" < device_xml` +parent="${parent// /}" +#the following line created variables 'speed' and 'state' +eval `xmlpath_args "device/capability/link" < device_xml` +virsh nodedev-dumpxml $parent > parent_xml +driver=`xmlpath_args "device/driver/name" < parent_xml` +[ $? -eq 1 ] && driver="N/A" +driver="${driver// /}" + +#If the device is not up try to bring it up and reload state +if [[ $state == 'down' ]] && ( [[ $driver == "ixgbe" ]] || [[ $driver == "i40e" ]] ) +then + >&2 echo "$name is down. Trying to bring it up" + ifconfig $name up + sleep 2 + virsh nodedev-dumpxml $device > device_xml + eval `xmlpath_args "device/capability/link" < device_xml` +fi + +if [[ $state == 'down' ]] && ( [[ $driver == "ixgbe" ]] || [[ $driver == "i40e" ]] ) +then + >&2 echo "Interfaces must be connected and up in order to properly detect the speed. You can provide this information manually or skip the interface" + keep_asking=true + skip_interface=true + unset speed + while $keep_asking; do + read -p "Do you want to skip interface $name ($address) [y/N] " -i "n" skip + case $skip in + [Yy]* ) keep_asking=false;; + * ) skip_interface=false; + default_speed="10000" + while $keep_asking; do + read -p "What is the speed of the interface expressed in Mbps? 
($default_speed) " speed; + [[ -z $speed ]] && speed=$default_speed + [[ $speed =~ ''|*[!0-9] ]] && echo "The input must be an integer" && continue; + keep_asking=false ; + done;; + esac + done + + $skip_interface && continue +fi +#the following line creates a 'node' variable +eval `xmlpath_args "device/capability/numa" < parent_xml` +#the following line creates the variable 'type' +#in case the interface is a PF the value is 'virt_functions' +#in case the interface is a VF the value is 'phys_function' +type="N/A" +eval `xmlpath_args "device/capability/capability" < parent_xml` +#obtain pci +#the following line creates the variables 'domain' 'bus' 'slot' and 'function' +eval `xmlpath_args "device/capability/iommuGroup/address" < parent_xml` +pci="${domain#*x}:${bus#*x}:${slot#*x}.${function#*x}" +underscored_pci="${pci//\:/_}" +underscored_pci="pci_${underscored_pci//\./_}" + +if ( [[ $driver == "ixgbe" ]] || [[ $driver == "i40e" ]] ) +then + underscored_pci="pf"$underscored_pci + PF_list[${#PF_list[@]}]=$underscored_pci + eval declare -A $underscored_pci + eval $underscored_pci["name"]=$name + eval $underscored_pci["numa"]=$node + eval $underscored_pci["mac"]=$address + eval $underscored_pci["speed"]=$speed + eval $underscored_pci["pci"]=$pci + #request switch port to the user if this information is being provided and include it + if $prov_conn + then + unset switch_port + read -p "What is the port name in the switch $dpid where port $name ($pci) is connected? 
(${name}-${PORT_RANDOM}/$iface_counter) " switch_port + [[ -z $switch_port ]] && switch_port="${name}-${PORT_RANDOM}/$iface_counter" + iface_counter=$((iface_counter+1)) + eval $underscored_pci["dpid"]=$dpid + eval $underscored_pci["switch_port"]=$switch_port + fi + + #Añado el pci de cada uno de los hijos + SRIOV_counter=0 + for child in `xmlpath_args "device/capability/capability/address" < parent_xml` + do + SRIOV_counter=$((SRIOV_counter+1)) + #the following line creates the variables 'domain' 'bus' 'slot' and 'function' + eval $child + eval $underscored_pci["SRIOV"$SRIOV_counter]="${domain#*x}_${bus#*x}_${slot#*x}_${function#*x}" + done + eval $underscored_pci["SRIOV"]=$SRIOV_counter + +#Si se trata de un SRIOV (tiene una capability con type 'phys_function') +elif [[ $type == 'phys_function' ]] +then + underscored_pci="vf"$underscored_pci + VF_list[${#VF_list[@]}]=$underscored_pci + eval declare -A $underscored_pci + eval $underscored_pci["source_name"]=$name + eval $underscored_pci["mac"]=$address + eval $underscored_pci["pci"]=$pci +fi +rm -f device_xml parent_xml +done +IFS=$OLDIFS + +echo "#This file was created by $0" +echo "#for adding this compute node to openvim" +echo "#copy this file to openvim controller and run" +echo "#openvim host-add " +echo +echo "host:" +echo " name: $HOST_NAME" +echo " user: $1" +echo " ip_name: $2" +echo "host-data:" +echo " name: $HOST_NAME" +echo " user: $1" +echo " ip_name: $2" +echo " ranking: 100" +echo " description: $HOST_NAME" +echo " features: $FEATURES_LIST" +echo " numas:" + +numa=0 +while [[ $numa -lt $NUMAS ]] +do + echo " - numa_socket: $numa" +#MEMORY + if [ -f /sys/devices/system/node/node${numa}/hugepages/hugepages-1048576kB/nr_hugepages ] + then + echo " hugepages: " `cat /sys/devices/system/node/node${numa}/hugepages/hugepages-1048576kB/nr_hugepages` + else + #TODO hugepages of 2048kB size + echo " hugepages: 0" + fi + memory=`head -n1 /sys/devices/system/node/node${numa}/meminfo | gawk '($5=="kB"){print 
$4}'` + memory=$((memory+1048576-1)) #memory must be ceiled + memory=$((memory/1048576)) #from `kB to GB + echo " memory: $memory" + +#CORES + echo " cores:" + FIRST="-" #first item in a list start with "-" in yaml files, then it will set to " " + for cpu in $CPUS + do + PHYSICAL=`echo $cpu | cut -f 1 -d"-"` + CORE=`echo $cpu | cut -f 2 -d"-"` + THREAD=`echo $cpu | cut -f 3 -d"-"` + [[ $PHYSICAL != $numa ]] && continue #skip non physical + echo " - core_id: $CORE" + echo " thread_id: $THREAD" + #check if eligible + cpu_isolated="no" + for isolcpu in $isolcpus + do + isolcpu_start=`echo $isolcpu | cut -f 1 -d"-"` + isolcpu_end=`echo $isolcpu | cut -f 2 -d"-"` + if [ "$THREAD" -ge "$isolcpu_start" -a "$THREAD" -le "$isolcpu_end" ] + then + cpu_isolated="yes" + break + fi + done + [[ $cpu_isolated == "no" ]] && echo " status: noteligible" + FIRST=" " + done + + #NIC INTERFACES + interfaces_nb=0 + for ((i=0; i<${#PF_list[@]};i++)) + do + underscored_pci=${PF_list[$i]} + pname=$(get_hash_value $underscored_pci "name") + pnuma=$(get_hash_value $underscored_pci "numa") + [[ $pnuma != $numa ]] && continue + pmac=$(get_hash_value $underscored_pci "mac") + ppci=$(get_hash_value $underscored_pci "pci") + pspeed=$(get_hash_value $underscored_pci "speed") + pSRIOV=$(get_hash_value $underscored_pci "SRIOV") + [[ $interfaces_nb -eq 0 ]] && echo " interfaces:" + interfaces_nb=$((interfaces_nb+1)) + sriov_nb=0 + echo " - source_name: $pname" + echo " Mbps: $pspeed" + echo " pci: \"$ppci\"" + echo " mac: \"$pmac\"" + if $prov_conn + then + pdpid=$(get_hash_value $underscored_pci "dpid") + pswitch_port=$(get_hash_value $underscored_pci "switch_port") + echo " switch_dpid: $pdpid" + echo " switch_port: $pswitch_port" + fi + for ((j=1;j<=$pSRIOV;j++)) + do + childSRIOV="vfpci_"$(get_hash_value $underscored_pci "SRIOV"$j) + pname=$(get_hash_value $childSRIOV "source_name") + index=${pname##*_} + pmac=$(get_hash_value $childSRIOV "mac") + ppci=$(get_hash_value $childSRIOV "pci") + [[ 
$sriov_nb -eq 0 ]] && echo " sriovs:" + sriov_nb=$((sriov_nb+1)) + echo " - mac: \"$pmac\"" + echo " pci: \"$ppci\"" + echo " source_name: $index" + done + done + + numa=$((numa+1)) +done +remove_vf_driver ixgbe +remove_vf_driver i40e +#Bring up all interfaces +for ((i=0; i<${#PF_list[@]};i++)) +do + underscored_pci=${PF_list[$i]} + pname=$(get_hash_value $underscored_pci "name") + ifconfig $pname up +done diff --git a/scripts/initopenvim.sh b/scripts/initopenvim.sh new file mode 100755 index 0000000..69f3667 --- /dev/null +++ b/scripts/initopenvim.sh @@ -0,0 +1,213 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +#This script can be used as a basic test of openvim +#stopping on an error +#WARNING: It destroy the database content + + +function usage(){ + echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] \n Deletes openvim content and add fake hosts, networks" + echo -e " is a list of the following items (by default 'reset create')" + echo -e " reset reset the openvim database content" + echo -e " create creates fake hosts and networks" + echo -e " delete delete created items" + echo -e " delete-all delete vms. flavors, images, ..." 
+ echo -e " OPTIONS:" + echo -e " -f --force : does not prompt for confirmation" + echo -e " -d --delete : same to action delete-all" + echo -e " --insert-bashrc insert the created tenant variables at" + echo -e " ~/.bashrc to be available by openvim CLI" + echo -e " -h --help : shows this help" +} + +function is_valid_uuid(){ + echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' && return 0 + return 1 +} + + +#detect if is called with a source to use the 'exit'/'return' command for exiting +[[ ${BASH_SOURCE[0]} != $0 ]] && _exit="return" || _exit="exit" + +#check correct arguments +force="" +action_list="" +insert_bashrc="" + +while [[ $# -gt 0 ]] +do + argument="$1" + shift + if [[ $argument == reset ]] || [[ $argument == create ]] || [[ $argument == delete ]] || [[ $argument == delete-all ]] + then + action_list="$action_list $argument" + continue + #short options + elif [[ ${argument:0:1} == "-" ]] && [[ ${argument:1:1} != "-" ]] && [[ ${#argument} -ge 2 ]] + then + index=0 + while index=$((index+1)) && [[ $index -lt ${#argument} ]] + do + [[ ${argument:$index:1} == h ]] && usage && $_exit 0 + [[ ${argument:$index:1} == f ]] && force=y && continue + [[ ${argument:$index:1} == d ]] && action_list="delete-all $action_list" && continue + echo "invalid option '${argument:$index:1}'? Type -h for help" >&2 && $_exit 1 + done + continue + fi + #long options + [[ $argument == --help ]] && usage && $_exit 0 + [[ $argument == --force ]] && force=y && continue + [[ $argument == --delete ]] && action_list="delete-all $action_list" && continue + [[ $argument == --insert-bashrc ]] && insert_bashrc=y && continue + echo "invalid argument '$argument'? 
Type -h for help" >&2 && $_exit 1 +done + +DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]})) +DIRvim=$(dirname $DIRNAME) +export OPENVIM_HOST=localhost +export OPENVIM_PORT=9080 +[[ $insert_bashrc == y ]] && echo -e "\nexport OPENVIM_HOST=localhost" >> ~/.bashrc +[[ $insert_bashrc == y ]] && echo -e "\nexport OPENVIM_PORT=9080" >> ~/.bashrc +#by default action should be reset and create +[[ -z $action_list ]] && action_list="reset create" + + +for action in $action_list +do +if [[ $action == "reset" ]] +then + #ask for confirmation if argument is not -f --force + force_="y" + [[ $force != y ]] && read -e -p "WARNING: openvim database content will be lost!!! Continue(y/N)" force_ + [[ $force_ != y ]] && [[ $force_ != yes ]] && echo "aborted!" && $_exit + echo "deleting deployed vm" + ${DIRvim}/openvim vm-delete -f | grep -q deleted && sleep 10 #give some time to get virtual machines deleted + echo "Stopping openvim" + $DIRNAME/service-openvim.sh stop + echo "Initializing databases" + $DIRvim/database_utils/init_vim_db.sh -u vim -p vimpw + echo "Starting openvim" + $DIRNAME/service-openvim.sh start + +elif [[ $action == delete-all ]] +then + for t in `${DIRvim}/openvim tenant-list | awk '/^ *[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12} +/{printf("%s:%s",$1,$2)}'` + do + t_id=${t%%:*} + t_name=${t#*:} + [[ -z $t_id ]] && continue + export OPENVIM_TENANT=${t_id} + for what in vm image flavor port net + do + items=`${DIRvim}/openvim $what-list | awk '/^ *[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12} +/{print $1}'` + if [[ -n $items ]] + then + [[ $force == y ]] && echo deleting openvim ${what}s from tenant ${t_name} + [[ $force != y ]] && read -e -p "Delete openvim ${what}s from tenant ${t_name}?(y/N) " force_ + [[ $force_ != y ]] && [[ $force_ != yes ]] && echo "aborted!" && $_exit + for item in $items + do + echo -n "$item " + ${DIRvim}/openvim $what-delete -f $item || ! echo "fail" >&2 || $_exit 1 + done + fi + done + ${DIRvim}/openvim tenant-delete -f $t_id || ! 
echo "fail" >&2 || $_exit 1 + for what in host + do + items=`${DIRvim}/openvim $what-list | awk '/^ *[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12} +/{print $1}'` + if [[ -n $items ]] + then + [[ $force == y ]] && echo deleting openvim ${what}s + [[ $force != y ]] && read -e -p "Delete openvim ${what}s?(y/N) " force_ + [[ $force_ != y ]] && [[ $force_ != yes ]] && echo "aborted!" && $_exit + for item in $items + do + echo -n "$item " + ${DIRvim}/openvim $what-delete -f $item || ! echo "fail" >&2 || $_exit 1 + done + fi + done + + done +elif [[ $action == "delete" ]] +then + ${DIRvim}/openvim net-delete -f default || echo "fail" + ${DIRvim}/openvim net-delete -f macvtap:em1 || echo "fail" + ${DIRvim}/openvim net-delete -f shared_bridge_net || echo "fail" + ${DIRvim}/openvim net-delete -f data_net || echo "fail" + ${DIRvim}/openvim host-remove -f fake-host-0 || echo "fail" + ${DIRvim}/openvim host-remove -f fake-host-1 || echo "fail" + ${DIRvim}/openvim host-remove -f fake-host-2 || echo "fail" + ${DIRvim}/openvim host-remove -f fake-host-3 || echo "fail" + result=`openvim tenant-list TEST-admin` + vimtenant=`echo $result |gawk '{print $1}'` + #check a valid uuid is obtained + is_valid_uuid $vimtenant || ! echo "Tenant TEST-admin not found. Already delete?" >&2 || $_exit 1 + export OPENVIM_TENANT=$vimtenant + ${DIRvim}/openvim tenant-delete -f TEST-admin || echo "fail" + echo + +elif [[ $action == "create" ]] +then + echo "Adding example hosts" + ${DIRvim}/openvim host-add $DIRvim/test/hosts/host-example0.json || ! echo "fail" >&2 || $_exit 1 + ${DIRvim}/openvim host-add $DIRvim/test/hosts/host-example1.json || ! echo "fail" >&2 || $_exit 1 + ${DIRvim}/openvim host-add $DIRvim/test/hosts/host-example2.json || ! echo "fail" >&2 || $_exit 1 + ${DIRvim}/openvim host-add $DIRvim/test/hosts/host-example3.json || ! echo "fail" >&2 || $_exit 1 + echo "Adding example nets" + ${DIRvim}/openvim net-create $DIRvim/test/networks/net-example0.yaml || ! 
echo "fail" >&2 || $_exit 1 + ${DIRvim}/openvim net-create $DIRvim/test/networks/net-example1.yaml || ! echo "fail" >&2 || $_exit 1 + ${DIRvim}/openvim net-create $DIRvim/test/networks/net-example2.yaml || ! echo "fail" >&2 || $_exit 1 + ${DIRvim}/openvim net-create $DIRvim/test/networks/net-example3.yaml || ! echo "fail" >&2 || $_exit 1 + + printf "%-50s" "Creating openvim tenant 'TEST-admin': " + result=`openvim tenant-create '{"tenant": {"name":"TEST-admin", "description":"admin"}}'` + vimtenant=`echo $result |gawk '{print $1}'` + #check a valid uuid is obtained + ! is_valid_uuid $vimtenant && echo "FAIL" && echo " $result" && $_exit 1 + echo " $vimtenant" + export OPENVIM_TENANT=$vimtenant + [[ $insert_bashrc == y ]] && echo -e "\nexport OPENVIM_TENANT=$vimtenant" >> ~/.bashrc + + echo + #echo "Check virtual machines are deployed" + #vms_error=`openvim vm-list | grep ERROR | wc -l` + #vms=`openvim vm-list | wc -l` + #[[ $vms -ne 8 ]] && echo "WARNING: $vms VMs created, must be 8 VMs" >&2 && $_exit 1 + #[[ $vms_error -gt 0 ]] && echo "WARNING: $vms_error VMs with ERROR" >&2 && $_exit 1 +fi +done + +echo +echo DONE +#echo "Listing VNFs" +#openvim vnf-list +#echo "Listing scenarios" +#openvim scenario-list +#echo "Listing scenario instances" +#openvim instance-scenario-list + + diff --git a/scripts/install-floodlight.sh b/scripts/install-floodlight.sh new file mode 100755 index 0000000..506d5c1 --- /dev/null +++ b/scripts/install-floodlight.sh @@ -0,0 +1,150 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +#ONLY TESTED for Ubuntu 14.10 14.04, CentOS7 and RHEL7 +#Get needed packages, to run floodlight + +function usage(){ + echo -e "usage: sudo $0 \n Install floodlight v0.9 in ./floodlight-0.90" +} + +function install_packages(){ + [ -x /usr/bin/apt-get ] && apt-get install -y $* + [ -x /usr/bin/yum ] && yum install -y $* + + #check properly installed + for PACKAGE in $* + do + PACKAGE_INSTALLED="no" + [ -x /usr/bin/apt-get ] && dpkg -l $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes" + [ -x /usr/bin/yum ] && yum list installed $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes" + if [ "$PACKAGE_INSTALLED" = "no" ] + then + echo "failed to install package '$PACKAGE'. Revise network connectivity and try again" + exit -1 + fi + done +} + +#check root privileges and non a root user behind +[ "$1" == "-h" -o "$1" == "--help" ] && usage && exit 0 +[ "$USER" != "root" ] && echo "Needed root privileges" >&2 && usage >&2 && exit -1 +[ -z "$SUDO_USER" -o "$SUDO_USER" = "root" ] && echo "Must be runned with sudo from a non root user" >&2 && usage >&2 && exit -1 + +echo "This script will update repositories and Installing FloodLight." +echo "It will install Java and other packages, that takes a while to download" +read -e -p "Do you agree on download and install FloodLight from http://www.projectfloodlight.org upon the owner license? 
(y/N)" KK +[[ "$KK" != "y" ]] && [[ "$KK" != "yes" ]] && exit 0 + +#Discover Linux distribution +#try redhat type +[ -f /etc/redhat-release ] && _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut -d" " -f1) +#if not assuming ubuntu type +[ -f /etc/redhat-release ] || _DISTRO=$(lsb_release -is 2>/dev/null) +if [ "$_DISTRO" == "Ubuntu" ] +then + _RELEASE="14" + if ! lsb_release -rs | grep -q "14." + then + read -e -p "WARNING! Not tested Ubuntu version. Continue assuming a '$_RELEASE' type? (y/N)" KK + [ "$KK" != "y" -a "$KK" != "yes" ] && echo "Cancelled" && exit 0 + fi +elif [ "$_DISTRO" == "CentOS" ] +then + _RELEASE="7" + if ! cat /etc/redhat-release | grep -q "7." + then + read -e -p "WARNING! Not tested CentOS version. Continue assuming a '_RELEASE' type? (y/N)" KK + [ "$KK" != "y" -a "$KK" != "yes" ] && echo "Cancelled" && exit 0 + fi +elif [ "$_DISTRO" == "Red" ] +then + _RELEASE="7" + if ! cat /etc/redhat-release | grep -q "7." + then + read -e -p "WARNING! Not tested Red Hat OS version. Continue assuming a '_RELEASE' type? (y/N)" KK + [ "$KK" != "y" -a "$KK" != "yes" ] && echo "Cancelled" && exit 0 + fi +else #[ "$_DISTRO" != "Ubuntu" -a "$_DISTRO" != "CentOS" -a "$_DISTRO" != "Red" ] + _DISTRO_DISCOVER=$_DISTRO + [ -x /usr/bin/apt-get ] && _DISTRO="Ubuntu" && _RELEASE="14" + [ -x /usr/bin/yum ] && _DISTRO="CentOS" && _RELEASE="7" + read -e -p "WARNING! Not tested Linux distribution '$_DISTRO_DISCOVER '. Continue assuming a '$_DISTRO $_RELEASE' type? 
(y/N)" KK + [ "$KK" != "y" -a "$KK" != "yes" ] && echo "Cancelled" && exit 0 +fi + + + +echo ' +################################################################# +##### UPDATE REPOSITORIES ##### +#################################################################' +[ "$_DISTRO" == "Ubuntu" ] && apt-get update -y + +[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && yum check-update -y +[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && sudo yum repolist + +echo ' +################################################################# +##### DOWNLOADING AND CONFIGURE FLOODLIGHT ##### +#################################################################' + #Install Java JDK and Ant packages at the VM + [ "$_DISTRO" == "Ubuntu" ] && install_packages "build-essential default-jdk ant python-dev screen wget" #TODO revise if packages are needed apart from ant + [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_package " ant screen wget" + + #floodlight 0.9 + echo "downloading v0.90 from the oficial page" + su $SUDO_USER -c 'wget https://github.com/floodlight/floodlight/archive/v0.90.tar.gz' + su $SUDO_USER -c 'tar xvzf v0.90.tar.gz' + #floodlight 1.1 + #echo "downloading v1.1 from the oficial page" + #su $SUDO_USER -c 'wget https://github.com/floodlight/floodlight/archive/v1.1.tar.gz' + #su $SUDO_USER -c 'tar xvzf v01.1.tar.gz' + + #Configure Java environment variables. It is seem that is not needed!!! 
+ #export JAVA_HOME=/usr/lib/jvm/default-java" >> /home/${SUDO_USER}/.bashr + #export PATH=$PATH:$JAVA_HOME + #echo "export JAVA_HOME=/usr/lib/jvm/default-java" >> /home/${SUDO_USER}/.bashrc + #echo "export PATH=$PATH:$JAVA_HOME" >> /home/${SUDO_USER}/.bashrc + + #Compile floodlight + pushd ./floodlight-0.90 + #pushd ./floodlight-1.1 + su $SUDO_USER -c 'ant' + export FLOODLIGHT_PATH=$(pwd) + popd + +echo ' +################################################################# +##### CONFIGURE envioronment ##### +#################################################################' +#insert into .bashrc + echo " inserting FLOODLIGHT_PATH at .bashrc" + su $SUDO_USER -c "echo 'export FLOODLIGHT_PATH=\"${FLOODLIGHT_PATH}\"' >> ~/.bashrc" + +echo +echo "Done! you may need to logout and login again for loading the configuration" +echo " If your have installed openvim, run './openvim/scripts/service-floodlight.sh start' for starting floodlight in a screen" + + + diff --git a/scripts/install-openvim.sh b/scripts/install-openvim.sh new file mode 100755 index 0000000..fbf0fb5 --- /dev/null +++ b/scripts/install-openvim.sh @@ -0,0 +1,260 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +#ONLY TESTED for Ubuntu 14.10 14.04, CentOS7 and RHEL7 +#Get needed packages, source code and configure to run openvim +#Ask for database user and password if not provided +# $1: database user +# $2: database password + +function usage(){ + echo -e "usage: sudo $0 [db-user [db-passwd]]\n Install source code in ./openvim" +} + +function install_packages(){ + [ -x /usr/bin/apt-get ] && apt-get install -y $* + [ -x /usr/bin/yum ] && yum install -y $* + + #check properly installed + for PACKAGE in $* + do + PACKAGE_INSTALLED="no" + [ -x /usr/bin/apt-get ] && dpkg -l $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes" + [ -x /usr/bin/yum ] && yum list installed $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes" + if [ "$PACKAGE_INSTALLED" = "no" ] + then + echo "failed to install package '$PACKAGE'. Revise network connectivity and try again" + exit -1 + fi + done +} + +#check root privileges and non a root user behind +[ "$1" == "-h" -o "$1" == "--help" ] && usage && exit 0 +[ "$USER" != "root" ] && echo "Needed root privileges" >&2 && usage >&2 && exit -1 +[ -z "$SUDO_USER" -o "$SUDO_USER" = "root" ] && echo "Must be runned with sudo from a non root user" >&2 && usage >&2 && exit -1 + + +#Discover Linux distribution +#try redhat type +[ -f /etc/redhat-release ] && _DISTRO=$(cat /etc/redhat-release 2>/dev/null | cut -d" " -f1) +#if not assuming ubuntu type +[ -f /etc/redhat-release ] || _DISTRO=$(lsb_release -is 2>/dev/null) +if [ "$_DISTRO" == "Ubuntu" ] +then + _RELEASE="14" + if ! lsb_release -rs | grep -q "14." + then + read -e -p "WARNING! Not tested Ubuntu version. Continue assuming a '$_RELEASE' type? (y/N)" KK + [ "$KK" != "y" -a "$KK" != "yes" ] && echo "Cancelled" && exit 0 + fi +elif [ "$_DISTRO" == "CentOS" ] +then + _RELEASE="7" + if ! cat /etc/redhat-release | grep -q "7." + then + read -e -p "WARNING! Not tested CentOS version. 
Continue assuming a '_RELEASE' type? (y/N)" KK + [ "$KK" != "y" -a "$KK" != "yes" ] && echo "Cancelled" && exit 0 + fi +elif [ "$_DISTRO" == "Red" ] +then + _RELEASE="7" + if ! cat /etc/redhat-release | grep -q "7." + then + read -e -p "WARNING! Not tested Red Hat OS version. Continue assuming a '_RELEASE' type? (y/N)" KK + [ "$KK" != "y" -a "$KK" != "yes" ] && echo "Cancelled" && exit 0 + fi +else #[ "$_DISTRO" != "Ubuntu" -a "$_DISTRO" != "CentOS" -a "$_DISTRO" != "Red" ] + _DISTRO_DISCOVER=$_DISTRO + [ -x /usr/bin/apt-get ] && _DISTRO="Ubuntu" && _RELEASE="14" + [ -x /usr/bin/yum ] && _DISTRO="CentOS" && _RELEASE="7" + read -e -p "WARNING! Not tested Linux distribution '$_DISTRO_DISCOVER '. Continue assuming a '$_DISTRO $_RELEASE' type? (y/N)" KK + [ "$KK" != "y" -a "$KK" != "yes" ] && echo "Cancelled" && exit 0 +fi + + + +echo ' +################################################################# +##### UPDATE REPOSITORIES ##### +#################################################################' +[ "$_DISTRO" == "Ubuntu" ] && apt-get update -y + +[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && yum check-update -y +[ "$_DISTRO" == "CentOS" ] && sudo yum install -y epel-release +[ "$_DISTRO" == "Red" ] && wget http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm \ + && sudo rpm -ivh epel-release-7-5.noarch.rpm && sudo yum install -y epel-release && rm -f epel-release-7-5.noarch.rpm +[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && sudo yum repolist + + +echo ' +################################################################# +##### INSTALL REQUIRED PACKAGES ##### +#################################################################' +[ "$_DISTRO" == "Ubuntu" ] && install_packages "mysql-server" +[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "mariadb mariadb-server" + +if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] +then + #start services. 
By default CentOS does not start services + service mariadb start + service httpd start + systemctl enable mariadb + systemctl enable httpd + read -e -p "Do you want to configure mariadb (recomended if not done before) (Y/n)" KK + [ "$KK" != "n" -a "$KK" != "no" ] && mysql_secure_installation + + read -e -p "Do you want to set firewall to grant web access port 80,443 (Y/n)" KK + [ "$KK" != "n" -a "$KK" != "no" ] && + firewall-cmd --permanent --zone=public --add-service=http && + firewall-cmd --permanent --zone=public --add-service=https && + firewall-cmd --reload +fi + +#check and ask for database user password. Must be done after database instalation +[ -n "$1" ] && DBUSER=$1 +[ -z "$1" ] && DBUSER=root +[ -n "$2" ] && DBPASSWD="-p$2" +[ -z "$2" ] && DBPASSWD="" +echo -e "\nCheking database connection and ask for credentials" +while ! echo "" | mysql -u$DBUSER $DBPASSWD +do + [ -n "$logintry" ] && echo -e "\nInvalid database credentials!!!. Try again (Ctrl+c to abort)" + [ -z "$logintry" ] && echo -e "\nProvide database credentials" + read -e -p "database user? ($DBUSER) " DBUSER_ + [ -n "$DBUSER_" ] && DBUSER=$DBUSER_ + read -e -s -p "database password? 
(Enter for not using password) " DBPASSWD_ + [ -n "$DBPASSWD_" ] && DBPASSWD="-p$DBPASSWD_" + [ -z "$DBPASSWD_" ] && DBPASSWD="" + logintry="yes" +done + +echo ' +################################################################# +##### INSTALL PYTHON PACKAGES ##### +#################################################################' +[ "$_DISTRO" == "Ubuntu" ] && install_packages "python-yaml python-libvirt python-bottle python-mysqldb python-jsonschema python-paramiko python-argcomplete python-requests git screen wget" +[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "PyYAML libvirt-python MySQL-python python-jsonschema python-paramiko python-argcomplete python-requests git screen wget" + +#The only way to install python-bottle on Centos7 is with easy_install or pip +[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && easy_install -U bottle + +echo ' +################################################################# +##### DOWNLOAD SOURCE ##### +#################################################################' +su $SUDO_USER -c 'git clone https://github.com/nfvlabs/openvim.git openvim' +#Unncoment to use a concrete branch, if not main branch +#pushd openvim +#su $SUDO_USER -c 'git checkout v0.4' +#popd + +echo ' +################################################################# +##### CREATE DATABASE ##### +#################################################################' +mysqladmin -u$DBUSER $DBPASSWD create vim_db + +echo "CREATE USER 'vim'@'localhost' identified by 'vimpw';" | mysql -u$DBUSER $DBPASSWD +echo "GRANT ALL PRIVILEGES ON vim_db.* TO 'vim'@'localhost';" | mysql -u$DBUSER $DBPASSWD + +echo "vim database" +su $SUDO_USER -c './openvim/database_utils/init_vim_db.sh -u vim -p vimpw' + + +if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] +then + echo ' +################################################################# +##### CONFIGURE firewalld ##### +#################################################################' + read -e 
-p "Configure firewalld for openvimd port 9080? (Y/n)" KK + if [ "$KK" != "n" -a "$KK" != "no" ] + then + #Creates a service file for openvim + echo ' + + openvimd + openvimd service + +' > /etc/firewalld/services/openvimd.xml + #put proper permissions + pushd /etc/firewalld/services > /dev/null + restorecon openvim + chmod 640 openvim + popd > /dev/null + #Add the openvim service to the default zone permanently and reload the firewall configuration + firewall-cmd --permanent --add-service=openvim > /dev/null + firewall-cmd --reload > /dev/null + echo "done." + else + echo "skipping." + fi +fi + +echo ' +################################################################# +##### CONFIGURE openvim CLIENTS ##### +#################################################################' +#creates a link at ~/bin +su $SUDO_USER -c 'mkdir -p ~/bin' +rm -f /home/${SUDO_USER}/bin/openvim +rm -f /home/${SUDO_USER}/bin/openflow +rm -f /home/${SUDO_USER}/bin/service-openvim +rm -f /home/${SUDO_USER}/bin/initopenvim +rm -f /home/${SUDO_USER}/bin/service-floodlight +rm -f /home/${SUDO_USER}/bin/service-opendaylight +rm -f /home/${SUDO_USER}/bin/get_dhcp_lease.sh +ln -s ${PWD}/openvim/openvim /home/${SUDO_USER}/bin/openvim +ln -s ${PWD}/openvim/openflow /home/${SUDO_USER}/bin/openflow +ln -s ${PWD}/openvim/scripts/service-openvim.sh /home/${SUDO_USER}/bin/service-openvim +ln -s ${PWD}/openvim/scripts/initopenvim.sh /home/${SUDO_USER}/bin/initopenvim +ln -s ${PWD}/openvim/scripts/service-floodlight.sh /home/${SUDO_USER}/bin/service-floodlight +ln -s ${PWD}/openvim/scripts/service-opendaylight.sh /home/${SUDO_USER}/bin/service-opendaylight +ln -s ${PWD}/openvim/scripts/get_dhcp_lease.sh /home/${SUDO_USER}/bin/get_dhcp_lease.sh + +#insert /home//bin in the PATH +#skiped because normally this is done authomatically when ~/bin exist +#if ! 
su $SUDO_USER -c 'echo $PATH' | grep -q "/home/${SUDO_USER}/bin" +#then +# echo " inserting /home/$SUDO_USER/bin in the PATH at .bashrc" +# su $SUDO_USER -c 'echo "PATH=\$PATH:/home/\${USER}/bin" >> ~/.bashrc' +#fi + +#configure arg-autocomplete for this user +#in case of minmal instalation this package is not installed by default +[[ "$_DISTRO" == "CentOS" || "$_DISTRO" == "Red" ]] && yum install -y bash-completion +#su $SUDO_USER -c 'mkdir -p ~/.bash_completion.d' +su $SUDO_USER -c 'activate-global-python-argcomplete --user' +if ! grep -q bash_completion.d/python-argcomplete.sh /home/${SUDO_USER}/.bashrc +then + echo " inserting .bash_completion.d/python-argcomplete.sh execution at .bashrc" + su $SUDO_USER -c 'echo ". /home/${USER}/.bash_completion.d/python-argcomplete.sh" >> ~/.bashrc' +fi + +echo +echo "Done! you may need to logout and login again for loading the configuration" +echo " Run './openvim/scripts/service-openvim.sh start' for starting openvim in a screen" + + + diff --git a/scripts/openvim-report.sh b/scripts/openvim-report.sh new file mode 100755 index 0000000..951634d --- /dev/null +++ b/scripts/openvim-report.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +#It generates a report for debugging + +DIRNAME=$(readlink -f ${BASH_SOURCE[0]}) +DIRNAME=$(dirname $DIRNAME ) +OVCLIENT=$DIRNAME/../openvim + +#get screen log files at the beginning +echo +echo "-------------------------------" +echo "log files" +echo "-------------------------------" +echo +echo "cat $DIRNAME/../logs/openvim.log*" +cat $DIRNAME/../logs/openvim.log* +echo +echo + +#get version +echo +echo "-------------------------------" +echo "version" +echo "-------------------------------" +echo "cat $DIRNAME/../openvimd.py|grep ^__version__" +cat $DIRNAME/../openvimd.py|grep ^__version__ +echo +echo + +#get configuration files +echo "-------------------------------" +echo "Configuration files" +echo "-------------------------------" +echo "cat $DIRNAME/../openvimd.cfg" +cat $DIRNAME/../openvimd.cfg +echo + +#get list of items +for verbose in "" "-vvv" +do + echo "-------------------------------" + echo "OPENVIM$verbose" + echo "-------------------------------" + echo "$OVCLIENT config" + $OVCLIENT config + echo "-------------------------------" + echo "$OVCLIENT tenant-list $verbose" + $OVCLIENT tenant-list $verbose + echo "-------------------------------" + echo "$OVCLIENT host-list $verbose" + $OVCLIENT host-list $verbose + echo "-------------------------------" + echo "$OVCLIENT net-list $verbose" + $OVCLIENT net-list $verbose + echo "-------------------------------" + echo "$OVCLIENT port-list $verbose" + $OVCLIENT port-list $verbose + echo "-------------------------------" + echo "$OVCLIENT flavor-list $verbose" + $OVCLIENT flavor-list $verbose + echo "-------------------------------" + echo "$OVCLIENT image-list $verbose" + $OVCLIENT image-list $verbose + echo "-------------------------------" + echo "$OVCLIENT vm-list $verbose" + $OVCLIENT vm-list $verbose + echo "-------------------------------" + echo + +done +echo diff --git 
a/scripts/service-floodlight.sh b/scripts/service-floodlight.sh new file mode 100755 index 0000000..b7130a6 --- /dev/null +++ b/scripts/service-floodlight.sh @@ -0,0 +1,163 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +#launch floodlight inside a screen. It assumes shell variable $FLOODLIGHT_PATH +# contain the installation path + + +DIRNAME=$(readlink -f ${BASH_SOURCE[0]}) +DIRNAME=$(dirname $DIRNAME ) +DIR_OM=$(dirname $DIRNAME ) + +function usage(){ + echo -e "Usage: $0 start|stop|restart|status" + echo -e " Launch|Removes|Restart|Getstatus floodlight on a screen" + echo -e " Shell variable FLOODLIGHT_PATH must indicate floodlight installationpath" +} + +function kill_pid(){ + #send TERM signal and wait 5 seconds and send KILL signal ir still running + #PARAMS: $1: PID of process to terminate + kill $1 #send TERM signal + WAIT=5 + while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1 + do + sleep 1 + WAIT=$((WAIT-1)) + [ $WAIT -eq 0 ] && echo -n "sending SIGKILL... 
" && kill -9 $1 #kill when count reach 0 + done + echo "done" + +} + +#obtain parameters +#om_action="start" #uncoment to get a default action +for param in $* +do + [ "$param" == "start" -o "$param" == "stop" -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param && continue + [ "$param" == "openflow" -o "$param" == "flow" -o "$param" == "floodlight" ] && continue + [ "$param" == "-h" -o "$param" == "--help" ] && usage && exit 0 + + #if none of above, reach this line because a param is incorrect + echo "Unknown param '$param' type $0 --help" >&2 + exit -1 +done + +#check action is provided +[ -z "$om_action" ] && usage >&2 && exit -1 + + om_cmd="floodlight.jar" + om_name="floodlight" + + #obtain PID of program + component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'` + + #status + if [ "$om_action" == "status" ] + then + [ -n "$component_id" ] && echo " $om_name running, pid $component_id" + [ -z "$component_id" ] && echo " $om_name stopped" + fi + + #stop + if [ "$om_action" == "stop" -o "$om_action" == "restart" ] + then + #terminates program + [ -n "$component_id" ] && echo -n " stopping $om_name ... " && kill_pid $component_id + component_id="" + #terminates screen + if screen -wipe | grep -Fq .flow + then + screen -S flow -p 0 -X stuff "exit\n" + sleep 1 + fi + fi + + #start + if [ "$om_action" == "start" -o "$om_action" == "restart" ] + then + [[ -z $FLOODLIGHT_PATH ]] && echo "FLOODLIGHT_PATH shell variable must indicate floodlight installation path" >&2 && exit -1 + #calculates log file name + logfile="" + mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/openflow.log || echo "can not create logs directory $DIR_OM/logs" + #check already running + [ -n "$component_id" ] && echo " $om_name is already running. Skipping" && continue + #create screen if not created + echo -n " starting $om_name ... " + if ! 
screen -wipe | grep -Fq .flow + then + pushd ${FLOODLIGHT_PATH} > /dev/null + screen -dmS flow bash + sleep 1 + popd > /dev/null + else + echo -n " using existing screen 'flow' ... " + screen -S flow -p 0 -X log off + screen -S flow -p 0 -X stuff "cd ${FLOODLIGHT_PATH}\n" + sleep 1 + fi + #move old log file index one number up and log again in index 0 + if [[ -n $logfile ]] + then + for index in 8 7 6 5 4 3 2 1 + do + [[ -f ${logfile}.${index} ]] && mv ${logfile}.${index} ${logfile}.$((index+1)) + done + [[ -f ${logfile} ]] && mv ${logfile} ${logfile}.1 + screen -S flow -p 0 -X logfile ${logfile} + screen -S flow -p 0 -X log on + fi + #launch command to screen + screen -S flow -p 0 -X stuff "java -Dlogback.configurationFile=${DIRNAME}/flow-logback.xml -jar ./target/floodlight.jar -cf ${DIRNAME}/flow.properties_v0.9\n" + #check if is running + [[ -n $logfile ]] && timeout=120 #2 minute + [[ -z $logfile ]] && timeout=20 + while [[ $timeout -gt 0 ]] + do + #check if is running + #echo timeout $timeout + #if ! ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd} + log_lines=0 + [[ -n $logfile ]] && log_lines=`head ${logfile} | wc -l` + component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'` + if [[ -z $component_id ]] + then #process not started or finished + [[ $log_lines -ge 2 ]] && echo -n "ERROR, it has exited." && break + #started because writted serveral lines at log so report error + fi + [[ -n $logfile ]] && grep -q "Listening for switch connections" ${logfile} && sleep 1 && break + sleep 1 + timeout=$((timeout -1)) + done + if [[ -n $logfile ]] && [[ $timeout == 0 ]] + then + echo -n "timeout!" + else + echo -n "running on 'screen -x flow'." 
+ fi + [[ -n $logfile ]] && echo " Logging at '${logfile}'" || echo + fi + + + + diff --git a/scripts/service-opendaylight.sh b/scripts/service-opendaylight.sh new file mode 100755 index 0000000..3c060c6 --- /dev/null +++ b/scripts/service-opendaylight.sh @@ -0,0 +1,164 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +#launch opendaylight inside a screen. It assumes shell variable $OPENDAYLIGHT_PATH +# contain the installation path + + +DIRNAME=$(readlink -f ${BASH_SOURCE[0]}) +DIRNAME=$(dirname $DIRNAME ) +DIR_OM=$(dirname $DIRNAME ) + +function usage(){ + echo -e "Usage: $0 start|stop|restart|status" + echo -e " Launch|Removes|Restart|Getstatus opendaylight on a screen" + echo -e " Shell variable OPENDAYDLIGHT_PATH must indicate opendaylight installation path" +} + +function kill_pid(){ + #send TERM signal and wait 5 seconds and send KILL signal ir still running + #PARAMS: $1: PID of process to terminate + kill $1 #send TERM signal + WAIT=5 + while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1 + do + sleep 1 + WAIT=$((WAIT-1)) + [ $WAIT -eq 0 ] && echo -n "sending SIGKILL... 
" && kill -9 $1 #kill when count reach 0 + done + echo "done" + +} + +#obtain parameters +#om_action="start" #uncoment to get a default action +for param in $* +do + [ "$param" == "start" -o "$param" == "stop" -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param && continue + [ "$param" == "openflow" -o "$param" == "flow" -o "$param" == "opendaylight" ] && continue + [ "$param" == "-h" -o "$param" == "--help" ] && usage && exit 0 + + #if none of above, reach this line because a param is incorrect + echo "Unknown param '$param' type $0 --help" >&2 + exit -1 +done + +#check action is provided +[ -z "$om_action" ] && usage >&2 && exit -1 + + om_cmd="./karaf" + om_name="opendaylight" + + #obtain PID of program + component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'` + + #status + if [ "$om_action" == "status" ] + then + [ -n "$component_id" ] && echo " $om_name running, pid $component_id" + [ -z "$component_id" ] && echo " $om_name stopped" + fi + + #stop + if [ "$om_action" == "stop" -o "$om_action" == "restart" ] + then + #terminates program + [ -n "$component_id" ] && echo -n " stopping $om_name ... " && kill_pid $component_id + component_id="" + #terminates screen + if screen -wipe | grep -Fq .flow + then + screen -S flow -p 0 -X stuff "exit\n" + sleep 1 + fi + fi + + #start + if [ "$om_action" == "start" -o "$om_action" == "restart" ] + then + [[ -z $OPENDAYDLIGHT_PATH ]] && echo "OPENDAYDLIGHT_PATH shell variable must indicate opendaylight installation path" >&2 && exit -1 + #calculates log file name + logfile="" + mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/openflow.log && logfile_console=$DIR_OM/logs/openflow_console.log || echo "can not create logs directory $DIR_OM/logs" + #check already running + [ -n "$component_id" ] && echo " $om_name is already running. Skipping" && continue + #create screen if not created + echo -n " starting $om_name ... " + if ! 
screen -wipe | grep -Fq .flow + then + pushd ${OPENDAYDLIGHT_PATH}/bin > /dev/null + screen -dmS flow bash + sleep 1 + popd > /dev/null + else + echo -n " using existing screen 'flow' ... " + screen -S flow -p 0 -X log off + screen -S flow -p 0 -X stuff "cd ${OPENDAYDLIGHT_PATH}/bin\n" + sleep 1 + fi + #move old log file index one number up and log again in index 0 + if [[ -n $logfile ]] + then + for index in .9 .8 .7 .6 .5 .4 .3 .2 .1 "" + do + rm -f ${logfile}${index} + ln -s ${OPENDAYDLIGHT_PATH}/data/log/karaf.log${index} ${logfile}${index} + done + rm -rf ${logfile_console} + screen -S flow -p 0 -X logfile ${logfile_console} + screen -S flow -p 0 -X log on + fi + #launch command to screen + screen -S flow -p 0 -X stuff "${om_cmd}\n" + #check if is running + [[ -n $logfile ]] && timeout=120 #2 minute + [[ -z $logfile ]] && timeout=20 + while [[ $timeout -gt 0 ]] + do + #check if is running + #echo timeout $timeout + #if ! ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd} + log_lines=0 + [[ -n $logfile_console ]] && log_lines=`head ${logfile_console} | wc -l` + component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'` + if [[ -z $component_id ]] + then #process not started or finished + [[ $log_lines -ge 2 ]] && echo -n "ERROR, it has exited." && break + #started because writted serveral lines at log so report error + fi + [[ -n $logfile_console ]] && grep -q "Listening on port" ${logfile_console} && sleep 1 && break + sleep 1 + timeout=$((timeout -1)) + done + if [[ -n $logfile_console ]] && [[ $timeout == 0 ]] + then + echo -n "timeout!" + else + echo -n "running on 'screen -x flow'." 
+ fi + [[ -n $logfile ]] && echo " Logging at '${logfile}'" || echo + fi + + + + diff --git a/scripts/service-openvim.sh b/scripts/service-openvim.sh new file mode 100755 index 0000000..aaa9a27 --- /dev/null +++ b/scripts/service-openvim.sh @@ -0,0 +1,180 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +#launch openvim (andr floodlight) inside a screen. +#It assumes a relative path '..' 
for openvim +#for floodlight, the variable FLOODLIGHT_PATH indicates the installation path + + +DIRNAME=$(readlink -f ${BASH_SOURCE[0]}) +DIRNAME=$(dirname $DIRNAME ) +DIR_OM=$(dirname $DIRNAME ) +#[[ -z $FLOODLIGHT_PATH ]] && FLOODLIGHT_PATH=$(dirname ${DIR_OM})/floodlight-1.1 +#[[ -z $FLOODLIGHT_PATH ]] && FLOODLIGHT_PATH=$(dirname ${DIR_OM})/floodlight-0.90 + +function usage(){ + echo -e "Usage: $0 [openvim/vim] [floodlight/flow] start|stop|restart|status" + echo -e " Launch|Removes|Restart|Getstatus openvim (by default) or/and floodlight on a screen" + echo -e " For floodlight variable FLOODLIGHT_PATH must indicate installation path" +} + +function kill_pid(){ + #send TERM signal and wait 5 seconds and send KILL signal ir still running + #PARAMS: $1: PID of process to terminate + kill $1 #send TERM signal + WAIT=5 + while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1 + do + sleep 1 + WAIT=$((WAIT-1)) + [ $WAIT -eq 0 ] && echo -n "sending SIGKILL... " && kill -9 $1 #kill when count reach 0 + done + echo "done" + +} + +#obtain parameters +om_list="" +#om_action="start" #uncoment to get a default action +for param in $* +do + [ "$param" == "start" -o "$param" == "stop" -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param && continue + [ "$param" == "openvim" -o "$param" == "vim" ] && om_list="$om_list vim" && continue + [ "$param" == "openmano" -o "$param" == "mano" ] && continue #allow and ingore for backwards compatibility + [ "$param" == "openflow" -o "$param" == "flow" -o "$param" == "floodlight" ] && om_list="flow $om_list" && continue + [ "$param" == "-h" -o "$param" == "--help" ] && usage && exit 0 + #note flow that it must be the first element, because openvim relay on this + + #if none of above, reach this line because a param is incorrect + echo "Unknown param '$param' type $0 --help" >&2 + exit -1 +done + +#check action is provided +[ -z "$om_action" ] && usage >&2 && exit -1 + +#if no componenets supplied assume all +[ 
-z "$om_list" ] && om_list="vim" + +for om_component in $om_list +do + [ "${om_component}" == "flow" ] && om_cmd="floodlight.jar" && om_name="floodlight" && om_dir=$FLOODLIGHT_PATH + [ "${om_component}" == "vim" ] && om_cmd="openvimd.py" && om_name="openvim " && om_dir=${DIR_OM} + #obtain PID of program + component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'` + + #status + if [ "$om_action" == "status" ] + then + [ -n "$component_id" ] && echo " $om_name running, pid $component_id" + [ -z "$component_id" ] && echo " $om_name stopped" + fi + + #stop + if [ "$om_action" == "stop" -o "$om_action" == "restart" ] + then + #terminates program + [ -n "$component_id" ] && echo -n " stopping $om_name ... " && kill_pid $component_id + component_id="" + #terminates screen + if screen -wipe | grep -Fq .$om_component + then + screen -S $om_component -p 0 -X stuff "exit\n" + sleep 1 + fi + fi + + #start + if [ "$om_action" == "start" -o "$om_action" == "restart" ] + then + [[ -z $FLOODLIGHT_PATH ]] && [[ $om_component == flow ]] && + echo "FLOODLIGHT_PATH shell variable must indicate floodlight installation path" >&2 && exit -1 + #calculates log file name + logfile="" + mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/open${om_component}.log || echo "can not create logs directory $DIR_OM/logs" + #check already running + [ -n "$component_id" ] && echo " $om_name is already running. Skipping" && continue + #create screen if not created + echo -n " starting $om_name ... " + if ! screen -wipe | grep -Fq .${om_component} + then + pushd ${om_dir} > /dev/null + screen -dmS ${om_component} bash + sleep 1 + popd > /dev/null + else + echo -n " using existing screen '${om_component}' ... 
" + screen -S ${om_component} -p 0 -X log off + screen -S ${om_component} -p 0 -X stuff "cd ${om_dir}\n" + sleep 1 + fi + #move old log file index one number up and log again in index 0 + if [[ -n $logfile ]] + then + for index in 8 7 6 5 4 3 2 1 + do + [[ -f ${logfile}.${index} ]] && mv ${logfile}.${index} ${logfile}.$((index+1)) + done + [[ -f ${logfile} ]] && mv ${logfile} ${logfile}.1 + screen -S ${om_component} -p 0 -X logfile ${logfile} + screen -S ${om_component} -p 0 -X log on + fi + #launch command to screen + #[ "${om_component}" != "flow" ] && screen -S ${om_component} -p 0 -X stuff "cd ${DIR_OM}/open${om_component}\n" && sleep 1 + [ "${om_component}" == "flow" ] && screen -S flow -p 0 -X stuff "java -Dlogback.configurationFile=${DIRNAME}/flow-logback.xml -jar ./target/floodlight.jar -cf ${DIRNAME}/flow.properties_v0.9\n" + #[ "${om_component}" == "flow" ] && screen -S flow -p 0 -X stuff "java -Dlogback.configurationFile=${DIRNAME}/flow-logback.xml -jar ./target/floodlight.jar -cf ${DIRNAME}/flow.properties_v1.1\n" && sleep 5 + [ "${om_component}" != "flow" ] && screen -S ${om_component} -p 0 -X stuff "./${om_cmd}\n" + #check if is running + [[ -n $logfile ]] && timeout=120 #2 minute + [[ -z $logfile ]] && timeout=20 + while [[ $timeout -gt 0 ]] + do + #check if is running + #echo timeout $timeout + #if ! ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd} + log_lines=0 + [[ -n $logfile ]] && log_lines=`head ${logfile} | wc -l` + component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'` + if [[ -z $component_id ]] + then #process not started or finished + [[ $log_lines -ge 2 ]] && echo -n "ERROR, it has exited." 
&& break + #started because writted serveral lines at log so report error + fi + [[ -n $logfile ]] && [[ ${om_component} == flow ]] && grep -q "Listening for switch connections" ${logfile} && sleep 1 && break + [[ -n $logfile ]] && [[ ${om_component} != flow ]] && grep -q "open${om_component}d ready" ${logfile} && break + sleep 1 + timeout=$((timeout -1)) + done + if [[ -n $logfile ]] && [[ $timeout == 0 ]] + then + echo -n "timeout!" + else + echo -n "running on 'screen -x ${om_component}'." + fi + [[ -n $logfile ]] && echo " Logging at '${logfile}'" || echo + fi +done + + + + diff --git a/templates/flavor.yaml b/templates/flavor.yaml new file mode 100644 index 0000000..3a240b6 --- /dev/null +++ b/templates/flavor.yaml @@ -0,0 +1,67 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + + +flavor: + name: flavor-name + description: flavor-description + # cloud type requirements + ram: 1024 # Memory in MB. Ignored if provided 'memory' at 'extended' + vcpus: 2 # Number of cpus. Ignored if provided at 'extended' + + # NFV type requirements + # allocating EXCLUSIVE resoureces in the same NUMA node. + extended: # optional + processor_ranking: 100 # minimal processor family. Not used in current version + numas: # list of numa set. 
Only one supported in current version + - memory: 8 # GByte of huge pages at this numa + + #Choose among one of "cores", "paired-threads", "threads" + paired-threads: 5 # Cores with two paired hyper threads + #paired-threads-id: [[0,1],[2,3],[4,5],[6,7],[8,9]] # Guess pinning. By default follows incremental order + #threads: 10 # threads awereness of the hyperthreading + ##threads-id: [0,1,2,3,4,5,6,7,8,9] #Optional. Guess pinning + #cores: 5 # Complete cores, without hyperthreading. VIM ensures the other paired thread is idle + ##cores-id: [0,1,2,3,4] # Optional. Guess pinning of cores + + #Optional: Dataplane needed interfaces + interfaces: + - name: xe0 # Optional. User friendly name + vpci: "0000:00:10.0" # Optional. Guess PCI + bandwidth: 10 Gbps # Needed minimun bandwidth + dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriovi, but exclusive and without vlan tag) + - name: xe1 + vpci: "0000:00:11.0" + bandwidth: 10 Gbps + dedicated: "no" + + #Optional: List of extra devices + devices: # order determines device letter asignation (hda, hdb, ...) + - type: disk # "disk","cdrom","xml","usb" + imageRef: 37598e34-ccb3-11e4-a996-52540030594e # UUID of an image, only for disk,cdrom,xml + # vpci: "0000:00:03.0" # Optional, not for disk or cdrom + # xml: 'Only for type xml: a XML described device xml text. Do not use single quotes inside + # The following words, if found, will be replaced: + # __file__ by image path, (imageiRef must be provided) + # __format__ by qcow2 or raw (imageRef must be provided) + # __dev__ by device letter (b, c, d ...) + # __vpci__ by vpci (vpci must be provided) + diff --git a/templates/image.yaml b/templates/image.yaml new file mode 100644 index 0000000..05045b4 --- /dev/null +++ b/templates/image.yaml @@ -0,0 +1,33 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + + +image: + name: image-name + description: image-description + path: /image-path/at/the/compute-node/storage/image + metadata: # Optional extra metadata of the image. All fields are optional + use_incremental: "yes" # "yes" by default, "no" Deployed using a incremental qcow2 image + vpci: "0000:10:00.0" #requiered PCI at guess + os_distro: win # operating system distribution + os_type: windows # operating system type "linux" by default, "windows" + os_version: "7" # operating system version + bus: "ide" # By default "virtio" for linux, "ide" for windows diff --git a/templates/network.yaml b/templates/network.yaml new file mode 100644 index 0000000..02fa42b --- /dev/null +++ b/templates/network.yaml @@ -0,0 +1,32 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +network: + name: network-name + type: data # "bridge_data" or "bridge_man" for control plane, "data" for dataplane, or "ptp" for point to point dataplane + provider:physical: null # needed a value for a bridge_data, bridge_man, + # can be: + # bridge:iface_name : attached to a iface_name host bridge interface + # macvtap:iface_name : attached to a iface_name host physical interface + # default : attached to the default host interface + # null : for data or ptp types. (To be changed in future versions) + shared: true # true, false: if shared it will consider by OPENVIM an EXTERNAL network available at OPENMANO + diff --git a/templates/port.yaml b/templates/port.yaml new file mode 100644 index 0000000..14297ed --- /dev/null +++ b/templates/port.yaml @@ -0,0 +1,29 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + + +port: + name: port-name + #network_id: network uuid this port must be attached + type: external + binding:vlan: 103 # if provided packets to/from this switch port will be vlan tagged + binding:switch_port": "Te/01" # provide the openflow switch port + #mac_address: 34:44:45:67:78:12 # mac address of the element behind this port. As normally is not know, leave commented diff --git a/templates/server.yaml b/templates/server.yaml new file mode 100644 index 0000000..72f4564 --- /dev/null +++ b/templates/server.yaml @@ -0,0 +1,76 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +server: + name: vm-name # name + description: vm-description # Optional user description + imageRef: 24640fe0-8a9e-11e4-a236-52540056c317 # valid image uuid + flavorRef: e22dd8a8-9ca5-11e4-99b6-52540056c317 # valid flavor uuid + networks: # List of control plane interfaces, Optional + - name: mgmt0 #friendly user name + vpci: "0000:00:0a.0" #Optional guess PCI + uuid: c09b2f1a-8a9e-11e4-a236-52540056c317 # valid network uuid + #mac_address: #guess concrete mac address, by default one is asigned + #model: "virtio","e1000","ne2k_pci","pcnet","rtl8139", By default auto, normally virtio + start: "yes" # "yes","no","paused". By default it is started upon creted + hostId: ec656bc4-9ca5-11e4-99b6-52540056c317 #prefered host where to allocate + + # allocating EXCLUSIVE resoureces in the same NUMA node. + # If provided, it overrides extended values at flavor + extended: # optional + processor_ranking: 100 # minimal processor family. Not used in current version + numas: # list of numa set. Only one supported in current version + - memory: 8 # GByte of huge pages at this numa + + #Choose among one of "cores", "paired-threads", "threads" + paired-threads: 5 # Cores with two paired hyper threads + #paired-threads-id: [[0,1],[2,3],[4,5],[6,7],[8,9]] # Guess pinning. By default follows incremental order + #threads: 10 # threads awereness of the hyperthreading + ##threads-id: [0,1,2,3,4,5,6,7,8,9] #Optional. Guess pinning + #cores: 5 # Complete cores, without hyperthreading. VIM ensures the other paired thread is idle + ##cores-id: [0,1,2,3,4] # Optional. Guess pinning of cores + + #Optional: Dataplane needed interfaces + interfaces: + - name: xe0 # Optional. User friendly name + vpci: "0000:00:10.0" # Optional. 
Guess PCI + bandwidth: 10 Gbps # Needed minimun bandwidth + dedicated: "yes" # "yes"(passthrough), "no"(sriov with vlan tags), "yes:sriov"(sriovi, but exclusive and without vlan tag) + # you can attach this inteface to a network at server creation or later with a port attach + #uuid: 41bcac58-9be9-11e4-b1b6-52540056c317 # Attach the interface to this network uuid + - name: xe1 + vpci: "0000:00:11.0" + bandwidth: 10 Gbps + dedicated: "no" + #mac_address: #guess concrete mac address, by default one is asigned. Not possible for dedicated: "yes" + + #Optional: List of extra devices + devices: # order determines device letter asignation (hda, hdb, ...) + - type: disk # "disk","cdrom","xml","usb" + imageRef: 37598e34-ccb3-11e4-a996-52540030594e # UUID of an image, only for disk,cdrom,xml + # vpci: "0000:00:03.0" # Optional, not for disk or cdrom + # xml: 'Only for type xml: a XML described device xml text. Do not use single quotes inside + # The following words, if found, will be replaced: + # __file__ by image path, (imageiRef must be provided) + # __format__ by qcow2 or raw (imageRef must be provided) + # __dev__ by device letter (b, c, d ...) + # __vpci__ by vpci (vpci must be provided) + diff --git a/templates/tenant.yaml b/templates/tenant.yaml new file mode 100644 index 0000000..94c6b24 --- /dev/null +++ b/templates/tenant.yaml @@ -0,0 +1,25 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + + +tenant: + name: tenant-name + description: tenant-description diff --git a/test/2VM_2I.sh b/test/2VM_2I.sh new file mode 100755 index 0000000..96f8100 --- /dev/null +++ b/test/2VM_2I.sh @@ -0,0 +1,286 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# +#author Alfonso Tierno +# +#script to test openvim with the creation of flavors and interfaces +#using images already inserted +# + +echo " Creates 1 flavor, 3 nets, 2 VMs (US) + Interfaces: 2 sriov, 1 passthrough, 1 sriov dedicated + Test mac address allocation, 1 VM with direct network attach, + another VM with network attach after creation" +echo +echo -n "type network model to test network model (empty to skip)? 
" +read model +echo "Press enter to continue" +read kk +if [ -n "$model" ] +then + model="model: '${model}'" +fi +#echo _${model}_ + +#image to load +imagePath=/mnt/powervault/virtualization/vnfs/os/US1404.qcow2 +#image to load as an extra disk, can be any +imagePath_extra=/mnt/powervault/virtualization/vnfs/os/US1404user.qcow2 +#default network to use +network_eth0=default + + +DIRNAME=`dirname $0` + +function del_rubbish(){ + echo "Press enter to delete the deployed things" + read kk + [ -n "$DEL_server" ] && ${DIRNAME}/test_openvim.py -f del server $DEL_server + [ -n "$DEL_network" ] && ${DIRNAME}/test_openvim.py -f del network $DEL_network + [ -n "$DEL_flavor" ] && ${DIRNAME}/test_openvim.py -f del flavor $DEL_flavor + [ -n "$DEL_image" ] && ${DIRNAME}/test_openvim.py -f del image $DEL_image + rm -f kk.out +} + +function proccess_out(){ # action_text field to retrieve + if egrep -q "\"error\"" kk.out + then + echo "failed to" $1 + cat kk.out + del_rubbish + exit -1 + fi + if [ -z "$2" ] ; then pattern='"id"' ; else pattern="$2" ; fi + value=`egrep "$pattern" kk.out ` + value=${value##* \"} + value=${value%\"*} + if [[ -z "$value" ]] + then + echo "not found the field" $2 + cat kk.out + del_rubbish + exit -1 + fi +} + +#proccess_out "insert server tidgen1" '^ "id"' +#echo $value +#exit 0 + + + +echo -n "get ${imagePath##*/} image: " +${DIRNAME}/test_openvim.py -F"path=$imagePath" images > kk.out +proccess_out "get ${imagePath##*/}" +echo $value +image1=$value + + +echo -n "get ${imagePath_extra##*/} image: " +${DIRNAME}/test_openvim.py -F"path=$imagePath_extra" images > kk.out +proccess_out "get ${imagePath_extra##*/}" +echo $value +image2=$value + + +echo -n "get ${network_eth0} network: " +${DIRNAME}/test_openvim.py -F"name=$network_eth0" network > kk.out +proccess_out "get ${network_eth0} network" +echo $value +network_eth0=$value + + +echo -n "insert flavor: " +${DIRNAME}/test_openvim.py new flavor ' +--- +flavor: + name: 5PTh_8G_2I + description: flavor to 
test openvim + extended: + processor_ranking: 205 + numas: + - memory: 8 + paired-threads: 5 + interfaces: + - name: xe0 + dedicated: "yes" + bandwidth: "10 Gbps" + vpci: "0000:00:10.0" + #mac_address: "10:10:10:10:10:10" + - name: xe1 + dedicated: "yes:sriov" + bandwidth: "10 Gbps" + vpci: "0000:00:11.0" + mac_address: "10:10:10:10:10:11" +' > kk.out +proccess_out "insert flavor" +echo $value +flavor1=$value +DEL_flavor="$DEL_flavor $flavor1" + + +echo -n "insert ptp net1: " +${DIRNAME}/test_openvim.py new network ' +--- +network: + name: network-xe0 + type: ptp +' > kk.out +proccess_out "insert network 0" +echo $value +network0=$value +DEL_network="$DEL_network $value" + + +echo -n "insert ptp net2: " +${DIRNAME}/test_openvim.py new network ' +--- +network: + name: network-xe1 + type: ptp +' > kk.out +proccess_out "insert network 1" +echo $value +network1=$value +DEL_network="$DEL_network $value" + + + +echo -n "insert bridge network net2: " +${DIRNAME}/test_openvim.py new network ' +--- +network: + name: network-net2 + type: bridge_data +' > kk.out +proccess_out "insert network 2" +echo $value +network2=$value +DEL_network="$DEL_network $value" + +echo -n "insert test VM 1: " +${DIRNAME}/test_openvim.py new server " +--- +server: + name: test_VM1 + descrition: US or tidgen with 1 SRIOV 1 PASSTHROUGH + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: mgmt0 + vpci: '0000:00:0a.0' + uuid: ${network_eth0} + mac_address: '10:10:10:10:10:12' + - name: eth0 + vpci: '0000:00:0b.0' + uuid: '$network2' + mac_address: '10:10:10:10:10:13' + ${model} +" > kk.out +proccess_out "insert test VM 2" '^ "id"' +echo $value +server1=$value +DEL_server="$DEL_server $value" + + + +echo -n "insert test VM 2: " +${DIRNAME}/test_openvim.py new server " +--- +server: + name: test_VM2 + descrition: US or tidgen with direct network attach + imageRef: '$image1' + flavorRef: '$flavor1' + extended: + processor_ranking: 205 + numas: + - memory: 8 + threads: 10 + interfaces: + - 
name: xe0 + dedicated: 'yes' + bandwidth: '10 Gbps' + vpci: '0000:00:10.0' + #mac_address: '10:10:10:10:aa:10' + uuid: '$network0' + - name: xe1 + dedicated: 'no' + bandwidth: '7 Gbps' + vpci: '0000:00:11.0' + mac_address: '10:10:10:10:aa:11' + uuid: '$network1' + devices: + - type: disk + imageRef: '$image2' + networks: + - name: mgmt0 + vpci: '0000:00:0a.0' + uuid: ${network_eth0} + mac_address: '10:10:10:10:aa:12' + - name: eth0 + vpci: '0000:00:0b.0' + uuid: '$network2' + mac_address: '10:10:10:10:aa:13' + ${model} +" > kk.out +proccess_out "insert test VM 2" '^ "id"' +echo $value +server2=$value +DEL_server="$DEL_server $value" + +echo -n "get xe0 iface uuid from tidgen1: " +${DIRNAME}/test_openvim.py -F "device_id=${server1}&name=xe0" ports > kk.out +proccess_out "get xe0 uuid port from tidgen1" +echo $value +iface_xe0=$value + + +echo -n "get xe1 iface uuid from tidgen1: " +${DIRNAME}/test_openvim.py -F "device_id=${server1}&name=xe1" ports > kk.out +proccess_out "get xe1 uuid port from tidgen1" +echo $value +iface_xe1=$value + + +echo -n "attach xe0 from tidgen1 to network " +${DIRNAME}/test_openvim.py -f edit ports $iface_xe0 "network_id: $network0" > kk.out +proccess_out "attach xe0 from tidgen1 to network" +echo "ok" + + +echo -n "attach xe1 from tidgen1 to network " +${DIRNAME}/test_openvim.py -f edit ports $iface_xe1 "network_id: $network1" > kk.out +proccess_out "attach xe1 from tidgen1 to network" +echo "ok" + + +echo +echo finish. Check connections!! +echo click return to delete all deployed things +echo + +del_rubbish +exit 0 + diff --git a/test/2VM_2I_2VLAN.sh b/test/2VM_2I_2VLAN.sh new file mode 100755 index 0000000..1e0f626 --- /dev/null +++ b/test/2VM_2I_2VLAN.sh @@ -0,0 +1,276 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# +#author Alfonso Tierno +# +#script to test openvim with the creation of flavors and interfaces +#using images already inserted +# + +echo " Creates 1 flavor, 3 nets, 2 VMs (US) + Interfaces: 2 sriov, 1 passthrough, 1 sriov dedicated + Test mac address allocation, 1 VM with direct network attach, + another VM with network attach after creation" +echo +echo "Press enter to continue" +read kk + +#image to load +imagePath=/mnt/powervault/virtualization/vnfs/os/US1404.qcow2 +#image to load as an extra disk, can be any +imagePath_extra=/mnt/powervault/virtualization/vnfs/os/US1404user.qcow2 +#default network to use +network_eth0=default + + +DIRNAME=`dirname $0` + +function del_rubbish(){ + echo "Press enter to delete the deployed things" + read kk + [ -n "$DEL_server" ] && ${DIRNAME}/test_openvim.py -f del server $DEL_server + [ -n "$DEL_network" ] && ${DIRNAME}/test_openvim.py -f del network $DEL_network + [ -n "$DEL_flavor" ] && ${DIRNAME}/test_openvim.py -f del flavor $DEL_flavor + [ -n "$DEL_image" ] && ${DIRNAME}/test_openvim.py -f del image $DEL_image + rm -f kk.out +} + +function proccess_out(){ # action_text field to retrieve + if egrep -q "\"error\"" kk.out + then + echo "failed to" $1 + cat kk.out + del_rubbish + exit -1 + fi + if [ -z "$2" ] ; then pattern='"id"' ; else pattern="$2" ; fi + 
value=`egrep "$pattern" kk.out ` + value=${value##* \"} + value=${value%\"*} + if [[ -z "$value" ]] + then + echo "not found the field" $2 + cat kk.out + del_rubbish + exit -1 + fi +} + +#proccess_out "insert server tidgen1" '^ "id"' +#echo $value +#exit 0 + + + +echo -n "get ${imagePath##*/} image: " +${DIRNAME}/test_openvim.py -F"path=$imagePath" images > kk.out +proccess_out "get ${imagePath##*/}" +echo $value +image1=$value + + +echo -n "get ${imagePath_extra##*/} image: " +${DIRNAME}/test_openvim.py -F"path=$imagePath_extra" images > kk.out +proccess_out "get ${imagePath_extra##*/}" +echo $value +image2=$value + + +echo -n "get ${network_eth0} network: " +${DIRNAME}/test_openvim.py -F"name=$network_eth0" network > kk.out +proccess_out "get ${network_eth0} network" +echo $value +network_eth0=$value + + +echo -n "insert flavor: " +${DIRNAME}/test_openvim.py new flavor ' +--- +flavor: + name: 5PTh_8G_2I + description: flavor to test openvim + extended: + processor_ranking: 205 + numas: + - memory: 8 + paired-threads: 5 + interfaces: + - name: xe0 + dedicated: "yes" + bandwidth: "10 Gbps" + vpci: "0000:00:10.0" + #mac_address: "10:10:10:10:10:10" + - name: xe1 + dedicated: "no" + bandwidth: "10 Gbps" + vpci: "0000:00:11.0" + mac_address: "10:10:10:10:10:11" +' > kk.out +proccess_out "insert flavor" +echo $value +flavor1=$value +DEL_flavor="$DEL_flavor $flavor1" + + +echo -n "insert ptp net1: " +${DIRNAME}/test_openvim.py new network ' +--- +network: + name: network-xe0 + type: ptp +' > kk.out +proccess_out "insert network 0" +echo $value +network0=$value +DEL_network="$DEL_network $value" + + +echo -n "insert ptp net2: " +${DIRNAME}/test_openvim.py new network ' +--- +network: + name: network-xe1 + type: ptp +' > kk.out +proccess_out "insert network 1" +echo $value +network1=$value +DEL_network="$DEL_network $value" + + + +echo -n "insert bridge network net2: " +${DIRNAME}/test_openvim.py new network ' +--- +network: + name: network-net2 + type: bridge_data +' > 
kk.out +proccess_out "insert network 2" +echo $value +network2=$value +DEL_network="$DEL_network $value" + +echo -n "insert test VM 1: " +${DIRNAME}/test_openvim.py new server " +--- +server: + name: test_VM1 + descrition: US or tidgen with 1 SRIOV 1 PASSTHROUGH + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: mgmt0 + vpci: '0000:00:0a.0' + uuid: ${network_eth0} + mac_address: '10:10:10:10:10:12' + - name: eth0 + vpci: '0000:00:0b.0' + uuid: '$network2' + mac_address: '10:10:10:10:10:13' +" > kk.out +proccess_out "insert test VM 2" '^ "id"' +echo $value +server1=$value +DEL_server="$DEL_server $value" + + + +echo -n "insert test VM 2: " +${DIRNAME}/test_openvim.py new server " +--- +server: + name: test_VM2 + descrition: US or tidgen with direct network attach + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: mgmt0 + vpci: '0000:00:0a.0' + uuid: ${network_eth0} + mac_address: '10:10:10:10:aa:12' + - name: eth0 + vpci: '0000:00:0b.0' + uuid: '$network2' + mac_address: '10:10:10:10:aa:13' + extended: + processor_ranking: 205 + numas: + - memory: 8 + threads: 10 + interfaces: + - name: xe0 + dedicated: 'yes' + bandwidth: '10 Gbps' + vpci: '0000:00:10.0' + #mac_address: '10:10:10:10:aa:10' + uuid: '$network0' + - name: xe1 + dedicated: 'no' + bandwidth: '7 Gbps' + vpci: '0000:00:11.0' + mac_address: '10:10:10:10:aa:11' + uuid: '$network1' + devices: + - type: disk + imageRef: '$image2' +" > kk.out +proccess_out "insert test VM 2" '^ "id"' +echo $value +server2=$value +DEL_server="$DEL_server $value" + +echo -n "get xe0 iface uuid from tidgen1: " +${DIRNAME}/test_openvim.py -F "device_id=${server1}&name=xe0" ports > kk.out +proccess_out "get xe0 uuid port from tidgen1" +echo $value +iface_xe0=$value + + +echo -n "get xe1 iface uuid from tidgen1: " +${DIRNAME}/test_openvim.py -F "device_id=${server1}&name=xe1" ports > kk.out +proccess_out "get xe1 uuid port from tidgen1" +echo $value +iface_xe1=$value + + +echo -n "attach xe0 from 
tidgen1 to network " +${DIRNAME}/test_openvim.py -f edit ports $iface_xe0 "network_id: $network0" > kk.out +proccess_out "attach xe0 from tidgen1 to network" +echo "ok" + + +echo -n "attach xe1 from tidgen1 to network " +${DIRNAME}/test_openvim.py -f edit ports $iface_xe1 "network_id: $network1" > kk.out +proccess_out "attach xe1 from tidgen1 to network" +echo "ok" + + +echo +echo finsish. Check connections!! +echo + +del_rubbish +exit 0 + diff --git a/test/2VM_NoHuge.sh b/test/2VM_NoHuge.sh new file mode 100755 index 0000000..9326ec2 --- /dev/null +++ b/test/2VM_NoHuge.sh @@ -0,0 +1,199 @@ +#!/bin/bash +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# +#author Alfonso Tierno +# +#script to test openvim with the creation of flavors and interfaces +#in cloud style +# + +echo " Creates 1 flavor, 3 nets, 2 VMs (US) + WITHOUT huge pages, nor NUMA assigment + network attach after creation" +echo +echo "Press enter to continue" +read kk + +#image to load +imagePath=/mnt/powervault/virtualization/vnfs/os/US1404.qcow2 +#image to load as an extra disk, can be any +imagePath_extra=/mnt/powervault/virtualization/vnfs/os/US1404user.qcow2 +#default network to use +network_eth0=default + + +DIRNAME=`dirname $0` + +function del_rubbish(){ + echo "Press enter to delete the deployed things" + read kk + [ -n "$DEL_server" ] && ${DIRNAME}/test_openvim.py -f del server $DEL_server + [ -n "$DEL_network" ] && ${DIRNAME}/test_openvim.py -f del network $DEL_network + [ -n "$DEL_flavor" ] && ${DIRNAME}/test_openvim.py -f del flavor $DEL_flavor + [ -n "$DEL_image" ] && ${DIRNAME}/test_openvim.py -f del image $DEL_image + rm -f kk.out +} + +function proccess_out(){ # action_text field to retrieve + if egrep -q "\"error\"" kk.out + then + echo "failed to" $1 + cat kk.out + del_rubbish + exit -1 + fi + if [ -z "$2" ] ; then pattern='"id"' ; else pattern="$2" ; fi + value=`egrep "$pattern" kk.out ` + value=${value##* \"} + value=${value%\"*} + if [[ -z "$value" ]] + then + echo "not found the field" $2 + cat kk.out + del_rubbish + exit -1 + fi +} + +#proccess_out "insert server tidgen1" '^ "id"' +#echo $value +#exit 0 + + + +echo -n "get ${imagePath##*/} image: " +${DIRNAME}/test_openvim.py -F"path=$imagePath" images > kk.out +proccess_out "get ${imagePath##*/}" +echo $value +image1=$value + + +echo -n "get ${imagePath_extra##*/} image: " +${DIRNAME}/test_openvim.py -F"path=$imagePath_extra" images > kk.out +proccess_out "get ${imagePath_extra##*/}" +echo $value +image2=$value + + +echo -n "get ${network_eth0} network: " 
+${DIRNAME}/test_openvim.py -F"name=$network_eth0" network > kk.out +proccess_out "get ${network_eth0} network" +echo $value +network_eth0=$value + + +echo -n "insert flavor: " +${DIRNAME}/test_openvim.py new flavor ' +--- +flavor: + name: CloudVM + description: normal cloud image with 1G, 1core + ram: 1024 + vcpus: 1 +' > kk.out +proccess_out "insert flavor" +echo $value +flavor1=$value +DEL_flavor="$DEL_flavor $flavor1" + + +echo +echo "Press enter to continue" +read kk + +echo -n "insert bridge network net2: " +${DIRNAME}/test_openvim.py new network ' +--- +network: + name: network-bridge + type: bridge_data +' > kk.out +proccess_out "insert network 2" +echo $value +network2=$value +DEL_network="$DEL_network $value" + +echo -n "insert test VM 1: " +${DIRNAME}/test_openvim.py new server " +--- +server: + name: test_VM1 + descrition: US 1 core + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: mgmt0 + vpci: '0000:00:0a.0' + uuid: ${network_eth0} + mac_address: '10:10:10:10:10:12' + - name: eth0 + vpci: '0000:00:0b.0' + uuid: '$network2' + mac_address: '10:10:10:10:10:13' +" > kk.out +proccess_out "insert test VM 2" '^ "id"' +echo $value +server1=$value +DEL_server="$DEL_server $value" + + +echo +echo "Press enter to continue" +read kk + +echo -n "insert test VM 2: " +${DIRNAME}/test_openvim.py new server " +--- +server: + name: test_VM2 + descrition: US 1G 1core + imageRef: '$image1' + flavorRef: '$flavor1' + ram: 1024 + vcpus: 1 + networks: + - name: mgmt0 + vpci: '0000:00:0a.0' + uuid: ${network_eth0} + mac_address: '10:10:10:10:aa:12' + - name: eth0 + vpci: '0000:00:0b.0' + uuid: '$network2' + mac_address: '10:10:10:10:aa:13' + extended: + devices: + - type: disk + imageRef: '$image2' +" > kk.out +proccess_out "insert test VM 2" '^ "id"' +echo $value +server2=$value +DEL_server="$DEL_server $value" + +echo +echo finsish. Check connections!! 
+echo + +del_rubbish +exit 0 + diff --git a/test/flavors/2Th_2G.yaml b/test/flavors/2Th_2G.yaml new file mode 100644 index 0000000..3df9573 --- /dev/null +++ b/test/flavors/2Th_2G.yaml @@ -0,0 +1,37 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + + +{ +"flavor":{ + "name":"2Th_2G", + "description":"2 Threads, 2Gbyte, only 2 management interfaces", + "extended":{ + "processor_ranking":205, + "numas":[ + { + "memory":2, + "threads":2 + } + ] + } +} +} diff --git a/test/flavors/5PTh_8G_2I.yaml b/test/flavors/5PTh_8G_2I.yaml new file mode 100644 index 0000000..9ffeb49 --- /dev/null +++ b/test/flavors/5PTh_8G_2I.yaml @@ -0,0 +1,39 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# 5Th_8G_2P +flavor: + name: 5PTh_8G_2I + description: 5Pair threads (10tthreads) 8GB memory and 2 dataplane passthrough interfaces + extended: + processor_ranking: 100 + numas: + - memory: 8 + paired-threads: 5 + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + bandwidth: 10 Gbps + dedicated: "yes" + - name: xe1 + vpci: "0000:00:11.0" + bandwidth: 10 Gbps + dedicated: "yes" diff --git a/test/flavors/new_flavor.yaml b/test/flavors/new_flavor.yaml new file mode 100644 index 0000000..fab23d5 --- /dev/null +++ b/test/flavors/new_flavor.yaml @@ -0,0 +1,45 @@ + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +{ + "${}":[ + "${name} flavor name", + "${description} flavor description (${name})", + "${processor_ranking} processnor ranking (100)", + "${memory} memory in GB (2)", + "${threads} threads needed (2)" + ], + +"flavor":{ + "name":"${name}", + "description":"${description}", + "extended":{ + "processor_ranking":"${processor_ranking int}", + "numas":[ + { + "memory":"${memory int}", + "threads":"${threads int}" + } + ] + } +} +} diff --git a/test/get_params.sh b/test/get_params.sh new file mode 100755 index 0000000..0a0f6df --- /dev/null +++ b/test/get_params.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# +#author Alfonso Tierno +# +#script that obtain the parameters from the configuration file +#it is a bit unsafe because a comment in the yaml configuration file +#can wrong this script + +#get params from configuration file + +[ -z "$1" ] && echo "usage: $0 [openvim_cfg_file]" && exit + +OPENVIM_PORT=`grep http_port: $1` +OPENVIM_PORT=${OPENVIM_PORT#http_port:} +OPENVIM_PORT=${OPENVIM_PORT%%#*} +OPENVIM_PORT=`eval echo ${OPENVIM_PORT}` # remove white spaces + +OPENVIM_ADMIN_PORT=`grep http_admin_port: $1` +OPENVIM_ADMIN_PORT=${OPENVIM_ADMIN_PORT#http_admin_port:} +OPENVIM_ADMIN_PORT=${OPENVIM_ADMIN_PORT%%#*} +OPENVIM_ADMIN_PORT=`eval echo ${OPENVIM_ADMIN_PORT}` # remove white spaces + +OPENVIM_HOST=`grep http_host: $1` +OPENVIM_HOST=${OPENVIM_HOST#http_host:} +OPENVIM_HOST=${OPENVIM_HOST%%#*} +OPENVIM_HOST=`eval echo ${OPENVIM_HOST}` # remove white spaces + +OPENVIM_OF_IP=`grep of_controller_ip: $1` +OPENVIM_OF_IP=${OPENVIM_OF_IP#of_controller_ip:} +OPENVIM_OF_IP=${OPENVIM_OF_IP%%#*} +OPENVIM_OF_IP=`eval echo ${OPENVIM_OF_IP}` # remove white spaces + +OPENVIM_OF_PORT=`grep of_controller_port: $1` +OPENVIM_OF_PORT=${OPENVIM_OF_PORT#of_controller_port:} +OPENVIM_OF_PORT=${OPENVIM_OF_PORT%%#*} +OPENVIM_OF_PORT=`eval echo ${OPENVIM_OF_PORT}` # remove white spaces + +OPENVIM_OF_DPID=`grep of_controller_dpid: $1` +OPENVIM_OF_DPID=${OPENVIM_OF_DPID#of_controller_dpid:} +OPENVIM_OF_DPID=${OPENVIM_OF_DPID%%#*} +OPENVIM_OF_DPID=`eval echo ${OPENVIM_OF_DPID}` # remove white spaces + diff --git a/test/hosts/host-example0.json b/test/hosts/host-example0.json new file mode 100644 index 0000000..b5dc5ee --- /dev/null +++ b/test/hosts/host-example0.json @@ -0,0 +1,465 @@ +{ + "host":{ + "name": "fake-host-0", + "user": "user", + "password": "password", + "ip_name": "fakehost0" + }, +"host-data": +{ + "name": "fake-host-0", + "ranking": 300, + "description": "fake 
host 0 for test mode", + "ip_name": "fakehost0", + "features": "lps,dioc,hwsv,ht,64b,tlbps", + "user": "user", + "password": "password", + "numas": [ + { + "cores": [ + { + "core_id": 0, + "thread_id": 12, + "status": "noteligible" + }, + { + "core_id": 0, + "thread_id": 36, + "status": "noteligible" + }, + { + "core_id": 1, + "thread_id": 13 + }, + { + "core_id": 1, + "thread_id": 37 + }, + { + "core_id": 2, + "thread_id": 14 + }, + { + "core_id": 2, + "thread_id": 38 + }, + { + "core_id": 3, + "thread_id": 15 + }, + { + "core_id": 3, + "thread_id": 39 + }, + { + "core_id": 4, + "thread_id": 16 + }, + { + "core_id": 4, + "thread_id": 40 + }, + { + "core_id": 5, + "thread_id": 17 + }, + { + "core_id": 5, + "thread_id": 41 + }, + { + "core_id": 6, + "thread_id": 18 + }, + { + "core_id": 6, + "thread_id": 42 + }, + { + "core_id": 7, + "thread_id": 19 + }, + { + "core_id": 7, + "thread_id": 43 + }, + { + "core_id": 8, + "thread_id": 20 + }, + { + "core_id": 8, + "thread_id": 44 + }, + { + "core_id": 9, + "thread_id": 21 + }, + { + "core_id": 9, + "thread_id": 45 + }, + { + "core_id": 10, + "thread_id": 22 + }, + { + "core_id": 10, + "thread_id": 46 + }, + { + "core_id": 11, + "thread_id": 23 + }, + { + "core_id": 11, + "thread_id": 47 + } + ], + "numa_socket": 1, + "hugepages": 28, + "memory": 32 + }, + { + "cores": [ + { + "core_id": 0, + "thread_id": 0, + "status": "noteligible" + }, + { + "core_id": 0, + "thread_id": 24, + "status": "noteligible" + }, + { + "core_id": 1, + "thread_id": 1 + }, + { + "core_id": 1, + "thread_id": 25 + }, + { + "core_id": 2, + "thread_id": 2 + }, + { + "core_id": 2, + "thread_id": 26 + }, + { + "core_id": 3, + "thread_id": 3 + }, + { + "core_id": 3, + "thread_id": 27 + }, + { + "core_id": 4, + "thread_id": 4 + }, + { + "core_id": 4, + "thread_id": 28 + }, + { + "core_id": 5, + "thread_id": 5 + }, + { + "core_id": 5, + "thread_id": 29 + }, + { + "core_id": 6, + "thread_id": 6 + }, + { + "core_id": 6, + "thread_id": 30 + }, + { + 
"core_id": 7, + "thread_id": 7 + }, + { + "core_id": 7, + "thread_id": 31 + }, + { + "core_id": 8, + "thread_id": 8 + }, + { + "core_id": 8, + "thread_id": 32 + }, + { + "core_id": 9, + "thread_id": 9 + }, + { + "core_id": 9, + "thread_id": 33 + }, + { + "core_id": 10, + "thread_id": 10 + }, + { + "core_id": 10, + "thread_id": 34 + }, + { + "core_id": 11, + "thread_id": 11 + }, + { + "core_id": 11, + "thread_id": 35 + } + ], + "interfaces": [ + { + "source_name": "eth8", + "Mbps": 10000, + "pci": "0000:08:00.0", + "switch_port": "port0/2", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "a6:12:27:bd:2d:ef", + "pci": "0000:08:10.0", + "source_name": 0, + "vlan": 106 + }, + { + "mac": "be:8a:40:58:cf:de", + "pci": "0000:08:10.2", + "source_name": 1, + "vlan": 107 + }, + { + "mac": "c6:bf:7a:30:13:55", + "pci": "0000:08:10.4", + "source_name": 2, + "vlan": 103 + }, + { + "mac": "be:32:50:ef:ea:4e", + "pci": "0000:08:10.6", + "source_name": 3, + "vlan": 105 + }, + { + "mac": "ae:36:60:bc:41:78", + "pci": "0000:08:11.0", + "source_name": 4, + "vlan": 102 + }, + { + "mac": "52:22:3a:99:67:4e", + "pci": "0000:08:11.2", + "source_name": 5, + "vlan": 104 + }, + { + "mac": "0a:b8:00:2c:8a:b2", + "pci": "0000:08:11.4", + "source_name": 6, + "vlan": 100 + }, + { + "mac": "e2:f6:70:83:a3:ec", + "pci": "0000:08:11.6", + "source_name": 7, + "vlan": 101 + } + ], + "mac": "90:e2:ba:0c:36:4c" + }, + { + "source_name": "eth17", + "Mbps": 10000, + "pci": "0000:08:00.1", + "switch_port": "port0/3", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "92:0e:27:0c:ad:eb", + "pci": "0000:08:10.1", + "source_name": 0, + "vlan": 105 + }, + { + "mac": "be:99:65:e8:98:a3", + "pci": "0000:08:10.3", + "source_name": 1, + "vlan": 106 + }, + { + "mac": "8a:3b:54:42:88:b2", + "pci": "0000:08:10.5", + "source_name": 2, + "vlan": 101 + }, + { + "mac": "c6:5d:8a:c5:05:f7", + "pci": "0000:08:10.7", + "source_name": 3, + "vlan": 103 + }, + { + "mac": 
"96:bd:61:02:4f:d6", + "pci": "0000:08:11.1", + "source_name": 4, + "vlan": 102 + }, + { + "mac": "22:5d:85:2c:1b:fd", + "pci": "0000:08:11.3", + "source_name": 5, + "vlan": 104 + }, + { + "mac": "e6:7f:8a:48:bc:26", + "pci": "0000:08:11.5", + "source_name": 6, + "vlan": 100 + }, + { + "mac": "2e:4a:e6:68:18:fa", + "pci": "0000:08:11.7", + "source_name": 7, + "vlan": 107 + } + ], + "mac": "90:e2:ba:0c:36:4d" + }, + { + "source_name": "eth26", + "Mbps": 10000, + "pci": "0000:06:00.0", + "switch_port": "port0/0", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "02:79:15:02:ad:cc", + "pci": "0000:06:10.0", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "06:81:ef:de:ec:6b", + "pci": "0000:06:10.2", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "f2:4e:96:f3:8e:73", + "pci": "0000:06:10.4", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "e2:86:91:23:c5:76", + "pci": "0000:06:10.6", + "source_name": 3, + "vlan": 105 + }, + { + "mac": "52:12:a0:77:cc:47", + "pci": "0000:06:11.0", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "ca:17:4a:c4:cb:bf", + "pci": "0000:06:11.2", + "source_name": 5, + "vlan": 102 + }, + { + "mac": "de:13:4c:5d:70:e8", + "pci": "0000:06:11.4", + "source_name": 6, + "vlan": 101 + }, + { + "mac": "0a:5f:d2:db:7f:e2", + "pci": "0000:06:11.6", + "source_name": 7, + "vlan": 100 + } + ], + "mac": "90:e2:ba:0b:6b:b4" + }, + { + "source_name": "eth35", + "Mbps": 10000, + "pci": "0000:06:00.1", + "switch_port": "port0/1", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "a2:08:55:fe:c5:db", + "pci": "0000:06:10.1", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "7a:dd:c7:46:2a:91", + "pci": "0000:06:10.3", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "92:a8:1d:4e:cc:a8", + "pci": "0000:06:10.5", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "02:1a:d6:87:c4:cc", + "pci": "0000:06:10.7", + "source_name": 3, + "vlan": 105 + }, + { + "mac": "ea:1b:8b:2a:da:9a", + "pci": 
"0000:06:11.1", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "aa:c3:fe:e5:f2:96", + "pci": "0000:06:11.3", + "source_name": 5, + "vlan": 102 + }, + { + "mac": "e2:66:1f:00:b3:45", + "pci": "0000:06:11.5", + "source_name": 6, + "vlan": 101 + }, + { + "mac": "da:3c:d8:a3:f4:e0", + "pci": "0000:06:11.7", + "source_name": 7, + "vlan": 100 + } + ], + "mac": "90:e2:ba:0b:6b:b5" + } + ], + "numa_socket": 0, + "hugepages": 28, + "memory": 32 + } + ] +} +} + diff --git a/test/hosts/host-example1.json b/test/hosts/host-example1.json new file mode 100644 index 0000000..766aaef --- /dev/null +++ b/test/hosts/host-example1.json @@ -0,0 +1,698 @@ +{ +"host":{ + "name": "fake-host-1", + "user": "user", + "password": "password", + "ip_name": "fakehost1" +}, +"host-data": +{ + "name": "fake-host-1", + "ranking": 300, + "description": "fake host 1 for test mode", + "ip_name": "fakehost1", + "features": "lps,dioc,hwsv,ht,64b,tlbps", + "user": "user", + "password": "password", + "numas": [ + { + "cores": [ + { + "core_id": 0, + "thread_id": 1, + "status": "noteligible" + }, + { + "core_id": 0, + "thread_id": 25, + "status": "noteligible" + }, + { + "core_id": 1, + "thread_id": 3 + }, + { + "core_id": 1, + "thread_id": 27 + }, + { + "core_id": 2, + "thread_id": 5 + }, + { + "core_id": 2, + "thread_id": 29 + }, + { + "core_id": 3, + "thread_id": 7 + }, + { + "core_id": 3, + "thread_id": 31 + }, + { + "core_id": 4, + "thread_id": 9 + }, + { + "core_id": 4, + "thread_id": 33 + }, + { + "core_id": 5, + "thread_id": 11 + }, + { + "core_id": 5, + "thread_id": 35 + }, + { + "core_id": 6, + "thread_id": 13 + }, + { + "core_id": 6, + "thread_id": 37 + }, + { + "core_id": 7, + "thread_id": 15 + }, + { + "core_id": 7, + "thread_id": 39 + }, + { + "core_id": 8, + "thread_id": 17 + }, + { + "core_id": 8, + "thread_id": 41 + }, + { + "core_id": 9, + "thread_id": 19 + }, + { + "core_id": 9, + "thread_id": 43 + }, + { + "core_id": 10, + "thread_id": 21 + }, + { + "core_id": 10, + "thread_id": 
45 + }, + { + "core_id": 11, + "thread_id": 23 + }, + { + "core_id": 11, + "thread_id": 47 + } + ], + "interfaces": [ + { + "source_name": "p2p1", + "Mbps": 10000, + "pci": "0000:44:00.0", + "switch_port": "port0/4", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "4e:2e:a5:ab:7f:4b", + "pci": "0000:44:10.0", + "source_name": 0, + "vlan": 102 + }, + { + "mac": "a6:43:f6:4f:b6:ea", + "pci": "0000:44:10.2", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "b2:fe:29:81:71:0a", + "pci": "0000:44:10.4", + "source_name": 2, + "vlan": 104 + }, + { + "mac": "aa:9d:13:62:80:e5", + "pci": "0000:44:10.6", + "source_name": 3, + "vlan": 101 + }, + { + "mac": "3a:30:7f:c6:67:04", + "pci": "0000:44:11.0", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "c6:20:45:8f:30:08", + "pci": "0000:44:11.2", + "source_name": 5, + "vlan": 100 + }, + { + "mac": "66:3d:09:bc:d6:32", + "pci": "0000:44:11.4", + "source_name": 6, + "vlan": 105 + }, + { + "mac": "46:e5:0c:f6:c4:ae", + "pci": "0000:44:11.6", + "source_name": 7, + "vlan": 106 + } + ], + "mac": "a0:36:9f:35:ed:14" + }, + { + "source_name": "p2p2", + "Mbps": 10000, + "pci": "0000:44:00.1", + "switch_port": "port0/5", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "ea:b6:5d:98:d9:86", + "pci": "0000:44:10.1", + "source_name": 0, + "vlan": 102 + }, + { + "mac": "36:4c:31:5a:5f:66", + "pci": "0000:44:10.3", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "42:a0:5d:fa:8d:8c", + "pci": "0000:44:10.5", + "source_name": 2, + "vlan": 100 + }, + { + "mac": "c6:50:23:c5:53:ff", + "pci": "0000:44:10.7", + "source_name": 3, + "vlan": 104 + }, + { + "mac": "12:2e:6c:79:a3:cc", + "pci": "0000:44:11.1", + "source_name": 4, + "vlan": 101 + }, + { + "mac": "be:f0:8e:7b:50:46", + "pci": "0000:44:11.3", + "source_name": 5, + "vlan": 105 + }, + { + "mac": "6e:6e:15:44:1c:1d", + "pci": "0000:44:11.5", + "source_name": 6, + "vlan": 106 + }, + { + "mac": "9a:e8:1e:e6:af:31", + "pci": "0000:44:11.7", 
+ "source_name": 7, + "vlan": 107 + } + ], + "mac": "a0:36:9f:35:ed:16" + }, + { + "source_name": "p3p2", + "Mbps": 10000, + "pci": "0000:43:00.1", + "switch_port": "port0/7", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "2e:b7:dc:aa:9a:35", + "pci": "0000:43:10.1", + "source_name": 0, + "vlan": 103 + }, + { + "mac": "42:71:39:50:92:a5", + "pci": "0000:43:10.3", + "source_name": 1, + "vlan": 100 + }, + { + "mac": "22:ac:a0:64:a4:00", + "pci": "0000:43:10.5", + "source_name": 2, + "vlan": 101 + }, + { + "mac": "7e:81:e0:56:c0:aa", + "pci": "0000:43:10.7", + "source_name": 3, + "vlan": 102 + }, + { + "mac": "de:87:8d:ed:81:1a", + "pci": "0000:43:11.1", + "source_name": 4, + "vlan": 104 + }, + { + "mac": "fe:29:e9:da:45:df", + "pci": "0000:43:11.3", + "source_name": 5, + "vlan": 105 + }, + { + "mac": "b6:e7:67:46:22:de", + "pci": "0000:43:11.5", + "source_name": 6, + "vlan": 106 + }, + { + "mac": "5e:9e:10:7a:66:e9", + "pci": "0000:43:11.7", + "source_name": 7, + "vlan": 107 + } + ], + "mac": "a0:36:9f:35:ed:0a" + }, + { + "source_name": "p3p1", + "Mbps": 10000, + "pci": "0000:43:00.0", + "switch_port": "port0/6", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "5e:2e:05:67:7b:05", + "pci": "0000:43:10.0", + "source_name": 0, + "vlan": 102 + }, + { + "mac": "fa:a4:01:f0:7e:d4", + "pci": "0000:43:10.2", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "32:aa:7b:9a:e3:6b", + "pci": "0000:43:10.4", + "source_name": 2, + "vlan": 100 + }, + { + "mac": "fa:8d:b3:2f:a6:59", + "pci": "0000:43:10.6", + "source_name": 3, + "vlan": 101 + }, + { + "mac": "f2:92:f3:f0:ba:06", + "pci": "0000:43:11.0", + "source_name": 4, + "vlan": 104 + }, + { + "mac": "c2:8a:1b:55:13:52", + "pci": "0000:43:11.2", + "source_name": 5, + "vlan": 105 + }, + { + "mac": "5a:55:8e:f4:a5:4f", + "pci": "0000:43:11.4", + "source_name": 6, + "vlan": 107 + }, + { + "mac": "8a:21:a1:bb:d4:b2", + "pci": "0000:43:11.6", + "source_name": 7, + "vlan": 106 + } + 
], + "mac": "a0:36:9f:35:ed:08" + } + ], + "numa_socket": 1, + "hugepages": 28, + "memory": 32 + }, + { + "cores": [ + { + "core_id": 0, + "thread_id": 0, + "status": "noteligible" + }, + { + "core_id": 0, + "thread_id": 24, + "status": "noteligible" + }, + { + "core_id": 1, + "thread_id": 2 + }, + { + "core_id": 1, + "thread_id": 26 + }, + { + "core_id": 2, + "thread_id": 4 + }, + { + "core_id": 2, + "thread_id": 28 + }, + { + "core_id": 3, + "thread_id": 6 + }, + { + "core_id": 3, + "thread_id": 30 + }, + { + "core_id": 4, + "thread_id": 8 + }, + { + "core_id": 4, + "thread_id": 32 + }, + { + "core_id": 5, + "thread_id": 10 + }, + { + "core_id": 5, + "thread_id": 34 + }, + { + "core_id": 6, + "thread_id": 12 + }, + { + "core_id": 6, + "thread_id": 36 + }, + { + "core_id": 7, + "thread_id": 14 + }, + { + "core_id": 7, + "thread_id": 38 + }, + { + "core_id": 8, + "thread_id": 16 + }, + { + "core_id": 8, + "thread_id": 40 + }, + { + "core_id": 9, + "thread_id": 18 + }, + { + "core_id": 9, + "thread_id": 42 + }, + { + "core_id": 10, + "thread_id": 20 + }, + { + "core_id": 10, + "thread_id": 44 + }, + { + "core_id": 11, + "thread_id": 22 + }, + { + "core_id": 11, + "thread_id": 46 + } + ], + "interfaces": [ + { + "source_name": "p5p1", + "Mbps": 10000, + "pci": "0000:04:00.0", + "switch_port": "port0/8", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "46:28:47:80:9d:fe", + "pci": "0000:04:10.0", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "5a:5d:cb:52:93:66", + "pci": "0000:04:10.2", + "source_name": 1, + "vlan": 105 + }, + { + "mac": "42:42:2e:2d:44:9d", + "pci": "0000:04:10.4", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "4a:ef:c2:a5:c8:ad", + "pci": "0000:04:10.6", + "source_name": 3, + "vlan": 107 + }, + { + "mac": "0a:0c:b1:8b:da:c6", + "pci": "0000:04:11.0", + "source_name": 4, + "vlan": 102 + }, + { + "mac": "46:e7:85:ad:1f:3e", + "pci": "0000:04:11.2", + "source_name": 5, + "vlan": 103 + }, + { + "mac": 
"5e:a4:8f:8f:b0:53", + "pci": "0000:04:11.4", + "source_name": 6, + "vlan": 100 + }, + { + "mac": "d2:76:f2:21:fb:42", + "pci": "0000:04:11.6", + "source_name": 7, + "vlan": 101 + } + ], + "mac": "a0:36:9f:33:09:6c" + }, + { + "source_name": "p5p2", + "Mbps": 10000, + "pci": "0000:04:00.1", + "switch_port": "port0/9", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "f2:c8:15:59:9d:9e", + "pci": "0000:04:10.1", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "ca:30:b2:c2:d9:d6", + "pci": "0000:04:10.3", + "source_name": 1, + "vlan": 105 + }, + { + "mac": "1a:03:de:f7:f5:db", + "pci": "0000:04:10.5", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "ca:6d:e0:c3:1e:f5", + "pci": "0000:04:10.7", + "source_name": 3, + "vlan": 107 + }, + { + "mac": "aa:35:ab:70:29:5c", + "pci": "0000:04:11.1", + "source_name": 4, + "vlan": 102 + }, + { + "mac": "02:b3:c4:a6:12:df", + "pci": "0000:04:11.3", + "source_name": 5, + "vlan": 103 + }, + { + "mac": "52:4f:13:67:d5:1f", + "pci": "0000:04:11.5", + "source_name": 6, + "vlan": 100 + }, + { + "mac": "3a:7f:b2:58:61:81", + "pci": "0000:04:11.7", + "source_name": 7, + "vlan": 101 + } + ], + "mac": "a0:36:9f:33:09:6e" + }, + { + "source_name": "p7p1", + "Mbps": 10000, + "pci": "0000:06:00.0", + "switch_port": "port0/10", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "6e:51:ee:3c:66:fa", + "pci": "0000:06:10.0", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "26:0c:7a:db:9b:7e", + "pci": "0000:06:10.2", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "66:b0:59:cf:6b:fc", + "pci": "0000:06:10.4", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "f6:52:a5:ff:97:b9", + "pci": "0000:06:10.6", + "source_name": 3, + "vlan": 105 + }, + { + "mac": "4a:5c:b2:6a:25:15", + "pci": "0000:06:11.0", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "86:0f:85:c2:42:b1", + "pci": "0000:06:11.2", + "source_name": 5, + "vlan": 102 + }, + { + "mac": "b2:3d:24:1d:3e:40", + "pci": 
"0000:06:11.4", + "source_name": 6, + "vlan": 101 + }, + { + "mac": "ca:3f:fc:7b:32:36", + "pci": "0000:06:11.6", + "source_name": 7, + "vlan": 100 + } + ], + "mac": "a0:36:9f:33:0c:d4" + }, + { + "source_name": "p7p2", + "Mbps": 10000, + "pci": "0000:06:00.1", + "switch_port": "port0/11", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "76:28:ae:b5:31:25", + "pci": "0000:06:10.1", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "5e:fa:d1:f7:eb:44", + "pci": "0000:06:10.3", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "56:d3:6a:b0:af:2e", + "pci": "0000:06:10.5", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "3e:75:f3:00:aa:ba", + "pci": "0000:06:10.7", + "source_name": 3, + "vlan": 105 + }, + { + "mac": "5e:fd:b0:e2:59:47", + "pci": "0000:06:11.1", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "ee:a7:2f:ab:73:0f", + "pci": "0000:06:11.3", + "source_name": 5, + "vlan": 102 + }, + { + "mac": "d6:e9:f1:c6:40:00", + "pci": "0000:06:11.5", + "source_name": 6, + "vlan": 101 + }, + { + "mac": "36:b0:af:0e:5b:68", + "pci": "0000:06:11.7", + "source_name": 7, + "vlan": 100 + } + ], + "mac": "a0:36:9f:33:0c:d6" + } + ], + "numa_socket": 0, + "hugepages": 28, + "memory": 32 + } + ] +} +} diff --git a/test/hosts/host-example2.json b/test/hosts/host-example2.json new file mode 100644 index 0000000..ddda8f7 --- /dev/null +++ b/test/hosts/host-example2.json @@ -0,0 +1,698 @@ +{ + "host":{ + "name": "fake-host-2", + "user": "user", + "password": "password", + "ip_name": "fakehost2" + }, +"host-data": +{ + "name": "fake-host-2", + "ranking": 300, + "description": "fake host 2 for test mode", + "ip_name": "fakehost2", + "features": "lps,dioc,hwsv,ht,64b,tlbps", + "user": "user", + "password": "password", + "numas": [ + { + "cores": [ + { + "core_id": 0, + "thread_id": 1, + "status": "noteligible" + }, + { + "core_id": 0, + "thread_id": 25, + "status": "noteligible" + }, + { + "core_id": 1, + "thread_id": 3 + }, + { + "core_id": 1, + 
"thread_id": 27 + }, + { + "core_id": 2, + "thread_id": 5 + }, + { + "core_id": 2, + "thread_id": 29 + }, + { + "core_id": 3, + "thread_id": 7 + }, + { + "core_id": 3, + "thread_id": 31 + }, + { + "core_id": 4, + "thread_id": 9 + }, + { + "core_id": 4, + "thread_id": 33 + }, + { + "core_id": 5, + "thread_id": 11 + }, + { + "core_id": 5, + "thread_id": 35 + }, + { + "core_id": 6, + "thread_id": 13 + }, + { + "core_id": 6, + "thread_id": 37 + }, + { + "core_id": 7, + "thread_id": 15 + }, + { + "core_id": 7, + "thread_id": 39 + }, + { + "core_id": 8, + "thread_id": 17 + }, + { + "core_id": 8, + "thread_id": 41 + }, + { + "core_id": 9, + "thread_id": 19 + }, + { + "core_id": 9, + "thread_id": 43 + }, + { + "core_id": 10, + "thread_id": 21 + }, + { + "core_id": 10, + "thread_id": 45 + }, + { + "core_id": 11, + "thread_id": 23 + }, + { + "core_id": 11, + "thread_id": 47 + } + ], + "interfaces": [ + { + "source_name": "p2p1", + "Mbps": 10000, + "pci": "0000:44:00.0", + "switch_port": "port0/12", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "fe:49:52:59:2f:0b", + "pci": "0000:44:10.0", + "source_name": 0, + "vlan": 102 + }, + { + "mac": "16:df:ee:65:d1:a9", + "pci": "0000:44:10.2", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "e6:10:3a:09:cc:eb", + "pci": "0000:44:10.4", + "source_name": 2, + "vlan": 104 + }, + { + "mac": "f6:8e:ef:92:70:9a", + "pci": "0000:44:10.6", + "source_name": 3, + "vlan": 101 + }, + { + "mac": "fa:a5:0e:21:bc:89", + "pci": "0000:44:11.0", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "fa:72:5a:b0:07:44", + "pci": "0000:44:11.2", + "source_name": 5, + "vlan": 100 + }, + { + "mac": "3e:8f:5c:1a:5d:e1", + "pci": "0000:44:11.4", + "source_name": 6, + "vlan": 105 + }, + { + "mac": "f2:e0:34:e2:e1:2d", + "pci": "0000:44:11.6", + "source_name": 7, + "vlan": 106 + } + ], + "mac": "a0:36:9f:35:e9:a0" + }, + { + "source_name": "p2p2", + "Mbps": 10000, + "pci": "0000:44:00.1", + "switch_port": "port0/13", + 
"switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "ba:76:24:e0:79:75", + "pci": "0000:44:10.1", + "source_name": 0, + "vlan": 102 + }, + { + "mac": "b2:26:81:5b:18:7b", + "pci": "0000:44:10.3", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "da:46:a9:f4:ab:42", + "pci": "0000:44:10.5", + "source_name": 2, + "vlan": 100 + }, + { + "mac": "86:e0:ec:33:cc:18", + "pci": "0000:44:10.7", + "source_name": 3, + "vlan": 104 + }, + { + "mac": "ee:ee:49:da:20:75", + "pci": "0000:44:11.1", + "source_name": 4, + "vlan": 101 + }, + { + "mac": "be:fa:18:da:f3:ba", + "pci": "0000:44:11.3", + "source_name": 5, + "vlan": 105 + }, + { + "mac": "96:fa:8d:ed:50:02", + "pci": "0000:44:11.5", + "source_name": 6, + "vlan": 106 + }, + { + "mac": "22:1b:cb:33:a7:5a", + "pci": "0000:44:11.7", + "source_name": 7, + "vlan": 107 + } + ], + "mac": "a0:36:9f:35:e9:a2" + }, + { + "source_name": "p3p2", + "Mbps": 10000, + "pci": "0000:43:00.1", + "switch_port": "port0/15", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "a6:6e:28:6c:5b:04", + "pci": "0000:43:10.1", + "source_name": 0, + "vlan": 103 + }, + { + "mac": "be:35:42:86:3d:fd", + "pci": "0000:43:10.3", + "source_name": 1, + "vlan": 100 + }, + { + "mac": "0a:92:99:92:02:e4", + "pci": "0000:43:10.5", + "source_name": 2, + "vlan": 101 + }, + { + "mac": "b6:a6:3a:f1:1e:57", + "pci": "0000:43:10.7", + "source_name": 3, + "vlan": 102 + }, + { + "mac": "be:3f:1a:ef:76:c0", + "pci": "0000:43:11.1", + "source_name": 4, + "vlan": 104 + }, + { + "mac": "6a:cc:a6:bf:61:cd", + "pci": "0000:43:11.3", + "source_name": 5, + "vlan": 105 + }, + { + "mac": "fe:c1:15:a9:c5:12", + "pci": "0000:43:11.5", + "source_name": 6, + "vlan": 106 + }, + { + "mac": "6a:ff:59:aa:63:34", + "pci": "0000:43:11.7", + "source_name": 7, + "vlan": 107 + } + ], + "mac": "a0:36:9f:35:ed:42" + }, + { + "source_name": "p3p1", + "Mbps": 10000, + "pci": "0000:43:00.0", + "switch_port": "port0/14", + "switch_dpid": 
"00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "c6:18:cb:6c:c4:04", + "pci": "0000:43:10.0", + "source_name": 0, + "vlan": 102 + }, + { + "mac": "a6:4e:90:89:90:d8", + "pci": "0000:43:10.2", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "fe:9a:82:91:a5:3a", + "pci": "0000:43:10.4", + "source_name": 2, + "vlan": 100 + }, + { + "mac": "22:a3:ef:27:50:fd", + "pci": "0000:43:10.6", + "source_name": 3, + "vlan": 101 + }, + { + "mac": "c6:57:98:69:67:e2", + "pci": "0000:43:11.0", + "source_name": 4, + "vlan": 104 + }, + { + "mac": "6a:f9:81:ae:40:94", + "pci": "0000:43:11.2", + "source_name": 5, + "vlan": 105 + }, + { + "mac": "86:00:37:7f:6e:05", + "pci": "0000:43:11.4", + "source_name": 6, + "vlan": 107 + }, + { + "mac": "f2:e6:fc:7d:71:9b", + "pci": "0000:43:11.6", + "source_name": 7, + "vlan": 106 + } + ], + "mac": "a0:36:9f:35:ed:40" + } + ], + "numa_socket": 1, + "hugepages": 60, + "memory": 64 + }, + { + "cores": [ + { + "core_id": 0, + "thread_id": 0, + "status": "noteligible" + }, + { + "core_id": 0, + "thread_id": 24, + "status": "noteligible" + }, + { + "core_id": 1, + "thread_id": 2 + }, + { + "core_id": 1, + "thread_id": 26 + }, + { + "core_id": 2, + "thread_id": 4 + }, + { + "core_id": 2, + "thread_id": 28 + }, + { + "core_id": 3, + "thread_id": 6 + }, + { + "core_id": 3, + "thread_id": 30 + }, + { + "core_id": 4, + "thread_id": 8 + }, + { + "core_id": 4, + "thread_id": 32 + }, + { + "core_id": 5, + "thread_id": 10 + }, + { + "core_id": 5, + "thread_id": 34 + }, + { + "core_id": 6, + "thread_id": 12 + }, + { + "core_id": 6, + "thread_id": 36 + }, + { + "core_id": 7, + "thread_id": 14 + }, + { + "core_id": 7, + "thread_id": 38 + }, + { + "core_id": 8, + "thread_id": 16 + }, + { + "core_id": 8, + "thread_id": 40 + }, + { + "core_id": 9, + "thread_id": 18 + }, + { + "core_id": 9, + "thread_id": 42 + }, + { + "core_id": 10, + "thread_id": 20 + }, + { + "core_id": 10, + "thread_id": 44 + }, + { + "core_id": 11, + "thread_id": 22 + }, + { + 
"core_id": 11, + "thread_id": 46 + } + ], + "interfaces": [ + { + "source_name": "p5p1", + "Mbps": 10000, + "pci": "0000:04:00.0", + "switch_port": "port0/16", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "02:ef:c9:38:c7:01", + "pci": "0000:04:10.0", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "92:04:e9:fe:c7:f9", + "pci": "0000:04:10.2", + "source_name": 1, + "vlan": 105 + }, + { + "mac": "a2:41:32:78:25:48", + "pci": "0000:04:10.4", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "9e:65:ab:e8:a0:2b", + "pci": "0000:04:10.6", + "source_name": 3, + "vlan": 107 + }, + { + "mac": "0a:38:88:4c:76:1b", + "pci": "0000:04:11.0", + "source_name": 4, + "vlan": 102 + }, + { + "mac": "4e:8f:96:e1:d2:99", + "pci": "0000:04:11.2", + "source_name": 5, + "vlan": 103 + }, + { + "mac": "62:b3:0a:15:1b:cc", + "pci": "0000:04:11.4", + "source_name": 6, + "vlan": 100 + }, + { + "mac": "fa:29:6a:04:c3:fc", + "pci": "0000:04:11.6", + "source_name": 7, + "vlan": 101 + } + ], + "mac": "a0:36:9f:33:16:f4" + }, + { + "source_name": "p5p2", + "Mbps": 10000, + "pci": "0000:04:00.1", + "switch_port": "port0/17", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "1a:07:7d:f5:ab:bf", + "pci": "0000:04:10.1", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "12:63:9b:75:5c:02", + "pci": "0000:04:10.3", + "source_name": 1, + "vlan": 105 + }, + { + "mac": "be:f8:54:de:8e:39", + "pci": "0000:04:10.5", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "82:bb:12:83:82:b9", + "pci": "0000:04:10.7", + "source_name": 3, + "vlan": 107 + }, + { + "mac": "06:5c:e3:40:c3:e0", + "pci": "0000:04:11.1", + "source_name": 4, + "vlan": 102 + }, + { + "mac": "32:54:10:77:36:d1", + "pci": "0000:04:11.3", + "source_name": 5, + "vlan": 103 + }, + { + "mac": "0e:0f:3f:23:d6:17", + "pci": "0000:04:11.5", + "source_name": 6, + "vlan": 100 + }, + { + "mac": "46:14:84:51:3d:ec", + "pci": "0000:04:11.7", + "source_name": 7, + "vlan": 101 + } + ], + "mac": 
"a0:36:9f:33:16:f6" + }, + { + "source_name": "p7p1", + "Mbps": 10000, + "pci": "0000:06:00.0", + "switch_port": "port0/18", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "d6:8a:a7:27:bc:7c", + "pci": "0000:06:10.0", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "9e:f4:67:21:28:12", + "pci": "0000:06:10.2", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "b6:f8:b7:c8:ae:07", + "pci": "0000:06:10.4", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "0e:e8:34:7a:3b:c4", + "pci": "0000:06:10.6", + "source_name": 3, + "vlan": 105 + }, + { + "mac": "82:5d:0f:7a:20:91", + "pci": "0000:06:11.0", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "f2:6b:6a:4a:e3:93", + "pci": "0000:06:11.2", + "source_name": 5, + "vlan": 102 + }, + { + "mac": "b2:d6:af:b0:12:f4", + "pci": "0000:06:11.4", + "source_name": 6, + "vlan": 101 + }, + { + "mac": "22:0b:01:19:dd:3c", + "pci": "0000:06:11.6", + "source_name": 7, + "vlan": 100 + } + ], + "mac": "a0:36:9f:33:0f:1c" + }, + { + "source_name": "p7p2", + "Mbps": 10000, + "pci": "0000:06:00.1", + "switch_port": "port0/19", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "0a:2f:77:8f:53:da", + "pci": "0000:06:10.1", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "a2:f5:63:1d:1c:4d", + "pci": "0000:06:10.3", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "86:80:6c:d8:da:e0", + "pci": "0000:06:10.5", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "5a:9d:1c:19:3f:07", + "pci": "0000:06:10.7", + "source_name": 3, + "vlan": 105 + }, + { + "mac": "ea:a9:fb:95:29:34", + "pci": "0000:06:11.1", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "0e:3a:e9:65:5b:d3", + "pci": "0000:06:11.3", + "source_name": 5, + "vlan": 102 + }, + { + "mac": "ce:cd:53:c6:7b:ca", + "pci": "0000:06:11.5", + "source_name": 6, + "vlan": 101 + }, + { + "mac": "52:b1:4e:91:34:8c", + "pci": "0000:06:11.7", + "source_name": 7, + "vlan": 100 + } + ], + "mac": "a0:36:9f:33:0f:1e" + } + ], + 
"numa_socket": 0, + "hugepages": 60, + "memory": 64 + } + ] +} +} diff --git a/test/hosts/host-example3.json b/test/hosts/host-example3.json new file mode 100644 index 0000000..a115e45 --- /dev/null +++ b/test/hosts/host-example3.json @@ -0,0 +1,698 @@ +{ + "host":{ + "name": "fake-host-3", + "user": "user", + "password": "password", + "ip_name": "fakehost3" + }, +"host-data": +{ + "name": "fake-host-3", + "ranking": 300, + "description": "fake host 3 for test mode", + "ip_name": "fakehost3", + "features": "lps,dioc,hwsv,ht,64b,tlbps", + "user": "user", + "password": "password", + "numas": [ + { + "cores": [ + { + "core_id": 0, + "thread_id": 1, + "status": "noteligible" + }, + { + "core_id": 0, + "thread_id": 25, + "status": "noteligible" + }, + { + "core_id": 1, + "thread_id": 3 + }, + { + "core_id": 1, + "thread_id": 27 + }, + { + "core_id": 2, + "thread_id": 5 + }, + { + "core_id": 2, + "thread_id": 29 + }, + { + "core_id": 3, + "thread_id": 7 + }, + { + "core_id": 3, + "thread_id": 31 + }, + { + "core_id": 4, + "thread_id": 9 + }, + { + "core_id": 4, + "thread_id": 33 + }, + { + "core_id": 5, + "thread_id": 11 + }, + { + "core_id": 5, + "thread_id": 35 + }, + { + "core_id": 6, + "thread_id": 13 + }, + { + "core_id": 6, + "thread_id": 37 + }, + { + "core_id": 7, + "thread_id": 15 + }, + { + "core_id": 7, + "thread_id": 39 + }, + { + "core_id": 8, + "thread_id": 17 + }, + { + "core_id": 8, + "thread_id": 41 + }, + { + "core_id": 9, + "thread_id": 19 + }, + { + "core_id": 9, + "thread_id": 43 + }, + { + "core_id": 10, + "thread_id": 21 + }, + { + "core_id": 10, + "thread_id": 45 + }, + { + "core_id": 11, + "thread_id": 23 + }, + { + "core_id": 11, + "thread_id": 47 + } + ], + "interfaces": [ + { + "source_name": "p2p1", + "Mbps": 10000, + "pci": "0000:44:00.0", + "switch_port": "port1/0", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "ce:24:06:7d:7a:5b", + "pci": "0000:44:10.0", + "source_name": 0, + "vlan": 102 + }, + { + "mac": 
"a6:2d:10:75:dc:06", + "pci": "0000:44:10.2", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "4a:39:f1:d1:aa:46", + "pci": "0000:44:10.4", + "source_name": 2, + "vlan": 104 + }, + { + "mac": "ee:df:34:b3:cf:50", + "pci": "0000:44:10.6", + "source_name": 3, + "vlan": 101 + }, + { + "mac": "46:02:47:d8:f5:66", + "pci": "0000:44:11.0", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "a6:11:78:97:b4:ab", + "pci": "0000:44:11.2", + "source_name": 5, + "vlan": 100 + }, + { + "mac": "36:c0:13:a4:4c:77", + "pci": "0000:44:11.4", + "source_name": 6, + "vlan": 105 + }, + { + "mac": "b6:3b:b5:43:3a:44", + "pci": "0000:44:11.6", + "source_name": 7, + "vlan": 106 + } + ], + "mac": "a0:36:9f:25:97:f4" + }, + { + "source_name": "p2p2", + "Mbps": 10000, + "pci": "0000:44:00.1", + "switch_port": "port1/1", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "f6:38:e9:5e:f9:42", + "pci": "0000:44:10.1", + "source_name": 0, + "vlan": 102 + }, + { + "mac": "f6:1e:b9:d3:15:69", + "pci": "0000:44:10.3", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "2e:22:7f:44:86:e0", + "pci": "0000:44:10.5", + "source_name": 2, + "vlan": 100 + }, + { + "mac": "5a:3f:db:66:d6:90", + "pci": "0000:44:10.7", + "source_name": 3, + "vlan": 104 + }, + { + "mac": "b2:05:b8:f3:ae:20", + "pci": "0000:44:11.1", + "source_name": 4, + "vlan": 101 + }, + { + "mac": "fa:fe:24:82:12:b5", + "pci": "0000:44:11.3", + "source_name": 5, + "vlan": 105 + }, + { + "mac": "36:2a:0e:cb:29:01", + "pci": "0000:44:11.5", + "source_name": 6, + "vlan": 106 + }, + { + "mac": "46:71:c0:8a:9c:48", + "pci": "0000:44:11.7", + "source_name": 7, + "vlan": 107 + } + ], + "mac": "a0:36:9f:25:97:f6" + }, + { + "source_name": "p3p2", + "Mbps": 10000, + "pci": "0000:43:00.1", + "switch_port": "port1/3", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "72:fd:7a:af:91:57", + "pci": "0000:43:10.1", + "source_name": 0, + "vlan": 103 + }, + { + "mac": "fa:c4:d7:e1:3b:07", + "pci": 
"0000:43:10.3", + "source_name": 1, + "vlan": 100 + }, + { + "mac": "c2:03:28:4a:0a:8e", + "pci": "0000:43:10.5", + "source_name": 2, + "vlan": 101 + }, + { + "mac": "66:1b:78:65:67:e2", + "pci": "0000:43:10.7", + "source_name": 3, + "vlan": 102 + }, + { + "mac": "be:4b:e9:77:ff:a2", + "pci": "0000:43:11.1", + "source_name": 4, + "vlan": 104 + }, + { + "mac": "02:9a:94:e2:79:c0", + "pci": "0000:43:11.3", + "source_name": 5, + "vlan": 105 + }, + { + "mac": "b2:80:62:dd:09:ea", + "pci": "0000:43:11.5", + "source_name": 6, + "vlan": 106 + }, + { + "mac": "ea:41:30:8e:af:b9", + "pci": "0000:43:11.7", + "source_name": 7, + "vlan": 107 + } + ], + "mac": "a0:36:9f:25:97:de" + }, + { + "source_name": "p3p1", + "Mbps": 10000, + "pci": "0000:43:00.0", + "switch_port": "port1/2", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "66:80:a0:55:83:93", + "pci": "0000:43:10.0", + "source_name": 0, + "vlan": 102 + }, + { + "mac": "fe:fd:72:b8:fe:bb", + "pci": "0000:43:10.2", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "6a:70:92:7f:6b:7b", + "pci": "0000:43:10.4", + "source_name": 2, + "vlan": 100 + }, + { + "mac": "3a:14:75:5e:63:15", + "pci": "0000:43:10.6", + "source_name": 3, + "vlan": 101 + }, + { + "mac": "2e:c9:5d:6e:27:48", + "pci": "0000:43:11.0", + "source_name": 4, + "vlan": 104 + }, + { + "mac": "02:f7:bb:82:78:1f", + "pci": "0000:43:11.2", + "source_name": 5, + "vlan": 105 + }, + { + "mac": "0e:5e:b6:a8:6c:80", + "pci": "0000:43:11.4", + "source_name": 6, + "vlan": 107 + }, + { + "mac": "8e:bf:81:bc:cb:44", + "pci": "0000:43:11.6", + "source_name": 7, + "vlan": 106 + } + ], + "mac": "a0:36:9f:25:97:dc" + } + ], + "numa_socket": 1, + "hugepages": 28, + "memory": 32 + }, + { + "cores": [ + { + "core_id": 0, + "thread_id": 0, + "status": "noteligible" + }, + { + "core_id": 0, + "thread_id": 24, + "status": "noteligible" + }, + { + "core_id": 1, + "thread_id": 2 + }, + { + "core_id": 1, + "thread_id": 26 + }, + { + "core_id": 2, + "thread_id": 
4 + }, + { + "core_id": 2, + "thread_id": 28 + }, + { + "core_id": 3, + "thread_id": 6 + }, + { + "core_id": 3, + "thread_id": 30 + }, + { + "core_id": 4, + "thread_id": 8 + }, + { + "core_id": 4, + "thread_id": 32 + }, + { + "core_id": 5, + "thread_id": 10 + }, + { + "core_id": 5, + "thread_id": 34 + }, + { + "core_id": 6, + "thread_id": 12 + }, + { + "core_id": 6, + "thread_id": 36 + }, + { + "core_id": 7, + "thread_id": 14 + }, + { + "core_id": 7, + "thread_id": 38 + }, + { + "core_id": 8, + "thread_id": 16 + }, + { + "core_id": 8, + "thread_id": 40 + }, + { + "core_id": 9, + "thread_id": 18 + }, + { + "core_id": 9, + "thread_id": 42 + }, + { + "core_id": 10, + "thread_id": 20 + }, + { + "core_id": 10, + "thread_id": 44 + }, + { + "core_id": 11, + "thread_id": 22 + }, + { + "core_id": 11, + "thread_id": 46 + } + ], + "interfaces": [ + { + "source_name": "p5p1", + "Mbps": 10000, + "pci": "0000:04:00.0", + "switch_port": "port1/4", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "aa:ef:2c:23:ae:04", + "pci": "0000:04:10.0", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "2e:d5:b9:3e:c4:8b", + "pci": "0000:04:10.2", + "source_name": 1, + "vlan": 105 + }, + { + "mac": "4e:d6:55:09:17:1e", + "pci": "0000:04:10.4", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "ce:42:cc:9c:5a:32", + "pci": "0000:04:10.6", + "source_name": 3, + "vlan": 107 + }, + { + "mac": "1e:71:34:7f:5d:47", + "pci": "0000:04:11.0", + "source_name": 4, + "vlan": 102 + }, + { + "mac": "26:5c:e4:db:8c:48", + "pci": "0000:04:11.2", + "source_name": 5, + "vlan": 103 + }, + { + "mac": "5e:48:23:56:63:c0", + "pci": "0000:04:11.4", + "source_name": 6, + "vlan": 100 + }, + { + "mac": "9a:a2:fa:97:19:84", + "pci": "0000:04:11.6", + "source_name": 7, + "vlan": 101 + } + ], + "mac": "a0:36:9f:27:46:a0" + }, + { + "source_name": "p5p2", + "Mbps": 10000, + "pci": "0000:04:00.1", + "switch_port": "port1/5", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": 
"0a:9b:b5:d1:e6:34", + "pci": "0000:04:10.1", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "5e:87:4a:22:e9:57", + "pci": "0000:04:10.3", + "source_name": 1, + "vlan": 105 + }, + { + "mac": "ea:94:d5:f6:ee:bb", + "pci": "0000:04:10.5", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "5a:3f:17:6b:bb:8b", + "pci": "0000:04:10.7", + "source_name": 3, + "vlan": 107 + }, + { + "mac": "8e:af:5c:cc:c8:82", + "pci": "0000:04:11.1", + "source_name": 4, + "vlan": 102 + }, + { + "mac": "3e:a0:df:f0:2b:07", + "pci": "0000:04:11.3", + "source_name": 5, + "vlan": 103 + }, + { + "mac": "ae:7d:00:9b:54:4d", + "pci": "0000:04:11.5", + "source_name": 6, + "vlan": 100 + }, + { + "mac": "fa:b5:0d:c6:78:87", + "pci": "0000:04:11.7", + "source_name": 7, + "vlan": 101 + } + ], + "mac": "a0:36:9f:27:46:a2" + }, + { + "source_name": "p7p1", + "Mbps": 10000, + "pci": "0000:06:00.0", + "switch_port": "port1/6", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "22:4a:21:37:84:80", + "pci": "0000:06:10.0", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "b2:8f:2c:04:08:08", + "pci": "0000:06:10.2", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "fe:2e:71:1f:99:dd", + "pci": "0000:06:10.4", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "1a:2c:7c:98:39:b6", + "pci": "0000:06:10.6", + "source_name": 3, + "vlan": 105 + }, + { + "mac": "6e:39:08:a9:7e:b1", + "pci": "0000:06:11.0", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "4a:00:f3:5b:15:27", + "pci": "0000:06:11.2", + "source_name": 5, + "vlan": 102 + }, + { + "mac": "b2:c3:48:da:24:55", + "pci": "0000:06:11.4", + "source_name": 6, + "vlan": 101 + }, + { + "mac": "72:85:cd:18:ee:5d", + "pci": "0000:06:11.6", + "source_name": 7, + "vlan": 100 + } + ], + "mac": "a0:36:9f:27:45:80" + }, + { + "source_name": "p7p2", + "Mbps": 10000, + "pci": "0000:06:00.1", + "switch_port": "port1/7", + "switch_dpid": "00:01:02:03:04:05:06:07", + "sriovs": [ + { + "mac": "0a:60:1a:9d:ba:e2", + "pci": 
"0000:06:10.1", + "source_name": 0, + "vlan": 104 + }, + { + "mac": "72:cb:0a:99:2f:7e", + "pci": "0000:06:10.3", + "source_name": 1, + "vlan": 103 + }, + { + "mac": "f2:2b:6b:d8:95:aa", + "pci": "0000:06:10.5", + "source_name": 2, + "vlan": 106 + }, + { + "mac": "b2:3a:4a:a8:c9:e9", + "pci": "0000:06:10.7", + "source_name": 3, + "vlan": 105 + }, + { + "mac": "06:85:75:b1:e8:10", + "pci": "0000:06:11.1", + "source_name": 4, + "vlan": 107 + }, + { + "mac": "12:41:69:61:89:3a", + "pci": "0000:06:11.3", + "source_name": 5, + "vlan": 102 + }, + { + "mac": "86:03:a8:5a:ad:71", + "pci": "0000:06:11.5", + "source_name": 6, + "vlan": 101 + }, + { + "mac": "ca:9c:5c:ad:e2:e2", + "pci": "0000:06:11.7", + "source_name": 7, + "vlan": 100 + } + ], + "mac": "a0:36:9f:27:45:82" + } + ], + "numa_socket": 0, + "hugepages": 28, + "memory": 32 + } + ] +} +} diff --git a/test/hosts/new_host.yaml b/test/hosts/new_host.yaml new file mode 100644 index 0000000..affe668 --- /dev/null +++ b/test/hosts/new_host.yaml @@ -0,0 +1,36 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +{ + "${}":[ + "${name} host name", + "${user} host user (n2)", + "${ip_name} host access ip or name (${name}.hi.inet)", + "${description} host description (${name})" + ], + + "host":{ + "name": "${name}", + "user": "${user}", + "ip_name": "${ip_name}", + "description": "${description}" + } +} diff --git a/test/images/new_image.yaml b/test/images/new_image.yaml new file mode 100644 index 0000000..a599884 --- /dev/null +++ b/test/images/new_image.yaml @@ -0,0 +1,34 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + + +{ + "${}":[ + "${name} image name", + "${path} image path (/mnt/powervault/virtualization/vnfs/os/US1404user.qcow2)", + "${description} image description (${name})" + ], +"image":{ + "name":"${name}", + "description":"${description}", + "path":"${path}" +} +} diff --git a/test/networks/net-example0.yaml b/test/networks/net-example0.yaml new file mode 100644 index 0000000..cfae0c7 --- /dev/null +++ b/test/networks/net-example0.yaml @@ -0,0 +1,6 @@ +network: + name: default + type: bridge_man + provider:physical: default + shared: true + diff --git a/test/networks/net-example1.yaml b/test/networks/net-example1.yaml new file mode 100644 index 0000000..cbc7980 --- /dev/null +++ b/test/networks/net-example1.yaml @@ -0,0 +1,6 @@ +network: + name: macvtap:em1 + type: bridge_man + provider:physical: macvtap:em1 + shared: true + diff --git a/test/networks/net-example2.yaml b/test/networks/net-example2.yaml new file mode 100644 index 0000000..f913670 --- /dev/null +++ b/test/networks/net-example2.yaml @@ -0,0 +1,6 @@ +network: + name: shared_bridge_net + type: bridge_data + provider:physical: bridge:virbrMan1 + shared: true + diff --git a/test/networks/net-example3.yaml b/test/networks/net-example3.yaml new file mode 100644 index 0000000..ed3b940 --- /dev/null +++ b/test/networks/net-example3.yaml @@ -0,0 +1,16 @@ +network: + name: data_net + type: data + + #if you want to connect this network to a concrete switch port for outside connectivity + #indicate it at provider_physical with openflow:[:vlan] + # must be a valid openflow port (one of the listed with openvim openflow-port-list) + #add [:vlan] without spaces if this port must be vlan tagged. If missing it is not tagged + + #provider:vlan contains the vlan used by the SRIOV interfaces connected to this network + #it always contain a value regardless used or not. 
If missing openvim will assign a value + + provider:physical: openflow:port1/8:vlan + provider:vlan: 3001 + shared: true + diff --git a/test/networks/new_network.yaml b/test/networks/new_network.yaml new file mode 100644 index 0000000..af1e673 --- /dev/null +++ b/test/networks/new_network.yaml @@ -0,0 +1,33 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + + +{ + "${}":[ + "${name} provide a network name", + "${type} provide a type: bridge_data,bridge_man,data,ptp (ptp)" + ], + + "network":{ + "name": "${name}", + "type": "${type}" + } +} diff --git a/test/ports/new_port.yaml b/test/ports/new_port.yaml new file mode 100644 index 0000000..6a07a3a --- /dev/null +++ b/test/ports/new_port.yaml @@ -0,0 +1,41 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + + +{ + "${}":[ + "${name} provide a port name", + "${net_id} provide the network uuid (null):", + "${vlan} provide the vlan if any (null):", + "${port} provide the attached switch port (Te0/47)", + "${mac} provide the mac of external device if known (null):" + ], + + "port":{ + "name": "${name}", + "network_id": "${net_id null}", + "type": "external", + "binding:vlan": "${vlan null-int}", + "binding:switch_port": "${port}", + "mac_address": "${mac null}" + } +} + diff --git a/test/servers/new_server.yaml b/test/servers/new_server.yaml new file mode 100644 index 0000000..6f30d14 --- /dev/null +++ b/test/servers/new_server.yaml @@ -0,0 +1,49 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +{ + "${}":[ + "${name} provide a name (VM)", + "${description} provide a description (${name})", + "${image_id} provide image_id uuid", + "${flavor_id} provide flavor_id uuid", + "${network0} provide a bridge network id; enter='default' (00000000-0000-0000-0000-000000000000)", + "${network1} provide a bridge network id; enter='virbrMan' (60f5227e-195f-11e4-836d-52540030594e)" + ], +"server":{ + "networks":[ + { + "name":"mgmt0", + "vpci": "0000:00:0a.0", + "uuid":"${network0}" + }, + { + "name":"ge0", + "vpci": "0000:00:0b.0", + "uuid":"${network1}" + } + ], + "name":"${name}", + "description":"${description}", + "imageRef": "${image_id}", + "flavorRef": "${flavor_id}" +} +} diff --git a/test/tenants/new_tenant.yaml b/test/tenants/new_tenant.yaml new file mode 100644 index 0000000..82ac36a --- /dev/null +++ b/test/tenants/new_tenant.yaml @@ -0,0 +1,32 @@ +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +{ + "${}":[ + "${name} tenant name", + "${description} tenant description (${name})" + ], + + "tenant":{ + "name": "${name}", + "description": "${description}" + } +} diff --git a/test/test_openflow.sh b/test/test_openflow.sh new file mode 100755 index 0000000..88fdc70 --- /dev/null +++ b/test/test_openflow.sh @@ -0,0 +1,217 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# +#author Alfonso Tierno +# +#script to test openflow connector with the creation of rules +# + +function usage(){ + echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] \n test openflow connector " + echo -e " OPTIONS:" + echo -e " -f --force does not prompt for confirmation" + echo -e " -v --same-vlan use this if the parameter 'of_controller_nets_with_same_vlan'" + echo -e " is not false at openvimd.cfg to avoid test unrealizable openflow nets" + echo -e " -d --debug show debug information at each command. 
It is quite verbose" + echo -e " -h --help shows this help" +} + + +function delete_and_exit(){ + echo + [[ $force != y ]] && read -e -p " Press enter to delete the deployed things " kk + echo + for f in $TODELETE + do + if [[ $f == restore ]] + then + printf "%-50s" "restoring back old rules: " + result=`openflow install ./test_openflow_old_rules.bk $debug ` + if [[ $? != 0 ]] + then + echo "FAIL cannot install old rules:" + echo "$result" + else + rm ./test_openflow_old_rules.bk + echo OK "./test_openflow_old_rules.bk deleted" + fi + else + printf "%-50s" "removing $f rule: " + result=`openflow delete $f -f $debug` + [[ $? != 0 ]] && echo "FAIL cannot delete" && echo "$result" || echo OK + fi + done + exit $1 +} + +force=n +same_vlan=n +debug="" +TODELETE="" +#detect if is called with a source to use the 'exit'/'return' command for exiting +[[ ${BASH_SOURCE[0]} != $0 ]] && echo "Do not execute this script as SOURCE" >&2 && return 1 + +#check correct arguments +for param in $* +do + if [[ $param == -h ]] || [[ $param == --help ]] + then + usage + exit 0 + elif [[ $param == -d ]] || [[ $param == --debug ]] + then + debug="--debug" + elif [[ $param == -v ]] || [[ $param == --same-vlan ]] + then + same_vlan=y + elif [[ $param == -f ]] || [[ $param == --force ]] + then + force=y + else + echo "invalid argument '$param'?. 
See $0 --help" && exit 1 + fi +done + +#detect if environment variables are set +fail="" +[[ -z $OF_CONTROLLER_TYPE ]] && echo "OF_CONTROLLER_TYPE not defined" >&2 && fail=1 +[[ -z $OF_CONTROLLER_IP ]] && echo "OF_CONTROLLER_IP not defined" >&2 && fail=1 +[[ -z $OF_CONTROLLER_PORT ]] && echo "OF_CONTROLLER_PORT not defined" >&2 && fail=1 +[[ -z $OF_CONTROLLER_DPID ]] && echo "OF_CONTROLLER_DPID not defined" >&2 && fail=1 +[[ -n $fail ]] && exit 1 + + +export _exit=delete_and_exit +if [[ $force != y ]] +then + echo " This will remove temporally the existing openflow rules and restored back a the end" + read -e -p "Press enter to continue, CTRL+C to abort " kk +fi + + +printf "%-50s" "obtain port list: " +result=`openflow port-list $debug | gawk '/^ /{print substr($1,0,length($1)-1)}'` +[[ $? != 0 ]] && echo "FAIL" && echo "$result" && $_exit 1 +ports=`echo $result | wc -w` +[[ $ports -lt 4 ]] && echo "FAIL not enough ports managed by this DPID, needed at least 4" && $_exit 1 +echo OK $ports ports +port0=`echo $result | cut -d" " -f1` +port1=`echo $result | cut -d" " -f2` +port2=`echo $result | cut -d" " -f3` +port3=`echo $result | cut -d" " -f4` + + +printf "%-50s" "saving the current rules: " +openflow list $debug > ./test_openflow_old_rules.bk +[[ $? != 0 ]] && echo "FAIL cannot obtain existing rules" && $_exit 1 +echo OK "> ./test_openflow_old_rules.bk" + +printf "%-50s" "clearing all current rules: " +openflow clear -f $debug +[[ $? != 0 ]] && echo "FAIL cannot clear existing rules" && $_exit 1 +result=`openflow list | wc -l` +[[ $result != 1 ]] && echo "FAIL rules not completely cleared" && $_exit 1 +echo OK +TODELETE="restore" + +printf "%-50s" "clearing again all rules: " +openflow clear -f $debug +[[ $? 
!= 0 ]] && echo "FAIL when there are not any rules" && $_exit 1 +result=`openflow list | wc -l` +[[ $result != 1 ]] && echo "FAIL rules not completely cleared" && $_exit 1 +echo OK +TODELETE="restore" + + +printf "%-50s" "new rule vlan,mac -> no vlan: " +rule_name=fromVlanMac_to_NoVlan1 +openflow add $rule_name --priority 1000 --matchmac "aa:bb:cc:dd:ee:ff" --matchvlan 500 --inport $port0 --stripvlan --out $port1 $debug +[[ $? != 0 ]] && echo "FAIL cannot insert new rule" && $_exit 1 +expected="$OF_CONTROLLER_DPID 1000 $rule_name $port0 aa:bb:cc:dd:ee:ff 500 vlan=None,out=$port1" +result=`openflow list | grep $rule_name` +[[ $? != 0 ]] && echo "FAIL rule bad inserted" && $_exit 1 +result=`echo $result` #remove blanks +[[ "$result" != "$expected" ]] && echo "FAIL" && echo " expected: $expected\n obtained: $result" && $_exit 1 +echo OK $rule_name +TODELETE="$rule_name $TODELETE" + +printf "%-50s" "new rule mac -> vlan: " +rule_name=fromMac_to_Vlan2 +openflow add $rule_name --priority 1001 --matchmac "ff:ff:ff:ff:ff:ff" --inport $port1 --setvlan 501 --out $port2 --out $port3 $debug +[[ $? != 0 ]] && echo "FAIL cannot insert new rule" && $_exit 1 +expected="$OF_CONTROLLER_DPID 1001 $rule_name $port1 ff:ff:ff:ff:ff:ff any vlan=501,out=$port2,out=$port3" +result=`openflow list | grep $rule_name` +[[ $? != 0 ]] && echo "FAIL rule bad inserted" && $_exit 1 +result=`echo $result` #remove blanks +[[ "$result" != "$expected" ]] && echo "FAIL" && echo " expected: $expected\n obtained: $result" && $_exit 1 +echo OK $rule_name +TODELETE="$rule_name $TODELETE" + +printf "%-50s" "new rule None -> None: " +rule_name=fromNone_to_None +openflow add $rule_name --priority 1002 --inport $port2 --out $port0 $debug +[[ $? != 0 ]] && echo "FAIL cannot insert new rule" && $_exit 1 +expected="$OF_CONTROLLER_DPID 1002 $rule_name $port2 any any out=$port0" +result=`openflow list | grep $rule_name` +[[ $? 
!= 0 ]] && echo "FAIL rule bad inserted" && $_exit 1 +result=`echo $result` #remove blanks +[[ "$result" != "$expected" ]] && echo "FAIL" && echo " expected: $expected\n obtained: $result" && $_exit 1 +echo OK $rule_name +TODELETE="$rule_name $TODELETE" + +printf "%-50s" "new rule vlan -> vlan: " +rule_name=fromVlan_to_Vlan1 +openflow add $rule_name --priority 1003 --matchvlan 504 --inport $port3 --setvlan 505 --out $port0 $debug +[[ $? != 0 ]] && echo "FAIL cannot insert new rule" && $_exit 1 +expected="$OF_CONTROLLER_DPID 1003 $rule_name $port3 any 504 vlan=505,out=$port0" +result=`openflow list | grep $rule_name` +[[ $? != 0 ]] && echo "FAIL rule bad inserted" && $_exit 1 +result=`echo $result` #remove blanks +[[ "$result" != "$expected" ]] && echo "FAIL" && echo " expected: $expected\n obtained: $result" && $_exit 1 +echo OK $rule_name +TODELETE="$rule_name $TODELETE" + + +if [[ $same_vlan == n ]] +then + + printf "%-50s" "new rule Vlan -> Vlan_Vlan: " + rule_name=fromVlan_to_Vlan1Vlan1 + openflow add $rule_name --priority 1005 --inport $port3 --matchvlan 505 --setvlan 510 --out $port0 --setvlan 511 --out $port1 --stripvlan --out=$port2 $debug + [[ $? != 0 ]] && echo "FAIL cannot insert new rule" && $_exit 1 + expected="$OF_CONTROLLER_DPID 1005 $rule_name $port3 any 505 vlan=510,out=$port0,vlan=511,out=$port1,vlan=None,out=$port2" + result=`openflow list | grep $rule_name` + [[ $? != 0 ]] && echo "FAIL rule bad inserted" && $_exit 1 + result=`echo $result` #remove blanks + [[ "$result" != "$expected" ]] && echo "FAIL" && echo " expected: $expected\n obtained: $result" && $_exit 1 + echo OK $rule_name + TODELETE="$rule_name $TODELETE" + +fi + +echo +echo DONE + +$_exit 0 + diff --git a/test/test_openvim.py b/test/test_openvim.py new file mode 100755 index 0000000..73fcc23 --- /dev/null +++ b/test/test_openvim.py @@ -0,0 +1,708 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. 
+# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +This is a client tester for openvim. +It is almost DEPRECATED by the openvim client + +The reason for keeping is because it is used for some scripts +and it contain the -r option (delete recursive) +that it is very useful for deleting content of database. +Another difference from openvim is that it is more verbose +and so more suitable for the developers +''' + +__author__="Alfonso Tierno" +__date__ ="$5-oct-2014 11:09:29$" + +import requests +import json +import yaml +import sys +import getopt +from jsonschema import validate as js_v, exceptions as js_e + +version="0.0.2" +global global_config + + +def get_elements(url): + headers_req = {'content-type': 'application/json'} + try: + vim_response = requests.get(url, headers = headers_req) + #print vim_response + #print vim_response.status_code + if vim_response.status_code == 200: + #print vim_response.json() + #print json.dumps(vim_response.json(), indent=4) + content = vim_response.json() + return 1, content + #print http_content + else: + text = " Error. 
VIM response '%s': not possible to GET %s" % (vim_response.status_code, url) + text += "\n " + vim_response.text + #print text + return -vim_response.status_code,text + except requests.exceptions.RequestException, e: + return -1, " Exception "+ str(e.message) + +def delete_elements(url): + headers_req = {'content-type': 'application/json'} + + try: + vim_response = requests.delete(url, headers = headers_req) + #print vim_response + #print vim_response.status_code + if vim_response.status_code == 200: + pass + #print vim_response.json() + #print json.dumps(vim_response.json(), indent=4) + else: + #print vim_response.text + text = " Error. VIM response '%s': not possible to DELETE %s" % (vim_response.status_code, url) + text += "\n " + vim_response.text + #print text + return -vim_response.status_code,text + except requests.exceptions.RequestException, e: + return -1, " Exception "+ str(e.message) + return 1, None + + +def new_elements(url, payload): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + #print str(payload) + try: + vim_response = requests.post(url, data=json.dumps(payload), headers=headers_req) + #print vim_response + #print vim_response.status_code + if vim_response.status_code == 200: + #print vim_response.json() + #print json.dumps(vim_response.json(), indent=4) + return 1, vim_response.text + else: + #print vim_response.text + text = "Error. 
VIM response '%s': not possible to ADD %s" % (vim_response.status_code, url) + text += "\n" + vim_response.text + #print text + return -vim_response.status_code,text + except requests.exceptions.RequestException, e: + return -1, " Exception "+ str(e.message) + + +def get_details(url, what, c): + item_list = [] + return_dict = {what+'s': []} + + item = c.get(what,None) + if item is None: item = c.get(what+'s',None) + if item is None: + error_text= " Internal error, not found '" + what +"[s]' in content" + print 'get_details()', error_text, c + return -1, error_text + if type(item) is list: + item_list = item + else: + item_list.append(item) + if len(item_list)==0: + print what, "not found" + return 1 + for item in item_list: + uuid = item.get('id',None) + if uuid is None: uuid = item.get('uuid',None) + if uuid is None: + error_text= " Internal error, not found 'id/uuid' in item" + print 'get_details()', error_text, item + return -1, error_text + #print " get", what, uuid, " >>>>>>>> ", + r,c = get_elements(url + "/" + uuid) + if r<0: + # print "fail" + print " get", what, uuid, "fail", c + return -1, c + #else: + # print 'ok' + return_dict[what+'s'].append(c[what]) + return 1, return_dict + + +def action_details(url, what, c, force, payload): + item_list = [] + return_dict = {what+'s': []} + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + fail=0 + ok=0 + + #Allows for payload both keypairs inside a 'server','port' ... or directly. 
In later case, put keypairs inside what + + item = c.get(what,None) + if item is None: item = c.get(what+'s',None) + if item is None: + error_text= " Internal error, not found '" + what +"[s]' in content" + print 'get_details()', error_text, c + return -1, error_text + if type(item) is list: + item_list = item + else: + item_list.append(item) + if len(item_list)==0: + print what, "not found" + return 1 + for item in item_list: + name = item.get('name',None) + uuid = item.get('id',None) + if uuid is None: uuid = item.get('uuid',None) + if uuid is None: + error_text= " Internal error, not found 'id/uuid' in item" + print 'get_details()', error_text, item + return -1, error_text + if not force: + r = raw_input("Action on " + what + " " + uuid + " " + name + " (y/N)? ") + if len(r)>0 and r[0].lower()=="y": + print " put", what, uuid, " >>>>>>>> ", + else: + continue + + #print str(payload) + try: + vim_response = requests.post(url + "/" + uuid + "/action", data=json.dumps(payload), headers=headers_req) + if vim_response.status_code == 200: + print 'ok' + ok += 1 + return_dict[what+'s'].append(vim_response.json()) + return_dict[what+'s'][-1]['uuid'] = uuid + return_dict[what+'s'][-1]['name'] = name + else: + fail += 1 + print "fail" + #print vim_response.text + #text = "Error. 
VIM response '%s': not possible to PUT %s" % (vim_response.status_code, url) + #text += "\n" + vim_response.text + #print text + error_dict = vim_response.json() + error_dict['error']['uuid']=uuid + error_dict['error']['name']=name + return_dict[what+'s'].append(error_dict) + except requests.exceptions.RequestException, e: + return -1, " Exception "+ str(e.message) + if ok>0 and fail>0: return 0, return_dict + elif fail==0 : return 1, return_dict + else: return -1, return_dict + + + +def edit_details(url, what, c, force, payload): + item_list = [] + return_dict = {what+'s': []} + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + fail=0 + ok=0 + + #Allows for payload both keypairs inside a 'server','port' ... or directly. In later case, put keypairs inside what + if what not in payload: + payload = {what:payload} + + item = c.get(what,None) + if item is None: item = c.get(what+'s',None) + if item is None: + error_text= " Internal error, not found '" + what +"[s]' in content" + print 'get_details()', error_text, c + return -1, error_text + if type(item) is list: + item_list = item + else: + item_list.append(item) + if len(item_list)==0: + print what, "not found" + return 1 + for item in item_list: + name = item.get('name',None) + uuid = item.get('id',None) + if uuid is None: uuid = item.get('uuid',None) + if uuid is None: + error_text= " Internal error, not found 'id/uuid' in item" + print 'get_details()', error_text, item + return -1, error_text + if not force: + r = raw_input("Edit " + what + " " + uuid + " " + name + " (y/N)? 
") + if len(r)>0 and r[0].lower()=="y": + print " put", what, uuid, " >>>>>>>> ", + else: + continue + + #print str(payload) + try: + vim_response = requests.put(url + "/" + uuid, data=json.dumps(payload), headers=headers_req) + if vim_response.status_code == 200: + print 'ok' + ok += 1 + return_dict[what+'s'].append( vim_response.json()[what] ) + else: + fail += 1 + print "fail" + #print vim_response.text + #text = "Error. VIM response '%s': not possible to PUT %s" % (vim_response.status_code, url) + #text += "\n" + vim_response.text + #print text + error_dict = vim_response.json() + error_dict['error']['uuid']=uuid + error_dict['error']['name']=name + return_dict[what+'s'].append(error_dict) + except requests.exceptions.RequestException, e: + return -1, " Exception "+ str(e.message) + if ok>0 and fail>0: return 0, return_dict + elif fail==0 : return 1, return_dict + else: return -1, return_dict + +def get_del_recursive(url, what, url_suffix, force=False, recursive=False): + #print + #print " get", what, a, " >>>>>>>> ", + r,c = get_elements(url + what + 's' + url_suffix) + if r<0: + print c, "when getting", what, url_suffix + return -1 + # print "ok" + + list_todelete = c.get(what, None) + if list_todelete is None: list_todelete = c.get(what+'s', None) + if list_todelete is None: + print " Internal error, not found '" + what +"[s]' in", c + return -3, " Internal error, not found a valid dictionary" + if type(list_todelete) == dict: + list_todelete = (list_todelete, ) + + if len(list_todelete)==0: + print what, url_suffix, "not found" + return 1 + for c in list_todelete: + uuid=c.get('id', None) + if uuid is None: + uuid=c.get('uuid', None) + if uuid is None: + print "Id not found" + continue + name = c.get("name","") + if recursive: + if what=='tenant' : + get_del_recursive(url + uuid + "/", 'server', "", force, recursive) + get_del_recursive(url + uuid + "/", 'flavor', "", force, recursive) + get_del_recursive(url + uuid + "/", 'image', "", force, recursive) + 
get_del_recursive(url, 'network', "?tenant_id="+uuid, force, recursive) + elif what=='flavors' : + #get_del_recursive(url, 'servers', "?flavorRef="+uuid, force, recursive) + pass + elif what=='image' : + get_del_recursive(url, 'server', "?imageRef="+uuid, force, recursive) + elif what=='hosts' : + get_del_recursive(url, 'server', "?hostId="+uuid, force, recursive) + + if not force: + r = raw_input("Delete " + what + " " + uuid + " " + name + " (y/N)? ") + if len(r)>0 and r[0].lower()=="y": + pass + else: + continue + r,c = delete_elements(url + what + "s/" + uuid) + if r<0: + #print "Error deleting", vimURI, -r + print c + else: + print what, uuid, name, "deleted" + return 1 + +def check_valid_uuid(uuid): + id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"} + try: + js_v(uuid, id_schema) + return True + except js_e.ValidationError: + return False + +def change_string(text, var_list): + end=0 + type_=None + while True: + ini = text.find("${", end) + if ini<0: return text + end = text.find("}", ini) + if end<0: return text + end+=1 + + var = text[ini:end] + if ' ' in var: + kk=var.split(" ") + var=kk[0]+"}" + type_=kk[-1][:-1] + var = var_list.get(var, None) + if var==None: return text + + text = text[:ini] + var + text[end:] + if type_ != None: + if 'null' in type_ and text=="null": + return None + if 'int' in type_ : #and text.isnumeric(): + return int(text) + return text + +def chage_var_recursively(data, var_list): + '''Check recursively the conent of data, and look for "*${*}*" variables and changes + It assumes that this variables are not in the key of dictionary, + Attributes: + 'data': dictionary, or list. 
None or empty is consideted valid + 'var_list': dictionary (name:change) pairs + Return: + None, data is modified + ''' + + if type(data) is dict: + for k in data.keys(): + if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list: + chage_var_recursively(data[k], var_list) + elif type(data[k]) is str: + data[k] = change_string(data[k], var_list) + if type(data) is list: + for k in range(0,len(data)): + if type(data[k]) is dict or type(data[k]) is list: + chage_var_recursively(data[k], var_list) + elif type(data[k]) is str: + data[k] = change_string(data[k], var_list) + +def change_var(data): + if type(data) is not dict: + return -1, "Format error, not a object (dictionary)" + if "${}" not in data: + return 0, data + + var_list={} + for var in data["${}"]: + r = var.find("}",) + 1 + if r<=2 or var[:2] != '${': + return -1, "Format error at '${}':" + var + #change variables inside description text + if "${" in var[r:]: + var = var[:r] + change_string(var[r:], var_list) + d_start = var.rfind("(",) + 1 + d_end = var.rfind(")",) + if d_start>0 and d_end>=d_start: + default = var[d_start:d_end] + else: default=None + v = raw_input(var[r:] + "? ") + if v=="": + if default != None: + v = default + else: + v = raw_input(" empty string? 
try again: ") + var_list[ var[:r] ] = str(v) + + del data["${}"] + chage_var_recursively(data, var_list) + return 0, data + +def parse_yaml_json(text): + try: + data = yaml.load(text) + return 0, data + except yaml.YAMLError, exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1) + return -1, " Error yaml/json format error at " + error_pos + +def load_file(file_, parse=False): + try: + f = open(file_, 'r') + read_data = f.read() + f.close() + if not parse: + return 0, read_data + except IOError, e: + return -1, " Error opening file '" + file_ + "': " + e.args[1] + + try: + data = yaml.load(read_data) + return change_var(data) + except yaml.YAMLError, exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1) + return -2, " Error yaml/json format error at '"+ file_ +"'"+error_pos + +def load_configuration(configuration_file): + default_tokens ={'http_port':8080, 'http_host':'localhost', 'test_mode':False, 'of_controller_nets_with_same_vlan':True} + + r, config = load_file(configuration_file, parse=True) + if r < 0: + return False, config + + #Check default values tokens + for k,v in default_tokens.items(): + if k not in config: config[k]=v + + return (True, config) + +items_list = ('server','host','tenant','image','flavor','network','port') +action_list = ('list','get','new','del','edit','action') + + +def usage(complete=False): + global items_list + global action_list + print "Usage: ", sys.argv[0], "[options]", " [" + ",".join(action_list) +"] ", " [] " + print " Perform an test action over openvim" + print " "+",".join(action_list)+": List (by default), GET detais, Creates, Deletes, Edit" + print " : can be one of " + ",".join(items_list) + print " : list of uuid|name for 'get|del'; list of json/yaml files for 'new' or 'edit'" + if not complete: + print " Type -h or --help for 
a complete list of options" + return + print " Options:" + print " -v|--version: prints current version" + print " -c|--config [configuration_file]: loads the configuration file (default: openvimd.cfg)" + print " -h|--help: shows this help" + print " -u|--url [URL]: url to use instead of the one loaded from configuration file" + print " -t|--tenant [tenant uuid]: tenant to be used for some comands. IF mising it will use the default obtained in configuration file" + print " -F|--filter [A=B[&C=D...]: URL query string used for 'get' or 'del' commands" + print " -f|--force : Do not ask for confirmation when deleting. Also remove dependent objects." + print " -r|--recursive : Delete also dependency elements, (from tenants: images, flavors,server; from hosts: instances; ..." + print " Examples:" + print " ",sys.argv[0]," tenant #list tenants " + print " ",sys.argv[0]," -F'device_owner=external' get port #get details of all external ports" + print " ",sys.argv[0]," del server ses pan #delete server names 'ses' and 'pan'. 
Do not ask for confirmation" + print " ",sys.argv[0]," -r -f del host #delete all host and all the dependencies " + print " ",sys.argv[0]," new host ./Host/nfv100.json #add a host which information is in this file" + print " ",sys.argv[0]," edit network f348faf8-59ef-11e4-b4c7-52540030594e '{\"network\":{\"admin_state_up\":false}}'" + print " #change the admin status of this network" + return + + +if __name__=="__main__": + global vimURI + global vimURI_admin + + global what + global query_string +#init variables + action="list" + what=None + url=None + query_string = "" + force = False + recursive = False + tenant = None + additional = [] + #look for parent dir + config_file = '../openvimd.cfg' + pos = sys.argv[0].rfind("/") + if pos<0: + base_dir="./" + else: + base_dir = sys.argv[0] [:pos+1] + if pos>=0: + config_file = base_dir + config_file + +#get params + try: + opts, args = getopt.getopt(sys.argv[1:], "hvrfc:u:t:F:", + ["config", "help", "version", "force", "filter","tenant","url","recursive"]) + except getopt.GetoptError, err: + print " Error:", err # will print something like "option -a not recognized" + usage() + sys.exit(-2) + + for o, a in opts: + if o in ("-v", "--version"): + print "test_openvim version", version, "Oct 2014" + print "(c) Copyright Telefonica" + sys.exit(0) + elif o in ("-h", "--help"): + usage(True) + sys.exit(0) + elif o in ("-c", "--config"): config_file = a + elif o in ("-f", "--force"): force = True + elif o in ("-r", "--recursive"): recursive = True + elif o in ("-F", "--filter"): query_string = "?"+a + elif o in ("-u", "--url"): url = a + elif o in ("-t", "--tenant"): tenant = a + else: + assert False, "Unhandled option" + + for a in args: + if len(a) == 0: + print " Warning!!! Found an empty parameter?" + elif a[0]=="-": + print " Error!!! 
Put options parameter at the beginning" + sys.exit(-2) + elif what is not None: + additional.append(a) + elif a in items_list: + what=a + elif a[:-1] in items_list and a[-1]=='s': + what=a[:-1] + elif a in action_list: + action=a + else: + print " Missing ", ",".join(items_list) + sys.exit(-2) + if what is None: + usage() + sys.exit(-1) + #Load configuration file + r, config_dic = load_configuration(config_file) + #print config_dic + if not r: + print config_dic + config_dic={} + #exit(-1) + + #override parameters obtained by command line + try: + if url is not None: + vimURI = vimURI_admin = url + else: + vimURI = "http://" + config_dic['http_host'] +":"+ str(config_dic['http_port']) + "/openvim/" + if 'http_admin_port' in config_dic: + vimURI_admin = "http://" + config_dic['http_host'] +":"+ str(config_dic['http_admin_port']) + "/openvim/" + except: #key error + print " Error: can not get URL; neither option --u,-url, nor reading configuration file" + exit(-1) + if tenant is None: + tenant = config_dic.get('tenant_id', None) + +#check enough parameters + URI=vimURI + if (what in ('host','port') and action in ('del','new')) or (what=='host' and action=='edit' ): + if vimURI_admin is None: + print " Error: Can not get admin URL; neither option -t,--tenant, nor reading configuration file" + exit(-1) + else: + URI=vimURI_admin + if URI[-1] != "/": URI+="/" + if what in ('server','image','flavor'): + if tenant is None: + print " Error: Can not get tenant; neither option -t,--tenant, nor reading configuration file" + exit(-1) + URI += tenant + "/" + + + exit_code=0 + try: +#load file for new/edit + payload_list=[] + if action=='new' or action=='edit' or action=='action': + if len(additional)==0: + if action=='new' : + additional.append(base_dir+what+"s/new_"+what+".yaml") + #print " New what? Missing additional parameters to complete action" + else: + print " What must be edited? 
Missing additional parameters to complete action" + exit(-1) + if action=='edit'or action=='action': + #obtain only last element + additional_temp = additional[:-1] + additional = additional[-1:] + + for a in additional: + r,payload = load_file(a, parse=True) + if r<0: + if r==-1 and "{" in a or ":" in a: + #try to parse directly + r,payload = parse_yaml_json(a) + if r<0: + print payload + exit (-1) + else: + print payload + exit (-1) + payload_list.append(payload) + if action=='edit'or action=='action': + additional = additional_temp + + +#perform actions NEW + if action=='new': + for payload in payload_list: + print "\n new", what, a, " >>>>>>>> ", + r,c = new_elements(URI+what+'s', payload) + if r>0: + print "ok" + else: + print "fail" + exit_code = -1 + print c + #try to decode + exit(exit_code) + + #perform actions GET LIST EDIT DEL + if len(additional)==0: + additional=[""] + for a in additional: + filter_qs = query_string + if a != "" : + if check_valid_uuid(a): + if len(filter_qs) > 0: filter_qs += "&" + "id=" + str(a) + else: filter_qs += "?" + "id=" + str(a) + else: + if len(filter_qs) > 0: filter_qs += "&" + "name=" + str(a) + else: filter_qs += "?" 
+ "name=" + str(a) + + if action=='list' or action=='get' or action=='edit'or action=='action': + url = URI + what+'s' + print url + filter_qs + #print " get", what, a, " >>>>>>>> ", + r,c = get_elements(url + filter_qs) + if r<0: + #print "fail" + exit_code = -1 + print c + else: + #print "ok" + if action=='list': + print json.dumps(c, indent=4) + continue + + if action=='get': + r1,c1 = get_details(url, what, c) + elif action=='action': + r1,c1 = action_details(url, what, c, force, payload_list[0]) + else: # action=='edit': + r1,c1 = edit_details(url, what, c, force, payload_list[0]) + if r1<0: + exit_code = -1 + else: + if r>0: print "ok" + else: print "ok with some fails" + print json.dumps(c1, indent=4) + + elif action=='del': + r = get_del_recursive(URI, what, filter_qs, force, recursive) + if r<0: + exit_code = -1 + exit(exit_code) + + except KeyboardInterrupt: + print " Canceled" + + diff --git a/test/test_vim.sh b/test/test_vim.sh new file mode 100755 index 0000000..58441e5 --- /dev/null +++ b/test/test_vim.sh @@ -0,0 +1,558 @@ +#!/bin/bash + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +# +#author Alfonso Tierno +# +#script to test openvim with the creation of flavors and interfaces, openflow rules +#using images already inserted +# + +function usage(){ + echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] \n test openvim " + echo -e " OPTIONS:" + echo -e " -f --force does not prompt for confirmation" + echo -e " -v --same-vlan use this if the parameter 'of_controller_nets_with_same_vlan'" + echo -e " is not false at openvimd.cfg to avoid test unrealizable openflow nets" + echo -e " -h --help shows this help" + echo -e " -c --create create management network and two images (valid for test mode)" + echo + echo "This script test openvim, creating flavors, images, vms, de-attaching dataplane port" + echo "from one network to other and testing openflow generated rules." + echo "By default (unless -c option) uses and already created management network and two valid images." 
+ echo "If -c option is set, it creates the network and images with fake content (only usefull for" + echo "openvim in 'test' mode) This is speccified in this shell variables:" + echo " VIM_TEST_NETWORK_INTERNET name of the mamagement network to use" + echo " VIM_TEST_IMAGE_PATH path of a vm image to use, the image is created if not exist" + echo " VIM_TEST_IMAGE_PATH_EXTRA path of another vm image to use, the image is created if not exist" +} + +#detect if is called with a source to use the 'exit'/'return' command for exiting +[[ ${BASH_SOURCE[0]} != $0 ]] && echo "Do not execute this script as SOURCE" >&2 && return 1 + +#check correct arguments +force=n +same_vlan=n +create=n +for param in $* +do + if [[ $param == -h ]] || [[ $param == --help ]] + then + usage + exit 0 + elif [[ $param == -v ]] || [[ $param == --same-vlan ]] + then + same_vlan=y + elif [[ $param == -f ]] || [[ $param == --force ]] + then + force=y + elif [[ $param == -c ]] || [[ $param == --create ]] + then + create=y + else + echo "invalid argument '$param'?" 
&& usage >&2 && exit 1 + fi +done + +#detect if environment variables are set +fail="" +[[ $create == n ]] && [[ -z $VIM_TEST_NETWORK_INTERNET ]] && echo "VIM_TEST_NETWORK_INTERNET not defined" >&2 && fail=1 +[[ $create == n ]] && [[ -z $VIM_TEST_IMAGE_PATH ]] && echo "VIM_TEST_IMAGE_PATH not defined" >&2 && fail=1 +[[ $create == n ]] && [[ -z $VIM_TEST_IMAGE_PATH_EXTRA ]] && echo "VIM_TEST_IMAGE_PATH_EXTRA not defined" >&2 && fail=1 +[[ -n $fail ]] && exit 1 + +[[ $create == y ]] && [[ -z $VIM_TEST_IMAGE_PATH ]] && VIM_TEST_IMAGE_PATH="/test/path/of/image1" +[[ $create == y ]] && [[ -z $VIM_TEST_IMAGE_PATH_EXTRA ]] && VIM_TEST_IMAGE_PATH_EXTRA="/test/path2/of/image2" +TODELETE="" +export _exit=delete_and_exit + +function delete_and_exit(){ + echo + [[ $force != y ]] && read -e -p " Press enter to delete the deployed things " kk + echo + for f in $TODELETE + do + openvim ${f%%:*}-delete ${f##*:} -f + done + exit $1 +} + + +function is_valid_uuid(){ + echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' && return 0 + return 1 +} + +function process_cmd(){ + # test the result of the previos command, if fails execute the $_exit command + # params: + # uuid : test that the first word of is a valid uuid, stored it at . Print uuid + # fail : test that the previous command has failed. If not print the why it needs to fail + # ok : test that the previos command has not failed. Print OK + cmd_result=$? + if [[ $1 == uuid ]] + then + [[ $cmd_result == 0 ]] || ! shift 2 || ! echo "FAIL: $*" >&2 || $_exit 1 + is_valid_uuid $3 || ! shift 2 || ! echo "FAIL: $*" >&2 || $_exit 1 + eval $2=$3 + echo $3 + elif [[ $1 == fail ]] + then + [[ $cmd_result != "0" ]] || ! echo "NOT FAIL: $2" >&2 || $_exit 1 + echo "fail OK" + elif [[ $1 == ok ]] + then + [[ $cmd_result == 0 ]] || ! shift 1 || ! 
echo "FAIL: $*" >&2 || $_exit 1 + echo OK + fi + +} + +function test_of_rules(){ + #test the number of rules of a network, wait until 10 seconds + timeout_=10 + while true + do #it can take some seconds to get it ready + result=`openvim openflow-net-list $1` + nb_rules=`echo $result | grep actions -o | wc -l` + [[ $nb_rules == $2 ]] && echo "OK" && break + [[ $timeout_ == 0 ]] && echo "FAIL $result" >&2 && $_exit 1 + sleep 1 + timeout_=$((timeout_ - 1)) + done +} + +echo " Test VIM with 3 VM deployments. It delete the created items at the end" +echo " " +[[ $force != y ]] && read -e -p "Press enter to continue, CTRL+C to abort " kk + + +printf "%-50s" "1 get ${VIM_TEST_IMAGE_PATH##*/} image: " +image1=`openvim image-list -F"path=${VIM_TEST_IMAGE_PATH}" | gawk '{print $1}'` +if is_valid_uuid $image1 +then + echo $image1 +else + #create the image + echo not found + printf "%-50s" " b create ${VIM_TEST_IMAGE_PATH##*/} image: " + result=`openvim image-create --name=test-image1 --path=${VIM_TEST_IMAGE_PATH} --description=for-test` + process_cmd uuid image1 $result + TODELETE="image:$image1 $TODELETE" +fi + +printf "%-50s" "2 get ${VIM_TEST_IMAGE_PATH_EXTRA##*/} image: " +image2=`openvim image-list -F"path=${VIM_TEST_IMAGE_PATH_EXTRA}" | gawk '{print $1}'` +if is_valid_uuid $image2 +then + echo $image2 +else + #create the image + echo not found + printf "%-50s" " b create ${VIM_TEST_IMAGE_PATH_EXTRA##*/} image: " + result=`openvim image-create --name=test-image1 --path=${VIM_TEST_IMAGE_PATH_EXTRA} --description=for-test` + process_cmd uuid image2 $result + TODELETE="image:$image2 $TODELETE" +fi + +if [[ $create == y ]] +then + printf "%-50s" "3 create management network: " + result=`openvim net-create "name: test_mgmt_net +type: bridge_man"` + process_cmd uuid network_eth0 $result + TODELETE="net:$network_eth0 $TODELETE" +else + printf "%-50s" "3 get ${VIM_TEST_NETWORK_INTERNET} network: " + result=`openvim net-list -F"name=$VIM_TEST_NETWORK_INTERNET"` + process_cmd uuid 
network_eth0 $result +fi + +printf "%-50s" "4 insert flavor1: " +result=`openvim flavor-create ' +--- +flavor: + name: flavor1 + description: flavor to test openvim + extended: + processor_ranking: 205 + numas: + - memory: 8 + paired-threads: 5 + interfaces: + - name: xe0 + dedicated: "yes" + bandwidth: "10 Gbps" + vpci: "0000:00:10.0" + #mac_address: "10:10:10:10:10:12" + - name: xe1 + dedicated: "no" + bandwidth: "10 Gbps" + vpci: "0000:00:11.0" + mac_address: "10:10:10:10:10:13" +'` +process_cmd uuid flavor1 $result +TODELETE="flavor:$flavor1 $TODELETE" + +printf "%-50s" "5 insert net_ptp: " +result=`openvim net-create ' +--- +network: + name: test_net_ptp + type: ptp +'` +process_cmd uuid net_ptp $result +TODELETE="net:$net_ptp $TODELETE" + +printf "%-50s" " b insert net_data: " +result=`openvim net-create ' +--- +network: + name: test_net_data + type: data +'` +process_cmd uuid net_data $result +TODELETE="net:$net_data $TODELETE" + +printf "%-50s" "6 insert net_bind network bound to net_data: " +result=`openvim net-create 'name: test_net_binded +type: data +bind_net: test_net_data'` +process_cmd uuid net_bind $result +TODELETE="net:$net_bind $TODELETE" + +printf "%-50s" "7 insert bridge network net2: " +result=`openvim net-create ' +--- +network: + name: test_bridge_net2 + type: bridge_data'` +process_cmd uuid network2 $result +TODELETE="net:$network2 $TODELETE" + +printf "%-50s" "8 add VM1 dataplane not connected: " +result=`openvim vm-create " +--- +server: + name: test_VM1 + descrition: US or server with 1 SRIOV 1 PASSTHROUGH + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: mgmt0 + vpci: '0000:00:0a.0' + uuid: ${network_eth0} + mac_address: '10:10:10:10:10:10' + - name: eth0 + vpci: '0000:00:0b.0' + uuid: '$network2' + mac_address: '10:10:10:10:10:11' +"` +process_cmd uuid server1 $result +TODELETE="vm:$server1 $TODELETE" + +printf "%-50s" "9 add VM2 oversubscribe flavor: " +result=`openvim vm-create " +--- +server: + name: test_VM2 + 
descrition: US or server with direct network attach + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: mgmt0 + vpci: '0000:00:0a.0' + uuid: ${network_eth0} + mac_address: '10:10:10:10:11:10' + - name: eth0 + vpci: '0000:00:0b.0' + uuid: '$network2' + mac_address: '10:10:10:10:11:11' + extended: + processor_ranking: 205 + numas: + - memory: 8 + threads: 10 + interfaces: + - name: xe0 + dedicated: 'yes:sriov' + bandwidth: '10 Gbps' + vpci: '0000:00:11.0' + mac_address: '10:10:10:10:11:12' + uuid: '$net_ptp' + devices: + - type: disk + imageRef: '$image2' +"` +process_cmd uuid server2 $result +TODELETE="vm:$server2 $TODELETE" + +printf "%-50s" "10 test VM with repeated vpci: " +result=`openvim vm-create " +--- +server: + name: test_VMfail + descrition: repeated mac address + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: mgmt0 + vpci: '0000:00:10.0' + uuid: ${network_eth0} +"` +process_cmd fail "Duplicate vpci 0000:00:10.0" $result + +printf "%-50s" " b test VM with repeated mac address: " +result=`openvim vm-create " +--- +server: + name: test_VMfail + descrition: repeated mac address + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: mgmt0 + vpci: '0000:00:0a.0' + uuid: ${network_eth0} + mac_address: '10:10:10:10:10:10' +"` +process_cmd fail "Duplicate mac 10:10:10:10:10:10" $result + + +printf "%-50s" " c test VM with wrong iface name at networks: " +result=`openvim vm-create " +--- +server: + name: test_VMfail + descrition: repeated mac address + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: missing + type: PF + uuid: '$net_ptp' +"` +process_cmd fail "wrong iface name at networks" $result + + +printf "%-50s" " d test VM with wrong iface type at networks: " +result=`openvim vm-create " +--- +server: + name: test_VMfail + descrition: repeated mac address + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: xe0 + type: VF + uuid: '$net_ptp' +"` +process_cmd fail "wrong iface type 
at networks" $result + + +printf "%-50s" "11 add VM3 dataplane connected: " +result=`openvim vm-create " +--- +server: + name: test_VM3 + descrition: US or server with 2 dataplane connected + imageRef: '$image1' + flavorRef: '$flavor1' + networks: + - name: mgmt0 + vpci: '0000:00:0a.0' + uuid: ${network_eth0} + mac_address: '10:10:10:10:12:10' + - name: eth0 + vpci: '0000:00:0b.0' + uuid: '$network2' + type: virtual + mac_address: '10:10:10:10:12:11' + - name: xe0 + type: PF + uuid: '$net_data' + - name: xe1 + type: VF + uuid: '$net_ptp' + mac_address: '10:10:10:10:12:13' +"` +process_cmd uuid server3 $result +TODELETE="vm:$server3 $TODELETE" + +printf "%-50s" "12 check 2 openflow rules for net_ptp: " +test_of_rules $net_ptp 2 + +printf "%-50s" "13 check net-down net_ptp: " +result=`openvim net-down -f ${net_ptp}` +process_cmd ok $result + +printf "%-50s" " b check 0 openflow rules for net_ptp: " +test_of_rules $net_ptp 0 + +printf "%-50s" " c check net-up net_ptp: " +result=`openvim net-up -f ${net_ptp}` +process_cmd ok $result + +printf "%-50s" " d check 2 openflow rules for net_ptp: " +test_of_rules $net_ptp 2 + +printf "%-50s" "14 check 0 openflow rules for net_data: " +test_of_rules $net_data 0 + +[[ $force != y ]] && read -e -p " Test control plane, and server2:xe0 to server3:xe1 connectivity. 
Press enter to continue " kk + +printf "%-50s" "15 get xe0 iface uuid from server1: " +result=`openvim port-list -F"device_id=${server1}&name=xe0"` +process_cmd uuid server1_xe0 $result + +printf "%-50s" " b get xe1 iface uuid from server1: " +result=`openvim port-list -F"device_id=${server1}&name=xe1"` +process_cmd uuid server1_xe1 $result + +printf "%-50s" " c get xe0 iface uuid from server3: " +result=`openvim port-list -F"device_id=${server3}&name=xe0"` +process_cmd uuid server3_xe0 $result + +printf "%-50s" " d get xe1 iface uuid from server3: " +result=`openvim port-list -F"device_id=${server3}&name=xe1"` +process_cmd uuid server3_xe1 $result + +printf "%-50s" " e get xe0 iface uuid from server3: " +result=`openvim port-list -F"device_id=${server2}&name=xe0"` +process_cmd uuid server2_xe0 $result + +printf "%-50s" "16 test ptp 3connex server1:xe0 -> net_ptp: " +result=`openvim port-edit $server1_xe0 "network_id: $net_ptp" -f` +process_cmd fail "Can not connect 3 interfaces to ptp network" + +printf "%-50s" "17 attach server1:xe0 to net_data: " +result=`openvim port-edit $server1_xe0 "network_id: $net_data" -f` +process_cmd ok $result + +printf "%-50s" "18 check 2 openflow rules for net_data: " +test_of_rules $net_data 2 + +[[ $force != y ]] && read -e -p " Test server1:xe0 to server3:xe0 connectivity. Press enter to continue " kk + +if [[ $same_vlan == n ]] +then + + printf "%-50s" "19 attach server1:xe1 to net-data: " + result=`openvim port-edit $server1_xe1 "network_id: $net_data" -f` + process_cmd ok $result + + printf "%-50s" " b check 9 openflow rules for net_data: " + test_of_rules $net_data 9 + + [[ $force != y ]] && read -e -p " Test server1:xe0,server1:xe1,server3:xe0 connectivity. 
Press enter to continue " kk + + printf "%-50s" " c re-attach server3:xe1 to net-data: " + result=`openvim port-edit $server3_xe1 "network_id: $net_data" -f` + process_cmd ok $result + + printf "%-50s" " d check 16 openflow rules for net_data: " + test_of_rules $net_data 16 + + printf "%-50s" " e check 0 openflow rules for net_ptp: " + test_of_rules $net_ptp 0 + + [[ $force != y ]] && read -e -p " Test server1:xe0,server1:xe1,server3:xe0,server3:xe1 connectivity. Press enter to continue " kk + + printf "%-50s" " f detach server1:xe1 from net-data: " + result=`openvim port-edit $server1_xe1 "network_id: null" -f ` + process_cmd ok $result + + printf "%-50s" " g detach server3:xe1 to net-data: " + result=`openvim port-edit $server3_xe1 "network_id: null" -f` + process_cmd ok $result + + printf "%-50s" " h check 2 openflow rules for net_data: " + test_of_rules $net_data 2 + +else + echo "19 skipping unrealizable test because --same_vlan option " +fi + +printf "%-50s" "20 check 2 openflow rules for net_data: " +test_of_rules $net_data 2 + +printf "%-50s" " a attach server2:xe0 to net_bind: " +result=`openvim port-edit $server2_xe0 "network_id: $net_bind" -f` +process_cmd ok $result + +printf "%-50s" " b check 6 openflow rules for net_data: " + #type src_net src_port => dst_port dst_net + #unicast net_data server1:xe0 => server3:xe0 net_data + #unicast net_data server3:xe0 => server1:xe0 net_data + #unicast net_data server1:xe0 => server2:xe0 net_bind + #unicast net_data server3:xe0 => server2:xe0 net_bind + #broadcast net_data server1:xe0 => server3:xe0,server2:xe0 net_data,net_bind + #broadcast net_data server3:xe0 => server1:xe0,server2:xe0 net_data,net_bind +test_of_rules $net_data 6 + + +printf "%-50s" " c check 3 openflow rules for net_bind: " + #type src_net src_port => dst_port dst_net + #unicast net_bind server2:xe0 => server1:xe0 net_data + #unicast net_bind server2:xe0 => server3:xe0 net_data + #broadcast net_bind server2:xe0 => server1:xe0,server3:xe0 
net_data,net_data +test_of_rules $net_bind 3 + +printf "%-50s" " d attach server1:xe1 to net_bind: " +result=`openvim port-edit $server1_xe1 "network_id: $net_bind" -f` +process_cmd ok $result + +printf "%-50s" " e check 8 openflow rules for net_data: " + #type src_net src_port => dst_port dst_net + #unicast net_data server1:xe0 => server3:xe0 net_data + #unicast net_data server3:xe0 => server1:xe0 net_data + #unicast net_data server1:xe0 => server2:xe0 net_bind + #unicast net_data server1:xe0 => server1:xe1 net_bind + #unicast net_data server3:xe0 => server2:xe0 net_bind + #unicast net_data server3:xe0 => server1:xe1 net_bind + #broadcast net_data server1:xe0 => server3:xe0,server2:xe0,server1:xe1 net_data,net_bind,net_bind + #broadcast net_data server3:xe0 => server1:xe0,server2:xe0,server1:xe1 net_data,net_bind,net_bind +test_of_rules $net_data 8 + + +printf "%-50s" " f check 8 openflow rules for net_bind: " +test_of_rules $net_bind 8 + +printf "%-50s" " d put net_data down: " +result=`openvim net-down $net_data -f` +process_cmd ok $result + +printf "%-50s" " e check 0 openflow rules for net_data: " +test_of_rules $net_data 0 + +printf "%-50s" " e check 2 openflow rules for net_bind: " +test_of_rules $net_bind 2 + + + +echo +echo DONE + +$_exit 0 + diff --git a/utils/RADclass.py b/utils/RADclass.py new file mode 100644 index 0000000..bfbdbaf --- /dev/null +++ b/utils/RADclass.py @@ -0,0 +1,1619 @@ +# -*- coding: utf-8 -*- +import code + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +Implement the logic for obtaining compute nodes information +Resource Availability Descriptor +''' +__author__="Pablo Montes" + +#TODO: remove warnings, remove unused things + +from definitionsClass import definitionsClass +from auxiliary_functions import get_ssh_connection +import libvirt +from xml.etree import ElementTree +import paramiko +import re +import yaml + + +def getCredentials(creds, data): + """Used as a backup for libvirt.openAuth in order to provide password that came with data, + not used by the moment + """ + print "RADclass:getCredentials", creds, data + for cred in creds: + print cred[1] + ": ", + if cred[0] == libvirt.VIR_CRED_AUTHNAME: + cred[4] = data + elif cred[0] == libvirt.VIR_CRED_PASSPHRASE: + cred[4] = data + else: + return -1 + return 0 + +class RADclass(): + def __init__(self): + self.name = None + self.machine = None + self.user = None + self.password = None + self.nodes = dict() #Dictionary of nodes. Keys are the node id, values are Node() elements + self.nr_processors = None #Integer. 
Number of processors in the system + self.processor_family = None #If all nodes have the same value equal them, otherwise keep as None + self.processor_manufacturer = None #If all nodes have the same value equal them, otherwise keep as None + self.processor_version = None #If all nodes have the same value equal them, otherwise keep as None + self.processor_features = None #If all nodes have the same value equal them, otherwise keep as None + self.memory_type = None #If all nodes have the same value equal them, otherwise keep as None + self.memory_freq = None #If all nodes have the same value equal them, otherwise keep as None + self.memory_nr_channels = None #If all nodes have the same value equal them, otherwise keep as None + self.memory_size = None #Integer. Sum of the memory in all nodes + self.memory_hugepage_sz = None + self.hypervisor = Hypervisor() #Hypervisor information + self.os = OpSys() #Operating system information + self.ports_list = list() #List containing all network ports in the node. This is used to avoid having defined multiple times the same port in the system + + + def obtain_RAD(self, user, password, machine): + """This function obtains the RAD information from the remote server. + It uses both a ssh and a libvirt connection. + It is desirable in future versions get rid of the ssh connection, but currently + libvirt does not provide all the needed information. 
+ Returns (True, Warning) in case of success and (False, ) in case of error""" + warning_text="" + try: + #Get virsh and ssh connection + (return_status, code) = get_ssh_connection(machine, user, password) + if not return_status: + print 'RADclass.obtain_RAD() error:', code + return (return_status, code) + ssh_conn = code + + self.connection_IP = machine + #print "libvirt open pre" + virsh_conn=libvirt.open("qemu+ssh://"+user+'@'+machine+"/system") + #virsh_conn=libvirt.openAuth("qemu+ssh://"+user+'@'+machine+"/system", + # [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE, libvirt.VIR_CRED_USERNAME], getCredentials, password], + # 0) + #print "libvirt open after" + + # #Set connection infomation + # (return_status, code) = self.set_connection_info(machine, user, password) + # if not return_status: + # return (return_status, 'Error in '+machine+': '+code) + + #Set server name + machine_name = get_hostname(virsh_conn) + (return_status, code) = self.set_name(machine_name) + if not return_status: + return (return_status, 'Error at self.set_name in '+machine+': '+code) + warning_text += code + + #Get the server processors information + processors = dict() + (return_status, code) = get_processor_information(ssh_conn, virsh_conn, processors) + if not return_status: + return (return_status, 'Error at get_processor_information in '+machine+': '+code) + warning_text += code + + #Get the server memory information + memory_nodes = dict() + (return_status, code) = get_memory_information(ssh_conn, virsh_conn, memory_nodes) + if not return_status: + return (return_status, 'Error at get_memory_information in '+machine+': '+code) + warning_text += code + + #Get nics information + nic_topology = dict() + # (return_status, code) = get_nic_information_old(ssh_conn, nic_topology) + (return_status, code) = get_nic_information(ssh_conn, virsh_conn, nic_topology) + if not return_status: + return (return_status, 'Error at get_nic_information in '+machine+': '+code) + warning_text 
+= code + + #Pack each processor, memory node and nics in a node element + #and add the node to the RAD element + for socket_id, processor in processors.iteritems(): + node = Node() + if not socket_id in nic_topology: + nic_topology[socket_id] = list() + + (return_status, code) = node.set(processor, memory_nodes[socket_id], nic_topology[socket_id]) + # else: + # (return_status, code) = node.set(processor, memory_nodes[socket_id]) + if not return_status: + return (return_status, 'Error at node.set in '+machine+': '+code) + warning_text += code + (return_status, code) = self.insert_node(node) + if not return_status: + return (return_status, 'Error at self.insert_node in '+machine+': '+code) + if code not in warning_text: + warning_text += code + + #Fill os data + os = OpSys() + (return_status, code) = get_os_information(ssh_conn, os) + if not return_status: + return (return_status, 'Error at get_os_information in '+machine+': '+code) + warning_text += code + (return_status, code) = self.set_os(os) + if not return_status: + return (return_status, 'Error at self.set_os in '+machine+': '+code) + warning_text += code + + #Fill hypervisor data + hypervisor = Hypervisor() + (return_status, code) = get_hypervisor_information(virsh_conn, hypervisor) + if not return_status: + return (return_status, 'Error at get_hypervisor_information in '+machine+': '+code) + warning_text += code + (return_status, code) = self.set_hypervisor(hypervisor) + if not return_status: + return (return_status, 'Error at self.set_hypervisor in '+machine+': '+code) + warning_text += code + ssh_conn.close() + + return (True, warning_text) + except libvirt.libvirtError, e: + text = e.get_error_message() + print 'RADclass.obtain_RAD() exception:', text + return (False, text) + except paramiko.ssh_exception.SSHException, e: + text = e.args[0] + print "obtain_RAD ssh Exception:", text + return False, text + + def set_name(self,name): + """Sets the machine name. 
+ Returns (True,Warning) in case of success and ('False',) in case of error""" + if not isinstance(name,str): + return (False, 'The variable \'name\' must be text') + self.name = name + return (True, "") + + def set_connection_info(self, machine, user, password): + """Sets the connection information. + Returns (True,Warning) in case of success and ('False',) in case of error""" + if not isinstance(machine,str): + return (False, 'The variable \'machine\' must be text') + if not isinstance(user,str): + return (False, 'The variable \'user\' must be text') +# if not isinstance(password,str): +# return (False, 'The variable \'password\' must be text') + (self.machine, self.user, self.password) = (machine, user, password) + return (True, "") + + def insert_node(self,node): + """Inserts a new node and updates class variables. + Returns (True,Warning) in case of success and ('False',) in case of error""" + if not isinstance(node,Node): + return (False, 'The variable \'node\' must be a Node element') + + if node.id_ in self.nodes: + return (False, 'The node is already present in the nodes list.') + + #Check if network ports have not been inserted previously as part of another node + for port_key in node.ports_list: + if port_key in self.ports_list: + return (False, 'Network port '+port_key+' defined multiple times in the system') + self.ports_list.append(port_key) + + #Insert the new node + self.nodes[node.id_] = node + + #update variables + self.update_variables() + + return (True, "") + + def update_variables(self): + """Updates class variables. + Returns (True,Warning) in case of success and ('False',) in case of error""" + warning_text="" + #The number of processors and nodes is the same + self.nr_processors = len(self.nodes) + + #If all processors are the same get the values. 
Otherwise keep them as none + prev_processor_family = prev_processor_manufacturer = prev_processor_version = prev_processor_features = None + different_processor_family = different_processor_manufacturer = different_processor_version = different_processor_features = False + for node in self.nodes.itervalues(): + (self.processor_family, self.processor_manufacturer, self.processor_version, self.processor_features) = node.get_processor_info() + if prev_processor_family != None and self.processor_family != prev_processor_family: + different_processor_family = True + if prev_processor_manufacturer != None and self.processor_manufacturer != prev_processor_manufacturer: + different_processor_manufacturer = True + if prev_processor_version != None and self.processor_version != prev_processor_version: + different_processor_version = True + if prev_processor_features != None and self.processor_features != prev_processor_features: + different_processor_features = True + (prev_processor_family, prev_processor_manufacturer, prev_processor_version, prev_processor_features) = (self.processor_family, self.processor_manufacturer, self.processor_version, self.processor_features) + + if different_processor_family: + self.processor_family = None + if different_processor_features: + self.processor_features = None + if different_processor_manufacturer: + self.processor_manufacturer = None + if different_processor_version: + self.processor_version = None + + #If all memory nodes are the same get the values. 
Otherwise keep them as none + #Sum the total memory + self.memory_size = 0 + different_memory_freq = different_memory_nr_channels = different_memory_type = different_memory_hugepage_sz = False + prev_memory_freq = prev_memory_nr_channels = prev_memory_type = prev_memory_hugepage_sz = None + for node in self.nodes.itervalues(): + (self.memory_freq, self.memory_nr_channels, self.memory_type, memory_size, self.memory_hugepage_sz) = node.get_memory_info() + self.memory_size += memory_size + if prev_memory_freq != None and self.memory_freq != prev_memory_freq: + different_memory_freq = True + if prev_memory_nr_channels != None and self.memory_nr_channels != prev_memory_nr_channels: + different_memory_nr_channels = True + if prev_memory_type != None and self.memory_type != prev_memory_type: + different_memory_type = True + if prev_memory_hugepage_sz != None and self.memory_hugepage_sz != prev_memory_hugepage_sz: + different_memory_hugepage_sz = True + (prev_memory_freq, prev_memory_nr_channels, prev_memory_type, prev_memory_hugepage_sz) = (self.memory_freq, self.memory_nr_channels, self.memory_type, self.memory_hugepage_sz) + + if different_memory_freq: + self.memory_freq = None + if different_memory_nr_channels: + self.memory_nr_channels = None + if different_memory_type: + self.memory_type = None + if different_memory_hugepage_sz: + warning_text += 'Detected different hugepages size in different sockets\n' + + return (True, warning_text) + + def set_hypervisor(self,hypervisor): + """Sets the hypervisor. + Returns (True,Warning) in case of success and ('False',) in case of error""" + if not isinstance(hypervisor,Hypervisor): + return (False, 'The variable \'hypervisor\' must be of class Hypervisor') + + self.hypervisor.assign(hypervisor) + return (True, "") + + def set_os(self,os): + """Sets the operating system. 
+ Returns (True,Warning) in case of success and ('False',) in case of error""" + if not isinstance(os,OpSys): + return (False, 'The variable \'os\' must be of class OpSys') + + self.os.assign(os) + return (True, "") + + def to_text(self): + text= 'name: '+str(self.name)+'\n' + text+= 'processor:\n' + text+= ' nr_processors: '+str(self.nr_processors)+'\n' + text+= ' family: '+str(self.processor_family)+'\n' + text+= ' manufacturer: '+str(self.processor_manufacturer)+'\n' + text+= ' version: '+str(self.processor_version)+'\n' + text+= ' features: '+str(self.processor_features)+'\n' + text+= 'memory:\n' + text+= ' type: '+str(self.memory_type)+'\n' + text+= ' freq: '+str(self.memory_freq)+'\n' + text+= ' nr_channels: '+str(self.memory_nr_channels)+'\n' + text+= ' size: '+str(self.memory_size)+'\n' + text+= 'hypervisor:\n' + text+= self.hypervisor.to_text() + text+= 'os:\n' + text+= self.os.to_text() + text+= 'resource topology:\n' + text+= ' nr_nodes: '+ str(len(self.nodes))+'\n' + text+= ' nodes:\n' + for node_k, node_v in self.nodes.iteritems(): + text+= ' node'+str(node_k)+':\n' + text+= node_v.to_text() + return text + + def to_yaml(self): + return yaml.load(self.to_text()) + +class Node(): + def __init__(self): + self.id_ = None #Integer. Node id. Unique in the system + self.processor = ProcessorNode() #Information about the processor in the node + self.memory = MemoryNode() #Information about the memory in the node + self.nic_list = list() #List of Nic() containing information about the nics associated to the node + self.ports_list = list() #List containing all network ports in the node. This is used to avoid having defined multiple times the same port in the system + + def get_processor_info(self): + """Gets the processor information. Returns (processor_family, processor_manufacturer, processor_version, processor_features)""" + return self.processor.get_info() + + def get_memory_info(self): + """Gets the memory information. 
Returns (memory_freq, memory_nr_channels, memory_type, memory_size)""" + return self.memory.get_info() + +# def set(self, *args): +# """Sets the node information. Returns (True,Warning) in case of success and ('False',) in case of error""" +# if len(args)==2: +# processor = args[0] +# memory = args[1] +# nics = False +# elif len(args)==3: +# processor = args[0] +# memory = args[1] +# nic_list = args[2] +# nics = True +# else: +# return (False, 'Wrong number of elements calling Node().set()') + + def set(self, processor, memory, nic_list): + (status, return_code) = self.processor.assign(processor) + if not status: + return (status, return_code) + + self.id_ = processor.id_ + + (status, return_code) = self.memory.assign(memory) + if not status: + return (status, return_code) + +# if nics: + for nic in nic_list: + if not isinstance(nic,Nic): + return (False, 'The nics must be of type Nic') + self.nic_list.append(nic) + for port_key in nic.ports.iterkeys(): + if port_key in self.ports_list: + return (False, 'Network port '+port_key+'defined multiple times in the same node') + self.ports_list.append(port_key) + + return (True,"") + + def assign(self, node): + """Sets the node information. 
+ Returns (True,Warning) in case of success and ('False',) in case of error""" + warning_text="" + processor = node.processor + memory = node.memory + nic_list = node.nic_list + (status, return_code) = self.processor.assign(processor) + if not status: + return (status, return_code) + + self.id_ = processor.id_ + + (status, return_code) = self.memory.assign(memory) + if not status: + return (status, return_code) + warning_text += code + + for nic in nic_list: + if not isinstance(nic,Nic): + return (False, 'The nics must be of type Nic') + self.nic_list.append(nic) + for port_key in nic.ports.iterkeys(): + if port_key in self.ports_list: + return (False, 'Network port '+port_key+'defined multiple times in the same node') + self.ports_list.append(port_key) + + return (True,warning_text) + + def to_text(self): + text= ' id: '+str(self.id_)+'\n' + text+= ' cpu:\n' + text += self.processor.to_text() + text+= ' memory:\n' + text += self.memory.to_text() + if len(self.nic_list) > 0: + text+= ' nics:\n' + nic_index = 0 + for nic in self.nic_list: + text+= ' nic '+str(nic_index)+':\n' + text += nic.to_text() + nic_index += 1 + return text + +class ProcessorNode(): + #Definition of the possible values of processor variables + possible_features = definitionsClass.processor_possible_features + possible_manufacturers = definitionsClass.processor_possible_manufacturers + possible_families = definitionsClass.processor_possible_families + possible_versions = definitionsClass.processor_possible_versions + + def __init__(self): + self.id_ = None #Integer. Numeric identifier of the socket + self.family = None #Text. Family name of the processor + self.manufacturer = None #Text. Manufacturer of the processor + self.version = None #Text. Model version of the processor + self.features = list() #list. List of features offered by the processor + self.cores = list() #list. List of cores in the processor. 
In case of hyperthreading the coupled cores are expressed as [a,b] + self.eligible_cores = list()#list. List of cores that can be used + #self.decicated_cores + #self.shared_cores -> this should also contain information to know if cores are being used + + def assign(self, processor): + """Sets the processor information. + Returns (True,Warning) in case of success and ('False',) in case of error""" + if not isinstance(processor,ProcessorNode): + return (False, 'The variable \'processor\' must be of class ProcessorNode') + + self.id_ = processor.id_ + self.family = processor.family + self.manufacturer = processor.manufacturer + self.version = processor.version + self.features = processor.features + self.cores = processor.cores + self.eligible_cores = processor.eligible_cores + + return (True, "") + + def set(self, id_, family, manufacturer, version, features, cores): + """Sets the processor information. + Returns (True,Warning) in case of success and ('False',) in case of error""" + warning_text = "" + + if not isinstance(id_,int): + return (False, 'The processor id_ must be of type int') + if not isinstance(family,str): + return (False, 'The processor family must be of type str') + if not isinstance(manufacturer,str): + return (False, 'The processor manufacturer must be of type str') + if not isinstance(version,str): + return (False, 'The processor version must be of type str') + if not isinstance(features,list): + return (False, 'The processor features must be of type list') + if not isinstance(cores,list): + return (False, 'The processor cores must be of type list') + + (self.id_, self.family, self.manufacturer, self.version) = (id_, family, manufacturer, version) + + if not manufacturer in self.possible_manufacturers: + warning_text += "processor manufacturer '%s' not among: %s\n" %(manufacturer, str(self.possible_manufacturers)) + if not family in self.possible_families: + warning_text += "family '%s' not among: %s\n" % (family, str(self.possible_families)) +# 
if not version in self.possible_versions: +# warning_text += 'The version %s is not one of these: %s\n' % (version, str(self.possible_versions)) + + for feature in features: + if not feature in self.possible_features: + warning_text += "processor feature '%s' not among: %s\n" % (feature, str(self.possible_versions)) + self.features.append(feature) + + for iterator in sorted(cores): + if not isinstance(iterator,list) or not all(isinstance(x, int) for x in iterator): + return (False, 'The cores list must be in the form of [[a,b],[c,d],...] where a,b,c,d are of type int') + self.cores.append(iterator) + + self.set_eligible_cores() + + return (True,warning_text) + + def set_eligible_cores(self): + """Set the default eligible cores, this is all cores non used by the host operating system""" + not_first = False + for iterator in self.cores: + if not_first: + self.eligible_cores.append(iterator) + else: + not_first = True + return + + def get_info(self): + """Returns processor parameters (self.family, self.manufacturer, self.version, self.features)""" + return (self.family, self.manufacturer, self.version, self.features) + + def to_text(self): + text= ' id: '+str(self.id_)+'\n' + text+= ' family: '+self.family+'\n' + text+= ' manufacturer: '+self.manufacturer+'\n' + text+= ' version: '+self.version+'\n' + text+= ' features: '+str(self.features)+'\n' + text+= ' cores: '+str(self.cores)+'\n' + text+= ' eligible_cores: '+str(self.eligible_cores)+'\n' + return text + +class MemoryNode(): + def __init__(self): + self.modules = list() #List of MemoryModule(). List of all modules installed in the node + self.nr_channels = None #Integer. Number of modules installed in the node + self.node_size = None #Integer. Total size in KiB of memory installed in the node + self.eligible_memory = None #Integer. Size in KiB of eligible memory in the node + self.hugepage_sz = None #Integer. Size in KiB of hugepages + self.hugepage_nr = None #Integer. 
Number of hugepages allocated in the module + self.eligible_hugepage_nr = None #Integer. Number of eligible hugepages in the node + self.type_ = None #Text. Type of memory modules. If modules have a different value keep it as None + self.freq = None #Integer. Frequency of the modules in MHz. If modules have a different value keep it as None + self.module_size = None #Integer. Size of the modules in KiB. If modules have a different value keep it as None + self.form_factor = None #Text. Form factor of the modules. If modules have a different value keep it as None + + def assign(self, memory_node): + return self.set(memory_node.modules, memory_node.hugepage_sz, memory_node.hugepage_nr) + + def set(self, modules, hugepage_sz, hugepage_nr): + """Set the memory node information. hugepage_sz must be expressed in KiB. + Returns (True,Warning) in case of success and ('False',) in case of error""" + if not isinstance(modules, list): + return (False, 'The modules must be a list of elements of class MemoryModule') + if not isinstance(hugepage_sz,int): + return (False, 'The hugepage_sz variable must be an int expressing the size in KiB') + if not isinstance(hugepage_nr,int): + return (False, 'The hugepage_nr variable must be of type int') + + (self.hugepage_sz, self.hugepage_nr) = (hugepage_sz, hugepage_nr) + self.node_size = self.nr_channels = 0 + + different_type = different_freq = different_module_size = different_form_factor = False + prev_type = prev_freq = prev_module_size = prev_form_factor = None + for iterator in modules: + if not isinstance(iterator,MemoryModule): + return (False, 'The modules must be a list of elements of class MemoryModule') + self.modules.append(iterator) + (self.type_, self.freq, self.module_size, self.form_factor) = (iterator.type_, iterator.freq, iterator.size, iterator.form_factor) + self.node_size += self.module_size + self.nr_channels += 1 + if prev_type != None and prev_type != self.type_: + different_type = True + if prev_freq != None and 
prev_freq != self.freq: + different_freq = True + if prev_module_size != None and prev_module_size != self.module_size: + different_module_size = True + if prev_form_factor != None and prev_form_factor != self.form_factor: + different_form_factor = True + (prev_type, prev_freq, prev_module_size, prev_form_factor) = (self.type_, self.freq, self.module_size, self.form_factor) + + if different_type: + self.type_ = None + if different_freq: + self.freq = None + if different_module_size: + self.module_size = None + if different_form_factor: + self.form_factor = None + + (return_value, error_code) = self.set_eligible_memory() + if not return_value: + return (return_value, error_code) + + return (True, "") + + def set_eligible_memory(self): + """Sets the default eligible_memory and eligible_hugepage_nr. This is all memory but 2GiB and all hugepages""" + self.eligible_memory = self.node_size - 2*1024*1024 + if self.eligible_memory < 0: + return (False, "There is less than 2GiB of memory in the module") + + self.eligible_hugepage_nr = self.hugepage_nr + return (True,"") + + def get_info(self): + """Return memory information (self.freq, self.nr_channels, self.type_, self.node_size)""" + return (self.freq, self.nr_channels, self.type_, self.node_size, self.hugepage_sz) + + def to_text(self): + text= ' node_size: '+str(self.node_size)+'\n' + text+= ' nr_channels: '+str(self.nr_channels)+'\n' + text+= ' eligible_memory: '+str(self.eligible_memory)+'\n' + text+= ' hugepage_sz: '+str(self.hugepage_sz)+'\n' + text+= ' hugepage_nr: '+str(self.hugepage_nr)+'\n' + text+= ' eligible_hugepage_nr: '+str(self.eligible_hugepage_nr)+'\n' + text+= ' type: '+self.type_+'\n' + text+= ' freq: '+str(self.freq)+'\n' + text+= ' module_size: '+str(self.module_size)+'\n' + text+= ' form_factor: '+self.form_factor+'\n' + text+= ' modules details:\n' + for module in self.modules: + text += module.to_text() + return text + +class MemoryModule(): + #Definition of the possible values of module variables 
+ possible_types = definitionsClass.memory_possible_types + possible_form_factors = definitionsClass.memory_possible_form_factors + + def __init__(self): + self.locator = None #Text. Name of the memory module + self.type_ = None #Text. Type of memory module + self.freq = None #Integer. Frequency of the module in MHz + self.size = None #Integer. Size of the module in KiB + self.form_factor = None #Text. Form factor of the module + + def set(self, locator, type_, freq, size, form_factor): + """Sets the memory module information. + Frequency must be expressed in MHz and size in KiB. + Returns (True,Warning) in case of success and ('False',) in case of error""" + warning_text="" + if not isinstance(locator, str): + return (False, "The type of the variable locator must be str") + if not isinstance(type_, str): + return (False, "The type of the variable type_ must be str") + if not isinstance(form_factor, str): + return (False, "The type of the variable form_factor must be str") + if not isinstance(freq, int): + return (False, "The type of the variable freq must be int") + if not isinstance(size, int): + return (False, "The type of the variable size must be int") + + if not form_factor in self.possible_form_factors: + warning_text += "memory form_factor '%s' not among: %s\n" %(form_factor, str(self.possible_form_factors)) + if not type_ in self.possible_types: + warning_text += "memory type '%s' not among: %s\n" %(type_, str(self.possible_types)) + + (self.locator, self.type_, self.freq, self.size, self.form_factor) = (locator, type_, freq, size, form_factor) + return (True, warning_text) + + def to_text(self): + text= ' '+self.locator+':\n' + text+= ' type: '+self.type_+'\n' + text+= ' freq: '+str(self.freq)+'\n' + text+= ' size: '+str(self.size)+'\n' + text+= ' form factor: '+self.form_factor+'\n' + return text + +class Nic(): + def __init__(self): + self.model = None #Text. Model of the nic + self.ports = dict() #Dictionary of ports. 
Keys are the port name, value are Port() elements + + def set_model(self, model): + """Sets the model of the nic. Returns (True,Warning) in case of success and ('False',) in case of error""" + if not isinstance(model,str): + return (False, 'The \'model\' must be of type str') + + self.model = model + return (True, "") + + def add_port(self, port): + """Adds a port to the nic. Returns (True,Warning) in case of success and ('False',) in case of error""" + if not isinstance(port,Port): + return (False, 'The \'port\' must be of class Port') + +# port_id = str(port.pci_device_id[0])+':'+str(port.pci_device_id[1])+':'+str(port.pci_device_id[2])+'.'+str(port.pci_device_id[3]) +#CHANGED +# port_id = port.name + port_id = port.pci_device_id +#CHANGED END + if port_id in self.ports: + return (False, 'The \'port\' '+port.pci_device_id+' is duplicated in the nic') +# return (False, 'The \'port\' is duplicated in the nic') + + self.ports[port_id] = port + return (True, "") + + def to_text(self): + text= ' model: '+ str(self.model)+'\n' + text+= ' ports: '+'\n' + for key,port in self.ports.iteritems(): + text+= ' "'+key+'":'+'\n' + text += port.to_text() + return text + +class Port(): + def __init__(self): + self.name = None #Text. Port name + self.virtual = None #Boolean. States if the port is a virtual function + self.enabled = None #Boolean. States if the port is enabled + self.eligible = None #Boolean. States if the port is eligible + self.speed = None #Integer. Indicates the speed in Mbps + self.available_bw = None #Integer. BW in Mbps that is available. + self.mac = None #list. Indicates the mac address of the port as a list in format ['XX','XX','XX','XX','XX','XX'] + self.pci_device_id_split = None #list. Indicates the pci address of the port as a list in format ['XXXX','XX','XX','X'] + self.pci_device_id = None + self.PF_pci_device_id = None + +# def set(self, name, virtual, enabled, speed, mac, pci_device_id, pci_device_id_split): +# """Sets the port information. 
The variable speed indicates the speed in Mbps. Returns (True,Warning) in case of success and ('False',) in case of error""" +# if not isinstance(name,str): +# return (False, 'The variable \'name\' must be of type str') +# if not isinstance(virtual,bool): +# return (False, 'The variable \'virtual\' must be of type bool') +# if not isinstance(enabled,bool): +# return (False, 'The variable \'enabled\' must be of type bool') +# if not isinstance(enabled,bool): +# return (speed, 'The variable \'speed\' must be of type int') +# if not isinstance(mac, list) and not isinstance(mac,NoneType): +# return (False, 'The variable \'enabled\' must be of type list indicating the mac address in format [\'XXXX\',\'XX\',\'XX\',\'X\'] or NoneType') +# if not isinstance(pci_device_id_split, list) or len(pci_device_id_split) != 4: +# return (False, 'The variable \'pci_device_id_split\' must be of type list, indicating the pci address in format [\'XX\',\'XX\',\'XX\',\'XX\',\'XX\',\'XX\']') +# +# expected_len = [4,2,2,1] +# index = 0 +# for iterator in pci_device_id_split: +# if not isinstance(iterator,str) or not iterator.isdigit() or len(iterator) != expected_len[index]: +# return (False, 'The variable \'pci_device_id_split\' must be of type list, indicating the pci address in format [\'XX\',\'XX\',\'XX\',\'XX\',\'XX\',\'XX\']') +# index += 1 +# +# if not isinstance(mac,NoneType): +# for iterator in mac: +# if not isinstance(iterator,str) or not iterator.isalnum() or len(iterator) != 2: +# return (False, 'The variable \'enabled\' must be of type list indicating the mac address in format [\'XXXX\',\'XX\',\'XX\',\'X\'] or NoneType') +# +# #By default only virtual ports are eligible +# # (self.name, self.virtual, self.enabled, self.eligible, self.available_bw, self.speed, self.mac, self.pci_device_id, self.pci_device_id_split) = (name, virtual, enabled, virtual, speed, speed, mac, pci_device_id, pci_device_id_split) +# (self.name, self.virtual, self.enabled, self.eligible, 
class Hypervisor():
    """Hypervisor information of a compute host (type, versions, domains)."""
    # Valid values come from the shared definitions table.
    possible_types = definitionsClass.hypervisor_possible_types
    possible_domain_types = definitionsClass.hypervisor_possible_domain_types

    def __init__(self):
        self.type_ = None         # str: hypervisor type
        self.version = None       # int: hypervisor version
        self.lib_version = None   # int: libvirt version used to compile the hypervisor
        self.domains = list()     # list of str: available domain types

    def set(self, hypervisor, version, lib_version, domains):
        """Validate and store hypervisor information.

        Returns (True, warning_text) on success or (False, error_text) on error."""
        warning_text = ""
        if not isinstance(hypervisor, str):
            return (False, 'The variable type_ must be of type str')
        if not isinstance(version, int):
            return (False, 'The variable version must be of type int')
        if not isinstance(lib_version, int):
            return (False, 'The library version must be of type int')
        if not isinstance(domains, list):
            return (False, 'Domains must be a list of the possible domains as str')

        # Fixed typo in the warning message: was "Hyperpivor".
        if hypervisor not in self.possible_types:
            warning_text += "Hypervisor '%s' not among: %s\n" % (hypervisor, str(self.possible_types))

        # Keep only recognized domain types; warn if none is usable.
        valid_domain_found = False
        for domain in domains:
            if not isinstance(domain, str):
                return (False, 'Domains must be a list of the possible domains as str')
            if domain in self.possible_domain_types:
                valid_domain_found = True
                self.domains.append(domain)

        if not valid_domain_found:
            warning_text += 'No valid domain found among: %s\n' % str(self.possible_domain_types)

        (self.version, self.lib_version, self.type_) = (version, lib_version, hypervisor)
        return (True, warning_text)

    def assign(self, hypervisor):
        """Copy the fields of another Hypervisor instance into this one."""
        (self.version, self.lib_version, self.type_) = \
            (hypervisor.version, hypervisor.lib_version, hypervisor.type_)
        for domain in hypervisor.domains:
            self.domains.append(domain)
        return

    def to_text(self):
        """Return a YAML-ish, human-readable dump of the hypervisor info."""
        text = ' type: ' + self.type_ + '\n'
        text += ' version: ' + str(self.version) + '\n'
        text += ' libvirt version: ' + str(self.lib_version) + '\n'
        text += ' domains: ' + str(self.domains) + '\n'
        return text

class OpSys():
    """Operating system information of a compute host."""
    # Valid values come from the shared definitions table.
    possible_id = definitionsClass.os_possible_id
    possible_types = definitionsClass.os_possible_types
    possible_architectures = definitionsClass.os_possible_architectures

    def __init__(self):
        self.id_ = None              # str: OS identifier (e.g. from lsb_release)
        self.type_ = None            # str: type of operating system
        self.bit_architecture = None # str: architecture (set() requires str despite the original "Integer" note)

    def set(self, id_, type_, bit_architecture):
        """Validate and store OS information.

        Returns (True, warning_text) on success or (False, error_text) on error."""
        warning_text = ""
        if not isinstance(type_, str):
            return (False, 'The variable type_ must be of type str')
        if not isinstance(id_, str):
            return (False, 'The variable id_ must be of type str')
        if not isinstance(bit_architecture, str):
            return (False, 'The variable bit_architecture must be of type str')

        # Out-of-catalog values are accepted but reported as warnings.
        if type_ not in self.possible_types:
            warning_text += "os type '%s' not among: %s\n" % (type_, str(self.possible_types))
        if id_ not in self.possible_id:
            warning_text += "os release '%s' not among: %s\n" % (id_, str(self.possible_id))
        if bit_architecture not in self.possible_architectures:
            warning_text += "os bit_architecture '%s' not among: %s\n" % (bit_architecture, str(self.possible_architectures))

        (self.id_, self.type_, self.bit_architecture) = (id_, type_, bit_architecture)
        return (True, warning_text)

    def assign(self, os):
        """Copy the fields of another OpSys instance into this one."""
        (self.id_, self.type_, self.bit_architecture) = (os.id_, os.type_, os.bit_architecture)
        return

    def to_text(self):
        """Return a YAML-ish, human-readable dump of the OS info."""
        text = ' id: ' + self.id_ + '\n'
        text += ' type: ' + self.type_ + '\n'
        text += ' bit_architecture: ' + self.bit_architecture + '\n'
        return text
def get_hostname(virsh_conn):
    """Return the host name reported by libvirt, without trailing newline."""
    return virsh_conn.getHostname().rstrip('\n')

def get_hugepage_size(ssh_conn):
    """Return the default hugepage size (in bytes) of the remote host.

    Runs 'hugeadm --page-sizes' over ssh. Returns 0 when no hugepages are
    configured. Raises SSHException if the remote command writes to stderr.
    """
    command = 'sudo hugeadm --page-sizes'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error) > 0:
        raise paramiko.ssh_exception.SSHException(command + ' : ' + error)
    mem = stdout.read()
    if mem == "":
        return 0
    # Robustness fix: hugeadm may report several page sizes, one per line
    # (e.g. "2097152\n1073741824\n"); int() on the whole output crashed.
    # The first line is the default page size.
    return int(mem.split('\n')[0])

def get_hugepage_nr(ssh_conn, hugepage_sz, node_id):
    """Return the number of hugepages of size hugepage_sz (bytes) configured
    on the given NUMA node, or 0 if it cannot be determined."""
    # '//' keeps the py2 integer-division semantics (and the correct path
    # name) when run under py3.
    command = 'cat /sys/devices/system/node/node' + str(node_id) + \
              '/hugepages/hugepages-' + str(hugepage_sz // 1024) + 'kB/nr_hugepages'
    (_, stdout, _) = ssh_conn.exec_command(command)
    # Best effort: a missing sysfs entry simply means no hugepages there.
    try:
        value = int(stdout.read())
    except:
        value = 0
    return value

def get_memory_information(ssh_conn, virsh_conn, memory_nodes):
    """Fill memory_nodes (dict: numa node id -> MemoryNode) from the libvirt
    SMBIOS sysinfo of the host plus hugepage data gathered over ssh.

    Returns (True, warning_text) on success or (False, error_text) on error.
    """
    warning_text = ""
    tree = ElementTree.fromstring(virsh_conn.getSysinfo(0))
    memory_dict = dict()
    node_id = 0  # TODO revise. Added for allowing VM as compute hosts
    for target in tree.findall("memory_device"):
        locator_f = size_f = freq_f = type_f = formfactor_f = False
        locator_f = True  # TODO revise. Added for allowing VM as compute hosts
        module_form_factor = ""
        for entry in target.findall("entry"):
            if entry.get("name") == 'size':
                size_f = True
                size_split = entry.text.split(' ')
                # Size converted to bytes with binary multipliers.
                if size_split[1] == 'MB':
                    module_size = int(size_split[0]) * 1024 * 1024
                elif size_split[1] == 'GB':
                    module_size = int(size_split[0]) * 1024 * 1024 * 1024
                elif size_split[1] == 'KB':
                    module_size = int(size_split[0]) * 1024
                else:
                    module_size = int(size_split[0])

            elif entry.get("name") == 'speed':
                freq_f = True
                freq_split = entry.text.split(' ')
                # NOTE(review): frequency uses binary multipliers (1024) while
                # MHz/GHz are decimal (1e6/1e9); kept as-is to preserve behavior.
                if freq_split[1] == 'MHz':
                    module_freq = int(freq_split[0]) * 1024 * 1024
                elif freq_split[1] == 'GHz':
                    module_freq = int(freq_split[0]) * 1024 * 1024 * 1024
                elif freq_split[1] == 'KHz':
                    module_freq = int(freq_split[0]) * 1024

            elif entry.get("name") == 'type':
                type_f = True
                module_type = entry.text

            elif entry.get("name") == 'form_factor':
                formfactor_f = True
                module_form_factor = entry.text

        # When all module fields have been found add a new module to the list.
        if locator_f and size_f and freq_f and type_f and formfactor_f:
            if node_id not in memory_dict:
                memory_dict[node_id] = []

            module = MemoryModule()
            # TODO revise. Changed for allowing VM as compute hosts: the
            # locator is synthesized from the node id instead of SMBIOS data.
            (return_status, code) = module.set('NODE %d' % node_id, module_type,
                                               module_freq, module_size, module_form_factor)
            if not return_status:
                return (return_status, code)
            memory_dict[node_id].append(module)
            if code not in warning_text:
                warning_text += code
        node_id += 1  # TODO revise. Added for allowing VM as compute hosts

    # Fill memory nodes. Hugepage size is constant for all nodes.
    hugepage_sz = get_hugepage_size(ssh_conn)
    for node_id, modules in memory_dict.iteritems():
        memory_node = MemoryNode()
        memory_node.set(modules, hugepage_sz, get_hugepage_nr(ssh_conn, hugepage_sz, node_id))
        memory_nodes[node_id] = memory_node

    return (True, warning_text)
def get_cpu_topology_ht(ssh_conn, topology):
    """Fill topology (dict: socket id -> list of [sibling cpu ids] per core)
    by parsing /proc/cpuinfo on the remote host; used when hyperthreading is
    active. Returns (True, "") or (False, error_text)."""
    command = 'cat /proc/cpuinfo'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error) > 0:
        raise paramiko.ssh_exception.SSHException(command + ' : ' + error)
    sockets = []
    cores = []
    core_map = {}
    core_details = []
    core_lines = {}
    # Records in /proc/cpuinfo are separated by blank lines.
    for line in stdout.readlines():
        if len(line.strip()) != 0:
            name, value = line.split(":", 1)
            core_lines[name.strip()] = value.strip()
        else:
            core_details.append(core_lines)
            core_lines = {}
    # Fix: flush the last record when the output does not end with a blank
    # line; previously the final processor was silently dropped.
    if core_lines:
        core_details.append(core_lines)

    for core in core_details:
        for field in ["processor", "core id", "physical id"]:
            if field not in core:
                return (False, 'Error getting ' + field + ' value from /proc/cpuinfo')
            core[field] = int(core[field])

        if core["core id"] not in cores:
            cores.append(core["core id"])
        if core["physical id"] not in sockets:
            sockets.append(core["physical id"])
        key = (core["physical id"], core["core id"])
        if key not in core_map:
            core_map[key] = []
        core_map[key].append(core["processor"])

    for s in sockets:
        hyperthreaded_cores = list()
        for c in cores:
            hyperthreaded_cores.append(core_map[(s, c)])
        topology[s] = hyperthreaded_cores

    return (True, "")

def get_processor_information(ssh_conn, vish_conn, processors):
    """Fill processors (dict: socket id -> ProcessorNode) with family,
    manufacturer, version, capabilities and topology of each CPU socket.

    Capabilities are derived from libvirt host features (lps, dioc, hwsv,
    ht, 64b), /proc/cpuinfo flags (tlbps) and dmesg IOMMU messages (iommu).
    Returns (True, warning_text) or (False, error_text)."""
    warning_text = ""
    # Processor features are the same for all processors
    # TODO (at least using virsh capabilities) nr_numa_nodes
    capabilities = list()
    tree = ElementTree.fromstring(vish_conn.getCapabilities())
    for target in tree.findall("host/cpu/feature"):
        if target.get("name") == 'pdpe1gb':
            capabilities.append('lps')
        elif target.get("name") == 'dca':
            capabilities.append('dioc')
        elif target.get("name") == 'vmx' or target.get("name") == 'svm':
            capabilities.append('hwsv')
        elif target.get("name") == 'ht':
            capabilities.append('ht')

    target = tree.find("host/cpu/arch")
    if target.text == 'x86_64' or target.text == 'amd64':
        capabilities.append('64b')

    command = 'cat /proc/cpuinfo | grep flags'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error) > 0:
        raise paramiko.ssh_exception.SSHException(command + ' : ' + error)
    line = stdout.readline()
    # ept (Intel) / npt (AMD): hardware nested page tables.
    if 'ept' in line or 'npt' in line:
        capabilities.append('tlbps')

    # Find out if IOMMU is enabled (Intel).
    command = 'dmesg |grep -e Intel-IOMMU'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error) > 0:
        raise paramiko.ssh_exception.SSHException(command + ' : ' + error)
    if 'enabled' in stdout.read():
        capabilities.append('iommu')

    # Equivalent for AMD.
    command = 'dmesg |grep -e AMD-Vi'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error) > 0:
        raise paramiko.ssh_exception.SSHException(command + ' : ' + error)
    if len(stdout.read()) > 0:
        capabilities.append('iommu')

    # -----------------------------------------------------------
    topology = dict()
    # With hyperthreading active the topology must come from /proc/cpuinfo;
    # otherwise virsh capabilities are enough.
    if 'ht' in capabilities:
        (return_status, code) = get_cpu_topology_ht(ssh_conn, topology)
        if not return_status:
            return (return_status, code)
        warning_text += code
    else:
        for target in tree.findall("host/topology/cells/cell"):
            socket_id = int(target.get("id"))
            topology[socket_id] = list()
            for cpu in target.findall("cpus/cpu"):
                topology[socket_id].append(int(cpu.get("id")))

    # -----------------------------------------------------------
    # Create a dictionary with the information of all processors.
    tree = ElementTree.fromstring(vish_conn.getSysinfo(0))
    not_populated = False
    # If the socket id cannot be determined assume incremental order from 0.
    socket_id = -1
    for target in tree.findall("processor"):
        count = 0
        socket_id += 1
        # Get processor id, family, manufacturer and version.
        for entry in target.findall("entry"):
            if entry.get("name") == "status":
                if entry.text[0:11] == "Unpopulated":
                    not_populated = True
            elif entry.get("name") == 'socket_destination':
                socket_text = entry.text
                if socket_text.startswith('CPU'):
                    socket_text = socket_text.strip('CPU')
                socket_text = socket_text.strip()  # removes trailing spaces
                if socket_text.isdigit() and int(socket_text) < 9 and int(socket_text) > 0:
                    socket_id = int(socket_text) - 1

            elif entry.get("name") == 'family':
                family = entry.text
                count += 1
            elif entry.get("name") == 'manufacturer':
                manufacturer = entry.text
                count += 1
            elif entry.get("name") == 'version':
                version = entry.text.strip()
                count += 1
        if count != 3:
            return (False, 'Error. Not all expected fields could be found in processor')

        # Skip once an unpopulated socket is seen: avoids inconsistencies on
        # machines reporting more sockets than actually populated.
        if not_populated:
            continue
        processor = ProcessorNode()
        (return_status, code) = processor.set(socket_id, family, manufacturer,
                                              version, capabilities, topology[socket_id])
        if not return_status:
            return (return_status, code)
        if code not in warning_text:
            warning_text += code

        # Add processor to the processors dictionary.
        processors[socket_id] = processor

    return (True, warning_text)
def get_nic_information(ssh_conn, virsh_conn, nic_topology):
    """Fill nic_topology (dict: numa node id -> [Nic]) from libvirt node-device
    information, using ethtool over ssh for speed/link of physical ports.

    Returns (True, warning_text) or (False, error_text).
    """
    warning_text = ""
    # Get list of net devices.
    net_devices = virsh_conn.listDevices('net', 0)
    print(virsh_conn.listDevices('net', 0))
    for device in net_devices:
        try:
            # Get the XML descriptor of the device.
            net_XML = ElementTree.fromstring(virsh_conn.nodeDeviceLookupByName(device).XMLDesc(0))
            # Obtain the parent; only pci-backed interfaces are of interest.
            parent = net_XML.find('parent')
            if parent == None:
                print('No parent was found in XML for device ' + device)
                # Error. continue?
                continue
            if parent.text == 'computer':
                continue
            if not parent.text.startswith('pci_'):
                print(device + ' parent is neither computer nor pci')
                # Error. continue?
                continue
            interface = net_XML.find('capability/interface').text
            mac = net_XML.find('capability/address').text

            # Get the pci XML of the parent.
            pci_XML = ElementTree.fromstring(virsh_conn.nodeDeviceLookupByName(parent.text).XMLDesc(0))
            # Build the pci address from the device name fields.
            name = pci_XML.find('name').text.split('_')
            pci = name[1] + ':' + name[2] + ':' + name[3] + '.' + name[4]

            capability = pci_XML.find('capability')
            if capability.get('type') != 'pci':
                print(device + 'Capability is not of type pci in ' + parent.text)
                # Error. continue?
                continue
            slot = capability.find('slot').text
            bus = capability.find('bus').text
            node_id = None
            numa_ = capability.find('numa')
            if numa_ != None:
                node_id = numa_.get('node')
                if node_id != None:
                    node_id = int(node_id)
            if slot == None or bus == None:
                print(device + 'Bus and slot not detected in ' + parent.text)
                # Error. continue?
                continue

            # Fix: initialize per device. Previously, for virtual ports
            # 'speed' kept the stale value from the previous physical port
            # (or raised NameError on the first device).
            speed = 0
            enabled = False

            # If slot != 0 it is a VF, otherwise a PF.
            if slot != '0':
                virtual = True
                capability_pf = capability.find('capability')
                if capability_pf.get('type') != 'phys_function':
                    print('physical_function not found in VF ' + parent.text)
                    # Error. continue?
                    continue
                PF_pci = capability_pf.find('address').attrib
                PF_pci_text = PF_pci['domain'].split('x')[1] + ':' + \
                              PF_pci['bus'].split('x')[1] + ':' + \
                              PF_pci['slot'].split('x')[1] + '.' + \
                              PF_pci['function'].split('x')[1]
            else:
                virtual = False

            # Obtain node for the port when libvirt did not report numa.
            if node_id == None:
                node_id = int(bus) >> 6

            # Only for physical interfaces: obtain speed and link state.
            if not virtual:
                command = 'sudo ethtool ' + interface + ' | grep -e Speed -e "Link detected"'
                (_, stdout, stderr) = ssh_conn.exec_command(command)
                error = stderr.read()
                if len(error) > 0:
                    print('Error running ' + command + '\n' + error)
                    # Error. continue?
                    continue
                for line in stdout.readlines():
                    line = line.strip().rstrip('\n').split(': ')
                    if line[0] == 'Speed':
                        if line[1].endswith('Mb/s'):
                            speed = int(line[1].split('M')[0]) * int(1e6)
                        elif line[1].endswith('Gb/s'):
                            speed = int(line[1].split('G')[0]) * int(1e9)
                        elif line[1].endswith('Kb/s'):
                            speed = int(line[1].split('K')[0]) * int(1e3)
                        else:
                            # The interface is listed but won't be used.
                            speed = 0
                    elif line[0] == 'Link detected':
                        if line[1] == 'yes':
                            enabled = True
                        else:
                            enabled = False
                    else:
                        print('Unnexpected output of command ' + command + ':')
                        print(line)
                        # Error. continue?
                        continue

            if not node_id in nic_topology:
                nic_topology[node_id] = list()
                # With this implementation we make the RAD with only one nic
                # per node and this nic has all ports. TODO: change this by
                # including parent information of PF.
                nic_topology[node_id].append(Nic())

            # Load the appropriate nic.
            nic = nic_topology[node_id][0]

            # Create a new port and fill it.
            port = Port()
            port.name = interface
            port.virtual = virtual
            port.speed = speed
            if virtual:
                port.available_bw = 0
                port.PF_pci_device_id = PF_pci_text
            else:
                port.available_bw = speed
            if speed == 0:
                port.enabled = False
            else:
                port.enabled = enabled

            port.eligible = virtual  # Only virtual ports are eligible.
            # NOTE(review): here mac is the plain address string from libvirt,
            # not the split list described in Port — confirm downstream users.
            port.mac = mac
            port.pci_device_id = pci
            port.pci_device_id_split = name[1:]

            # Save the port information.
            nic.add_port(port)
        except Exception as e:
            print('Error: ' + str(e))

    # Set enabled on virtual ports: a VF is enabled only if its PF is.
    for nic in nic_topology.itervalues():
        for port in nic[0].ports.itervalues():
            if port.virtual:
                enabled = nic[0].ports.get(port.PF_pci_device_id)
                if enabled == None:
                    return (False, 'The PF ' + port.PF_pci_device_id + ' (VF ' +
                            port.pci_device_id + ') is not present in ports dict')
                if nic[0].ports[port.PF_pci_device_id].enabled:
                    port.enabled = True
                else:
                    port.enabled = False

    return (True, warning_text)
def get_nic_information_old(ssh_conn, nic_topology):
    """Fill nic_topology (dict: numa node id -> [Nic]) by parsing the XML
    output of lstopo-no-graphics on the remote host, querying ethtool for
    each port's speed and link state.

    Returns (True, "") on success or (False, error_text) on error; raises
    SSHException when the remote lstopo command writes to stderr.
    """
    command = 'lstopo-no-graphics --of xml'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error) > 0:
        raise paramiko.ssh_exception.SSHException(command + ' : ' + error)
    tree = ElementTree.fromstring(stdout.read())

    for numa_obj in tree.findall("object/object"):
        # Only NUMA nodes are of interest at this level.
        if numa_obj.get("type") != "NUMANode":
            continue
        node_id = int(numa_obj.get("os_index"))
        nic_topology[node_id] = list()

        # Find nics (bridges) inside the numa node.
        for bridge in numa_obj.findall("object/object"):
            if bridge.get("type") != 'Bridge':
                continue
            nic_name = bridge.get("name")
            model = None
            nic = Nic()

            # Find ports (PCI devices) inside the nic.
            for pcidev in bridge.findall("object"):
                if pcidev.get("type") != 'PCIDev':
                    continue
                enabled = speed = mac = pci_busid = None
                port = Port()
                model = pcidev.get("name")
                virtual = 'Virtual' in model
                pci_busid = pcidev.get("pci_busid")
                for osdev in pcidev.findall("object"):
                    name = osdev.get("name")
                    for info in osdev.findall("info"):
                        if info.get("name") != 'Address':
                            continue
                        mac = info.get("value")
                        # Get the port speed and status via ethtool.
                        command = 'sudo ethtool ' + name
                        (_, stdout, stderr) = ssh_conn.exec_command(command)
                        error = stderr.read()
                        if len(error) > 0:
                            return (False, 'Error obtaining ' + name + ' information: ' + error)
                        ethtool = stdout.read()
                        if '10000baseT/Full' in ethtool:
                            speed = 10e9
                        elif '1000baseT/Full' in ethtool:
                            speed = 1e9
                        elif '100baseT/Full' in ethtool:
                            speed = 100e6
                        elif '10baseT/Full' in ethtool:
                            speed = 10e6
                        else:
                            return (False, 'Speed not detected in ' + name)
                        enabled = 'Link detected: yes' in ethtool

                if speed is not None and mac is not None and pci_busid is not None:
                    mac = mac.split(':')
                    pci_busid_split = re.split(':|\.', pci_busid)
                    # Fill the port information and register it in the nic.
                    port.set(name, virtual, enabled, speed, mac, pci_busid, pci_busid_split)
                    nic.add_port(port)

            if len(nic.ports) > 0:
                # Fall back to the bridge name when no model was detected.
                nic.set_model(model if model != None else nic_name)
                # Add it to the topology.
                nic_topology[node_id].append(nic)

    return (True, "")
def get_os_information(ssh_conn, os):
    """Fill the given OpSys object ('os' shadows the stdlib module name but is
    part of the public interface) with release id, OS type and architecture
    gathered over ssh.

    Tries /etc/redhat-release first, then 'lsb_release -d -s' (Ubuntu/Debian).
    Returns (True, warning_text) or (False, error_text); raises SSHException
    when the release cannot be determined or a uname command fails.
    """
    warning_text = ""

    command = 'cat /etc/redhat-release'
    (_, stdout, _) = ssh_conn.exec_command(command)
    id_text = stdout.read()
    if len(id_text) == 0:
        # Try with Ubuntu.
        command = 'lsb_release -d -s'
        (_, stdout, _) = ssh_conn.exec_command(command)
        id_text = stdout.read()
        if len(id_text) == 0:
            # Fixed typo in message: was "determinte".
            raise paramiko.ssh_exception.SSHException("Can not determine release neither with 'lsb_release' nor with 'cat /etc/redhat-release'")
    id_ = id_text.rstrip('\n')

    command = 'uname -o'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error) > 0:
        raise paramiko.ssh_exception.SSHException(command + ' : ' + error)
    type_ = stdout.read().rstrip('\n')

    command = 'uname -i'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error) > 0:
        raise paramiko.ssh_exception.SSHException(command + ' : ' + error)
    bit_architecture = stdout.read().rstrip('\n')

    (return_status, code) = os.set(id_, type_, bit_architecture)
    if not return_status:
        return (return_status, code)
    warning_text += code
    return (True, warning_text)
def get_hypervisor_information(virsh_conn, hypervisor):
    """Fill the given Hypervisor object from libvirt: type, version, libvirt
    version and the 64-bit full-virtualization (hvm) domain types available.

    Returns (True, warning_text) or (False, error_text)."""
    type_ = virsh_conn.getType().rstrip('\n')
    version = virsh_conn.getVersion()
    lib_version = virsh_conn.getLibVersion()

    domains = list()
    tree = ElementTree.fromstring(virsh_conn.getCapabilities())
    for target in tree.findall("guest"):
        os_type = target.find("os_type").text
        # We only allow full virtualization.
        if os_type != 'hvm':
            continue
        wordsize = int(target.find('arch/wordsize').text)
        if wordsize == 64:
            for domain in target.findall("arch/domain"):
                domains.append(domain.get("type"))

    (return_status, code) = hypervisor.set(type_, version, lib_version, domains)
    if not return_status:
        return (return_status, code)
    return (True, code)

class RADavailableResourcesClass(RADclass):
    """Available (not reserved) resources of a compute host, tracking per-VNFC
    reservations on top of the static RADclass description."""
    def __init__(self, resources):
        """Copy resources from the RADclass (server resources, not taking into
        account resources used by VMs)."""
        self.reserved = dict()           # VNFC name -> RADreservedResources
        self.cores_consumption = None    # cpu id -> measured usage percentage

        self.machine = resources.machine
        self.user = resources.user
        self.password = resources.password
        self.name = resources.name
        self.nr_processors = resources.nr_processors
        self.processor_family = resources.processor_family
        self.processor_manufacturer = resources.processor_manufacturer
        self.processor_version = resources.processor_version
        self.processor_features = resources.processor_features
        self.memory_type = resources.memory_type
        self.memory_freq = resources.memory_freq
        self.memory_nr_channels = resources.memory_nr_channels
        self.memory_size = resources.memory_size
        self.memory_hugepage_sz = resources.memory_hugepage_sz
        self.hypervisor = Hypervisor()
        self.hypervisor.assign(resources.hypervisor)
        self.os = OpSys()
        self.os.assign(resources.os)
        self.nodes = dict()
        for node_k, node_v in resources.nodes.iteritems():
            self.nodes[node_k] = Node()
            self.nodes[node_k].assign(node_v)
        return

    def _get_cores_consumption_warnings(self):
        """Return a list of warning strings for supposedly-idle cores that show
        cpu consumption (measured with mpstat over ssh).

        NOTE(review): on ssh/mpstat failure this returns a (False, error)
        tuple instead of a list — inconsistent, kept for compatibility."""
        warnings = list()
        # Get the cores consumption.
        (return_status, code) = get_ssh_connection(self.machine, self.user, self.password)
        if not return_status:
            return (return_status, code)
        ssh_conn = code
        command = 'mpstat -P ALL 1 1 | grep Average | egrep -v CPU\|all'
        (_, stdout, stderr) = ssh_conn.exec_command(command)
        error = stderr.read()
        if len(error) > 0:
            return (False, error)

        self.cores_consumption = dict()
        for line in stdout.readlines():
            cpu_usage_split = re.split('\t| *', line.rstrip('\n'))
            # NOTE(review): assumes column 10 is the idle value expressed as a
            # fraction — confirm against the deployed mpstat version.
            usage = 100 * (1 - float(cpu_usage_split[10]))
            if usage > 0:
                self.cores_consumption[int(cpu_usage_split[1])] = usage
        ssh_conn.close()
        # Check if any core marked as available in the nodes has cpu_usage > 0.
        for _, node_v in self.nodes.iteritems():
            cores = node_v.processor.eligible_cores
            for cpu in cores:
                if len(cpu) > 1:
                    for core in cpu:
                        if core in self.cores_consumption:
                            warnings.append('Warning: Core ' + str(core) + ' is supposed to be idle but it is consuming ' + str(self.cores_consumption[core]) + '%')
                else:
                    # Fixed: this branch formatted str(core), a stale leftover
                    # from the loop above, instead of the current cpu.
                    if cpu in self.cores_consumption:
                        warnings.append('Warning: Core ' + str(cpu) + ' is supposed to be idle but it is consuming ' + str(self.cores_consumption[cpu]) + '%')

        return warnings

    def reserved_to_text(self):
        """Return a human-readable dump of all per-VNFC reservations."""
        text = str()
        for VNFC_name, VNFC_reserved in self.reserved.iteritems():
            text += ' VNFC: ' + str(VNFC_name) + '\n'
            text += VNFC_reserved.to_text()
        return text

    def obtain_usage(self):
        """Return a dict with two keys: 'RAD' (per-node cores, memory and
        physical ports) and 'occupation' (per-node reserved cores, reserved
        hugepage memory and port bandwidth occupation)."""
        resp = dict()
        # Iterate through nodes to get cores, eligible cores, memory and
        # physical ports (saving port usage for the occupation section).
        nodes = dict()
        ports_usage = dict()
        hugepage_size = dict()
        for node_k, node_v in self.nodes.iteritems():
            node = dict()
            ports_usage[node_k] = dict()
            eligible_cores = list()
            # eligible_cores holds either cpu ids or lists of sibling ids.
            for pair in node_v.processor.eligible_cores:
                if isinstance(pair, list):
                    for element in pair:
                        eligible_cores.append(element)
                else:
                    eligible_cores.append(pair)
            node['cpus'] = {'cores': node_v.processor.cores, 'eligible_cores': eligible_cores}
            node['memory'] = {'size': str(node_v.memory.node_size / (1024 * 1024 * 1024)) + 'GB',
                              'eligible': str(node_v.memory.eligible_memory / (1024 * 1024 * 1024)) + 'GB'}
            hugepage_size[node_k] = node_v.memory.hugepage_sz

            ports = dict()
            for nic in node_v.nic_list:
                for port in nic.ports.itervalues():
                    if port.enabled and not port.virtual:
                        ports[port.name] = {'speed': str(port.speed / 1000000000) + 'G'}
                        ports_usage[node_k][port.name] = 100 - int(100 * float(port.available_bw) / float(port.speed))
            node['ports'] = ports
            nodes[node_k] = node
        resp['RAD'] = nodes

        # Iterate through the reserved section to get used cores and memory.
        cores = dict()
        memory = dict()
        for node_k in self.nodes.iterkeys():
            if not node_k in cores:
                cores[node_k] = list()
                memory[node_k] = 0
            for _, reserved in self.reserved.iteritems():
                if node_k in reserved.node_reserved_resources:
                    node_v = reserved.node_reserved_resources[node_k]
                    cores[node_k].extend(node_v.reserved_cores)
                    memory[node_k] += node_v.reserved_hugepage_nr * hugepage_size[node_k]

        occupation = dict()
        for node_k in self.nodes.iterkeys():
            ports = dict()
            for name, usage in ports_usage[node_k].iteritems():
                ports[name] = {'occupied': str(usage) + '%'}
            occupation[node_k] = {'cores': cores[node_k],
                                  'memory': str(memory[node_k] / (1024 * 1024 * 1024)) + 'GB',
                                  'ports': ports}
        resp['occupation'] = occupation

        return resp
else: + eligible_cores.append(pair) + node['cpus'] = {'cores':node_v.processor.cores,'eligible_cores':eligible_cores} + node['memory'] = {'size':str(node_v.memory.node_size/(1024*1024*1024))+'GB','eligible':str(node_v.memory.eligible_memory/(1024*1024*1024))+'GB'} + hugepage_size[node_k] = node_v.memory.hugepage_sz + + ports = dict() + for nic in node_v.nic_list: + for port in nic.ports.itervalues(): + if port.enabled and not port.virtual: + ports[port.name] = {'speed':str(port.speed/1000000000)+'G'} +# print '*************** ',port.name,'speed',port.speed + ports_usage[node_k][port.name] = 100 - int(100*float(port.available_bw)/float(port.speed)) + node['ports'] = ports + nodes[node_k] = node + resp['RAD'] = nodes + + #Iterate through reserved section to get used cores, used memory and port usage + cores = dict() + memory = dict() + #reserved_cores = list + for node_k in self.nodes.iterkeys(): + if not node_k in cores: + cores[node_k] = list() + memory[node_k] = 0 + for _, reserved in self.reserved.iteritems(): + if node_k in reserved.node_reserved_resources: + node_v = reserved.node_reserved_resources[node_k] + cores[node_k].extend(node_v.reserved_cores) + memory[node_k] += node_v.reserved_hugepage_nr * hugepage_size[node_k] + + occupation = dict() + for node_k in self.nodes.iterkeys(): + ports = dict() + for name, usage in ports_usage[node_k].iteritems(): + ports[name] = {'occupied':str(usage)+'%'} +# print '****************cores',cores +# print '****************memory',memory + occupation[node_k] = {'cores':cores[node_k],'memory':str(memory[node_k]/(1024*1024*1024))+'GB','ports':ports} + resp['occupation'] = occupation + + return resp + +class RADreservedResources(): + def __init__(self): + self.node_reserved_resources = dict() #dict. 
keys are the RAD nodes id, values are NodeReservedResources + self.mgmt_interface_pci = None #pci in the VNF for the management interface + self.image = None #Path in remote machine of the VNFC image + + def update(self,reserved): + self.image = reserved.image + self.mgmt_interface_pci = reserved.mgmt_interface_pci + for k,v in reserved.node_reserved_resources.iteritems(): + if k in self.node_reserved_resources.keys(): + return (False, 'Duplicated node entry '+str(k)+' in reserved resources') + self.node_reserved_resources[k]=v + + return (True, "") + + def to_text(self): + text = ' image: '+str(self.image)+'\n' + for node_id, node_reserved in self.node_reserved_resources.iteritems(): + text += ' Node ID: '+str(node_id)+'\n' + text += node_reserved.to_text() + return text + +class NodeReservedResources(): + def __init__(self): + # reserved_shared_cores = None #list. List of all cores that the VNFC needs in shared mode #TODO Not used + # reserved_memory = None #Integer. Amount of KiB needed by the VNFC #TODO. Not used since hugepages are used + self.reserved_cores = list() #list. List of all cores that the VNFC uses + self.reserved_hugepage_nr = 0 #Integer. Number of hugepages needed by the VNFC + self.reserved_ports = dict() #dict. 
The key is the physical port pci and the value the VNFC port description + self.vlan_tags = dict() + self.cpu_pinning = None + + def to_text(self): + text = ' cores: '+str(self.reserved_cores)+'\n' + text += ' cpu_pinning: '+str(self.cpu_pinning)+'\n' + text += ' hugepages_nr: '+str(self.reserved_hugepage_nr)+'\n' + for port_pci, port_description in self.reserved_ports.iteritems(): + text += ' port: '+str(port_pci)+'\n' + text += port_description.to_text() + return text + +# def update(self,reserved): +# self.reserved_cores = list(reserved.reserved_cores) +# self.reserved_hugepage_nr = reserved.reserved_hugepage_nr +# self.reserved_ports = dict(reserved.reserved_ports) +# self.cpu_pinning = list(reserved.cpu_pinning) + + + diff --git a/utils/auxiliary_functions.py b/utils/auxiliary_functions.py new file mode 100644 index 0000000..44e7fcb --- /dev/null +++ b/utils/auxiliary_functions.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +Common usuful functions +''' + +__author__="Alfonso Tierno, Pablo Montes" +__date__ ="$10-jul-2014 12:07:15$" + + +import yaml +import paramiko +from definitionsClass import definitionsClass +from definitionsClass import Units +import random +from jsonschema import validate as js_v, exceptions as js_e + +def check_and_convert_units(value, value_type): + """TODO: Update description + This method receives a text with 2 fields using a blank as separator and a list of valid units. The first field must represent a number + and the second one units. + In case the second field is not one of valid_units (False, ) is returned. + In case the second field is a valid unit the first number is converted in the following way: + Gbps, Mbps, kbps -> Mbps + GB,MB,KB,B,GiB,MiB,KiB -> B + GHz,MHz,KHz,Hz -> Hz + If conversion is done successfully (True, ) is returned""" + try: + if value_type == Units.no_units: + if not isinstance(value,int) and not isinstance(value,float): + return (False, 'When no units are used only an integer or float must be used') + elif value_type == Units.name: + if not isinstance(value,str): + return (False, 'For names str must be used') + elif value_type == Units.boolean: + if not isinstance(value,bool): + return (False, 'A boolean or Yes/No mut be used') + else: + splitted = value.split(' ') + if len(splitted) != 2: + return (False, 'Expected format: ') + (value, units) = splitted + if ',' in value or '.' 
in value: + return (False, 'Use integers to represent numeric values') + + value = int(value) + +# if not isinstance(value_type, Units): +# return (False, 'Not valid value_type') + + valid_units = definitionsClass.units[value_type] + + #Convert everything to upper in order to make comparations easier + units = units.upper() + for i in range(0, len(valid_units)): + valid_units[i] = valid_units[i].upper() + + #Check the used units are valid ones + if units not in valid_units: + return (False, 'Valid units are: '+', '.join(valid_units)) + + if units.startswith('GI'): + value = value *1024*1024*1024 + elif units.startswith('MI'): + value = value *1024*1024 + elif units.startswith('KI'): + value = value *1024 + elif units.startswith('G'): + value = value *1000000000 + elif units.startswith('M'): + value = value *1000000 + elif units.startswith('K'): + value = value *1000 + except Exception,e: + return (False, 'Unexpected error in auxiliary_functions.py - check_and_convert_units:\n'+str(e)) + + return (True, value) + +def get_ssh_connection(machine, user=None, password=None): + """Stablishes an ssh connection to the remote server. Returns (True, paramiko_ssh) in case of success or (False, ) in case of error""" + try: + s = paramiko.SSHClient() + s.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + s.load_system_host_keys() + s.connect(machine, 22, user, password, timeout=10) + except Exception,e: + return (False, 'It was not possible to connect to '+machine+str(e)) + + return (True, s) + +def run_in_remote_server(s,command): + """Runs in the remote server the specified command. 
Returns (True, stdout) in case of success or (False, ) in case of error""" + try: + (_, stdout, stderr) = s.exec_command(command) + error_msg = stderr.read() + if len(error_msg) > 0: + return (False, error_msg) + except Exception,e: + return (False, str(e)) + + return (True, stdout) + +def read_file(file_): + """Reads a file specified by 'file' and returns (True,) in case of success or (False, ) in case of failure""" + try: + f = open(file_, 'r') + read_data = f.read() + f.close() + except Exception,e: + return (False, str(e)) + + return (True, read_data) + +def check_contains(element, keywords): + """Auxiliary function used to check if a yaml structure contains or not + an specific field. Returns a bool""" + for key in keywords: + if not key in element: + return False + return True + +def check_contains_(element, keywords): + """Auxiliary function used to check if a yaml structure contains or not + an specific field. Returns a bool,missing_variables""" + for key in keywords: + if not key in element: + return False, key + return True, None + +def write_file(file_, content): + """Generates a file specified by 'file' and fills it using 'content'""" + f = open(file_, 'w') + f.write(content) + f.close() + +def nice_print(yaml_element): + """Print a yaml structure. 
Used mainly for debugging""" + print(yaml.dump(yaml_element, default_flow_style=False)) + +def new_random_mac(): + mac = (0xE2, random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff) ) + return ':'.join(map(lambda x: "%02X" % x, mac)) + +def parse_dict(var, template): + if type(var) is not dict: return -1, 'not a dictionary' + for _,tv in template.items(): + if type(tv) is list: + return + +def delete_nulls(var): + if type(var) is dict: + for k in var.keys(): + if var[k] is None: del var[k] + elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple: + if delete_nulls(var[k]): del var[k] + if len(var) == 0: return True + elif type(var) is list or type(var) is tuple: + for k in var: + if type(k) is dict: delete_nulls(k) + if len(var) == 0: return True + return False + +def get_next_2pow(var): + if var==0: return 0 + v=1 + while vhighest_version_int: + highest_version_int, highest_version = row[0:2] + return highest_version_int, highest_version + except (mdb.Error, AttributeError) as e: + self.logger.error("get_db_version DB Exception %d: %s. Command %s",e.args[0], e.args[1], cmd) + r,c = self.format_error(e) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def disconnect(self): + '''disconnect from the data base''' + try: + self.con.close() + del self.con + except mdb.Error as e: + self.logger.error("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1]) + return -1 + except AttributeError as e: #self.con not defined + if e[0][-5:] == "'con'": return -1, "Database internal error, no connection." 
+ else: raise + + def format_error(self, e, func, cmd, command=None, extra=None): + '''Creates a text error base on the produced exception + Params: + e: mdb exception + func: name of the function that makes the call, for logging purposes + cmd: database command that produce the exception + command: if the intention is update or delete + extra: extra information to add to some commands + Return + HTTP error in negative, formatted error text + ''' + + self.logger.error("%s DB Exception %s. Command %s",func, str(e), cmd) + if type(e[0]) is str: + if e[0][-5:] == "'con'": return -HTTP_Internal_Server_Error, "DB Exception, no connection." + else: raise + if e.args[0]==2006 or e.args[0]==2013 : #MySQL server has gone away (((or))) Exception 2013: Lost connection to MySQL server during query + #reconnect + self.connect() + return -HTTP_Request_Timeout,"Database reconnection. Try Again" + fk=e.args[1].find("foreign key constraint fails") + if fk>=0: + if command=="update": return -HTTP_Bad_Request, "tenant_id %s not found." % extra + elif command=="delete": return -HTTP_Bad_Request, "Resource is not free. There are %s that prevent its deletion." % extra + de = e.args[1].find("Duplicate entry") + fk = e.args[1].find("for key") + uk = e.args[1].find("Unknown column") + wc = e.args[1].find("in 'where clause'") + fl = e.args[1].find("in 'field list'") + #print de, fk, uk, wc,fl + if de>=0: + if fk>=0: #error 1062 + return -HTTP_Conflict, "Value %s already in use for %s" % (e.args[1][de+15:fk], e.args[1][fk+7:]) + if uk>=0: + if wc>=0: + return -HTTP_Bad_Request, "Field %s can not be used for filtering" % e.args[1][uk+14:wc] + if fl>=0: + return -HTTP_Bad_Request, "Field %s does not exist" % e.args[1][uk+14:wc] + return -HTTP_Internal_Server_Error, "Database internal Error %d: %s" % (e.args[0], e.args[1]) + + def __data2db_format(self, data): + '''convert data to database format. 
If data is None it return the 'Null' text, + otherwise it return the text surrounded by quotes ensuring internal quotes are escaped''' + if data==None: + return 'Null' + out=str(data) + if "'" not in out: + return "'" + out + "'" + elif '"' not in out: + return '"' + out + '"' + else: + return json.dumps(out) + + def __get_used_net_vlan(self): + #get used from database if needed + try: + cmd = "SELECT vlan FROM nets WHERE vlan>='%s' and (type='ptp' or type='data') ORDER BY vlan LIMIT 25" % self.net_vlan_lastused + with self.con: + self.cur = self.con.cursor() + self.logger.debug(cmd) + self.cur.execute(cmd) + vlan_tuple = self.cur.fetchall() + #convert a tuple of tuples in a list of numbers + self.net_vlan_usedlist = [] + for k in vlan_tuple: + self.net_vlan_usedlist.append(k[0]) + return 0 + except (mdb.Error, AttributeError) as e: + return self.format_error(e, "get_free_net_vlan", cmd) + + def get_free_net_vlan(self): + '''obtain a vlan not used in any net''' + + while True: + self.logger.debug("net_vlan_lastused:%d net_vlan_range:%d-%d net_vlan_usedlist:%s", + self.net_vlan_lastused, self.net_vlan_range[0], self.net_vlan_range[1], str(self.net_vlan_usedlist)) + self.net_vlan_lastused += 1 + if self.net_vlan_lastused == self.net_vlan_range[1]: + #start from the begining + self.net_vlan_lastused = self.net_vlan_range[0] + self.net_vlan_usedlist = None + if self.net_vlan_usedlist is None \ + or (len(self.net_vlan_usedlist)>0 and self.net_vlan_lastused >= self.net_vlan_usedlist[-1] and len(self.net_vlan_usedlist)==25): + r = self.__get_used_net_vlan() + if r<0: return r + self.logger.debug("new net_vlan_usedlist %s", str(self.net_vlan_usedlist)) + if self.net_vlan_lastused in self.net_vlan_usedlist: + continue + else: + return self.net_vlan_lastused + + def get_table(self, **sql_dict): + ''' Obtain rows from a table. 
+ Atribure sql_dir: dictionary with the following key: value + 'SELECT': [list of fields to retrieve] (by default all) + 'FROM': string of table name (Mandatory) + 'WHERE': dict of key:values, translated to key=value AND ... (Optional) + 'WHERE_NOT': dict of key:values, translated to key!=value AND ... (Optional) + 'WHERE_OR': dict of key:values, translated to key=value OR ... (Optional) + 'LIMIT': limit of number of rows (Optional) + Return: a list with dictionarys at each row + ''' + #print sql_dict + select_= "SELECT " + ("*" if 'SELECT' not in sql_dict else ",".join(map(str,sql_dict['SELECT'])) ) + #print 'select_', select_ + from_ = "FROM " + str(sql_dict['FROM']) + #print 'from_', from_ + + where_and = None + where_or = None + if 'WHERE' in sql_dict and len(sql_dict['WHERE']) > 0: + w=sql_dict['WHERE'] + where_and = " AND ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"), w.keys()) ) + if 'WHERE_NOT' in sql_dict and len(sql_dict['WHERE_NOT']) > 0: + w=sql_dict['WHERE_NOT'] + where_and_not = " AND ".join(map( lambda x: str(x) + (" is not Null" if w[x] is None else "!='"+str(w[x])+"'"), w.keys()) ) + if where_and: + where_and += " AND " + where_and_not + else: + where_and = where_and_not + if 'WHERE_OR' in sql_dict and len(sql_dict['WHERE_OR']) > 0: + w=sql_dict['WHERE_OR'] + where_or = " OR ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"), w.keys()) ) + + if where_and!=None and where_or!=None: + where_ = "WHERE (" + where_and + ") OR " + where_or + elif where_and!=None and where_or==None: + where_ = "WHERE " + where_and + elif where_and==None and where_or!=None: + where_ = "WHERE " + where_or + else: + where_ = "" + #print 'where_', where_ + limit_ = "LIMIT " + str(sql_dict['LIMIT']) if 'LIMIT' in sql_dict else "" + #print 'limit_', limit_ + cmd = " ".join( (select_, from_, where_, limit_) ) + for retry_ in range(0,2): + try: + with self.con: + self.cur = 
self.con.cursor(mdb.cursors.DictCursor) + self.logger.debug(cmd) + self.cur.execute(cmd) + rows = self.cur.fetchall() + return self.cur.rowcount, rows + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "get_table", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def new_tenant(self, tenant_dict): + ''' Add one row into a table. + Attribure + tenant_dict: dictionary with the key: value to insert + It checks presence of uuid and add one automatically otherwise + Return: (result, uuid) where result can be 0 if error, or 1 if ok + ''' + for retry_ in range(0,2): + cmd="" + inserted=-1 + try: + #create uuid if not provided + if 'uuid' not in tenant_dict: + uuid = tenant_dict['uuid'] = str(myUuid.uuid1()) # create_uuid + else: + uuid = str(tenant_dict['uuid']) + #obtain tenant_id for logs + tenant_id = uuid + with self.con: + self.cur = self.con.cursor() + #inserting new uuid + cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','tenants')" % uuid + self.logger.debug(cmd) + self.cur.execute(cmd) + #insert tenant + cmd= "INSERT INTO tenants (" + \ + ",".join(map(str, tenant_dict.keys() )) + ") VALUES(" + \ + ",".join(map(lambda x: "Null" if x is None else "'"+str(x)+"'",tenant_dict.values() )) + ")" + self.logger.debug(cmd) + self.cur.execute(cmd) + inserted = self.cur.rowcount + ##inserting new log + #del tenant_dict['uuid'] # not interested for the log + #cmd = "INSERT INTO logs (related,level,tenant_id,uuid,description) VALUES ('tenants','debug','%s','%s',\"new tenant %s\")" % (uuid, tenant_id, str(tenant_dict)) + #self.logger.debug(cmd) + #self.cur.execute(cmd) + #commit transaction + self.cur.close() + if inserted == 0: return 0, uuid + with self.con: + self.cur = self.con.cursor() + #adding public flavors + cmd = "INSERT INTO tenants_flavors(flavor_id,tenant_id) SELECT uuid as flavor_id,'"+ tenant_id + "' FROM flavors WHERE public = 'yes'" + self.logger.debug(cmd) + self.cur.execute(cmd) + self.logger.debug("attached public 
flavors: %s", str(self.cur.rowcount)) + #rows = self.cur.fetchall() + #for row in rows: + # cmd = "INSERT INTO tenants_flavors(flavor_id,tenant_id) VALUES('%s','%s')" % (row[0], tenant_id) + # self.cur.execute(cmd ) + #adding public images + cmd = "INSERT INTO tenants_images(image_id,tenant_id) SELECT uuid as image_id,'"+ tenant_id + "' FROM images WHERE public = 'yes'" + self.logger.debug(cmd) + self.cur.execute(cmd) + self.logger.debug("attached public images: %s", str(self.cur.rowcount)) + return 1, uuid + except (mdb.Error, AttributeError) as e: + if inserted==1: + self.logger.warning("new_tenant DB Exception %d: %s. Command %s",e.args[0], e.args[1], cmd) + return 1, uuid + else: + r,c = self.format_error(e, "new_tenant", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def new_row(self, table, INSERT, add_uuid=False, log=False): + ''' Add one row into a table. + Atribure + INSERT: dictionary with the key: value to insert + table: table where to insert + add_uuid: if True, it will crated an uuid key entry at INSERT if not provided + It checks presence of uuid and add one automatically otherwise + Return: (result, uuid) where result can be 0 if error, or 1 if ok + ''' + for retry_ in range(0,2): + cmd="" + try: + if add_uuid: + #create uuid if not provided + if 'uuid' not in INSERT: + uuid = INSERT['uuid'] = str(myUuid.uuid1()) # create_uuid + else: + uuid = str(INSERT['uuid']) + else: + uuid=None + with self.con: + self.cur = self.con.cursor() + if add_uuid: + #inserting new uuid + cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','%s')" % (uuid, table) + self.logger.debug(cmd) + self.cur.execute(cmd) + #insertion + cmd= "INSERT INTO " + table +" (" + \ + ",".join(map(str, INSERT.keys() )) + ") VALUES(" + \ + ",".join(map(lambda x: 'Null' if x is None else "'"+str(x)+"'", INSERT.values() )) + ")" + self.logger.debug(cmd) + self.cur.execute(cmd) + nb_rows = self.cur.rowcount + #inserting new log + #if nb_rows > 0 and log: + # if add_uuid: del 
INSERT['uuid'] + # #obtain tenant_id for logs + # if 'tenant_id' in INSERT: + # tenant_id = INSERT['tenant_id'] + # del INSERT['tenant_id'] + # elif table == 'tenants': + # tenant_id = uuid + # else: + # tenant_id = None + # if uuid is None: uuid_k = uuid_v = "" + # else: uuid_k=",uuid"; uuid_v=",'" + str(uuid) + "'" + # if tenant_id is None: tenant_k = tenant_v = "" + # else: tenant_k=",tenant_id"; tenant_v=",'" + str(tenant_id) + "'" + # cmd = "INSERT INTO logs (related,level%s%s,description) VALUES ('%s','debug'%s%s,\"new %s %s\")" \ + # % (uuid_k, tenant_k, table, uuid_v, tenant_v, table[:-1], str(INSERT)) + # self.logger.debug(cmd) + # self.cur.execute(cmd) + return nb_rows, uuid + + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "new_row", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def __remove_quotes(self, data): + '''remove single quotes ' of any string content of data dictionary''' + for k,v in data.items(): + if type(v) == str: + if "'" in v: + data[k] = data[k].replace("'","_") + + def _update_rows_internal(self, table, UPDATE, WHERE={}): + cmd= "UPDATE " + table +" SET " + \ + ",".join(map(lambda x: str(x)+'='+ self.__data2db_format(UPDATE[x]), UPDATE.keys() )); + if WHERE: + cmd += " WHERE " + " and ".join(map(lambda x: str(x)+ (' is Null' if WHERE[x] is None else"='"+str(WHERE[x])+"'" ), WHERE.keys() )) + self.logger.debug(cmd) + self.cur.execute(cmd) + nb_rows = self.cur.rowcount + return nb_rows, None + + def update_rows(self, table, UPDATE, WHERE={}, log=False): + ''' Update one or several rows into a table. 
+ Atributes + UPDATE: dictionary with the key-new_value pairs to change + table: table to be modified + WHERE: dictionary to filter target rows, key-value + log: if true, a log entry is added at logs table + Return: (result, None) where result indicates the number of updated files + ''' + for retry_ in range(0,2): + cmd="" + try: + #gettting uuid + uuid = WHERE.get('uuid') + + with self.con: + self.cur = self.con.cursor() + cmd= "UPDATE " + table +" SET " + \ + ",".join(map(lambda x: str(x)+'='+ self.__data2db_format(UPDATE[x]), UPDATE.keys() )); + if WHERE: + cmd += " WHERE " + " and ".join(map(lambda x: str(x)+ (' is Null' if WHERE[x] is None else"='"+str(WHERE[x])+"'" ), WHERE.keys() )) + self.logger.debug(cmd) + self.cur.execute(cmd) + nb_rows = self.cur.rowcount + #if nb_rows > 0 and log: + # #inserting new log + # if uuid is None: uuid_k = uuid_v = "" + # else: uuid_k=",uuid"; uuid_v=",'" + str(uuid) + "'" + # cmd = "INSERT INTO logs (related,level%s,description) VALUES ('%s','debug'%s,\"updating %d entry %s\")" \ + # % (uuid_k, table, uuid_v, nb_rows, (str(UPDATE)).replace('"','-') ) + # self.logger.debug(cmd) + # self.cur.execute(cmd) + return nb_rows, uuid + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "update_rows", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def get_host(self, host_id): + if af.check_valid_uuid(host_id): + where_filter="uuid='" + host_id + "'" + else: + where_filter="name='" + host_id + "'" + for retry_ in range(0,2): + cmd="" + try: + with self.con: + self.cur = self.con.cursor(mdb.cursors.DictCursor) + #get HOST + cmd = "SELECT uuid, user, name, ip_name, description, ranking, admin_state_up, DATE_FORMAT(created_at,'%Y-%m-%dT%H:%i:%s') as created_at \ + FROM hosts WHERE " + where_filter + self.logger.debug(cmd) + self.cur.execute(cmd) + if self.cur.rowcount == 0 : + return 0, "host '" + str(host_id) +"'not found." 
+ elif self.cur.rowcount > 1 : + return 0, "host '" + str(host_id) +"' matches more than one result." + host = self.cur.fetchone() + host_id = host['uuid'] + #get numa + cmd = "SELECT id, numa_socket, hugepages, memory, admin_state_up FROM numas WHERE host_id = '" + str(host_id) + "'" + self.logger.debug(cmd) + self.cur.execute(cmd) + host['numas'] = self.cur.fetchall() + for numa in host['numas']: + #print "SELECT core_id, instance_id, status, thread_id, v_thread_id FROM resources_core WHERE numa_id = '" + str(numa['id']) + "'" + #get cores + cmd = "SELECT core_id, instance_id, status, thread_id, v_thread_id FROM resources_core WHERE numa_id = '" + str(numa['id']) + "'" + self.logger.debug(cmd) + self.cur.execute(cmd) + numa['cores'] = self.cur.fetchall() + for core in numa['cores']: + if core['instance_id'] == None: del core['instance_id'], core['v_thread_id'] + if core['status'] == 'ok': del core['status'] + #get used memory + cmd = "SELECT sum(consumed) as hugepages_consumed FROM resources_mem WHERE numa_id = '" + str(numa['id']) + "' GROUP BY numa_id" + self.logger.debug(cmd) + self.cur.execute(cmd) + used = self.cur.fetchone() + used_= int(used['hugepages_consumed']) if used != None else 0 + numa['hugepages_consumed'] = used_ + #get ports + #cmd = "CALL GetPortsFromNuma(%s)'" % str(numa['id']) + #self.cur.callproc('GetPortsFromNuma', (numa['id'],) ) + #every time a Procedure is launched you need to close and open the cursor + #under Error 2014: Commands out of sync; you can't run this command now + #self.cur.close() + #self.cur = self.con.cursor(mdb.cursors.DictCursor) + cmd="SELECT Mbps, pci, status, Mbps_used, instance_id, if(id=root_id,'PF','VF') as type_,\ + switch_port, switch_dpid, mac, source_name\ + FROM resources_port WHERE numa_id=%d ORDER BY root_id, type_ DESC" % (numa['id']) + self.logger.debug(cmd) + self.cur.execute(cmd) + ifaces = self.cur.fetchall() + #The SQL query will ensure to have SRIOV interfaces from a port first + sriovs=[] + 
Mpbs_consumed = 0 + numa['interfaces'] = [] + for iface in ifaces: + if not iface["instance_id"]: + del iface["instance_id"] + if iface['status'] == 'ok': + del iface['status'] + Mpbs_consumed += int(iface["Mbps_used"]) + del iface["Mbps_used"] + if iface["type_"]=='PF': + if not iface["switch_dpid"]: + del iface["switch_dpid"] + if not iface["switch_port"]: + del iface["switch_port"] + if sriovs: + iface["sriovs"] = sriovs + if Mpbs_consumed: + iface["Mpbs_consumed"] = Mpbs_consumed + del iface["type_"] + numa['interfaces'].append(iface) + sriovs=[] + Mpbs_consumed = 0 + else: #VF, SRIOV + del iface["switch_port"] + del iface["switch_dpid"] + del iface["type_"] + del iface["Mbps"] + sriovs.append(iface) + + #delete internal field + del numa['id'] + return 1, host + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "get_host", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def new_uuid(self): + max_retries=10 + while max_retries>0: + uuid = str( myUuid.uuid1() ) + if self.check_uuid(uuid)[0] == 0: + return uuid + max_retries-=1 + return uuid + + def check_uuid(self, uuid): + '''check in the database if this uuid is already present''' + try: + cmd = "SELECT * FROM uuids where uuid='" + str(uuid) + "'" + with self.con: + self.cur = self.con.cursor(mdb.cursors.DictCursor) + self.logger.debug(cmd) + self.cur.execute(cmd) + rows = self.cur.fetchall() + return self.cur.rowcount, rows + except (mdb.Error, AttributeError) as e: + return self.format_error(e, "check_uuid", cmd) + + def __get_next_ids(self): + '''get next auto increment index of all table in the database''' + self.cur.execute("SELECT table_name,AUTO_INCREMENT FROM information_schema.tables WHERE AUTO_INCREMENT IS NOT NULL AND table_schema = DATABASE()") + rows = self.cur.fetchall() + return self.cur.rowcount, dict(rows) + + def edit_host(self, host_id, host_dict): + #get next port index + for retry_ in range(0,2): + cmd="" + try: + with self.con: + self.cur = 
self.con.cursor() + + #update table host + numa_list = host_dict.pop('numas', () ) + if host_dict: + self._update_rows_internal("hosts", host_dict, {"uuid": host_id}) + + where = {"host_id": host_id} + for numa_dict in numa_list: + where["numa_socket"] = str(numa_dict.pop('numa_socket')) + interface_list = numa_dict.pop('interfaces', () ) + if numa_dict: + self._update_rows_internal("numas", numa_dict, where) + for interface in interface_list: + source_name = str(interface.pop("source_name") ) + if interface: + #get interface id from resources_port + cmd= "SELECT rp.id as id FROM resources_port as rp join numas as n on n.id=rp.numa_id join hosts as h on h.uuid=n.host_id " +\ + "WHERE host_id='%s' and rp.source_name='%s'" %(host_id, source_name) + self.logger.debug(cmd) + self.cur.execute(cmd) + row = self.cur.fetchone() + if self.cur.rowcount<=0: + return -HTTP_Bad_Request, "Interface source_name='%s' from numa_socket='%s' not found" % (source_name, str(where["numa_socket"])) + interface_id = row[0] + self._update_rows_internal("resources_port", interface, {"root_id": interface_id}) + return self.get_host(host_id) + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "edit_host", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def new_host(self, host_dict): + #get next port index + for retry_ in range(0,2): + cmd="" + try: + with self.con: + self.cur = self.con.cursor() + + result, next_ids = self.__get_next_ids() + #print "next_ids: " + str(next_ids) + if result <= 0: return result, "Internal DataBase error getting next id of tables" + + #create uuid if not provided + if 'uuid' not in host_dict: + uuid = host_dict['uuid'] = str(myUuid.uuid1()) # create_uuid + else: #check uuid is valid + uuid = str(host_dict['uuid']) + # result, data = self.check_uuid(uuid) + # if (result == 1): + # return -1, "UUID '%s' already in use" % uuid + + #inserting new uuid + cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','hosts')" % uuid + 
self.logger.debug(cmd) + result = self.cur.execute(cmd) + + #insert in table host + numa_list = host_dict.pop('numas', []) + #get nonhupages and nonisolated cpus + host_dict['RAM']=0 + host_dict['cpus']=0 + for numa in numa_list: + mem_numa = numa.get('memory', 0) - numa.get('hugepages',0) + if mem_numa>0: + host_dict['RAM'] += mem_numa + for core in numa.get("cores", []): + if "status" in core and core["status"]=="noteligible": + host_dict['cpus']+=1 + host_dict['RAM']*=1024 # from GB to MB + + keys = ",".join(host_dict.keys()) + values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", host_dict.values() ) ) + cmd = "INSERT INTO hosts (" + keys + ") VALUES (" + values + ")" + self.logger.debug(cmd) + result = self.cur.execute(cmd) + #if result != 1: return -1, "Database Error while inserting at hosts table" + + #insert numas + nb_numas = nb_cores = nb_ifaces = 0 + for numa_dict in numa_list: + nb_numas += 1 + interface_list = numa_dict.pop('interfaces', []) + core_list = numa_dict.pop('cores', []) + numa_dict['id'] = next_ids['numas']; next_ids['numas'] += 1 + numa_dict['host_id'] = uuid + keys = ",".join(numa_dict.keys()) + values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", numa_dict.values() ) ) + cmd = "INSERT INTO numas (" + keys + ") VALUES (" + values + ")" + self.logger.debug(cmd) + result = self.cur.execute(cmd) + + #insert cores + for core_dict in core_list: + nb_cores += 1 + core_dict['numa_id'] = numa_dict['id'] + keys = ",".join(core_dict.keys()) + values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", core_dict.values() ) ) + cmd = "INSERT INTO resources_core (" + keys + ") VALUES (" + values + ")" + self.logger.debug(cmd) + result = self.cur.execute(cmd) + + #insert ports + for port_dict in interface_list: + nb_ifaces += 1 + sriov_list = port_dict.pop('sriovs', []) + port_dict['numa_id'] = numa_dict['id'] + port_dict['id'] = port_dict['root_id'] = next_ids['resources_port'] + 
next_ids['resources_port'] += 1 + switch_port = port_dict.get('switch_port', None) + switch_dpid = port_dict.get('switch_dpid', None) + keys = ",".join(port_dict.keys()) + values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", port_dict.values() ) ) + cmd = "INSERT INTO resources_port (" + keys + ") VALUES (" + values + ")" + self.logger.debug(cmd) + result = self.cur.execute(cmd) + + #insert sriovs into port table + for sriov_dict in sriov_list: + sriov_dict['switch_port'] = switch_port + sriov_dict['switch_dpid'] = switch_dpid + sriov_dict['numa_id'] = port_dict['numa_id'] + sriov_dict['Mbps'] = port_dict['Mbps'] + sriov_dict['root_id'] = port_dict['id'] + sriov_dict['id'] = next_ids['resources_port'] + if "vlan" in sriov_dict: + del sriov_dict["vlan"] + next_ids['resources_port'] += 1 + keys = ",".join(sriov_dict.keys()) + values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", sriov_dict.values() ) ) + cmd = "INSERT INTO resources_port (" + keys + ") VALUES (" + values + ")" + self.logger.debug(cmd) + result = self.cur.execute(cmd) + + #inserting new log + #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('hosts','debug','%s','new host: %d numas, %d theads, %d ifaces')" % (uuid, nb_numas, nb_cores, nb_ifaces) + #self.logger.debug(cmd) + #result = self.cur.execute(cmd) + + #inseted ok + with self.con: + self.cur = self.con.cursor() + self.logger.debug("callproc('UpdateSwitchPort', () )") + self.cur.callproc('UpdateSwitchPort', () ) + + self.logger.debug("getting host '%s'",str(host_dict['uuid'])) + return self.get_host(host_dict['uuid']) + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "new_host", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def new_flavor(self, flavor_dict, tenant_id ): + '''Add new flavor into the database. Create uuid if not provided + Atributes + flavor_dict: flavor dictionary with the key: value to insert. 
Must be valid flavors columns + tenant_id: if not 'any', it matches this flavor/tenant inserting at tenants_flavors table + Return: (result, data) where result can be + negative: error at inserting. data contain text + 1, inserted, data contain inserted uuid flavor + ''' + for retry_ in range(0,2): + cmd="" + try: + with self.con: + self.cur = self.con.cursor() + + #create uuid if not provided + if 'uuid' not in flavor_dict: + uuid = flavor_dict['uuid'] = str(myUuid.uuid1()) # create_uuid + else: #check uuid is valid + uuid = str(flavor_dict['uuid']) + # result, data = self.check_uuid(uuid) + # if (result == 1): + # return -1, "UUID '%s' already in use" % uuid + + #inserting new uuid + cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','flavors')" % uuid + self.logger.debug(cmd) + self.cur.execute(cmd) + + #insert in table flavor + keys = ",".join(flavor_dict.keys()) + values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", flavor_dict.values() ) ) + cmd = "INSERT INTO flavors (" + keys + ") VALUES (" + values + ")" + self.logger.debug(cmd) + self.cur.execute(cmd) + #if result != 1: return -1, "Database Error while inserting at flavors table" + + #insert tenants_flavors + if tenant_id != 'any': + cmd = "INSERT INTO tenants_flavors (tenant_id,flavor_id) VALUES ('%s','%s')" % (tenant_id, uuid) + self.logger.debug(cmd) + self.cur.execute(cmd) + + #inserting new log + #del flavor_dict['uuid'] + #if 'extended' in flavor_dict: del flavor_dict['extended'] #remove two many information + #cmd = "INSERT INTO logs (related,level,uuid, tenant_id, description) VALUES ('flavors','debug','%s','%s',\"new flavor: %s\")" \ + # % (uuid, tenant_id, str(flavor_dict)) + #self.logger.debug(cmd) + #self.cur.execute(cmd) + + #inseted ok + return 1, uuid + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "new_flavor", cmd, "update", tenant_id) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def new_image(self, image_dict, tenant_id): + 
'''Add new image into the database. Create uuid if not provided + Atributes + image_dict: image dictionary with the key: value to insert. Must be valid images columns + tenant_id: if not 'any', it matches this image/tenant inserting at tenants_images table + Return: (result, data) where result can be + negative: error at inserting. data contain text + 1, inserted, data contain inserted uuid image + ''' + for retry_ in range(0,2): + cmd="" + try: + with self.con: + self.cur = self.con.cursor() + + #create uuid if not provided + if 'uuid' not in image_dict: + uuid = image_dict['uuid'] = str(myUuid.uuid1()) # create_uuid + else: #check uuid is valid + uuid = str(image_dict['uuid']) + # result, data = self.check_uuid(uuid) + # if (result == 1): + # return -1, "UUID '%s' already in use" % uuid + + #inserting new uuid + cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','images')" % uuid + self.logger.debug(cmd) + self.cur.execute(cmd) + + #insert in table image + keys = ",".join(image_dict.keys()) + values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", image_dict.values() ) ) + cmd = "INSERT INTO images (" + keys + ") VALUES (" + values + ")" + self.logger.debug(cmd) + self.cur.execute(cmd) + #if result != 1: return -1, "Database Error while inserting at images table" + + #insert tenants_images + if tenant_id != 'any': + cmd = "INSERT INTO tenants_images (tenant_id,image_id) VALUES ('%s','%s')" % (tenant_id, uuid) + self.logger.debug(cmd) + self.cur.execute(cmd) + + ##inserting new log + #cmd = "INSERT INTO logs (related,level,uuid, tenant_id, description) VALUES ('images','debug','%s','%s',\"new image: %s path: %s\")" % (uuid, tenant_id, image_dict['name'], image_dict['path']) + #self.logger.debug(cmd) + #self.cur.execute(cmd) + + #inseted ok + return 1, uuid + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "new_image", cmd, "update", tenant_id) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def 
delete_image_flavor(self, item_type, item_id, tenant_id): + '''deletes an image or flavor from database + item_type must be a 'image' or 'flavor' + item_id is the uuid + tenant_id is the asociated tenant, can be 'any' with means all + If tenan_id is not any, it deletes from tenants_images/flavors, + which means this image/flavor is used by this tenant, and if success, + it tries to delete from images/flavors in case this is not public, + that only will success if image is private and not used by other tenants + If tenant_id is any, it tries to delete from both tables at the same transaction + so that image/flavor is completely deleted from all tenants or nothing + ''' + for retry_ in range(0,2): + deleted = -1 + deleted_item = -1 + result = (-HTTP_Internal_Server_Error, "internal error") + cmd="" + try: + with self.con: + self.cur = self.con.cursor() + cmd = "DELETE FROM tenants_%ss WHERE %s_id = '%s'" % (item_type, item_type, item_id) + if tenant_id != 'any': + cmd += " AND tenant_id = '%s'" % tenant_id + self.logger.debug(cmd) + self.cur.execute(cmd) + deleted = self.cur.rowcount + if tenant_id == 'any': #delete from images/flavors in the SAME transaction + cmd = "DELETE FROM %ss WHERE uuid = '%s'" % (item_type, item_id) + self.logger.debug(cmd) + self.cur.execute(cmd) + deleted = self.cur.rowcount + if deleted>=1: + #delete uuid + cmd = "DELETE FROM uuids WHERE uuid = '%s'" % item_id + self.logger.debug(cmd) + self.cur.execute(cmd) + ##inserting new log + #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \ + # VALUES ('%ss','debug','%s','%s','delete %s completely')" % \ + # (item_type, item_id, tenant_id, item_type) + #self.logger.debug(cmd) + #self.cur.execute(cmd) + return deleted, "%s '%s' completely deleted" % (item_type, item_id) + return 0, "%s '%s' not found" % (item_type, item_id) + + if deleted == 1: + ##inserting new log + #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \ + # VALUES ('%ss','debug','%s','%s','delete 
%s reference for this tenant')" % \ + # (item_type, item_id, tenant_id, item_type) + #self.logger.debug(cmd) + #self.cur.execute(cmd) + + #commit transaction + self.cur.close() + #if tenant!=any delete from images/flavors in OTHER transaction. If fails is because dependencies so that not return error + if deleted==1: + with self.con: + self.cur = self.con.cursor() + + #delete image/flavor if not public + cmd = "DELETE FROM %ss WHERE uuid = '%s' AND public = 'no'" % (item_type, item_id) + self.logger.debug(cmd) + self.cur.execute(cmd) + deleted_item = self.cur.rowcount + if deleted_item == 1: + #delete uuid + cmd = "DELETE FROM uuids WHERE uuid = '%s'" % item_id + self.logger.debug(cmd) + self.cur.execute(cmd) + ##inserting new log + #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \ + # VALUES ('%ss','debug','%s','%s','delete %s completely')" % \ + # (item_type, item_id, tenant_id, item_type) + #self.logger.debug(cmd) + #self.cur.execute(cmd) + except (mdb.Error, AttributeError) as e: + #print "delete_%s DB Exception %d: %s" % (item_type, e.args[0], e.args[1]) + if deleted <0: + result = self.format_error(e, "delete_"+item_type, cmd, "delete", "servers") + finally: + if deleted==1: + return 1, "%s '%s' from tenant '%s' %sdeleted" % \ + (item_type, item_id, tenant_id, "completely " if deleted_item==1 else "") + elif deleted==0: + return 0, "%s '%s' from tenant '%s' not found" % (item_type, item_id, tenant_id) + else: + if result[0]!=-HTTP_Request_Timeout or retry_==1: return result + + def delete_row(self, table, uuid): + for retry_ in range(0,2): + cmd="" + try: + with self.con: + #delete host + self.cur = self.con.cursor() + cmd = "DELETE FROM %s WHERE uuid = '%s'" % (table, uuid) + self.logger.debug(cmd) + self.cur.execute(cmd) + deleted = self.cur.rowcount + if deleted == 1: + #delete uuid + if table == 'tenants': tenant_str=uuid + else: tenant_str='Null' + self.cur = self.con.cursor() + cmd = "DELETE FROM uuids WHERE uuid = '%s'" % uuid + 
self.logger.debug(cmd) + self.cur.execute(cmd) + ##inserting new log + #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) VALUES ('%s','debug','%s','%s','delete %s')" % (table, uuid, tenant_str, table[:-1]) + #self.logger.debug(cmd) + #self.cur.execute(cmd) + return deleted, table[:-1] + " '%s' %s" %(uuid, "deleted" if deleted==1 else "not found") + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "delete_row", cmd, "delete", 'instances' if table=='hosts' or table=='tenants' else 'dependencies') + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def delete_row_by_key(self, table, key, value): + for retry_ in range(0,2): + cmd="" + try: + with self.con: + #delete host + self.cur = self.con.cursor() + cmd = "DELETE FROM %s" % (table) + if key!=None: + if value!=None: + cmd += " WHERE %s = '%s'" % (key, value) + else: + cmd += " WHERE %s is null" % (key) + else: #delete all + pass + self.logger.debug(cmd) + self.cur.execute(cmd) + deleted = self.cur.rowcount + if deleted < 1: + return -1, 'Not found' + #delete uuid + return 0, deleted + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "delete_row_by_key", cmd, "delete", 'instances' if table=='hosts' or table=='tenants' else 'dependencies') + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def delete_row_by_dict(self, **sql_dict): + ''' Deletes rows from a table. + Attribute sql_dir: dictionary with the following key: value + 'FROM': string of table name (Mandatory) + 'WHERE': dict of key:values, translated to key=value AND ... (Optional) + 'WHERE_NOT': dict of key:values, translated to key<>value AND ... (Optional) + 'WHERE_NOTNULL': (list or tuple of items that must not be null in a where ... 
(Optional) + 'LIMIT': limit of number of rows (Optional) + Return: the (number of items deleted, descriptive test) if ok; (negative, descriptive text) if error + ''' + #print sql_dict + from_ = "FROM " + str(sql_dict['FROM']) + #print 'from_', from_ + if 'WHERE' in sql_dict and len(sql_dict['WHERE']) > 0: + w=sql_dict['WHERE'] + where_ = "WHERE " + " AND ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"), w.keys()) ) + else: where_ = "" + if 'WHERE_NOT' in sql_dict and len(sql_dict['WHERE_NOT']) > 0: + w=sql_dict['WHERE_NOT'] + where_2 = " AND ".join(map( lambda x: str(x) + (" is not Null" if w[x] is None else "<>'"+str(w[x])+"'"), w.keys()) ) + if len(where_)==0: where_ = "WHERE " + where_2 + else: where_ = where_ + " AND " + where_2 + if 'WHERE_NOTNULL' in sql_dict and len(sql_dict['WHERE_NOTNULL']) > 0: + w=sql_dict['WHERE_NOTNULL'] + where_2 = " AND ".join(map( lambda x: str(x) + " is not Null", w) ) + if len(where_)==0: where_ = "WHERE " + where_2 + else: where_ = where_ + " AND " + where_2 + #print 'where_', where_ + limit_ = "LIMIT " + str(sql_dict['LIMIT']) if 'LIMIT' in sql_dict else "" + #print 'limit_', limit_ + cmd = " ".join( ("DELETE", from_, where_, limit_) ) + self.logger.debug(cmd) + for retry_ in range(0,2): + try: + with self.con: + #delete host + self.cur = self.con.cursor() + self.cur.execute(cmd) + deleted = self.cur.rowcount + return deleted, "%d deleted from %s" % (deleted, sql_dict['FROM'][:-1] ) + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "delete_row_by_dict", cmd, "delete", 'dependencies') + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + + def get_instance(self, instance_id): + for retry_ in range(0,2): + cmd="" + try: + with self.con: + self.cur = self.con.cursor(mdb.cursors.DictCursor) + #get INSTANCE + cmd = "SELECT uuid, name, description, progress, host_id, flavor_id, image_id, status, last_error, tenant_id, ram, vcpus, created_at \ + FROM instances WHERE uuid = 
'" + str(instance_id) +"'" + self.logger.debug(cmd) + self.cur.execute(cmd) + if self.cur.rowcount == 0 : return 0, "instance '" + str(instance_id) +"'not found." + instance = self.cur.fetchone() + #get networks + cmd = "SELECT uuid as iface_id, net_id, mac as mac_address, ip_address, name, Mbps as bandwidth, vpci, model \ + FROM ports WHERE type = 'instance:bridge' AND instance_id = '" + instance_id + "'" + self.logger.debug(cmd) + self.cur.execute(cmd) + if self.cur.rowcount > 0 : + instance['networks'] = self.cur.fetchall() + + #get extended + extended = {} + #get devices + cmd = "SELECT type, vpci, image_id, xml,dev FROM instance_devices WHERE instance_id = '%s' " % str(instance_id) + self.logger.debug(cmd) + self.cur.execute(cmd) + if self.cur.rowcount > 0 : + extended['devices'] = self.cur.fetchall() + #get numas + numas = [] + cmd = "SELECT id, numa_socket as source FROM numas WHERE host_id = '" + str(instance['host_id']) + "'" + self.logger.debug(cmd) + self.cur.execute(cmd) + host_numas = self.cur.fetchall() + #print 'host_numas', host_numas + for k in host_numas: + numa_id = str(k['id']) + numa_dict ={} + #get memory + cmd = "SELECT consumed FROM resources_mem WHERE instance_id = '%s' AND numa_id = '%s'" % ( instance_id, numa_id) + self.logger.debug(cmd) + self.cur.execute(cmd) + if self.cur.rowcount > 0: + mem_dict = self.cur.fetchone() + numa_dict['memory'] = mem_dict['consumed'] + #get full cores + cursor2 = self.con.cursor() + cmd = "SELECT core_id, paired, MIN(v_thread_id) as v1, MAX(v_thread_id) as v2, COUNT(instance_id) as nb, MIN(thread_id) as t1, MAX(thread_id) as t2 FROM resources_core WHERE instance_id = '%s' AND numa_id = '%s' GROUP BY core_id,paired" % ( str(instance_id), numa_id) + self.logger.debug(cmd) + cursor2.execute(cmd) + core_list = []; core_source = [] + paired_list = []; paired_source = [] + thread_list = []; thread_source = [] + if cursor2.rowcount > 0: + cores = cursor2.fetchall() + for core in cores: + if core[4] == 2: #number 
of used threads from core + if core[3] == core[2]: #only one thread asigned to VM, so completely core + core_list.append(core[2]) + core_source.append(core[5]) + elif core[1] == 'Y': + paired_list.append(core[2:4]) + paired_source.append(core[5:7]) + else: + thread_list.extend(core[2:4]) + thread_source.extend(core[5:7]) + + else: + thread_list.append(core[2]) + thread_source.append(core[5]) + if len(core_list) > 0: + numa_dict['cores'] = len(core_list) + numa_dict['cores-id'] = core_list + numa_dict['cores-source'] = core_source + if len(paired_list) > 0: + numa_dict['paired-threads'] = len(paired_list) + numa_dict['paired-threads-id'] = paired_list + numa_dict['paired-threads-source'] = paired_source + if len(thread_list) > 0: + numa_dict['threads'] = len(thread_list) + numa_dict['threads-id'] = thread_list + numa_dict['threads-source'] = thread_source + + #get dedicated ports and SRIOV + cmd = "SELECT port_id as iface_id, p.vlan as vlan, p.mac as mac_address, net_id, if(model='PF','yes',if(model='VF','no','yes:sriov')) as dedicated,\ + rp.Mbps as bandwidth, name, vpci, pci as source \ + FROM resources_port as rp join ports as p on port_id=uuid WHERE p.instance_id = '%s' AND numa_id = '%s' and p.type='instance:data'" % (instance_id, numa_id) + self.logger.debug(cmd) + self.cur.execute(cmd) + if self.cur.rowcount > 0: + numa_dict['interfaces'] = self.cur.fetchall() + #print 'interfaces', numa_dict + + if len(numa_dict) > 0 : + numa_dict['source'] = k['source'] #numa socket + numas.append(numa_dict) + + if len(numas) > 0 : extended['numas'] = numas + if len(extended) > 0 : instance['extended'] = extended + af.DeleteNone(instance) + return 1, instance + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "get_instance", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def get_numas(self, requirements, prefered_host_id=None, only_of_ports=True): + '''Obtain a valid NUMA/HOST for deployment a VM + requirements: contain requirement 
regarding: + requirements['ram']: Non huge page memory in MB; 0 to skip + requirements['vcpus']: Non isolated cpus; 0 to skip + requirements['numa']: Requiremets to be fixed in ONE Numa node + requirements['numa']['memory']: Huge page memory in GB at ; 0 for any + requirements['numa']['proc_req_type']: Type of processor, cores or threads + requirements['numa']['proc_req_nb']: Number of isolated cpus + requirements['numa']['port_list']: Physical NIC ports list ; [] for any + requirements['numa']['sriov_list']: Virtual function NIC ports list ; [] for any + prefered_host_id: if not None return this host if it match + only_of_ports: if True only those ports conected to the openflow (of) are valid, + that is, with switch_port information filled; if False, all NIC ports are valid. + Return a valid numa and host + ''' + + for retry_ in range(0,2): + cmd="" + try: + with self.con: +# #Find numas of prefered host +# prefered_numas = () +# if prefered_host_id != None: +# self.cur = self.con.cursor() +# self.cur.execute("SELECT id FROM numas WHERE host_id='%s'" + prefered_host_id) +# prefered_numas = self.cur.fetchall() +# self.cur.close() + + #Find valid host for the ram and vcpus + self.cur = self.con.cursor(mdb.cursors.DictCursor) + cmd = "CALL GetHostByMemCpu(%s, %s)" % (str(requirements['ram']), str(requirements['vcpus'])) + self.logger.debug(cmd) + self.cur.callproc('GetHostByMemCpu', (str(requirements['ram']), str(requirements['vcpus'])) ) + valid_hosts = self.cur.fetchall() + self.cur.close() + self.cur = self.con.cursor() + match_found = False + if len(valid_hosts)<=0: + error_text = 'No room at data center. 
Can not find a host with %s MB memory and %s cpus available' % (str(requirements['ram']), str(requirements['vcpus'])) + #self.logger.debug(error_text) + return -1, error_text + + #elif req_numa != None: + #Find valid numa nodes for memory requirements + self.cur = self.con.cursor(mdb.cursors.DictCursor) + cmd = "CALL GetNumaByMemory(%s)" % str(requirements['numa']['memory']) + self.logger.debug(cmd) + self.cur.callproc('GetNumaByMemory', (requirements['numa']['memory'],) ) + valid_for_memory = self.cur.fetchall() + self.cur.close() + self.cur = self.con.cursor() + if len(valid_for_memory)<=0: + error_text = 'No room at data center. Can not find a host with %s GB Hugepages memory available' % str(requirements['numa']['memory']) + #self.logger.debug(error_text) + return -1, error_text + + #Find valid numa nodes for processor requirements + self.cur = self.con.cursor(mdb.cursors.DictCursor) + if requirements['numa']['proc_req_type'] == 'threads': + cpu_requirement_text='cpu-threads' + cmd = "CALL GetNumaByThread(%s)" % str(requirements['numa']['proc_req_nb']) + self.logger.debug(cmd) + self.cur.callproc('GetNumaByThread', (requirements['numa']['proc_req_nb'],) ) + else: + cpu_requirement_text='cpu-cores' + cmd = "CALL GetNumaByCore(%s)" % str(requirements['numa']['proc_req_nb']) + self.logger.debug(cmd) + self.cur.callproc('GetNumaByCore', (requirements['numa']['proc_req_nb'],) ) + valid_for_processor = self.cur.fetchall() + self.cur.close() + self.cur = self.con.cursor() + if len(valid_for_processor)<=0: + error_text = 'No room at data center. 
Can not find a host with %s %s available' % (str(requirements['numa']['proc_req_nb']),cpu_requirement_text) + #self.logger.debug(error_text) + return -1, error_text + + #Find the numa nodes that comply for memory and processor requirements + #sorting from less to more memory capacity + valid_numas = [] + for m_numa in valid_for_memory: + numa_valid_for_processor = False + for p_numa in valid_for_processor: + if m_numa['numa_id'] == p_numa['numa_id']: + numa_valid_for_processor = True + break + numa_valid_for_host = False + prefered_numa = False + for p_host in valid_hosts: + if m_numa['host_id'] == p_host['uuid']: + numa_valid_for_host = True + if p_host['uuid'] == prefered_host_id: + prefered_numa = True + break + if numa_valid_for_host and numa_valid_for_processor: + if prefered_numa: + valid_numas.insert(0, m_numa['numa_id']) + else: + valid_numas.append(m_numa['numa_id']) + if len(valid_numas)<=0: + error_text = 'No room at data center. Can not find a host with %s MB hugepages memory and %s %s available in the same numa' %\ + (requirements['numa']['memory'], str(requirements['numa']['proc_req_nb']),cpu_requirement_text) + #self.logger.debug(error_text) + return -1, error_text + + # print 'Valid numas list: '+str(valid_numas) + + #Find valid numa nodes for interfaces requirements + #For each valid numa we will obtain the number of available ports and check if these are valid + match_found = False + for numa_id in valid_numas: + # print 'Checking '+str(numa_id) + match_found = False + self.cur = self.con.cursor(mdb.cursors.DictCursor) + if only_of_ports: + cmd="CALL GetAvailablePorts(%s)" % str(numa_id) + self.logger.debug(cmd) + self.cur.callproc('GetAvailablePorts', (numa_id,) ) + else: + cmd="CALL GetAllAvailablePorts(%s)" % str(numa_id) + self.logger.debug(cmd) + self.cur.callproc('GetAllAvailablePorts', (numa_id,) ) + available_ports = self.cur.fetchall() + self.cur.close() + self.cur = self.con.cursor() + + #Set/reset reservations + for port in 
available_ports: + port['Mbps_reserved'] = 0 + port['SRIOV_reserved'] = 0 + + #Try to allocate physical ports + physical_ports_found = True + for iface in requirements['numa']['port_list']: + # print '\t\tchecking iface: '+str(iface) + portFound = False + for port in available_ports: + # print '\t\t\tfor port: '+str(port) + #If the port is not empty continue + if port['Mbps_free'] != port['Mbps'] or port['Mbps_reserved'] != 0: + # print '\t\t\t\t Not empty port' + continue; + #If the port speed is not enough continue + if port['Mbps'] < iface['bandwidth']: + # print '\t\t\t\t Not enough speed' + continue; + + #Otherwise this is a valid port + port['Mbps_reserved'] = port['Mbps'] + port['SRIOV_reserved'] = 0 + iface['port_id'] = port['port_id'] + iface['vlan'] = None + iface['mac'] = port['mac'] + iface['switch_port'] = port['switch_port'] + # print '\t\t\t\t Dedicated port found '+str(port['port_id']) + portFound = True + break; + + #if all ports have been checked and no match has been found + #this is not a valid numa + if not portFound: + # print '\t\t\t\t\tAll ports have been checked and no match has been found for numa '+str(numa_id)+'\n\n' + physical_ports_found = False + break + + #if there is no match continue checking the following numa + if not physical_ports_found: + continue + + #Try to allocate SR-IOVs + sriov_ports_found = True + for iface in requirements['numa']['sriov_list']: + # print '\t\tchecking iface: '+str(iface) + portFound = False + for port in available_ports: + # print '\t\t\tfor port: '+str(port) + #If there are not available SR-IOVs continue + if port['availableSRIOV'] - port['SRIOV_reserved'] <= 0: + # print '\t\t\t\t Not enough SR-IOV' + continue; + #If the port free speed is not enough continue + if port['Mbps_free'] - port['Mbps_reserved'] < iface['bandwidth']: + # print '\t\t\t\t Not enough speed' + continue; + + #Otherwise this is a valid port + port['Mbps_reserved'] += iface['bandwidth'] + port['SRIOV_reserved'] += 1 + # print 
'\t\t\t\t SR-IOV found '+str(port['port_id']) + iface['port_id'] = port['port_id'] + iface['vlan'] = None + iface['mac'] = port['mac'] + iface['switch_port'] = port['switch_port'] + portFound = True + break; + + #if all ports have been checked and no match has been found + #this is not a valid numa + if not portFound: + # print '\t\t\t\t\tAll ports have been checked and no match has been found for numa '+str(numa_id)+'\n\n' + sriov_ports_found = False + break + + #if there is no match continue checking the following numa + if not sriov_ports_found: + continue + + + if sriov_ports_found and physical_ports_found: + match_found = True + break + + if not match_found: + error_text = 'No room at data center. Can not find a host with the required hugepages, vcpus and interfaces' + #self.logger.debug(error_text) + return -1, error_text + + #self.logger.debug('Full match found in numa %s', str(numa_id)) + + for numa in valid_for_processor: + if numa_id==numa['numa_id']: + host_id=numa['host_id'] + break + return 0, {'numa_id':numa_id, 'host_id': host_id, } + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "get_numas", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def new_instance(self, instance_dict, nets, ports_to_free): + for retry_ in range(0,2): + cmd="" + try: + with self.con: + self.cur = self.con.cursor() + + #create uuid if not provided + if 'uuid' not in instance_dict: + uuid = instance_dict['uuid'] = str(myUuid.uuid1()) # create_uuid + else: #check uuid is valid + uuid = str(instance_dict['uuid']) + + + #inserting new uuid + cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'instances')" % (uuid, uuid) + self.logger.debug(cmd) + self.cur.execute(cmd) + + #insert in table instance + extended = instance_dict.pop('extended', None); + bridgedifaces = instance_dict.pop('bridged-ifaces', () ); + + keys = ",".join(instance_dict.keys()) + values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", 
instance_dict.values() ) ) + cmd = "INSERT INTO instances (" + keys + ") VALUES (" + values + ")" + self.logger.debug(cmd) + self.cur.execute(cmd) + #if result != 1: return -1, "Database Error while inserting at instances table" + + #insert resources + nb_bridge_ifaces = nb_cores = nb_ifaces = nb_numas = 0 + #insert bridged_ifaces + for iface in bridgedifaces: + #generate and insert a iface uuid + iface['uuid'] = str(myUuid.uuid1()) # create_uuid + cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'ports')" % (iface['uuid'], uuid) + self.logger.debug(cmd) + self.cur.execute(cmd) + #insert iface + iface['instance_id'] = uuid + iface['type'] = 'instance:bridge' + if 'name' not in iface: iface['name']="br"+str(nb_bridge_ifaces) + iface['Mbps']=iface.pop('bandwidth', None) + if 'mac_address' not in iface: + iface['mac'] = af.gen_random_mac() + else: + iface['mac'] = iface['mac_address'] + del iface['mac_address'] + #iface['mac']=iface.pop('mac_address', None) #for leaving mac generation to libvirt + keys = ",".join(iface.keys()) + values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", iface.values() ) ) + cmd = "INSERT INTO ports (" + keys + ") VALUES (" + values + ")" + self.logger.debug(cmd) + self.cur.execute(cmd) + nb_bridge_ifaces += 1 + + if extended is not None: + if 'numas' not in extended or extended['numas'] is None: extended['numas'] = () + for numa in extended['numas']: + nb_numas += 1 + #cores + if 'cores' not in numa or numa['cores'] is None: numa['cores'] = () + for core in numa['cores']: + nb_cores += 1 + cmd = "UPDATE resources_core SET instance_id='%s'%s%s WHERE id='%s'" \ + % (uuid, \ + (",v_thread_id='" + str(core['vthread']) + "'") if 'vthread' in core else '', \ + (",paired='" + core['paired'] + "'") if 'paired' in core else '', \ + core['id'] ) + self.logger.debug(cmd) + self.cur.execute(cmd) + #interfaces + if 'interfaces' not in numa or numa['interfaces'] is None: numa['interfaces'] = () + for iface in 
numa['interfaces']: + #generate and insert an uuid; iface[id]=iface_uuid; iface[uuid]= net_id + iface['id'] = str(myUuid.uuid1()) # create_uuid + cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'ports')" % (iface['id'], uuid) + self.logger.debug(cmd) + self.cur.execute(cmd) + nb_ifaces += 1 + mbps_=("'"+str(iface['Mbps_used'])+"'") if 'Mbps_used' in iface and iface['Mbps_used'] is not None else "Mbps" + if iface["dedicated"]=="yes": + iface_model="PF" + elif iface["dedicated"]=="yes:sriov": + iface_model="VFnotShared" + elif iface["dedicated"]=="no": + iface_model="VF" + #else error + INSERT=(iface['mac_address'], iface['switch_port'], iface.get('vlan',None), 'instance:data', iface['Mbps_used'], iface['id'], + uuid, instance_dict['tenant_id'], iface.get('name',None), iface.get('vpci',None), iface.get('uuid',None), iface_model ) + cmd = "INSERT INTO ports (mac,switch_port,vlan,type,Mbps,uuid,instance_id,tenant_id,name,vpci,net_id, model) " + \ + " VALUES (" + ",".join(map(lambda x: 'Null' if x is None else "'"+str(x)+"'", INSERT )) + ")" + self.logger.debug(cmd) + self.cur.execute(cmd) + if 'uuid' in iface: + nets.append(iface['uuid']) + + #discover if this port is not used by anyone + cmd = "SELECT source_name, mac FROM ( SELECT root_id, count(instance_id) as used FROM resources_port" \ + " WHERE root_id=(SELECT root_id from resources_port WHERE id='%s')"\ + " GROUP BY root_id ) AS A JOIN resources_port as B ON A.root_id=B.id AND A.used=0" % iface['port_id'] + self.logger.debug(cmd) + self.cur.execute(cmd) + ports_to_free += self.cur.fetchall() + + cmd = "UPDATE resources_port SET instance_id='%s', port_id='%s',Mbps_used=%s WHERE id='%s'" \ + % (uuid, iface['id'], mbps_, iface['port_id']) + #if Mbps_used not suply, set the same value of 'Mpbs', that is the total + self.logger.debug(cmd) + self.cur.execute(cmd) + #memory + if 'memory' in numa and numa['memory'] is not None and numa['memory']>0: + cmd = "INSERT INTO resources_mem (numa_id, 
instance_id, consumed) VALUES ('%s','%s','%s')" % (numa['numa_id'], uuid, numa['memory']) + self.logger.debug(cmd) + self.cur.execute(cmd) + if 'devices' not in extended or extended['devices'] is None: extended['devices'] = () + for device in extended['devices']: + if 'vpci' in device: vpci = "'" + device['vpci'] + "'" + else: vpci = 'Null' + if 'image_id' in device: image_id = "'" + device['image_id'] + "'" + else: image_id = 'Null' + if 'xml' in device: xml = "'" + device['xml'] + "'" + else: xml = 'Null' + if 'dev' in device: dev = "'" + device['dev'] + "'" + else: dev = 'Null' + cmd = "INSERT INTO instance_devices (type, instance_id, image_id, vpci, xml, dev) VALUES ('%s','%s', %s, %s, %s, %s)" % \ + (device['type'], uuid, image_id, vpci, xml, dev) + self.logger.debug(cmd) + self.cur.execute(cmd) + ##inserting new log + #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('instances','debug','%s','new instance: %d numas, %d theads, %d ifaces %d bridge_ifaces')" % (uuid, nb_numas, nb_cores, nb_ifaces, nb_bridge_ifaces) + #self.logger.debug(cmd) + #self.cur.execute(cmd) + + #inseted ok + return 1, uuid + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "new_instance", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def delete_instance(self, instance_id, tenant_id, net_list, ports_to_free, logcause="requested by http"): + for retry_ in range(0,2): + cmd="" + try: + with self.con: + self.cur = self.con.cursor() + #get INSTANCE + cmd = "SELECT uuid FROM instances WHERE uuid='%s' AND tenant_id='%s'" % (instance_id, tenant_id) + self.logger.debug(cmd) + self.cur.execute(cmd) + if self.cur.rowcount == 0 : return 0, "instance %s not found in tenant %s" % (instance_id, tenant_id) + + #delete bridged ifaces, instace_devices, resources_mem; done by database: it is automatic by Database; FOREIGN KEY DELETE CASCADE + + #get nets afected + cmd = "SELECT DISTINCT net_id from ports WHERE instance_id = '%s' AND net_id is not Null 
AND type='instance:data'" % instance_id + self.logger.debug(cmd) + self.cur.execute(cmd) + net_list__ = self.cur.fetchall() + for net in net_list__: + net_list.append(net[0]) + + #get dataplane interfaces releases by this VM; both PF and VF with no other VF + cmd="SELECT source_name, mac FROM (SELECT root_id, count(instance_id) as used FROM resources_port WHERE instance_id='%s' GROUP BY root_id ) AS A" % instance_id \ + + " JOIN (SELECT root_id, count(instance_id) as used FROM resources_port GROUP BY root_id) AS B ON A.root_id=B.root_id AND A.used=B.used"\ + + " JOIN resources_port as C ON A.root_id=C.id" +# cmd = "SELECT DISTINCT root_id FROM resources_port WHERE instance_id = '%s'" % instance_id + self.logger.debug(cmd) + self.cur.execute(cmd) + ports_to_free += self.cur.fetchall() + + #update resources port + cmd = "UPDATE resources_port SET instance_id=Null, port_id=Null, Mbps_used='0' WHERE instance_id = '%s'" % instance_id + self.logger.debug(cmd) + self.cur.execute(cmd) + +# #filter dataplane ports used by this VM that now are free +# for port in ports_list__: +# cmd = "SELECT mac, count(instance_id) FROM resources_port WHERE root_id = '%s'" % port[0] +# self.logger.debug(cmd) +# self.cur.execute(cmd) +# mac_list__ = self.cur.fetchone() +# if mac_list__ and mac_list__[1]==0: +# ports_to_free.append(mac_list__[0]) + + + #update resources core + cmd = "UPDATE resources_core SET instance_id=Null, v_thread_id=Null, paired='N' WHERE instance_id = '%s'" % instance_id + self.logger.debug(cmd) + self.cur.execute(cmd) + + #delete all related uuids + cmd = "DELETE FROM uuids WHERE root_uuid='%s'" % instance_id + self.logger.debug(cmd) + self.cur.execute(cmd) + + ##insert log + #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('instances','debug','%s','delete instance %s')" % (instance_id, logcause) + #self.logger.debug(cmd) + #self.cur.execute(cmd) + + #delete instance + cmd = "DELETE FROM instances WHERE uuid='%s' AND tenant_id='%s'" % (instance_id, 
tenant_id) + self.cur.execute(cmd) + return 1, "instance %s from tenant %s DELETED" % (instance_id, tenant_id) + + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "delete_instance", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def get_ports(self, WHERE): + ''' Obtain ports using the WHERE filtering. + Attributes: + 'where_': dict of key:values, translated to key=value AND ... (Optional) + Return: a list with dictionarys at each row + ''' + for retry_ in range(0,2): + cmd="" + try: + with self.con: + + self.cur = self.con.cursor(mdb.cursors.DictCursor) + select_ = "SELECT uuid,'ACTIVE' as status,admin_state_up,name,net_id,\ + tenant_id,type,mac,vlan,switch_port,instance_id,Mbps FROM ports " + + if WHERE is None or len(WHERE) == 0: where_ = "" + else: + where_ = "WHERE " + " AND ".join(map( lambda x: str(x) + (" is Null" if WHERE[x] is None else "='"+str(WHERE[x])+"'"), WHERE.keys()) ) + limit_ = "LIMIT 100" + cmd = " ".join( (select_, where_, limit_) ) + # print "SELECT multiple de instance_ifaces, iface_uuid, external_ports" #print cmd + self.logger.debug(cmd) + self.cur.execute(cmd) + ports = self.cur.fetchall() + if self.cur.rowcount>0: af.DeleteNone(ports) + return self.cur.rowcount, ports + # return self.get_table(FROM=from_, SELECT=select_,WHERE=where_,LIMIT=100) + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "get_ports", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + + def check_target_net(self, net_id, tenant_id, port_type): + '''check if valid attachement of a port into a target net + Attributes: + net_id: target net uuid + tenant_id: client where tenant belongs. Not used in this version + port_type: string with the option 'instance:bridge', 'instance:data', 'external' + Return: + (0,net_dict) if ok, where net_dict contain 'uuid','type','vlan', ... 
+ (negative,string-error) if error + ''' + for retry_ in range(0,2): + cmd="" + try: + with self.con: + self.cur = self.con.cursor(mdb.cursors.DictCursor) + cmd = "SELECT * FROM nets WHERE uuid='%s'" % net_id + self.logger.debug(cmd) + self.cur.execute(cmd) + if self.cur.rowcount == 0 : return -1, "network_id %s does not match any net" % net_id + net = self.cur.fetchone() + break + + except (mdb.Error, AttributeError) as e: + r,c = self.format_error(e, "check_target_net", cmd) + if r!=-HTTP_Request_Timeout or retry_==1: return r,c + #check permissions + if tenant_id is not None and tenant_id is not "admin": + if net['tenant_id']==tenant_id and net['shared']=='false': + return -1, "needed admin privileges to attach to the net %s" % net_id + #check types + if (net['type'] in ('p2p','data') and 'port_type' == 'instance:bridge') or \ + (net['type'] in ('bridge_data','bridge_man') and 'port_type' != 'instance:bridge') : + return -1, "can not attach a port of type %s into a net of type %s" % (port_type, net['type']) + if net['type'] == 'ptp': + #look how many + nb_ports, data = self.get_ports( {'net_id':net_id} ) + if nb_ports<0: + return -1, data + else: + if net['provider']: + nb_ports +=1 + if nb_ports >=2: + return -1, "net of type p2p already contain two ports attached. No room for another" + + return 0, net + +if __name__ == "__main__": + print "Hello World" diff --git a/vim_schema.py b/vim_schema.py new file mode 100644 index 0000000..f0aecb7 --- /dev/null +++ b/vim_schema.py @@ -0,0 +1,712 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: nfvlabs@tid.es
##

''' Definition of dictionaries schemas used by validating input
    These dictionaries are validated using jsonschema library
'''
__author__ = "Alfonso Tierno"
__date__ = "$10-jul-2014 12:07:15$"

#
# SCHEMAS to validate input data
#

# --- Basic building-block schemas, reused by the compound schemas below ---
path_schema = {"type": "string", "pattern": "^(\.){0,2}(/[^/\"':{}\(\)]+)+$"}
http_schema = {"type": "string", "pattern": "^https?://[^'\"=]+$"}
# BUG FIX: jsonschema silently ignores unknown keywords, so the original
# misspelling "maximun" meant the upper bounds below were never enforced.
port_schema = {"type": "integer", "minimum": 1, "maximum": 65534}
# BUG FIX: '.' was unescaped, so any character matched between octets; use '\.'
ip_schema = {"type": "string", "pattern": "^([0-9]{1,3}\.){3}[0-9]{1,3}$"}
cidr_schema = {"type": "string", "pattern": "^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}$"}
name_schema = {"type": "string", "minLength": 1, "maxLength": 255, "pattern": "^[^,;()'\"]+$"}
nameshort_schema = {"type": "string", "minLength": 1, "maxLength": 64, "pattern": "^[^,;()'\"]+$"}
nametiny_schema = {"type": "string", "minLength": 1, "maxLength": 12, "pattern": "^[^,;()'\"]+$"}
xml_text_schema = {"type": "string", "minLength": 1, "maxLength": 1000, "pattern": "^[^']+$"}
description_schema = {"type": ["string", "null"], "maxLength": 255, "pattern": "^[^'\"]+$"}
id_schema_fake = {"type": "string", "minLength": 2, "maxLength": 36}  # "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
id_schema = {"type": "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
pci_schema = {"type": "string", "pattern": "^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\.[0-9a-fA-F]$"}
bandwidth_schema = {"type": "string", "pattern": "^[0-9]+ *([MG]bps)?$"}
integer0_schema = {"type": "integer", "minimum": 0}
integer1_schema = {"type": "integer", "minimum": 1}
vlan_schema = {"type": "integer", "minimum": 1, "maximum": 4095}       # BUG FIX: "maximun" -> "maximum"
vlan1000_schema = {"type": "integer", "minimum": 1000, "maximum": 4095}  # BUG FIX: "maximun" -> "maximum"
mac_schema = {"type": "string", "pattern": "^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}  # must be unicast: LSB bit of MSB byte == 0
net_bind_schema = {"oneOf": [{"type": "null"}, {"type": "string", "pattern": "^(default|((bridge|macvtap):[0-9a-zA-Z\.\-]{1,50})|openflow:[/0-9a-zA-Z\.\-]{1,50}(:vlan)?)$"}]}
yes_no_schema = {"type": "string", "enum": ["yes", "no"]}
log_level_schema = {"type": "string", "enum": ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]}

# --- Main openvimd configuration file schema ---
config_schema = {
    "title": "main configuration information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "http_port": port_schema,
        "http_admin_port": port_schema,
        "http_host": nameshort_schema,
        "http_url_prefix": path_schema,  # it does not work yet; it's supposed to be the base path to be used by bottle, but it must be explicitly declared
        "db_host": nameshort_schema,
        "db_user": nameshort_schema,
        "db_passwd": {"type": "string"},
        "db_name": nameshort_schema,
        "of_controller_ip": ip_schema,
        "of_controller_port": port_schema,
        "of_controller_dpid": nameshort_schema,
        "of_controller_nets_with_same_vlan": {"type": "boolean"},
        "of_controller": nameshort_schema,  # {"type":"string", "enum":["floodlight", "opendaylight"]},
        "of_controller_module": {"type": "string"},
        # "of_user": nameshort_schema,
        # "of_password": nameshort_schema,
        "test_mode": {"type": "boolean"},  # leave for backward compatibility
        "mode": {"type": "string", "enum": ["normal", "host only", "OF only", "development", "test"]},
        "development_bridge": {"type": "string"},
        "tenant_id": {"type": "string"},
        "image_path": path_schema,
        "network_vlan_range_start": vlan_schema,
        "network_vlan_range_end": vlan_schema,
        "bridge_ifaces": {
            "type": "object",
            "patternProperties": {
                ".": {
                    "type": "array",
                    "items": integer0_schema,
                    "minItems": 2,
                    "maxItems": 2,
                },
            },
            "minProperties": 2
        },
        "dhcp_server": {
            "type": "object",
            "properties": {
                "host": name_schema,
                "port": port_schema,
                "provider": {"type": "string", "enum": ["isc-dhcp-server"]},
                "user": nameshort_schema,
                "password": {"type": "string"},
                "key": {"type": "string"},
                "bridge_ifaces": {
                    "type": "array",
                    "items": nameshort_schema,
                },
                "nets": {
                    "type": "array",
                    "items": name_schema,
                },
            },
            "required": ['host', 'provider', 'user']
        },
        "log_level": log_level_schema,
        "log_level_db": log_level_schema,
        "log_level_of": log_level_schema,
    },
    # NOTE(review): "of_*" as a regex means "of" followed by zero or more
    # underscores, not an "of_" prefix; presumably "^of_" was intended — confirm.
    "patternProperties": {
        "of_*": {"type": ["string", "integer", "boolean"]}
    },
    "required": ['db_host', 'db_user', 'db_passwd', 'db_name',
                 'of_controller_ip', 'of_controller_port', 'of_controller_dpid',
                 'bridge_ifaces', 'of_controller'],
    "additionalProperties": False
}

# --- Image metadata (free-form guest hints) ---
metadata_schema = {
    "type": "object",
    "properties": {
        "architecture": {"type": "string"},
        "use_incremental": yes_no_schema,
        "vpci": pci_schema,
        "os_distro": {"type": "string"},
        "os_type": {"type": "string"},
        "os_version": {"type": "string"},
        "bus": {"type": "string"},
        "topology": {"type": "string", "enum": ["oneSocket"]}
    }
}

# --- Tenant CRUD schemas ---
tenant_new_schema = {
    "title": "tenant creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "tenant": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": nameshort_schema,
                "description": description_schema,
                "enabled": {"type": "boolean"}
            },
            "required": ["name"]
        }
    },
    "required": ["tenant"],
    "additionalProperties": False
}

tenant_edit_schema = {
    "title": "tenant edition information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "tenant": {
            "type": "object",
            "minProperties": 1,
            "properties": {
                "name": nameshort_schema,
                "description": description_schema,
                "enabled": {"type": "boolean"}
            },
            "additionalProperties": False,
        }
    },
    "required": ["tenant"],
    "additionalProperties": False
}

# --- Guest interface list (used inside extended/numa descriptions) ---
interfaces_schema = {
    "type": "array",
    "minItems": 0,
    "items": {
        "type": "object",
        "properties": {
            "name": name_schema,
            "dedicated": {"type": "string", "enum": ["yes", "no", "yes:sriov"]},
            "bandwidth": bandwidth_schema,
            "vpci": pci_schema,
            "uuid": id_schema,
            "mac_address": mac_schema
        },
        "additionalProperties": False,
        "required": ["dedicated", "bandwidth"]
    }
}

# --- Extended (EPA) flavor/server description: extra devices and NUMA layout ---
extended_schema = {
    "type": "object",
    "properties": {
        "processor_ranking": integer0_schema,
        "devices": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "type": {"type": "string", "enum": ["usb", "disk", "cdrom", "xml"]},
                    "vpci": pci_schema,
                    "imageRef": id_schema,
                    "xml": xml_text_schema,
                    "dev": nameshort_schema
                },
                "additionalProperties": False,
                "required": ["type"]
            }
        },
        "numas": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "memory": integer1_schema,
                    "cores": integer1_schema,
                    "paired-threads": integer1_schema,
                    "threads": integer1_schema,
                    "cores-id": {"type": "array", "items": integer0_schema},
                    "paired-threads-id": {"type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": integer0_schema}},
                    "threads-id": {"type": "array", "items": integer0_schema},
                    "interfaces": interfaces_schema
                },
                "additionalProperties": False,
                "minProperties": 1,
                # "required": ["memory"]
            }
        }
    },
    # "additionalProperties": False,
    # "required": ["processor_ranking"]
}

# --- Manual host insertion (full hardware description) ---
host_data_schema = {
    "title": "hosts manual insertion information schema",
    "type": "object",
    "properties": {
        "ip_name": nameshort_schema,
        "name": name_schema,
        "description": description_schema,
        "user": nameshort_schema,
        "password": nameshort_schema,
        "features": description_schema,
        "ranking": integer0_schema,
        "devices": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "type": {"type": "string", "enum": ["usb", "disk"]},
                    "vpci": pci_schema
                },
                "additionalProperties": False,
                "required": ["type"]
            }
        },
        "numas": {
            "type": "array",
            "minItems": 1,
            "items": {
                "type": "object",
                "properties": {
                    "admin_state_up": {"type": "boolean"},
                    "hugepages": integer1_schema,
                    "cores": {
                        "type": "array",
                        "minItems": 2,
                        "items": {
                            "type": "object",
                            "properties": {
                                "core_id": integer0_schema,
                                "thread_id": integer0_schema,
                                "status": {"type": "string", "enum": ["noteligible"]}
                            },
                            "additionalProperties": False,
                            "required": ["core_id", "thread_id"]
                        }
                    },
                    "interfaces": {
                        "type": "array",
                        "minItems": 1,
                        "items": {
                            "type": "object",
                            "properties": {
                                "source_name": nameshort_schema,
                                "mac": mac_schema,
                                "Mbps": integer0_schema,
                                "pci": pci_schema,
                                "sriovs": {
                                    "type": "array",
                                    "minItems": 1,
                                    "items": {
                                        "type": "object",
                                        "properties": {
                                            "source_name": {"oneOf": [integer0_schema, nameshort_schema]},
                                            "mac": mac_schema,
                                            "vlan": integer0_schema,
                                            "pci": pci_schema,
                                        },
                                        "additionalProperties": False,
                                        "required": ["source_name", "mac", "pci"]
                                    }
                                },
                                "switch_port": nameshort_schema,
                                "switch_dpid": nameshort_schema,
                            },
                            "additionalProperties": False,
                            "required": ["source_name", "mac", "Mbps", "pci"]
                        }
                    },
                    "numa_socket": integer0_schema,
                    "memory": integer1_schema
                },
                "additionalProperties": False,
                "required": ["hugepages", "cores", "numa_socket"]
            }
        }
    },
    "additionalProperties": False,
    "required": ["ranking", "numas", "ip_name", "user"]
}

# --- Host edition (subset of fields that may change after creation) ---
host_edit_schema = {
    "title": "hosts creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "host": {
            "type": "object",
            "properties": {
                "ip_name": nameshort_schema,
                "name": name_schema,
                "description": description_schema,
                "user": nameshort_schema,
                "password": nameshort_schema,
                "admin_state_up": {"type": "boolean"},
                "numas": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "numa_socket": integer0_schema,
                            "admin_state_up": {"type": "boolean"},
                            "interfaces": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "source_name": nameshort_schema,
                                        "switch_dpid": nameshort_schema,
                                        "switch_port": nameshort_schema,
                                    },
                                    "required": ["source_name"],
                                }
                            }
                        },
                        "required": ["numa_socket"],
                        "additionalProperties": False,
                    }
                }
            },
            "minProperties": 1,
            "additionalProperties": False
        },
    },
    "required": ["host"],
    "minProperties": 1,
    "additionalProperties": False
}

host_new_schema = {
    "title": "hosts creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "host": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "ip_name": nameshort_schema,
                "name": name_schema,
                "description": description_schema,
                "user": nameshort_schema,
                "password": nameshort_schema,
                "admin_state_up": {"type": "boolean"},
            },
            "required": ["name", "ip_name", "user"]
        },
        "host-data": host_data_schema
    },
    "required": ["host"],
    "minProperties": 1,
    "maxProperties": 2,
    "additionalProperties": False
}

# --- Flavor CRUD schemas ---
flavor_new_schema = {
    "title": "flavor creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "flavor": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": name_schema,
                "description": description_schema,
                "ram": integer0_schema,
                "vcpus": integer0_schema,
                "extended": extended_schema,
                "public": yes_no_schema
            },
            "required": ["name"]
        }
    },
    "required": ["flavor"],
    "additionalProperties": False
}

flavor_update_schema = {
    "title": "flavor update information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "flavor": {
            "type": "object",
            "properties": {
                "name": name_schema,
                "description": description_schema,
                "ram": integer0_schema,
                "vcpus": integer0_schema,
                "extended": extended_schema,
                "public": yes_no_schema
            },
            "minProperties": 1,
            "additionalProperties": False
        }
    },
    "required": ["flavor"],
    "additionalProperties": False
}

# --- Image CRUD schemas ---
image_new_schema = {
    "title": "image creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "image": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "path": {"oneOf": [path_schema, http_schema]},
                "description": description_schema,
                "name": name_schema,
                "metadata": metadata_schema,
                "public": yes_no_schema
            },
            "required": ["name", "path"]
        }
    },
    "required": ["image"],
    "additionalProperties": False
}

image_update_schema = {
    "title": "image update information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "image": {
            "type": "object",
            "properties": {
                "path": {"oneOf": [path_schema, http_schema]},
                "description": description_schema,
                "name": name_schema,
                "metadata": metadata_schema,
                "public": yes_no_schema
            },
            "minProperties": 1,
            "additionalProperties": False
        }
    },
    "required": ["image"],
    "additionalProperties": False
}

# --- Networks attached at server creation time ---
networks_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "name": name_schema,
            "bandwidth": bandwidth_schema,
            "vpci": pci_schema,
            "uuid": id_schema,
            "mac_address": mac_schema,
            "model": {"type": "string", "enum": ["virtio", "e1000", "ne2k_pci", "pcnet", "rtl8139"]},
            "type": {"type": "string", "enum": ["virtual", "PF", "VF", "VFnotShared"]}
        },
        "additionalProperties": False,
        "required": ["uuid"]
    }
}

server_new_schema = {
    "title": "server creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "server": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": name_schema,
                "description": description_schema,
                "start": {"type": "string", "enum": ["yes", "no", "paused"]},
                "hostId": id_schema,
                "flavorRef": id_schema,
                "imageRef": id_schema,
                "extended": extended_schema,
                "networks": networks_schema
            },
            "required": ["name", "flavorRef", "imageRef"]
        }
    },
    "required": ["server"],
    "additionalProperties": False
}

# --- Server actions: exactly one action key per request ---
server_action_schema = {
    "title": "server action information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "start": {"oneOf": [{"type": "null"}, {"type": "string", "enum": ["rebuild", "null"]}]},
        "pause": {"type": "null"},
        "resume": {"type": "null"},
        "shutoff": {"type": "null"},
        "shutdown": {"type": "null"},
        "forceOff": {"type": "null"},
        "terminate": {"type": "null"},
        "createImage": {
            "type": "object",
            "properties": {
                "path": path_schema,
                "description": description_schema,
                "name": name_schema,
                "metadata": metadata_schema,
                "imageRef": id_schema,
                "disk": {"oneOf": [{"type": "null"}, {"type": "string"}]},
            },
            "required": ["name"]
        },
        "rebuild": {"type": ["object", "null"]},
        "reboot": {
            "type": ["object", "null"],
            # "properties": {
            #     "type": {"type": "string", "enum": ["SOFT"]}
            # },
            # "minProperties": 1,
            # "maxProperties": 1,
            # "additionalProperties": False
        }
    },
    "minProperties": 1,
    "maxProperties": 1,
    "additionalProperties": False
}

# --- Network CRUD schemas ---
network_new_schema = {
    "title": "network creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "network": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": name_schema,
                "type": {"type": "string", "enum": ["bridge_man", "bridge_data", "data", "ptp"]},
                "shared": {"type": "boolean"},
                "tenant_id": id_schema,
                "admin_state_up": {"type": "boolean"},
                "provider:vlan": vlan_schema,
                "provider:physical": net_bind_schema,
                "cidr": cidr_schema,
                "enable_dhcp": {"type": "boolean"},
                "dhcp_first_ip": ip_schema,
                "dhcp_last_ip": ip_schema,
                "bind_net": name_schema,  # can be name, or uuid
                "bind_type": {"oneOf": [{"type": "null"}, {"type": "string", "pattern": "^vlan:[0-9]{1,4}$"}]}
            },
            "required": ["name"]
        }
    },
    "required": ["network"],
    "additionalProperties": False
}

network_update_schema = {
    "title": "network update information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "network": {
            "type": "object",
            "properties": {
                "name": name_schema,
                "type": {"type": "string", "enum": ["bridge_man", "bridge_data", "data", "ptp"]},
                "shared": {"type": "boolean"},
                "tenant_id": id_schema,
                "admin_state_up": {"type": "boolean"},
                "provider:vlan": vlan_schema,
                "provider:physical": net_bind_schema,
                "cidr": cidr_schema,
                "enable_dhcp": {"type": "boolean"},
                "dhcp_first_ip": ip_schema,
                "dhcp_last_ip": ip_schema,
                "bind_net": name_schema,  # can be name, or uuid
                "bind_type": {"oneOf": [{"type": "null"}, {"type": "string", "pattern": "^vlan:[0-9]{1,4}$"}]}
            },
            "minProperties": 1,
            "additionalProperties": False
        }
    },
    "required": ["network"],
    "additionalProperties": False
}

# --- Port CRUD schemas ---
port_new_schema = {
    "title": "port creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "port": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": nameshort_schema,
                "network_id": {"oneOf": [{"type": "null"}, id_schema]},
                "tenant_id": id_schema,
                "mac_address": {"oneOf": [{"type": "null"}, mac_schema]},
                "admin_state_up": {"type": "boolean"},
                "bandwidth": bandwidth_schema,
                "binding:switch_port": nameshort_schema,
                "binding:vlan": {"oneOf": [{"type": "null"}, vlan_schema]}
            },
            "required": ["name"]
        }
    },
    "required": ["port"],
    "additionalProperties": False
}

port_update_schema = {
    "title": "port update information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "port": {
            "type": "object",
            "properties": {
                "name": nameshort_schema,
                "network_id": {"anyOf": [{"type": "null"}, id_schema]}
            },
            "minProperties": 1,
            "additionalProperties": False
        }
    },
    "required": ["port"],
    "additionalProperties": False
}

# --- Persisted local state of the host/compute service ---
localinfo_schema = {
    "title": "localinfo information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "files": {"type": "object"},
        "inc_files": {"type": "object"},
        "server_files": {"type": "object"}
    },
    "required": ["files"]
}

hostinfo_schema = {
    "title": "host information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "iface_names": {
            "type": "object",
            "patternProperties": {
                ".": {"type": "string"}
            },
            "minProperties": 1
        }
    },
    "required": ["iface_names"]
}