Modify openvim code structure, improve py package 76/1576/8
author mirabal <leonardo.mirabal@altran.com>
Mon, 10 Apr 2017 18:05:40 +0000 (20:05 +0200)
committer tierno <alfonso.tiernosepulveda@telefonica.com>
Thu, 20 Apr 2017 15:37:03 +0000 (16:37 +0100)
- Move python code to osm_openvim folder
- Python package install the code in a folder called osm_openvim

Change-Id: I09f1cdac84418a6a34ceaec9a69c2315e10296a7
Signed-off-by: mirabal <leonardo.mirabal@altran.com>
47 files changed:
MANIFEST.in
Makefile
ODL.py [deleted file]
RADclass.py [deleted file]
auxiliary_functions.py [deleted file]
database_utils/migrate_vim_db.sh
definitionsClass.py [deleted file]
dhcp_thread.py [deleted file]
floodlight.py [deleted file]
host_thread.py [deleted file]
httpserver.py [deleted file]
onos.py [deleted file]
openflow
openflow_conn.py [deleted file]
openflow_thread.py [deleted file]
openvimd [new file with mode: 0755]
openvimd.cfg [deleted file]
openvimd.py [deleted file]
osm_openvim/ODL.py [new file with mode: 0644]
osm_openvim/RADclass.py [new file with mode: 0644]
osm_openvim/__init__.py [new file with mode: 0644]
osm_openvim/auxiliary_functions.py [new file with mode: 0644]
osm_openvim/definitionsClass.py [new file with mode: 0644]
osm_openvim/dhcp_thread.py [new file with mode: 0644]
osm_openvim/floodlight.py [new file with mode: 0644]
osm_openvim/host_thread.py [new file with mode: 0644]
osm_openvim/httpserver.py [new file with mode: 0644]
osm_openvim/onos.py [new file with mode: 0644]
osm_openvim/openflow_conn.py [new file with mode: 0644]
osm_openvim/openflow_thread.py [new file with mode: 0644]
osm_openvim/openvimd.cfg [new file with mode: 0644]
osm_openvim/ovim.py [new file with mode: 0755]
osm_openvim/vim_db.py [new file with mode: 0644]
osm_openvim/vim_schema.py [new file with mode: 0644]
ovim.py [deleted file]
scripts/openvim-report [new file with mode: 0755]
scripts/openvim-report.sh [deleted file]
scripts/service-floodlight [new file with mode: 0755]
scripts/service-floodlight.sh [deleted file]
scripts/service-opendaylight [new file with mode: 0755]
scripts/service-opendaylight.sh [deleted file]
scripts/service-openvim [new file with mode: 0755]
scripts/service-openvim.sh [deleted file]
setup.py
setup_lite.py [new file with mode: 0755]
vim_db.py [deleted file]
vim_schema.py [deleted file]

index 8cf96f0..db411b1 100644 (file)
@@ -1,4 +1,5 @@
-include openvimd.cfg
-recursive-include database_utils *
-recursive-include scripts *
+include openflow
+include openvimd
+include openvim
+recursive-include osm_openvim *
 
index 03b2f8b..3f9feff 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,36 +1,69 @@
 #!/usr/bin/env bash
 SHELL := /bin/bash
 
-all: clean build  pip install
-lite: clean build  pip install_lite
+all: clean build pip install
+lite: clean build_lite pip_lite install_lite
+
+clean:
+       rm -rf build
+
+prepare_lite:
+       mkdir -p build
+       cp -r  osm_openvim/ build/lib_osm_openvim
+       rm build/lib_osm_openvim/httpserver.py
+       rm build/lib_osm_openvim/openvimd.cfg
+       cp -r database_utils build/lib_osm_openvim/
+       cp -r scripts build/lib_osm_openvim/
+       cp MANIFEST.in build/
+       cp setup_lite.py build/setup.py
+       cp openflow build/
+       sed -i "s/from osm_openvim/from lib_osm_openvim/g" build/openflow
+       sed -i "s/import osm_openvim/import lib_osm_openvim/g" build/openflow
+       sed -i "s/import osm_openvim; print osm_openvim.__path__[0]/import lib_osm_openvim; print lib_osm_openvim.__path__[0]/g" build/lib_osm_openvim/database_utils/migrate_vim_db.sh
+       sed -i "s/recursive-include osm_openvim */recursive-include lib_osm_openvim */g" build/MANIFEST.in
+       sed '/include openvimd/d' build/MANIFEST.in
+       sed '/include openvim/d' build/MANIFEST.in
 
 prepare:
        mkdir -p build
-       cp *.py build/
+       cp -r osm_openvim/  build/
+       cp -r scripts build/osm_openvim/
+       cp -r database_utils build/osm_openvim/
+       cp -r templates build/osm_openvim/
+       cp -r test build/osm_openvim/
+       cp -r charm build/osm_openvim/
        cp MANIFEST.in build/
-       cp openvimd.py openvimd; cp openvimd build/openvimd
-       cp ovim.py ovim; cp ovim build/ovim
-       cp openvim build/
+       cp setup.py build/
        cp openflow build/
-       cp openvimd.cfg build/
-       cp -r scripts build/
-       cp -r database_utils build/
+       cp openvim build/
+       cp openvimd build/
 
 build: prepare
-       python -m py_compile build/*.py
+       python -m py_compile build/osm_openvim/*.py
 
-clean:
-       rm -rf build
-       rm -rf openvimd ovim
+build_lite: prepare_lite
+       python -m py_compile build/lib_osm_openvim/*.py
+
+#deb:
+#      cd build && python setup.py --command-packages=stdeb.command bdist_deb
+#
+#debianize:
+#      cd build && python setup.py --command-packages=stdeb.command debianize
+
+pip: clean build
+       cd build; ./setup.py sdist
 
-pip:
+pip_lite: clean build_lite
        cd build; ./setup.py sdist
 
-install:
-       cd build/dist; pip  install lib*
+install: clean build
+       cd build/dist; pip  install osm_openvim*
+
+install_lite: clean build_lite
+       cd build/dist; pip  install  lib_osm_openvim-*
+
+
 
-install_lite:
-       cd build/dist; pip  install lib*
 
 
 
diff --git a/ODL.py b/ODL.py
deleted file mode 100644 (file)
index 588409e..0000000
--- a/ODL.py
+++ /dev/null
@@ -1,553 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-"""
-Implement the plugging for OpendayLight openflow controller
-It creates the class OF_conn to create dataplane connections
-with static rules based on packet destination MAC address
-"""
-
-__author__="Pablo Montes, Alfonso Tierno"
-__date__ ="$28-oct-2014 12:07:15$"
-
-
-import json
-import requests
-import base64
-import logging
-import openflow_conn
-
-
-class OF_conn(openflow_conn.OpenflowConn):
-    """OpenDayLight connector. No MAC learning is used"""
-
-    def __init__(self, params):
-        """ Constructor.
-            Params: dictionary with the following keys:
-                of_dpid:     DPID to use for this controller
-                of_ip:       controller IP address
-                of_port:     controller TCP port
-                of_user:     user credentials, can be missing or None
-                of_password: password credentials
-                of_debug:    debug level for logging. Default to ERROR
-                other keys are ignored
-            Raise an exception if same parameter is missing or wrong
-        """
-
-        # check params
-        if "of_ip" not in params or params["of_ip"]==None or "of_port" not in params or params["of_port"]==None:
-            raise ValueError("IP address and port must be provided")
-
-        openflow_conn.OpenflowConn.__init__(self, params)
-        # internal variables
-        self.name = "OpenDayLight"
-        self.headers = {'content-type': 'application/json', 'Accept': 'application/json'}
-        self.auth=None
-        self.pp2ofi={}  # From Physical Port to OpenFlow Index
-        self.ofi2pp={}  # From OpenFlow Index to Physical Port
-
-        self.dpid = str(params["of_dpid"])
-        self.id = 'openflow:'+str(int(self.dpid.replace(':', ''), 16))
-        self.url = "http://%s:%s" %( str(params["of_ip"]), str(params["of_port"] ) )
-        if "of_user" in params and params["of_user"]!=None:
-            if not params.get("of_password"):
-                of_password=""
-            else:
-                of_password=str(params["of_password"])
-            self.auth = base64.b64encode(str(params["of_user"])+":"+of_password)
-            self.headers['Authorization'] = 'Basic '+self.auth
-
-        self.logger = logging.getLogger('vim.OF.ODL')
-        self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR")) )
-
-    def get_of_switches(self):
-        """
-        Obtain a a list of switches or DPID detected by this controller
-        :return: list length, and a list where each element a tuple pair (DPID, IP address)
-                 Raise an OpenflowconnConnectionException exception if fails with text_error
-        """
-        try:
-            of_response = requests.get(self.url+"/restconf/operational/opendaylight-inventory:nodes",
-                                       headers=self.headers)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200:
-                self.logger.warning("get_of_switches " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Error get_of_switches " + error_text)
-
-            self.logger.debug("get_of_switches " + error_text)
-            info = of_response.json()
-
-            if type(info) != dict:
-                self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
-
-            nodes = info.get('nodes')
-            if type(nodes) is not dict:
-                self.logger.error("get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s", str(type(info)))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes', not found or "
-                                                                   "not a dict. Wrong version?")
-
-            node_list = nodes.get('node')
-            if type(node_list) is not list:
-                self.logger.error("get_of_switches. Unexpected response, at 'nodes':'node', "
-                                  "not found or not a list: %s", str(type(node_list)))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found "
-                                                                   "or not a list. Wrong version?")
-
-            switch_list=[]
-            for node in node_list:
-                node_id = node.get('id')
-                if node_id is None:
-                    self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s", str(node))
-                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', "
-                                                                       "not found . Wrong version?")
-
-                if node_id == 'controller-config':
-                    continue
-
-                node_ip_address = node.get('flow-node-inventory:ip-address')
-                if node_ip_address is None:
-                    self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:"
-                                      "ip-address', not found: %s", str(node))
-                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:"
-                                                                       "'flow-node-inventory:ip-address', "
-                                                                       "not found. Wrong version?")
-
-                node_id_hex=hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)
-                switch_list.append( (':'.join(a+b for a,b in zip(node_id_hex[::2], node_id_hex[1::2])), node_ip_address))
-
-            return len(switch_list), switch_list
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_switches " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-        except ValueError as e:
-            # ValueError in the case that JSON can not be decoded
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_switches " + error_text)
-            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-    def obtain_port_correspondence(self):
-        """
-        Obtain the correspondence between physical and openflow port names
-        :return: dictionary: with physical name as key, openflow name as value,
-                 Raise a OpenflowconnConnectionException expection in case of failure
-        """
-        try:
-            of_response = requests.get(self.url+"/restconf/operational/opendaylight-inventory:nodes",
-                                       headers=self.headers)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200:
-                self.logger.warning("obtain_port_correspondence " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("obtain_port_correspondence " + error_text)
-            info = of_response.json()
-
-            if type(info) != dict:
-                self.logger.error("obtain_port_correspondence. Unexpected response not a dict: %s", str(info))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected openflow response, not a dict. "
-                                                                   "Wrong version?")
-
-            nodes = info.get('nodes')
-            if type(nodes) is not dict:
-                self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes', "
-                                  "not found or not a dict: %s", str(type(nodes)))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes',not found or not a dict. Wrong version?")
-
-            node_list = nodes.get('node')
-            if type(node_list) is not list:
-                self.logger.error("obtain_port_correspondence. Unexpected response, at 'nodes':'node', "
-                                  "not found or not a list: %s", str(type(node_list)))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, at 'nodes':'node', "
-                                                                   "not found or not a list. Wrong version?")
-
-            for node in node_list:
-                node_id = node.get('id')
-                if node_id is None:
-                    self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:'id', "
-                                      "not found: %s", str(node))
-                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', "
-                                                                       "not found . Wrong version?")
-
-                if node_id == 'controller-config':
-                    continue
-
-                # Figure out if this is the appropriate switch. The 'id' is 'openflow:' plus the decimal value
-                # of the dpid
-                #  In case this is not the desired switch, continue
-                if self.id != node_id:
-                    continue
-
-                node_connector_list = node.get('node-connector')
-                if type(node_connector_list) is not list:
-                    self.logger.error("obtain_port_correspondence. Unexpected response at "
-                                      "'nodes':'node'[]:'node-connector', not found or not a list: %s", str(node))
-                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:"
-                                                                       "'node-connector', not found  or not a list. "
-                                                                       "Wrong version?")
-
-                for node_connector in node_connector_list:
-                    self.pp2ofi[ str(node_connector['flow-node-inventory:name']) ] = str(node_connector['id'] )
-                    self.ofi2pp[ node_connector['id'] ] =  str(node_connector['flow-node-inventory:name'])
-
-                node_ip_address = node.get('flow-node-inventory:ip-address')
-                if node_ip_address is None:
-                    self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:"
-                                      "'flow-node-inventory:ip-address', not found: %s", str(node))
-                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:"
-                                                                       "'flow-node-inventory:ip-address', not found. Wrong version?")
-                self.ip_address = node_ip_address
-
-                # If we found the appropriate dpid no need to continue in the for loop
-                break
-
-            # print self.name, ": obtain_port_correspondence ports:", self.pp2ofi
-            return self.pp2ofi
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("obtain_port_correspondence " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-        except ValueError as e:
-            # ValueError in the case that JSON can not be decoded
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("obtain_port_correspondence " + error_text)
-            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-    def get_of_rules(self, translate_of_ports=True):
-        """
-        Obtain the rules inserted at openflow controller
-        :param translate_of_ports:
-        :return: dict if ok: with the rule name as key and value is another dictionary with the following content:
-                    priority: rule priority
-                    name:         rule name (present also as the master dict key)
-                    ingress_port: match input port of the rule
-                    dst_mac:      match destination mac address of the rule, can be missing or None if not apply
-                    vlan_id:      match vlan tag of the rule, can be missing or None if not apply
-                    actions:      list of actions, composed by a pair tuples:
-                        (vlan, None/int): for stripping/setting a vlan tag
-                        (out, port):      send to this port
-                    switch:       DPID, all
-                    Raise a OpenflowconnConnectionException expection in case of failure
-
-        """
-
-        try:
-            # get rules
-            if len(self.ofi2pp) == 0:
-                self.obtain_port_correspondence()
-
-            of_response = requests.get(self.url+"/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
-                                          "/table/0", headers=self.headers)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-
-            # The configured page does not exist if there are no rules installed. In that case we return an empty dict
-            if of_response.status_code == 404:
-                return {}
-
-            elif of_response.status_code != 200:
-                self.logger.warning("get_of_rules " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-            self.logger.debug("get_of_rules " + error_text)
-
-            info = of_response.json()
-
-            if type(info) != dict:
-                self.logger.error("get_of_rules. Unexpected response not a dict: %s", str(info))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected openflow response, not a dict. "
-                                                                   "Wrong version?")
-
-            table = info.get('flow-node-inventory:table')
-            if type(table) is not list:
-                self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table', "
-                                  "not a list: %s", str(type(table)))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table',"
-                                                                   " not a list. Wrong version?")
-
-            flow_list = table[0].get('flow')
-            if flow_list is None:
-                return {}
-
-            if type(flow_list) is not list:
-                self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table'[0]:'flow', not a list: %s", str(type(flow_list)))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'flow-node-inventory:"
-                                                                   "table'[0]:'flow', not a list. Wrong version?")
-
-            # TODO translate ports according to translate_of_ports parameter
-
-            rules = dict()
-            for flow in flow_list:
-                if not ('id' in flow and 'match' in flow and 'instructions' in flow and
-                                'instruction' in flow['instructions'] and
-                                'apply-actions' in flow['instructions']['instruction'][0] and
-                                'action' in flow['instructions']['instruction'][0]['apply-actions']):
-                    raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, one or more "
-                                                                       "elements are missing. Wrong version?")
-
-                flow['instructions']['instruction'][0]['apply-actions']['action']
-
-                rule = dict()
-                rule['switch'] = self.dpid
-                rule['priority'] = flow.get('priority')
-                # rule['name'] = flow['id']
-                # rule['cookie'] = flow['cookie']
-                if 'in-port' in flow['match']:
-                    in_port = flow['match']['in-port']
-                    if not in_port in self.ofi2pp:
-                        raise openflow_conn.OpenflowconnUnexpectedResponse("Error: Ingress port " + in_port +
-                                                                           " is not in switch port list")
-
-                    if translate_of_ports:
-                        in_port = self.ofi2pp[in_port]
-
-                    rule['ingress_port'] = in_port
-
-                    if 'vlan-match' in flow['match'] and 'vlan-id' in flow['match']['vlan-match'] and \
-                                'vlan-id' in flow['match']['vlan-match']['vlan-id'] and \
-                                'vlan-id-present' in flow['match']['vlan-match']['vlan-id'] and \
-                                flow['match']['vlan-match']['vlan-id']['vlan-id-present'] == True:
-                        rule['vlan_id'] = flow['match']['vlan-match']['vlan-id']['vlan-id']
-
-                    if 'ethernet-match' in flow['match'] and 'ethernet-destination' in flow['match']['ethernet-match'] and \
-                        'address' in flow['match']['ethernet-match']['ethernet-destination']:
-                        rule['dst_mac'] = flow['match']['ethernet-match']['ethernet-destination']['address']
-
-                instructions=flow['instructions']['instruction'][0]['apply-actions']['action']
-
-                max_index=0
-                for instruction in instructions:
-                    if instruction['order'] > max_index:
-                        max_index = instruction['order']
-
-                actions=[None]*(max_index+1)
-                for instruction in instructions:
-                    if 'output-action' in instruction:
-                        if not 'output-node-connector' in instruction['output-action']:
-                            raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, one or "
-                                                                               "more elementa are missing. "
-                                                                               "Wrong version?")
-
-                        out_port = instruction['output-action']['output-node-connector']
-                        if not out_port in self.ofi2pp:
-                            raise openflow_conn.OpenflowconnUnexpectedResponse("Error: Output port " + out_port +
-                                                                               " is not in switch port list")
-
-                        if translate_of_ports:
-                            out_port = self.ofi2pp[out_port]
-
-                        actions[instruction['order']] = ('out',out_port)
-
-                    elif 'strip-vlan-action' in instruction:
-                        actions[instruction['order']] = ('vlan', None)
-
-                    elif 'set-field' in instruction:
-                        if not ('vlan-match' in instruction['set-field'] and 'vlan-id' in  instruction['set-field']['vlan-match'] and 'vlan-id' in instruction['set-field']['vlan-match']['vlan-id']):
-                            raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, one or "
-                                                                               "more elements are missing. "
-                                                                               "Wrong version?")
-
-                        actions[instruction['order']] = ('vlan', instruction['set-field']['vlan-match']['vlan-id']['vlan-id'])
-
-                actions = [x for x in actions if x != None]
-
-                rule['actions'] = list(actions)
-                rules[flow['id']] = dict(rule)
-
-                #flow['id']
-                #flow['priority']
-                #flow['cookie']
-                #flow['match']['in-port']
-                #flow['match']['vlan-match']['vlan-id']['vlan-id']
-                # match -> in-port
-                #      -> vlan-match -> vlan-id -> vlan-id
-                #flow['match']['vlan-match']['vlan-id']['vlan-id-present']
-                #TODO we asume that is not using rules with vlan-id-present:false
-                #instructions -> instruction -> apply-actions -> action
-                #instructions=flow['instructions']['instruction'][0]['apply-actions']['action']
-                #Es una lista. Posibles elementos:
-                #max_index=0
-                #for instruction in instructions:
-                #  if instruction['order'] > max_index:
-                #    max_index = instruction['order']
-                #actions=[None]*(max_index+1)
-                #for instruction in instructions:
-                #   if 'output-action' in instruction:
-                #     actions[instruction['order']] = ('out',instruction['output-action']['output-node-connector'])
-                #   elif 'strip-vlan-action' in instruction:
-                #     actions[instruction['order']] = ('vlan', None)
-                #   elif 'set-field' in instruction:
-                #     actions[instruction['order']] = ('vlan', instruction['set-field']['vlan-match']['vlan-id']['vlan-id'])
-                #
-                #actions = [x for x in actions if x != None]
-                #                                                       -> output-action -> output-node-connector
-                #                                                       -> pop-vlan-action
-            return rules
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_rules " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-        except ValueError as e:
-            # ValueError in the case that JSON can not be decoded
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_rules " + error_text)
-            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-    def del_flow(self, flow_name):
-        """
-        Delete an existing rule
-        :param flow_name: flow_name, this is the rule name
-        :return: Raise a OpenflowconnConnectionException expection in case of failure
-        """
-
-        try:
-            of_response = requests.delete(self.url+"/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
-                                          "/table/0/flow/"+flow_name, headers=self.headers)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200:
-                self.logger.warning("del_flow " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("del_flow OK " + error_text)
-            return None
-        except requests.exceptions.RequestException as e:
-            # raise an exception in case of contection error
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("del_flow " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-
-    def new_flow(self, data):
-        """
-        Insert a new static rule
-        :param data: dictionary with the following content:
-                priority:     rule priority
-                name:         rule name
-                ingress_port: match input port of the rule
-                dst_mac:      match destination mac address of the rule, missing or None if not apply
-                vlan_id:      match vlan tag of the rule, missing or None if not apply
-                actions:      list of actions, composed by a pair tuples with these posibilities:
-                    ('vlan', None/int): for stripping/setting a vlan tag
-                    ('out', port):      send to this port
-        :return: Raise a OpenflowconnConnectionException expection in case of failure
-        """
-
-        try:
-
-            if len(self.pp2ofi) == 0:
-                self.obtain_port_correspondence()
-
-            # We have to build the data for the opendaylight call from the generic data
-            sdata = dict()
-            sdata['flow-node-inventory:flow'] = list()
-            sdata['flow-node-inventory:flow'].append(dict())
-            flow = sdata['flow-node-inventory:flow'][0]
-            flow['id'] = data['name']
-            flow['flow-name'] = data['name']
-            flow['idle-timeout'] = 0
-            flow['hard-timeout'] = 0
-            flow['table_id'] = 0
-            flow['priority'] = data.get('priority')
-            flow['match'] = dict()
-            if not data['ingress_port'] in self.pp2ofi:
-                error_text = 'Error. Port '+data['ingress_port']+' is not present in the switch'
-                self.logger.warning("new_flow " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            flow['match']['in-port'] = self.pp2ofi[data['ingress_port']]
-            if 'dst_mac' in data:
-                flow['match']['ethernet-match'] = dict()
-                flow['match']['ethernet-match']['ethernet-destination'] = dict()
-                flow['match']['ethernet-match']['ethernet-destination']['address'] = data['dst_mac']
-            if data.get('vlan_id'):
-                flow['match']['vlan-match'] = dict()
-                flow['match']['vlan-match']['vlan-id'] = dict()
-                flow['match']['vlan-match']['vlan-id']['vlan-id-present'] = True
-                flow['match']['vlan-match']['vlan-id']['vlan-id'] = int(data['vlan_id'])
-            flow['instructions'] = dict()
-            flow['instructions']['instruction'] = list()
-            flow['instructions']['instruction'].append(dict())
-            flow['instructions']['instruction'][0]['order'] = 1
-            flow['instructions']['instruction'][0]['apply-actions'] = dict()
-            flow['instructions']['instruction'][0]['apply-actions']['action'] = list()
-            actions = flow['instructions']['instruction'][0]['apply-actions']['action']
-
-            order = 0
-            for action in data['actions']:
-                new_action = { 'order': order }
-                if  action[0] == "vlan":
-                    if action[1] == None:
-                        # strip vlan
-                        new_action['strip-vlan-action'] = dict()
-                    else:
-                        new_action['set-field'] = dict()
-                        new_action['set-field']['vlan-match'] = dict()
-                        new_action['set-field']['vlan-match']['vlan-id'] = dict()
-                        new_action['set-field']['vlan-match']['vlan-id']['vlan-id-present'] = True
-                        new_action['set-field']['vlan-match']['vlan-id']['vlan-id'] = int(action[1])
-                elif action[0] == 'out':
-                    new_action['output-action'] = dict()
-                    if not action[1] in self.pp2ofi:
-                        error_msj = 'Port '+action[1]+' is not present in the switch'
-                        raise openflow_conn.OpenflowconnUnexpectedResponse(error_msj)
-
-                    new_action['output-action']['output-node-connector'] = self.pp2ofi[ action[1] ]
-                else:
-                    error_msj = "Unknown item '%s' in action list" % action[0]
-                    self.logger.error("new_flow " + error_msj)
-                    raise openflow_conn.OpenflowconnUnexpectedResponse(error_msj)
-
-                actions.append(new_action)
-                order += 1
-
-            # print json.dumps(sdata)
-            of_response = requests.put(self.url+"/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
-                          "/table/0/flow/" + data['name'],
-                                headers=self.headers, data=json.dumps(sdata) )
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200:
-                self.logger.warning("new_flow " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("new_flow OK " + error_text)
-            return None
-
-        except requests.exceptions.RequestException as e:
-            # raise an exception in case of contection error
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("new_flow " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-
-    def clear_all_flows(self):
-        """
-        Delete all existing rules
-        :return: Raise a OpenflowconnConnectionException expection in case of failure
-        """
-        try:
-            of_response = requests.delete(self.url+"/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
-                                      "/table/0", headers=self.headers)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200 and of_response.status_code != 404: #HTTP_Not_Found
-                self.logger.warning("clear_all_flows " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("clear_all_flows OK " + error_text)
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("clear_all_flows " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
diff --git a/RADclass.py b/RADclass.py
deleted file mode 100644 (file)
index a4c10ec..0000000
+++ /dev/null
@@ -1,1618 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-Implement the logic for obtaining compute nodes information 
-Resource Availability Descriptor 
-'''
-__author__="Pablo Montes"
-
-#TODO: remove warnings, remove unused things 
-
-from definitionsClass import definitionsClass
-from auxiliary_functions import get_ssh_connection
-import libvirt
-from xml.etree import ElementTree
-import paramiko 
-import re
-import yaml
-
-
-def getCredentials(creds, data):
-    """Used as a backup for libvirt.openAuth in order to provide password that came with data,
-    not used by the moment
-    """
-    print "RADclass:getCredentials", creds, data
-    for cred in creds:
-        print cred[1] + ": ",
-        if cred[0] == libvirt.VIR_CRED_AUTHNAME:
-            cred[4] = data
-        elif cred[0] == libvirt.VIR_CRED_PASSPHRASE:
-            cred[4] = data
-        else:
-            return -1
-    return 0
-
-class RADclass():
-    def __init__(self):
-        self.name = None
-        self.machine = None
-        self.user = None
-        self.password = None
-        self.nodes = dict()                 #Dictionary of nodes. Keys are the node id, values are Node() elements
-        self.nr_processors = None           #Integer. Number of processors in the system 
-        self.processor_family = None        #If all nodes have the same value equal them, otherwise keep as None
-        self.processor_manufacturer = None  #If all nodes have the same value equal them, otherwise keep as None
-        self.processor_version = None       #If all nodes have the same value equal them, otherwise keep as None
-        self.processor_features = None      #If all nodes have the same value equal them, otherwise keep as None
-        self.memory_type = None             #If all nodes have the same value equal them, otherwise keep as None
-        self.memory_freq = None             #If all nodes have the same value equal them, otherwise keep as None
-        self.memory_nr_channels = None      #If all nodes have the same value equal them, otherwise keep as None
-        self.memory_size = None             #Integer. Sum of the memory in all nodes
-        self.memory_hugepage_sz = None
-        self.hypervisor = Hypervisor()      #Hypervisor information
-        self.os = OpSys()                   #Operating system information
-        self.ports_list = list()            #List containing all network ports in the node. This is used to avoid having defined multiple times the same port in the system
-    
-    
-    def obtain_RAD(self, user, password, machine):
-        """This function obtains the RAD information from the remote server.
-        It uses both a ssh and a libvirt connection. 
-        It is desirable in future versions get rid of the ssh connection, but currently 
-        libvirt does not provide all the needed information. 
-        Returns (True, Warning) in case of success and (False, <error>) in case of error"""
-        warning_text=""
-        try:
-            #Get virsh and ssh connection
-            (return_status, code) = get_ssh_connection(machine, user, password)
-            if not return_status:
-                print 'RADclass.obtain_RAD() error:', code
-                return (return_status, code)
-            ssh_conn = code
-            
-            self.connection_IP = machine
-            #print "libvirt open pre"
-            virsh_conn=libvirt.open("qemu+ssh://"+user+'@'+machine+"/system")
-            #virsh_conn=libvirt.openAuth("qemu+ssh://"+user+'@'+machine+"/system", 
-            #        [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE, libvirt.VIR_CRED_USERNAME], getCredentials, password],
-            #        0)
-            #print "libvirt open after"
-            
-    #         #Set connection infomation
-    #         (return_status, code) = self.set_connection_info(machine, user, password)
-    #         if not return_status:
-    #             return (return_status, 'Error in '+machine+': '+code)
-            
-            #Set server name
-            machine_name = get_hostname(virsh_conn)
-            (return_status, code) = self.set_name(machine_name)
-            if not return_status:
-                return (return_status, 'Error at self.set_name in '+machine+': '+code)
-            warning_text += code
-            
-            #Get the server processors information
-            processors = dict()
-            (return_status, code) = get_processor_information(ssh_conn, virsh_conn, processors)
-            if not return_status:
-                return (return_status, 'Error at get_processor_information in '+machine+': '+code)
-            warning_text += code
-            
-            #Get the server memory information
-            memory_nodes = dict()
-            (return_status, code) = get_memory_information(ssh_conn, virsh_conn, memory_nodes)
-            if not return_status:
-                return (return_status, 'Error at get_memory_information in '+machine+': '+code)
-            warning_text += code
-            
-            #Get nics information
-            nic_topology = dict()
-    #         (return_status, code) = get_nic_information_old(ssh_conn, nic_topology)
-            (return_status, code) = get_nic_information(ssh_conn, virsh_conn, nic_topology)
-            if not return_status:
-                return (return_status, 'Error at get_nic_information in '+machine+': '+code)
-            warning_text += code
-            
-            #Pack each processor, memory node  and nics in a node element
-            #and add the node to the RAD element
-            for socket_id, processor in processors.iteritems():
-                node = Node()
-                if not socket_id in nic_topology:
-                    nic_topology[socket_id] = list()
-                    
-                (return_status, code) = node.set(processor, memory_nodes[socket_id], nic_topology[socket_id])
-    #             else:
-    #                 (return_status, code) = node.set(processor, memory_nodes[socket_id])
-                if not return_status:
-                    return (return_status, 'Error at node.set in '+machine+': '+code)
-                warning_text += code
-                (return_status, code) = self.insert_node(node)
-                if not return_status:
-                    return (return_status, 'Error at self.insert_node in '+machine+': '+code)
-                if code not in warning_text:
-                    warning_text += code
-            
-            #Fill os data
-            os = OpSys()
-            (return_status, code) = get_os_information(ssh_conn, os)
-            if not return_status:
-                return (return_status, 'Error at get_os_information in '+machine+': '+code)
-            warning_text += code
-            (return_status, code) = self.set_os(os)
-            if not return_status:
-                return (return_status, 'Error at self.set_os in '+machine+': '+code)
-            warning_text += code
-            
-            #Fill hypervisor data
-            hypervisor = Hypervisor()
-            (return_status, code) = get_hypervisor_information(virsh_conn, hypervisor)
-            if not return_status:
-                return (return_status, 'Error at get_hypervisor_information in '+machine+': '+code)
-            warning_text += code
-            (return_status, code) = self.set_hypervisor(hypervisor)
-            if not return_status:
-                return (return_status, 'Error at self.set_hypervisor in '+machine+': '+code)
-            warning_text += code
-            ssh_conn.close()
-                
-            return (True, warning_text)
-        except libvirt.libvirtError, e:
-            text = e.get_error_message()
-            print 'RADclass.obtain_RAD() exception:', text
-            return (False, text)
-        except paramiko.ssh_exception.SSHException, e:
-            text = e.args[0]
-            print  "obtain_RAD ssh Exception:", text
-            return False, text
-
-    def set_name(self,name):
-        """Sets the machine name. 
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        if not isinstance(name,str):
-            return (False, 'The variable \'name\' must be text')
-        self.name = name
-        return (True, "")
-    
-    def set_connection_info(self, machine, user, password):
-        """Sets the connection information. 
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        if not isinstance(machine,str):
-            return (False, 'The variable \'machine\' must be text')
-        if not isinstance(user,str):
-            return (False, 'The variable \'user\' must be text')
-#         if not isinstance(password,str):
-#             return (False, 'The variable \'password\' must be text')
-        (self.machine, self.user, self.password) = (machine, user, password)
-        return (True, "")
-        
-    def insert_node(self,node):
-        """Inserts a new node and updates class variables. 
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        if not isinstance(node,Node):
-            return (False, 'The variable \'node\' must be a Node element')
-        
-        if node.id_ in self.nodes:
-            return (False, 'The node is already present in the nodes list.')
-        
-        #Check if network ports have not been inserted previously as part of another node
-        for port_key in node.ports_list:
-            if port_key in self.ports_list:
-                return (False, 'Network port '+port_key+' defined multiple times in the system')
-            self.ports_list.append(port_key)
-        
-        #Insert the new node
-        self.nodes[node.id_] = node
-        
-        #update variables
-        self.update_variables()
-        
-        return (True, "")
-    
-    def update_variables(self):
-        """Updates class variables. 
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        warning_text=""
-        #The number of processors and nodes is the same
-        self.nr_processors = len(self.nodes)
-        
-        #If all processors are the same get the values. Otherwise keep them as none
-        prev_processor_family = prev_processor_manufacturer = prev_processor_version = prev_processor_features = None
-        different_processor_family = different_processor_manufacturer = different_processor_version = different_processor_features = False
-        for node in self.nodes.itervalues():
-            (self.processor_family, self.processor_manufacturer, self.processor_version, self.processor_features) = node.get_processor_info()
-            if prev_processor_family != None and self.processor_family != prev_processor_family:
-                different_processor_family = True
-            if prev_processor_manufacturer != None and self.processor_manufacturer != prev_processor_manufacturer:
-                different_processor_manufacturer = True
-            if prev_processor_version != None and self.processor_version != prev_processor_version:
-                different_processor_version = True
-            if prev_processor_features != None and self.processor_features != prev_processor_features:
-                different_processor_features = True
-            (prev_processor_family, prev_processor_manufacturer, prev_processor_version, prev_processor_features) = (self.processor_family, self.processor_manufacturer, self.processor_version, self.processor_features)
-
-        if different_processor_family:
-            self.processor_family = None
-        if different_processor_features:
-            self.processor_features = None
-        if different_processor_manufacturer:
-            self.processor_manufacturer = None
-        if different_processor_version:
-            self.processor_version = None
-            
-        #If all memory nodes are the same get the values. Otherwise keep them as none
-        #Sum the total memory
-        self.memory_size = 0
-        different_memory_freq = different_memory_nr_channels = different_memory_type = different_memory_hugepage_sz = False
-        prev_memory_freq = prev_memory_nr_channels = prev_memory_type = prev_memory_hugepage_sz = None
-        for node in self.nodes.itervalues():
-            (self.memory_freq, self.memory_nr_channels, self.memory_type, memory_size, self.memory_hugepage_sz) = node.get_memory_info()
-            self.memory_size += memory_size 
-            if prev_memory_freq != None and self.memory_freq != prev_memory_freq:
-                different_memory_freq = True
-            if prev_memory_nr_channels != None and self.memory_nr_channels != prev_memory_nr_channels:
-                different_memory_nr_channels = True
-            if prev_memory_type != None and self.memory_type != prev_memory_type:
-                different_memory_type = True
-            if prev_memory_hugepage_sz != None and self.memory_hugepage_sz != prev_memory_hugepage_sz:
-                different_memory_hugepage_sz = True
-            (prev_memory_freq, prev_memory_nr_channels, prev_memory_type, prev_memory_hugepage_sz) = (self.memory_freq, self.memory_nr_channels, self.memory_type, self.memory_hugepage_sz)
-            
-        if different_memory_freq:
-            self.memory_freq = None
-        if different_memory_nr_channels:
-            self.memory_nr_channels = None
-        if different_memory_type:
-            self.memory_type = None
-        if different_memory_hugepage_sz:
-            warning_text += 'Detected different hugepages size in different sockets\n'
-            
-        return (True, warning_text)
-        
-    def set_hypervisor(self,hypervisor):
-        """Sets the hypervisor. 
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        if not isinstance(hypervisor,Hypervisor):
-            return (False, 'The variable \'hypervisor\' must be of class Hypervisor')
-        
-        self.hypervisor.assign(hypervisor) 
-        return (True, "")
-    
-    def set_os(self,os):
-        """Sets the operating system. 
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        if not isinstance(os,OpSys):
-            return (False, 'The variable \'os\' must be of class OpSys')
-        
-        self.os.assign(os)
-        return (True, "")
-    
-    def to_text(self):
-        text= 'name: '+str(self.name)+'\n'
-        text+= 'processor:\n'
-        text+= '    nr_processors: '+str(self.nr_processors)+'\n' 
-        text+= '    family: '+str(self.processor_family)+'\n'
-        text+= '    manufacturer: '+str(self.processor_manufacturer)+'\n'
-        text+= '    version: '+str(self.processor_version)+'\n'
-        text+= '    features: '+str(self.processor_features)+'\n'
-        text+= 'memory:\n'
-        text+= '    type: '+str(self.memory_type)+'\n'
-        text+= '    freq: '+str(self.memory_freq)+'\n'
-        text+= '    nr_channels: '+str(self.memory_nr_channels)+'\n'
-        text+= '    size: '+str(self.memory_size)+'\n'
-        text+= 'hypervisor:\n'
-        text+= self.hypervisor.to_text()
-        text+= 'os:\n'
-        text+= self.os.to_text()
-        text+= 'resource topology:\n'
-        text+= '    nr_nodes: '+ str(len(self.nodes))+'\n'
-        text+= '    nodes:\n'
-        for node_k, node_v in self.nodes.iteritems():
-            text+= '        node'+str(node_k)+':\n'
-            text+= node_v.to_text()
-        return text
-    
-    def to_yaml(self):
-        return yaml.load(self.to_text())
-    
-class Node():
-    def __init__(self):
-        self.id_ = None                      #Integer. Node id. Unique in the system
-        self.processor = ProcessorNode()    #Information about the processor in the node
-        self.memory = MemoryNode()          #Information about the memory in the node
-        self.nic_list = list()              #List of Nic() containing information about the nics associated to the node
-        self.ports_list = list()            #List containing all network ports in the node. This is used to avoid having defined multiple times the same port in the system
-        
-    def get_processor_info(self):
-        """Gets the processor information. Returns (processor_family, processor_manufacturer, processor_version, processor_features)"""
-        return self.processor.get_info()
-        
-    def get_memory_info(self):
-        """Gets the memory information. Returns (memory_freq, memory_nr_channels, memory_type, memory_size)"""
-        return self.memory.get_info()
-    
-#     def set(self, *args):
-#         """Sets the node information. Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-#         if len(args)==2:
-#             processor = args[0]
-#             memory = args[1]
-#             nics = False
-#         elif len(args)==3:
-#             processor = args[0]
-#             memory = args[1]
-#             nic_list = args[2]
-#             nics = True
-#         else:
-#             return (False, 'Wrong number of elements calling Node().set()')
-
-    def set(self, processor, memory, nic_list):
-        (status, return_code) = self.processor.assign(processor)
-        if not status:
-            return (status, return_code)
-        
-        self.id_ = processor.id_
-        
-        (status, return_code) = self.memory.assign(memory)
-        if not status:
-            return (status, return_code)
-        
-#         if nics:
-        for nic in nic_list:
-            if not isinstance(nic,Nic):
-                return (False, 'The nics must be of type Nic')
-            self.nic_list.append(nic)
-            for port_key in nic.ports.iterkeys():
-                if port_key in self.ports_list:
-                    return (False, 'Network port '+port_key+'defined multiple times in the same node')
-                self.ports_list.append(port_key)
-            
-        return (True,"")
-   
-    def assign(self, node):
-        """Sets the node information. 
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        warning_text=""
-        processor = node.processor
-        memory = node.memory
-        nic_list = node.nic_list
-        (status, return_code) = self.processor.assign(processor)
-        if not status:
-            return (status, return_code)
-        
-        self.id_ = processor.id_
-        
-        (status, return_code) = self.memory.assign(memory)
-        if not status:
-            return (status, return_code)
-        warning_text += code
-        
-        for nic in nic_list:
-            if not isinstance(nic,Nic):
-                return (False, 'The nics must be of type Nic')
-            self.nic_list.append(nic)
-            for port_key in nic.ports.iterkeys():
-                if port_key in self.ports_list:
-                    return (False, 'Network port '+port_key+'defined multiple times in the same node')
-                self.ports_list.append(port_key)
-            
-        return (True,warning_text)
-   
-    def to_text(self):
-        text= '            id: '+str(self.id_)+'\n'
-        text+= '            cpu:\n'
-        text += self.processor.to_text()
-        text+= '            memory:\n'
-        text += self.memory.to_text()
-        if len(self.nic_list) > 0:
-            text+= '            nics:\n'
-            nic_index = 0
-            for nic in self.nic_list:
-                text+= '                nic '+str(nic_index)+':\n'
-                text += nic.to_text()
-                nic_index += 1
-        return text
-    
-class ProcessorNode():
-    #Definition of the possible values of processor variables
-    possible_features = definitionsClass.processor_possible_features
-    possible_manufacturers = definitionsClass.processor_possible_manufacturers
-    possible_families = definitionsClass.processor_possible_families
-    possible_versions = definitionsClass.processor_possible_versions
-    
-    def __init__(self):
-        self.id_ = None              #Integer. Numeric identifier of the socket
-        self.family = None          #Text. Family name of the processor
-        self.manufacturer = None    #Text. Manufacturer of the processor
-        self.version = None         #Text. Model version of the processor
-        self.features = list()      #list. List of features offered by the processor
-        self.cores = list()         #list. List of cores in the processor. In case of hyperthreading the coupled cores are expressed as [a,b]
-        self.eligible_cores = list()#list. List of cores that can be used
-        #self.decicated_cores
-        #self.shared_cores -> this should also contain information to know if cores are being used
-        
-    def assign(self, processor):
-        """Sets the processor information. 
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        if not isinstance(processor,ProcessorNode):
-            return (False, 'The variable \'processor\' must be of class ProcessorNode')
-        
-        self.id_ = processor.id_
-        self.family = processor.family
-        self.manufacturer = processor.manufacturer
-        self.version = processor.version
-        self.features = processor.features
-        self.cores = processor.cores
-        self.eligible_cores = processor.eligible_cores
-        
-        return (True, "")
-    
-    def set(self, id_, family, manufacturer, version, features, cores):
-        """Sets the processor information. 
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        warning_text = ""
-
-        if not isinstance(id_,int):
-            return (False, 'The processor id_ must be of type int')  
-        if not isinstance(family,str):
-            return (False, 'The processor family must be of type str')
-        if not isinstance(manufacturer,str):
-            return (False, 'The processor manufacturer must be of type str')
-        if not isinstance(version,str):
-            return (False, 'The processor version must be of type str')        
-        if not isinstance(features,list):
-            return (False, 'The processor features must be of type list')
-        if not isinstance(cores,list):
-            return (False, 'The processor cores must be of type list')
-        (self.id_, self.family, self.manufacturer, self.version) = (id_, family, manufacturer, version)
-        if not manufacturer in self.possible_manufacturers:
-            warning_text += "processor manufacturer '%s' not among: %s\n" %(manufacturer, str(self.possible_manufacturers))     
-        if not family in self.possible_families:
-            warning_text += "family '%s' not among: %s\n" % (family, str(self.possible_families))
-#        if not version in self.possible_versions:
-#            warning_text += 'The version %s is not one of these: %s\n' % (version, str(self.possible_versions))
-        
-        for feature in features:
-            if not feature in self.possible_features:
-                warning_text += "processor feature '%s' not among: %s\n" % (feature, str(self.possible_versions))
-            self.features.append(feature)
-        
-        for iterator in sorted(cores):
-            if not isinstance(iterator,list) or not all(isinstance(x, int) for x in iterator):
-                return (False, 'The cores list must be in the form of [[a,b],[c,d],...] where a,b,c,d are of type int')
-            self.cores.append(iterator)
-        
-        self.set_eligible_cores()
-        
-        return (True,warning_text)
-           
-    def set_eligible_cores(self):
-        """Set the default eligible cores, this is all cores non used by the host operating system"""
-        not_first = False
-        for iterator in self.cores:
-            if not_first:
-                self.eligible_cores.append(iterator)
-            else:
-                not_first = True                
-        return
-    
-    def get_info(self):
-        """Returns processor parameters (self.family, self.manufacturer, self.version, self.features)"""
-        return (self.family, self.manufacturer, self.version, self.features)
-    
-    def to_text(self):
-        text= '                id: '+str(self.id_)+'\n'
-        text+= '                family: '+self.family+'\n'
-        text+= '                manufacturer: '+self.manufacturer+'\n'
-        text+= '                version: '+self.version+'\n'
-        text+= '                features: '+str(self.features)+'\n'
-        text+= '                cores: '+str(self.cores)+'\n'
-        text+= '                eligible_cores: '+str(self.eligible_cores)+'\n'
-        return text
-    
-class MemoryNode():
-    def __init__(self):
-        self.modules = list()               #List of MemoryModule(). List of all modules installed in the node
-        self.nr_channels = None             #Integer. Number of modules installed in the node
-        self.node_size = None               #Integer. Total size in KiB of memory installed in the node
-        self.eligible_memory = None         #Integer. Size in KiB of eligible memory in the node     
-        self.hugepage_sz = None             #Integer. Size in KiB of hugepages
-        self.hugepage_nr = None             #Integer. Number of hugepages allocated in the module
-        self.eligible_hugepage_nr = None    #Integer. Number of eligible hugepages in the node
-        self.type_ = None                    #Text. Type of memory modules. If modules have a different value keep it as None
-        self.freq = None                    #Integer. Frequency of the modules in MHz. If modules have a different value keep it as None
-        self.module_size = None             #Integer. Size of the modules in KiB. If modules have a different value keep it as None
-        self.form_factor = None             #Text. Form factor of the modules. If modules have a different value keep it as None
-       
-    def assign(self, memory_node):
-        return self.set(memory_node.modules, memory_node.hugepage_sz, memory_node.hugepage_nr)
-         
-    def set(self, modules, hugepage_sz, hugepage_nr):
-        """Set the memory node information. hugepage_sz must be expressed in KiB. 
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        if not isinstance(modules, list):
-            return (False, 'The modules must be a list of elements of class MemoryModule')
-        if not isinstance(hugepage_sz,int):
-            return (False, 'The hugepage_sz variable must be an int expressing the size in KiB')
-        if not isinstance(hugepage_nr,int):
-            return (False, 'The hugepage_nr variable must be of type int')
-        
-        (self.hugepage_sz, self.hugepage_nr) = (hugepage_sz, hugepage_nr)
-        self.node_size = self.nr_channels = 0
-        
-        different_type = different_freq = different_module_size = different_form_factor = False
-        prev_type = prev_freq = prev_module_size = prev_form_factor = None
-        for iterator in modules:
-            if not isinstance(iterator,MemoryModule):
-                return (False, 'The modules must be a list of elements of class MemoryModule')
-            self.modules.append(iterator)
-            (self.type_, self.freq, self.module_size, self.form_factor) = (iterator.type_, iterator.freq, iterator.size, iterator.form_factor)
-            self.node_size += self.module_size
-            self.nr_channels += 1
-            if prev_type != None and prev_type != self.type_:
-                different_type = True
-            if prev_freq != None and prev_freq != self.freq:
-                different_freq = True
-            if prev_module_size != None and prev_module_size != self.module_size:
-                different_module_size = True
-            if prev_form_factor != None and prev_form_factor != self.form_factor:
-                different_form_factor = True
-            (prev_type, prev_freq, prev_module_size, prev_form_factor) = (self.type_, self.freq, self.module_size, self.form_factor)
-        
-        if different_type:
-            self.type_ = None
-        if different_freq:
-            self.freq = None
-        if different_module_size:
-            self.module_size = None
-        if different_form_factor:
-            self.form_factor = None
-        
-        (return_value, error_code) = self.set_eligible_memory()
-        if not return_value:
-            return (return_value, error_code)
-        
-        return (True, "")
-    
-    def set_eligible_memory(self):
-        """Sets the default eligible_memory and eligible_hugepage_nr. This is all memory but 2GiB and all hugepages"""
-        self.eligible_memory = self.node_size - 2*1024*1024
-        if self.eligible_memory < 0:
-            return (False, "There is less than 2GiB of memory in the module")
-        
-        self.eligible_hugepage_nr = self.hugepage_nr 
-        return (True,"")
-    
-    def get_info(self):
-        """Return memory information (self.freq, self.nr_channels, self.type_, self.node_size)"""
-        return (self.freq, self.nr_channels, self.type_, self.node_size, self.hugepage_sz)
-        
-    def to_text(self):
-        text= '                node_size: '+str(self.node_size)+'\n'
-        text+= '                nr_channels: '+str(self.nr_channels)+'\n'
-        text+= '                eligible_memory: '+str(self.eligible_memory)+'\n'
-        text+= '                hugepage_sz: '+str(self.hugepage_sz)+'\n'
-        text+= '                hugepage_nr: '+str(self.hugepage_nr)+'\n'
-        text+= '                eligible_hugepage_nr: '+str(self.eligible_hugepage_nr)+'\n'
-        text+= '                type: '+self.type_+'\n'
-        text+= '                freq: '+str(self.freq)+'\n'
-        text+= '                module_size: '+str(self.module_size)+'\n'
-        text+= '                form_factor: '+self.form_factor+'\n'
-        text+= '                modules details:\n'
-        for module in self.modules:
-            text += module.to_text()
-        return text
-        
-class MemoryModule():
-    #Definition of the possible values of module variables
-    possible_types = definitionsClass.memory_possible_types
-    possible_form_factors = definitionsClass.memory_possible_form_factors
-    
-    def __init__(self):
-        self.locator = None     #Text. Name of the memory module
-        self.type_ = None        #Text. Type of memory module
-        self.freq = None        #Integer. Frequency of the module in MHz
-        self.size = None        #Integer. Size of the module in KiB
-        self.form_factor = None #Text. Form factor of the module
-        
-    def set(self, locator, type_, freq, size, form_factor):
-        """Sets the memory module information. 
-        Frequency must be expressed in MHz and size in KiB.
-        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        warning_text=""
-        if not isinstance(locator, str):
-            return (False, "The type of the variable locator must be str")
-        if not isinstance(type_, str):
-            return (False, "The type of the variable type_ must be str")
-        if not isinstance(form_factor, str):
-            return (False, "The type of the variable form_factor must be str")
-        if not isinstance(freq, int):
-            return (False, "The type of the variable freq must be int")
-        if not isinstance(size, int):
-            return (False, "The type of the variable size must be int")
-        
-        if not form_factor in self.possible_form_factors:
-            warning_text += "memory form_factor '%s' not among: %s\n" %(form_factor, str(self.possible_form_factors))
-        if not type_ in self.possible_types:
-            warning_text += "memory type '%s' not among: %s\n" %(type_, str(self.possible_types))
-        
-        (self.locator, self.type_, self.freq, self.size, self.form_factor) = (locator, type_, freq, size, form_factor)
-        return (True, warning_text)   
-    
-    def to_text(self):
-        text= '                    '+self.locator+':\n'
-        text+= '                        type: '+self.type_+'\n'
-        text+= '                        freq: '+str(self.freq)+'\n'
-        text+= '                        size: '+str(self.size)+'\n'
-        text+= '                        form factor: '+self.form_factor+'\n'
-        return text
-         
-class Nic():
-    def __init__(self):
-        self.model = None       #Text. Model of the nic
-        self.ports = dict()     #Dictionary of ports. Keys are the port name, value are Port() elements
-    
-    def set_model(self, model):
-        """Sets the model of the nic. Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        if not isinstance(model,str):
-            return (False, 'The \'model\' must be of type str')
-           
-        self.model = model
-        return (True, "")
-   
-    def add_port(self, port):
-        """Adds a port to the nic. Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-        if not isinstance(port,Port):
-            return (False, 'The \'port\' must be of class Port')
-       
-#        port_id = str(port.pci_device_id[0])+':'+str(port.pci_device_id[1])+':'+str(port.pci_device_id[2])+'.'+str(port.pci_device_id[3])
-#CHANGED
-#        port_id = port.name
-        port_id = port.pci_device_id
-#CHANGED END 
-        if port_id in self.ports:
-            return (False, 'The \'port\' '+port.pci_device_id+' is duplicated in the nic')
-#             return (False, 'The \'port\' is duplicated in the nic')
-       
-        self.ports[port_id] = port
-        return (True, "")
-   
-    def to_text(self):
-        text= '                    model: '+ str(self.model)+'\n'
-        text+= '                    ports: '+'\n'
-        for key,port in self.ports.iteritems():
-            text+= '                        "'+key+'":'+'\n'
-            text += port.to_text()
-        return text
-               
-class Port():
-    def __init__(self):
-        self.name = None            #Text. Port name
-        self.virtual = None         #Boolean. States if the port is a virtual function
-        self.enabled = None         #Boolean. States if the port is enabled
-        self.eligible = None        #Boolean. States if the port is eligible
-        self.speed = None           #Integer. Indicates the speed in Mbps
-        self.available_bw = None    #Integer. BW in Mbps that is available.
-        self.mac = None             #list. Indicates the mac address of the port as a list in format ['XX','XX','XX','XX','XX','XX']
-        self.pci_device_id_split = None   #list. Indicates the pci address  of the port as a list in format ['XXXX','XX','XX','X']
-        self.pci_device_id = None
-        self.PF_pci_device_id = None
-        
-#     def set(self, name, virtual, enabled, speed, mac, pci_device_id, pci_device_id_split):
-#         """Sets the port information. The variable speed indicates the speed in Mbps. Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
-#         if not isinstance(name,str):
-#             return (False, 'The variable \'name\' must be of type str')
-#         if not isinstance(virtual,bool):
-#             return (False, 'The variable \'virtual\' must be of type bool')
-#         if not isinstance(enabled,bool):
-#             return (False, 'The variable \'enabled\' must be of type bool')
-#         if not isinstance(enabled,bool):
-#             return (speed, 'The variable \'speed\' must be of type int')
-#         if not isinstance(mac, list) and not isinstance(mac,NoneType):
-#             return (False, 'The variable \'enabled\' must be of type list indicating the mac address in format [\'XXXX\',\'XX\',\'XX\',\'X\'] or NoneType')
-#         if not isinstance(pci_device_id_split, list) or len(pci_device_id_split) != 4: 
-#             return (False, 'The variable \'pci_device_id_split\' must be of type list, indicating the pci address in format [\'XX\',\'XX\',\'XX\',\'XX\',\'XX\',\'XX\']')
-#         
-#         expected_len = [4,2,2,1]
-#         index = 0
-#         for iterator in pci_device_id_split:
-#             if not isinstance(iterator,str) or not iterator.isdigit() or len(iterator) != expected_len[index]:
-#                 return (False, 'The variable \'pci_device_id_split\' must be of type list, indicating the pci address in format [\'XX\',\'XX\',\'XX\',\'XX\',\'XX\',\'XX\']')
-#             index += 1
-#             
-#         if not isinstance(mac,NoneType):
-#             for iterator in mac:
-#                 if not isinstance(iterator,str) or not iterator.isalnum() or len(iterator) != 2:
-#                     return (False, 'The variable \'enabled\' must be of type list indicating the mac address in format [\'XXXX\',\'XX\',\'XX\',\'X\'] or NoneType')
-#         
-#         #By default only virtual ports are eligible
-# #         (self.name, self.virtual, self.enabled, self.eligible, self.available_bw, self.speed, self.mac, self.pci_device_id, self.pci_device_id_split) = (name, virtual, enabled, virtual, speed, speed, mac, pci_device_id, pci_device_id_split)
-#         (self.name, self.virtual, self.enabled, self.eligible, self.available_bw, self.mac, self.pci_device_id, self.pci_device_id_split) = (name, virtual, enabled, virtual, speed, mac, pci_device_id, pci_device_id_split)
-
-    def to_text(self):
-        text= '                            pci: "'+ str(self.pci_device_id)+'"\n'
-        text+= '                            virtual: '+ str(self.virtual)+'\n'
-        if self.virtual:
-            text+= '                            PF_pci_id: "'+self.PF_pci_device_id+'"\n'
-        text+= '                            eligible: '+ str(self.eligible)+'\n'
-        text+= '                            enabled: '+str(self.enabled)+'\n'
-        text+= '                            speed: '+ str(self.speed)+'\n'
-        text+= '                            available bw: '+ str(self.available_bw)+'\n'
-        text+= '                            mac: '+ str(self.mac)+'\n'
-        text+= '                            source_name: '+ str(self.name)+'\n'
-        return text
-    
-class Hypervisor():
-    #Definition of the possible values of hypervisor variables
-    possible_types = definitionsClass.hypervisor_possible_types
-    possible_domain_types = definitionsClass.hypervisor_possible_domain_types
-
-    def __init__(self):
-        self.type_ = None            #Text. Hypervisor type_
-        self.version = None         #int. Hypervisor version
-        self.lib_version = None     #int. Libvirt version used to compile hypervisor
-        self.domains = list()       #list. List of all the available domains
-        
-    def set(self, hypervisor, version, lib_version, domains):
-        warning_text=""
-        if not isinstance(hypervisor,str):
-            return (False, 'The variable type_ must be of type str')
-        if not isinstance(version,int):
-            return (False, 'The variable version must be of type int')
-        if not isinstance(lib_version,int):
-            return (False, 'The library version must be of type int')
-        if not isinstance(domains,list):
-            return (False, 'Domains must be a list of the possible domains as str')
-        
-        if not hypervisor in self.possible_types:
-            warning_text += "Hyperpivor '%s' not among: %s\n" % (hypervisor, str(self.possible_types))
-        
-        valid_domain_found = False
-        for domain in domains:
-            if not isinstance(domain,str):
-                return (False, 'Domains must be a list of the possible domains as str')
-            if domain in self.possible_domain_types:
-                valid_domain_found = True
-                self.domains.append(domain)
-                
-        if not valid_domain_found:
-            warning_text += 'No valid domain found among: %s\n' % str(self.possible_domain_types)
-            
-        
-        (self.version, self.lib_version, self.type_) = (version, lib_version, hypervisor)
-        return (True, warning_text)
-     
-    def assign(self, hypervisor):
-        (self.version, self.lib_version, self.type_) = (hypervisor.version, hypervisor.lib_version, hypervisor.type_)
-        for domain in hypervisor.domains:
-            self.domains.append(domain)
-        return
-           
-    def to_text(self):
-        text= '    type: '+self.type_+'\n'
-        text+= '    version: '+str(self.version)+'\n'
-        text+= '    libvirt version: '+ str(self.lib_version)+'\n'
-        text+= '    domains: '+str(self.domains)+'\n'
-        return text
-        
-class OpSys():
-    #Definition of the possible values of os variables
-    possible_id = definitionsClass.os_possible_id
-    possible_types = definitionsClass.os_possible_types
-    possible_architectures = definitionsClass.os_possible_architectures
-
-    def __init__(self):
-        self.id_ = None                   #Text. Identifier of the OS. Formed by <Distibutor ID>-<Release>-<Codename>. In linux this can be obtained using lsb_release -a
-        self.type_ = None                 #Text. Type of operating system
-        self.bit_architecture = None     #Integer. Architecture
-        
-    def set(self, id_, type_, bit_architecture):
-        warning_text=""
-        if not isinstance(type_,str):
-            return (False, 'The variable type_ must be of type str')
-        if not isinstance(id_,str):
-            return (False, 'The variable id_ must be of type str')
-        if not isinstance(bit_architecture,str):
-            return (False, 'The variable bit_architecture must be of type str')
-        
-        if not type_ in self.possible_types:
-            warning_text += "os type '%s' not among: %s\n" %(type_, str(self.possible_types))
-        if not id_ in self.possible_id:
-            warning_text += "os release '%s' not among: %s\n" %(id_, str(self.possible_id))
-        if not bit_architecture in self.possible_architectures:
-            warning_text += "os bit_architecture '%s' not among: %s\n" % (bit_architecture, str(self.possible_architectures))
-        
-        (self.id_, self.type_, self.bit_architecture) = (id_, type_, bit_architecture)
-        return (True, warning_text)
-    
-    def assign(self,os):
-        (self.id_, self.type_, self.bit_architecture) = (os.id_, os.type_, os.bit_architecture)
-        return
-    
-    def to_text(self):
-        text= '    id: '+self.id_+'\n'
-        text+= '    type: '+self.type_+'\n'
-        text+= '    bit_architecture: '+self.bit_architecture+'\n'
-        return text
-     
-def get_hostname(virsh_conn):
-    return virsh_conn.getHostname().rstrip('\n')
-
-def get_hugepage_size(ssh_conn):
-    command = 'sudo hugeadm --page-sizes'
-#  command = 'hugeadm --page-sizes-all'
-    (_, stdout, stderr) = ssh_conn.exec_command(command)
-    error = stderr.read()
-    if len(error)>0:
-        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
-    mem=stdout.read()
-    if mem=="":
-        return 0
-    return int(mem)
-
-def get_hugepage_nr(ssh_conn,hugepage_sz, node_id):
-    command = 'cat /sys/devices/system/node/node'+str(node_id)+'/hugepages/hugepages-'+str(hugepage_sz/1024)+'kB/nr_hugepages'
-    (_, stdout, _) = ssh_conn.exec_command(command)
-    #print command, 
-    #text = stdout.read()
-    #print "'"+text+"'"
-    #return int(text)
-    
-    try:
-        value=int(stdout.read())
-    except: 
-        value=0
-    return value
-
-def get_memory_information(ssh_conn, virsh_conn, memory_nodes):
-    warning_text=""
-    tree=ElementTree.fromstring(virsh_conn.getSysinfo(0))
-    memory_dict = dict()
-    node_id = 0 #TODO revise. Added for allowing VM as compute hosts 
-    for target in tree.findall("memory_device"):
-        locator_f = size_f = freq_f = type_f = formfactor_f = False
-        locator_f = True #TODO revise. Added for allowing VM as compute hosts
-        module_form_factor = ""
-        for entry in target.findall("entry"):
-            if entry.get("name") == 'size':
-                size_f = True
-                size_split = entry.text.split(' ')
-                if size_split[1] == 'MB':
-                    module_size = int(size_split[0]) * 1024 * 1024
-                elif size_split[1] == 'GB':
-                    module_size = int(size_split[0]) * 1024 * 1024 * 1024
-                elif size_split[1] == 'KB':
-                    module_size = int(size_split[0]) * 1024
-                else:
-                    module_size = int(size_split[0])
-                
-            elif entry.get("name") == 'speed':
-                freq_f = True
-                freq_split = entry.text.split(' ')
-                if freq_split[1] == 'MHz':
-                    module_freq = int(freq_split[0]) * 1024 * 1024
-                elif freq_split[1] == 'GHz':
-                    module_freq = int(freq_split[0]) * 1024 * 1024 * 1024
-                elif freq_split[1] == 'KHz':
-                    module_freq = int(freq_split[0]) * 1024
-            
-            elif entry.get("name") == 'type':
-                type_f = True
-                module_type = entry.text
-                   
-            elif entry.get("name") == 'form_factor':
-                formfactor_f = True
-                module_form_factor = entry.text  
-            #TODO revise. Commented for allowing VM as compute hosts
-            # elif entry.get("name") == 'locator' and not locator_f:
-            #     # other case, it is obtained by bank_locator that we give priority to
-            #     locator = entry.text
-            #     pos = locator.find(module_form_factor)
-            #     if module_form_factor == locator[0:len(module_form_factor) ]:
-            #         pos = len(module_form_factor) +1 
-            #     else:
-            #         pos = 0
-            #     if locator[pos] in "ABCDEFGH":  
-            #         locator_f = True
-            #         node_id = ord(locator[pos])-ord('A')
-            #         #print entry.text, node_id
-            # 
-            # elif entry.get("name") == 'bank_locator':
-            #     locator = entry.text
-            #     pos = locator.find("NODE ")
-            #     if pos >= 0 and len(locator)>pos+5:
-            #         if locator[pos+5] in ("01234567"): #len("NODE ") is 5
-            #             node_id = int(locator[pos+5])
-            #             locator_f = True
-            #  
-
-        #When all module fields have been found add a new module to the list 
-        if locator_f and size_f and freq_f and type_f and formfactor_f:
-            #If the memory node has not yet been created create it
-            if node_id not in memory_dict:
-                memory_dict[node_id] = []
-                
-            #Add a new module to the memory node
-            module = MemoryModule()
-            #TODO revise. Changed for allowing VM as compute hosts
-            (return_status, code) = module.set('NODE %d' % node_id, module_type, module_freq, module_size, module_form_factor)
-            #(return_status, code) = module.set(locator, module_type, module_freq, module_size, module_form_factor)
-            if not return_status:
-                return (return_status, code)
-            memory_dict[node_id].append(module)
-            if code not in warning_text:
-                warning_text += code
-            node_id += 1 #TODO revise. Added for allowing VM as compute hosts
-    
-    #Fill memory nodes
-    #Hugepage size is constant for all nodes
-    hugepage_sz = get_hugepage_size(ssh_conn)
-    for node_id, modules in memory_dict.iteritems():
-        memory_node = MemoryNode()
-        memory_node.set(modules, hugepage_sz, get_hugepage_nr(ssh_conn,hugepage_sz, node_id))
-        memory_nodes[node_id] = memory_node
-        
-    return (True, warning_text)
-
-def get_cpu_topology_ht(ssh_conn, topology):
-    command = 'cat /proc/cpuinfo'
-    (_, stdout, stderr) = ssh_conn.exec_command(command)
-    error = stderr.read()
-    if len(error)>0:
-        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
-    sockets = []
-    cores = []
-    core_map = {}
-    core_details = []
-    core_lines = {}
-    for line in stdout.readlines():
-        if len(line.strip()) != 0:
-            name, value = line.split(":", 1)
-            core_lines[name.strip()] = value.strip()
-        else:
-            core_details.append(core_lines)
-            core_lines = {}
-    
-    for core in core_details:
-        for field in ["processor", "core id", "physical id"]:
-            if field not in core:
-                return(False,'Error getting '+field+' value from /proc/cpuinfo')
-            core[field] = int(core[field])
-    
-        if core["core id"] not in cores:
-            cores.append(core["core id"])
-        if core["physical id"] not in sockets:
-            sockets.append(core["physical id"])
-        key = (core["physical id"], core["core id"])
-        if key not in core_map:
-            core_map[key] = []
-        core_map[key].append(core["processor"])
-      
-    for s in sockets:
-        hyperthreaded_cores = list()
-        for c in cores:
-            hyperthreaded_cores.append(core_map[(s,c)])
-        topology[s] = hyperthreaded_cores
-      
-    return (True, "")
-
-def get_processor_information(ssh_conn, vish_conn, processors):
-    warning_text=""
-    #Processor features are the same for all processors
-    #TODO (at least using virsh capabilities)nr_numa_nodes
-    capabilities = list()
-    tree=ElementTree.fromstring(vish_conn.getCapabilities())
-    for target in tree.findall("host/cpu/feature"):
-        if target.get("name") == 'pdpe1gb':
-            capabilities.append('lps')
-        elif target.get("name") == 'dca':
-            capabilities.append('dioc')  
-        elif target.get("name") == 'vmx' or target.get("name") == 'svm':
-            capabilities.append('hwsv')
-        elif target.get("name") == 'ht':
-            capabilities.append('ht')
-        
-    target = tree.find("host/cpu/arch")
-    if target.text == 'x86_64' or target.text == 'amd64':
-        capabilities.append('64b')
-      
-    command = 'cat /proc/cpuinfo | grep flags'
-    (_, stdout, stderr) = ssh_conn.exec_command(command)
-    error = stderr.read()
-    if len(error)>0:
-        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
-    line = stdout.readline()
-    if 'ept' in line or 'npt' in line:
-        capabilities.append('tlbps')
-    
-    #Find out if IOMMU is enabled
-    command = 'dmesg |grep -e Intel-IOMMU'
-    (_, stdout, stderr) = ssh_conn.exec_command(command)
-    error = stderr.read()
-    if len(error)>0:
-        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
-    if 'enabled' in stdout.read():
-        capabilities.append('iommu')
-      
-    #Equivalent for AMD
-    command = 'dmesg |grep -e AMD-Vi'
-    (_, stdout, stderr) = ssh_conn.exec_command(command)
-    error = stderr.read()
-    if len(error)>0:
-        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
-    if len(stdout.read()) > 0:
-        capabilities.append('iommu')
-    
-    #-----------------------------------------------------------
-    topology = dict()
-    #In case hyperthreading is active it is necessary to determine cpu topology using /proc/cpuinfo
-    if 'ht' in capabilities:
-        (return_status, code) = get_cpu_topology_ht(ssh_conn, topology)
-        if not return_status:
-            return (return_status, code)
-        warning_text += code
-
-    #Otherwise it is possible to do it using virsh capabilities
-    else:
-        for target in tree.findall("host/topology/cells/cell"):
-            socket_id = int(target.get("id"))
-            topology[socket_id] = list()
-            for cpu in target.findall("cpus/cpu"):
-                topology[socket_id].append(int(cpu.get("id")))
-    
-    #-----------------------------------------------------------         
-    #Create a dictionary with the information of all processors
-    #p_fam = p_man = p_ver = None
-    tree=ElementTree.fromstring(vish_conn.getSysinfo(0))
-    #print vish_conn.getSysinfo(0)
-    #return (False, 'forces error for debuging')
-    not_populated=False
-    socket_id = -1     #in case we can not determine the socket_id we assume incremental order, starting by 0
-    for target in tree.findall("processor"):
-        count = 0
-        socket_id += 1
-        #Get processor id, family, manufacturer and version
-        for entry in target.findall("entry"):
-            if entry.get("name") == "status":
-                if entry.text[0:11] == "Unpopulated":
-                    not_populated=True
-            elif entry.get("name") == 'socket_destination':
-                socket_text = entry.text
-                if socket_text.startswith('CPU'):
-                    socket_text = socket_text.strip('CPU')
-                    socket_text = socket_text.strip() #removes trailing spaces
-                    if socket_text.isdigit() and int(socket_text)<9 and int(socket_text)>0:
-                        socket_id = int(socket_text) - 1
-              
-            elif entry.get("name") == 'family':
-                family = entry.text
-                count += 1
-            elif entry.get("name") == 'manufacturer':
-                manufacturer = entry.text
-                count += 1
-            elif entry.get("name") == 'version':
-                version = entry.text.strip()
-                count += 1
-        if count != 3:
-            return (False, 'Error. Not all expected fields could be found in processor')
-        
-        #Create and fill processor structure
-        if not_populated:
-            continue  #avoid inconsistence of some machines where more socket detected than 
-        processor = ProcessorNode()
-        (return_status, code) = processor.set(socket_id, family, manufacturer, version, capabilities, topology[socket_id])
-        if not return_status:
-            return (return_status, code)
-        if code not in warning_text:
-            warning_text += code
-
-        #Add processor to the processors dictionary
-        processors[socket_id] = processor
-    
-    return (True, warning_text)
-
-def get_nic_information(ssh_conn, virsh_conn, nic_topology):   
-    warning_text=""
-    #Get list of net devices
-    net_devices = virsh_conn.listDevices('net',0)
-    print virsh_conn.listDevices('net',0)
-    for device in net_devices:
-        try:
-            #Get the XML descriptor of the device:
-            net_XML = ElementTree.fromstring(virsh_conn.nodeDeviceLookupByName(device).XMLDesc(0))
-            #print "net_XML:" , net_XML
-            #obtain the parent
-            parent = net_XML.find('parent')
-            if parent == None:
-                print 'No parent was found in XML for device '+device
-                #Error. continue?-------------------------------------------------------------
-                continue
-            if parent.text == 'computer':
-                continue
-            if not parent.text.startswith('pci_'):
-                print device + ' parent is neither computer nor pci'
-                #Error. continue?-------------------------------------------------------------
-                continue
-            interface = net_XML.find('capability/interface').text
-            mac = net_XML.find('capability/address').text
-            
-            #Get the pci XML
-            pci_XML = ElementTree.fromstring(virsh_conn.nodeDeviceLookupByName(parent.text).XMLDesc(0))
-            #print pci_XML
-            #Get pci
-            name = pci_XML.find('name').text.split('_')
-            pci = name[1]+':'+name[2]+':'+name[3]+'.'+name[4]
-            
-            #If slot == 0 it is a PF, otherwise it is a VF
-            capability = pci_XML.find('capability')
-            if capability.get('type') != 'pci':
-                print device + 'Capability is not of type pci in '+parent.text
-                #Error. continue?-------------------------------------------------------------
-                continue
-            slot = capability.find('slot').text
-            bus = capability.find('bus').text
-            node_id = None
-            numa_ = capability.find('numa')
-            if numa_ != None:
-                node_id = numa_.get('node');
-                if node_id != None: node_id =int(node_id)
-            if slot == None or bus == None:
-                print device + 'Bus and slot not detected in '+parent.text
-                #Error. continue?-------------------------------------------------------------
-                continue
-            if slot != '0':
-    #             print ElementTree.tostring(pci_XML)
-                virtual = True
-                capability_pf = capability.find('capability')
-                if capability_pf.get('type') != 'phys_function':
-                    print 'physical_function not found in VF '+parent.text
-                    #Error. continue?-------------------------------------------------------------
-                    continue
-                PF_pci = capability_pf.find('address').attrib
-                PF_pci_text = PF_pci['domain'].split('x')[1]+':'+PF_pci['bus'].split('x')[1]+':'+PF_pci['slot'].split('x')[1]+'.'+PF_pci['function'].split('x')[1]
-                
-            else:
-                virtual = False
-            
-            #Obtain node for the port
-            if node_id == None:
-                node_id = int(bus)>>6
-            #print "node_id:", node_id
-            
-            #Only for non virtual interfaces: Obtain speed and if link is detected (this must be done using ethtool)
-            if not virtual:
-                command = 'sudo ethtool '+interface+' | grep -e Speed -e "Link detected"'
-                (_, stdout, stderr) = ssh_conn.exec_command(command)
-                error = stderr.read()
-                if len(error) >0:
-                    print 'Error running '+command+'\n'+error
-                    #Error. continue?-------------------------------------------------------------
-                    continue
-                for line in stdout.readlines():
-                    line = line.strip().rstrip('\n').split(': ')
-                    if line[0] == 'Speed':
-                        if line[1].endswith('Mb/s'):
-                            speed = int(line[1].split('M')[0])*int(1e6)
-                        elif line[1].endswith('Gb/s'):
-                            speed = int(line[1].split('G')[0])*int(1e9)
-                        elif line[1].endswith('Kb/s'):
-                            speed = int(line[1].split('K')[0])*int(1e3)
-                        else:
-                            #the interface is listed but won't be used
-                            speed = 0
-                    elif line[0] == 'Link detected':
-                        if line[1] == 'yes':
-                            enabled = True
-                        else:
-                            enabled = False
-                    else:
-                        print 'Unnexpected output of command '+command+':'
-                        print line
-                        #Error. continue?-------------------------------------------------------------
-                        continue
-                
-            if not node_id in nic_topology:
-                nic_topology[node_id] = list()
-                #With this implementation we make the RAD with only one nic per node and this nic has all ports, TODO: change this by including parent information of PF
-                nic_topology[node_id].append(Nic())
-             
-            #Load the appropriate nic    
-            nic = nic_topology[node_id][0]
-            
-            #Create a new port and fill it
-            port = Port()
-            port.name = interface
-            port.virtual = virtual
-            port.speed = speed
-            if virtual:
-                port.available_bw = 0
-                port.PF_pci_device_id = PF_pci_text
-            else:
-                port.available_bw = speed
-                if speed == 0:
-                    port.enabled = False
-                else:
-                    port.enabled = enabled
-
-            port.eligible = virtual  #Only virtual ports are eligible
-            port.mac = mac
-            port.pci_device_id = pci
-            port.pci_device_id_split = name[1:]
-            
-            #Save the port information
-            nic.add_port(port)         
-        except Exception,e:
-            print 'Error: '+str(e)
-
-    #set in vitual ports if they are enabled
-    for nic in nic_topology.itervalues():
-        for port in nic[0].ports.itervalues():
-#             print port.pci_device_id
-            if port.virtual:
-                enabled = nic[0].ports.get(port.PF_pci_device_id)
-                if enabled == None:
-                    return(False, 'The PF '+port.PF_pci_device_id+' (VF '+port.pci_device_id+') is not present in ports dict')
-                #Only if the PF is enabled the VF can be enabled
-                if nic[0].ports[port.PF_pci_device_id].enabled:
-                    port.enabled = True
-                else:
-                    port.enabled = False
-            
-    return (True, warning_text)     
-
-def get_nic_information_old(ssh_conn, nic_topology):
-    command = 'lstopo-no-graphics --of xml'
-    (_, stdout, stderr) = ssh_conn.exec_command(command)
-    error = stderr.read()
-    if len(error)>0:
-        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
-    tree=ElementTree.fromstring(stdout.read())
-    for target in tree.findall("object/object"):
-        #Find numa nodes
-        if target.get("type") != "NUMANode":
-            continue
-        node_id = int(target.get("os_index"))
-        nic_topology[node_id] = list()
-        
-        #find nics in numa node
-        for entry in target.findall("object/object"):
-            if entry.get("type") != 'Bridge':
-                continue
-            nic_name = entry.get("name")
-            model = None
-            nic = Nic()
-            
-            #find ports in nic
-            for pcidev in entry.findall("object"):
-                if pcidev.get("type") != 'PCIDev':
-                    continue
-                enabled = speed = mac = pci_busid = None
-                port = Port()
-                model = pcidev.get("name")
-                virtual = False
-                if 'Virtual' in model:
-                    virtual = True
-                pci_busid = pcidev.get("pci_busid")
-                for osdev in pcidev.findall("object"):
-                    name = osdev.get("name")
-                    for info in osdev.findall("info"):
-                        if info.get("name") != 'Address':
-                            continue
-                        mac = info.get("value")
-                        #get the port speed and status
-                        command = 'sudo ethtool '+name
-                        (_, stdout, stderr) = ssh_conn.exec_command(command)
-                        error = stderr.read()
-                        if len(error)>0:
-                            return (False, 'Error obtaining '+name+' information: '+error)
-                        ethtool = stdout.read()
-                        if '10000baseT/Full' in ethtool:
-                            speed = 10e9
-                        elif '1000baseT/Full' in ethtool:
-                            speed = 1e9
-                        elif '100baseT/Full' in ethtool:
-                            speed = 100e6
-                        elif '10baseT/Full' in ethtool:
-                            speed = 10e6
-                        else:
-                            return (False, 'Speed not detected in '+name)
-
-                    enabled = False
-                    if 'Link detected: yes' in ethtool:
-                        enabled = True
-                    
-                    if speed != None and mac != None and pci_busid != None:
-                        mac = mac.split(':')
-                        pci_busid_split = re.split(':|\.', pci_busid)
-                        #Fill the port information
-                        port.set(name, virtual, enabled, speed, mac, pci_busid, pci_busid_split)
-                        nic.add_port(port)
-              
-            if len(nic.ports) > 0:  
-                #Fill the nic model
-                if model != None:
-                    nic.set_model(model)
-                else:
-                    nic.set_model(nic_name)
-                
-                #Add it to the topology
-                nic_topology[node_id].append(nic)
-                
-    return (True, "")
-
-def get_os_information(ssh_conn, os):
-    warning_text=""
-#    command = 'lsb_release -a'
-#    (stdin, stdout, stderr) = ssh_conn.exec_command(command)
-#    cont = 0
-#    for line in stdout.readlines():
-#        line_split = re.split('\t| *', line.rstrip('\n'))
-#        if line_split[0] == 'Distributor' and line_split[1] == 'ID:':
-#            distributor = line_split[2]
-#            cont += 1
-#        elif line_split[0] == 'Release:':
-#            release = line_split[1]
-#            cont += 1
-#        elif line_split[0] == 'Codename:':
-#            codename = line_split[1]
-#            cont += 1
-#    if cont != 3:
-#        return (False, 'It was not possible to obtain the OS id')
-#    id_ = distributor+'-'+release+'-'+codename
-
-
-    command = 'cat /etc/redhat-release'
-    (_, stdout, _) = ssh_conn.exec_command(command)
-    id_text= stdout.read()
-    if len(id_text)==0:
-        #try with Ubuntu
-        command = 'lsb_release -d -s'
-        (_, stdout, _) = ssh_conn.exec_command(command)
-        id_text= stdout.read()
-    if len(id_text)==0:
-        raise paramiko.ssh_exception.SSHException("Can not determinte release neither with 'lsb_release' nor with 'cat /etc/redhat-release'")
-    id_ = id_text.rstrip('\n')
-   
-    command = 'uname -o'
-    (_, stdout, stderr) = ssh_conn.exec_command(command)
-    error = stderr.read()
-    if len(error)>0:
-        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
-    type_ = stdout.read().rstrip('\n')
-    
-    command = 'uname -i'
-    (_, stdout, stderr) = ssh_conn.exec_command(command)
-    error = stderr.read()
-    if len(error)>0:
-        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
-    bit_architecture = stdout.read().rstrip('\n')
-    
-    (return_status, code) = os.set(id_, type_, bit_architecture)
-    if not return_status:
-        return (return_status, code)
-    warning_text += code
-    return (True, warning_text) 
-
-def get_hypervisor_information(virsh_conn, hypervisor):
-    type_ = virsh_conn.getType().rstrip('\n')
-    version = virsh_conn.getVersion()
-    lib_version = virsh_conn.getLibVersion()
-    
-    domains = list()
-    tree=ElementTree.fromstring(virsh_conn.getCapabilities())
-    for target in tree.findall("guest"):
-        os_type = target.find("os_type").text
-        #We only allow full virtualization
-        if os_type != 'hvm':
-            continue
-        wordsize = int(target.find('arch/wordsize').text)
-        if wordsize == 64:
-            for domain in target.findall("arch/domain"):
-                domains.append(domain.get("type"))
-            
-    (return_status, code) = hypervisor.set(type_, version, lib_version, domains)
-    if not return_status:
-        return (return_status, code)
-    return (True, code)      
-     
-class RADavailableResourcesClass(RADclass):
-    def __init__(self, resources):
-        """Copy resources from the RADclass (server resources not taking into account resources used by VMs"""
-        #New
-        self.reserved = dict()          #Dictionary of reserved resources for a server. Key are VNFC names and values RADreservedResources
-        self.cores_consumption = None   #Dictionary of cpu consumption. Key is the cpu and the value is
-        
-        self.machine = resources.machine
-        self.user = resources.user
-        self.password = resources.password
-        self.name = resources.name
-        self.nr_processors = resources.nr_processors 
-        self.processor_family = resources.processor_family
-        self.processor_manufacturer = resources.processor_manufacturer
-        self.processor_version = resources.processor_version
-        self.processor_features = resources.processor_features
-        self.memory_type = resources.memory_type
-        self.memory_freq = resources.memory_freq
-        self.memory_nr_channels = resources.memory_nr_channels
-        self.memory_size = resources.memory_size
-        self.memory_hugepage_sz = resources.memory_hugepage_sz
-        self.hypervisor = Hypervisor()
-        self.hypervisor.assign(resources.hypervisor)
-        self.os = OpSys()
-        self.os.assign(resources.os)
-        self.nodes = dict()
-        for node_k, node_v in resources.nodes.iteritems():
-            self.nodes[node_k] = Node()
-            self.nodes[node_k].assign(node_v)
-        return
-    
-    def _get_cores_consumption_warnings(self):
-        """Returns list of warning strings in case warnings are generated. 
-        In case no warnings are generated the return value will be an empty list"""
-        warnings = list()
-        #Get the cores consumption
-        (return_status, code) = get_ssh_connection(self.machine, self.user, self.password)
-        if not return_status:
-            return (return_status, code)
-        ssh_conn = code
-        command = 'mpstat -P ALL 1 1 | grep Average | egrep -v CPU\|all'
-        (_, stdout, stderr) = ssh_conn.exec_command(command)
-        error = stderr.read()
-        if len(error) > 0:
-            return (False, error)
-    
-        self.cores_consumption = dict()
-        for line in stdout.readlines():
-            cpu_usage_split = re.split('\t| *', line.rstrip('\n'))
-            usage = 100 *(1 - float(cpu_usage_split[10]))
-            if usage > 0:
-                self.cores_consumption[int(cpu_usage_split[1])] = usage 
-        ssh_conn.close()   
-        #Check if any core marked as available in the nodes has cpu_usage > 0
-        for _, node_v in self.nodes.iteritems():
-            cores = node_v.processor.eligible_cores
-            for cpu in cores:
-                if len(cpu) > 1:
-                    for core in cpu:
-                        if core in self.cores_consumption:
-                            warnings.append('Warning: Core '+str(core)+' is supposed to be idle but it is consuming '+str(self.cores_consumption[core])+'%')
-                else:
-                    if cpu in self.cores_consumption:
-                        warnings.append('Warning: Core '+str(core)+' is supposed to be idle but it is consuming '+str(self.cores_consumption[cpu])+'%')
-        
-        return warnings
-    
-    def reserved_to_text(self):
-        text = str()
-        for VNFC_name, VNFC_reserved in self.reserved.iteritems():
-            text += '    VNFC: '+str(VNFC_name)+'\n'
-            text += VNFC_reserved.to_text()
-                    
-        return text
-    
-    def obtain_usage(self):
-        resp = dict()
-        #Iterate through nodes to get cores, eligible cores, memory and physical ports (save ports usage for next section)
-        nodes = dict()
-        ports_usage = dict()
-        hugepage_size = dict()
-        for node_k, node_v in self.nodes.iteritems():
-            node = dict()
-            ports_usage[node_k] = dict()
-            eligible_cores = list()
-            for pair in node_v.processor.eligible_cores:
-                if isinstance(pair, list):
-                    for element in pair:
-                        eligible_cores.append(element)
-                else:
-                    eligible_cores.append(pair)
-            node['cpus'] = {'cores':node_v.processor.cores,'eligible_cores':eligible_cores}
-            node['memory'] = {'size':str(node_v.memory.node_size/(1024*1024*1024))+'GB','eligible':str(node_v.memory.eligible_memory/(1024*1024*1024))+'GB'}
-            hugepage_size[node_k] = node_v.memory.hugepage_sz
-            
-            ports = dict()
-            for nic in node_v.nic_list:
-                for port in nic.ports.itervalues():
-                    if port.enabled and not port.virtual: 
-                        ports[port.name] = {'speed':str(port.speed/1000000000)+'G'}
-#                         print '*************** ',port.name,'speed',port.speed 
-                        ports_usage[node_k][port.name] = 100 - int(100*float(port.available_bw)/float(port.speed))
-            node['ports'] = ports
-            nodes[node_k] = node
-        resp['RAD'] = nodes
-        
-        #Iterate through reserved section to get used cores, used memory and port usage
-        cores = dict()
-        memory = dict()
-        #reserved_cores = list
-        for node_k in self.nodes.iterkeys():
-            if not node_k in cores:
-                cores[node_k] = list()
-                memory[node_k] = 0
-            for _, reserved in self.reserved.iteritems():
-                if node_k in reserved.node_reserved_resources:
-                    node_v = reserved.node_reserved_resources[node_k]
-                    cores[node_k].extend(node_v.reserved_cores)
-                    memory[node_k] += node_v.reserved_hugepage_nr * hugepage_size[node_k]
-                            
-        occupation = dict()       
-        for node_k in self.nodes.iterkeys():
-            ports = dict()
-            for name, usage in ports_usage[node_k].iteritems():
-                ports[name] = {'occupied':str(usage)+'%'}
-#             print '****************cores',cores
-#             print '****************memory',memory
-            occupation[node_k] = {'cores':cores[node_k],'memory':str(memory[node_k]/(1024*1024*1024))+'GB','ports':ports}
-        resp['occupation'] = occupation
-        
-        return resp            
-    
-class RADreservedResources():
-    def __init__(self):
-        self.node_reserved_resources = dict()      #dict. keys are the RAD nodes id, values are NodeReservedResources
-        self.mgmt_interface_pci = None             #pci in the VNF for the management interface
-        self.image = None                          #Path in remote machine of the VNFC image
-    
-    def update(self,reserved):
-        self.image = reserved.image
-        self.mgmt_interface_pci = reserved.mgmt_interface_pci
-        for k,v in reserved.node_reserved_resources.iteritems():
-            if k in self.node_reserved_resources.keys():
-                return (False, 'Duplicated node entry '+str(k)+' in reserved resources')
-            self.node_reserved_resources[k]=v
-            
-        return (True, "")
-    
-    def to_text(self):
-        text = '        image: '+str(self.image)+'\n'
-        for node_id, node_reserved in self.node_reserved_resources.iteritems():
-            text += '        Node ID: '+str(node_id)+'\n'
-            text += node_reserved.to_text()
-        return text
-
-class NodeReservedResources():
-    def __init__(self):
-    #     reserved_shared_cores = None      #list. List of all cores that the VNFC needs in shared mode  #TODO Not used
-    #     reserved_memory = None            #Integer. Amount of KiB needed by the VNFC #TODO. Not used since hugepages are used
-        self.reserved_cores = list()             #list. List of all cores that the VNFC uses
-        self.reserved_hugepage_nr = 0            #Integer. Number of hugepages needed by the VNFC 
-        self.reserved_ports = dict()             #dict. The key is the physical port pci and the value the VNFC port description
-        self.vlan_tags = dict()
-        self.cpu_pinning = None
-    
-    def to_text(self):
-        text = '            cores: '+str(self.reserved_cores)+'\n'
-        text += '            cpu_pinning: '+str(self.cpu_pinning)+'\n'
-        text += '            hugepages_nr: '+str(self.reserved_hugepage_nr)+'\n'
-        for port_pci, port_description in self.reserved_ports.iteritems():
-            text += '            port: '+str(port_pci)+'\n'
-            text += port_description.to_text()
-        return text
-    
-#     def update(self,reserved):
-#         self.reserved_cores = list(reserved.reserved_cores)
-#         self.reserved_hugepage_nr = reserved.reserved_hugepage_nr
-#         self.reserved_ports = dict(reserved.reserved_ports)
-#         self.cpu_pinning = list(reserved.cpu_pinning)
-    
-    
-        
diff --git a/auxiliary_functions.py b/auxiliary_functions.py
deleted file mode 100644 (file)
index 795d84a..0000000
+++ /dev/null
@@ -1,227 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-Common usuful functions 
-'''
-
-__author__="Alfonso Tierno, Pablo Montes"
-__date__ ="$10-jul-2014 12:07:15$"
-
-
-import yaml
-import paramiko 
-from definitionsClass import definitionsClass
-from definitionsClass import Units
-import random
-from jsonschema import validate as js_v, exceptions as js_e
-
-def check_and_convert_units(value, value_type):
-    """TODO: Update description
-    This method receives a text with 2 fields using a blank as separator and a list of valid units. The first field must represent a number
-    and the second one units. 
-    In case the second field is not one of valid_units (False, <error description>) is returned.
-    In case the second field is a valid unit the first number is converted in the following way:
-    Gbps, Mbps, kbps -> Mbps
-    GB,MB,KB,B,GiB,MiB,KiB -> B
-    GHz,MHz,KHz,Hz -> Hz
-    If conversion is done successfully (True, <converted value>) is returned"""
-    try:
-        if value_type == Units.no_units:
-            if not isinstance(value,int) and not isinstance(value,float):
-                return (False, 'When no units are used only an integer or float must be used')
-        elif value_type == Units.name:
-            if not isinstance(value,str):
-                return (False, 'For names str must be used')
-        elif value_type == Units.boolean:
-            if not isinstance(value,bool):
-                return (False, 'A boolean or Yes/No mut be used')
-        else:
-            splitted  = value.split(' ')
-            if len(splitted) != 2:
-                return (False, 'Expected format: <value> <units>')
-            (value, units) = splitted 
-            if ',' in value or '.' in value:
-                return (False, 'Use integers to represent numeric values')
-                
-            value = int(value)
-            
-#            if not isinstance(value_type, Units):
-#                return (False, 'Not valid value_type')
-            
-            valid_units = definitionsClass.units[value_type]
-            
-            #Convert everything to upper in order to make comparations easier
-            units = units.upper()
-            for i in range(0, len(valid_units)):
-                valid_units[i] = valid_units[i].upper()
-            
-            #Check the used units are valid ones
-            if units not in valid_units:
-                return (False, 'Valid units are: '+', '.join(valid_units))
-
-            if units.startswith('GI'):
-                value = value *1024*1024*1024
-            elif units.startswith('MI'):
-                value = value *1024*1024
-            elif units.startswith('KI'):
-                value = value *1024
-            elif units.startswith('G'):
-                value = value *1000000000
-            elif units.startswith('M'):
-                value = value *1000000
-            elif units.startswith('K'):
-                value = value *1000
-    except Exception,e:
-        return (False, 'Unexpected error in auxiliary_functions.py - check_and_convert_units:\n'+str(e))
-
-    return (True, value)
-        
-def get_ssh_connection(machine, user=None, password=None):
-    """Stablishes an ssh connection to the remote server. Returns (True, paramiko_ssh) in case of success or (False, <error message>) in case of error"""
-    try:
-        s = paramiko.SSHClient()
-        s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-        s.load_system_host_keys()
-        s.connect(machine, 22, user, password, timeout=10)
-    except Exception,e:
-        return (False, 'It was not possible to connect to '+machine+str(e))
-        
-    return (True, s)
-
-def run_in_remote_server(s,command):
-    """Runs in the remote server the specified command. Returns (True, stdout) in case of success or (False, <error message>) in case of error"""
-    try:
-        (_, stdout, stderr) = s.exec_command(command)
-        error_msg = stderr.read()
-        if len(error_msg) > 0:
-            return (False, error_msg)
-    except Exception,e:
-        return (False, str(e))
-    
-    return (True, stdout)
-
-def read_file(file_):
-    """Reads a file specified by 'file' and returns (True,<its content as a string>) in case of success or (False, <error message>) in case of failure"""
-    try:
-        f = open(file_, 'r')
-        read_data = f.read()
-        f.close()
-    except Exception,e:
-        return (False, str(e))
-      
-    return (True, read_data)
-
-def check_contains(element, keywords):
-    """Auxiliary function used to check if a yaml structure contains or not
-    an specific field. Returns a bool"""
-    for key in keywords:
-        if not key in element:
-            return False      
-    return True
-
-def check_contains_(element, keywords):
-    """Auxiliary function used to check if a yaml structure contains or not
-    an specific field. Returns a bool,missing_variables"""
-    for key in keywords:
-        if not key in element:
-            return False, key      
-    return True, None
-
-def write_file(file_, content):
-    """Generates a file specified by 'file' and fills it using 'content'"""
-    f = open(file_, 'w')
-    f.write(content)
-    f.close()
-
-def nice_print(yaml_element):
-    """Print a yaml structure. Used mainly for debugging"""
-    print(yaml.dump(yaml_element, default_flow_style=False))
-    
-def new_random_mac():
-    mac = (0xE2, random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff) )
-    return ':'.join(map(lambda x: "%02X" % x, mac)) 
-
-def parse_dict(var, template):
-    if type(var) is not dict: return -1, 'not a dictionary'
-    for _,tv in template.items():
-        if type(tv) is list:
-            return
-    
-def delete_nulls(var):
-    if type(var) is dict:
-        for k in var.keys():
-            if var[k] is None: del var[k]
-            elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple: 
-                if delete_nulls(var[k]): del var[k]
-        if len(var) == 0: return True
-    elif type(var) is list or type(var) is tuple:
-        for k in var:
-            if type(k) is dict: delete_nulls(k)
-        if len(var) == 0: return True
-    return False
-
-def get_next_2pow(var):
-    if var==0: return 0
-    v=1
-    while v<var: v=v*2
-    return v        
-
-def check_valid_uuid(uuid):
-    id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
-    try:
-        js_v(uuid, id_schema)
-        return True
-    except js_e.ValidationError:
-        return False
-
-def DeleteNone(var):
-    '''Removes recursively empty dictionaries or lists
-    return True if var is an empty dict or list '''
-    if type(var) is dict:
-        for k in var.keys():
-            if var[k] is None: del var[k]
-            elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple: 
-                if DeleteNone(var[k]): del var[k]
-        if len(var) == 0: return True
-    elif type(var) is list or type(var) is tuple:
-        for k in var:
-            if type(k) is dict: DeleteNone(k)
-        if len(var) == 0: return True
-    return False
-    
-def gen_random_mac():
-    '''generates a random mac address. Avoid multicast, broadcast, etc
-    '''
-    mac = (
-        #52,54,00,
-        #2 + 4*random.randint(0x00, 0x3f), #4 multiple, unicast local mac address
-        0x52,
-        random.randint(0x00, 0xff),
-        random.randint(0x00, 0xff),
-        random.randint(0x00, 0xff),
-        random.randint(0x00, 0xff),
-        random.randint(0x00, 0xff) 
-    )
-    return ':'.join(map(lambda x: "%02x" % x, mac))
-
index 599938d..6318769 100755 (executable)
@@ -92,8 +92,9 @@ shift $((OPTIND-1))
 #GET OPENVIM VERSION
 OPENVIM_VER="$1"
 if [ -z "$OPENVIM_VER" ]
-then 
-    OPENVIM_VER=`ovim -v`
+then
+    OVIM_PATH=`python -c 'import osm_openvim; print osm_openvim.__path__[0]' 2> /dev/null`
+    OPENVIM_VER=`python ${OVIM_PATH}/ovim.py -v 2> /dev/null`
     OPENVIM_VER=${OPENVIM_VER%%-r*}
     OPENVIM_VER=${OPENVIM_VER##*version }
     echo "    Detected openvim version $OPENVIM_VER"
diff --git a/definitionsClass.py b/definitionsClass.py
deleted file mode 100644 (file)
index 70168e8..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-Definitions of classes for the Host operating server, ...  
-'''
-
-__author__="Pablo Montes"
-
-
-class Units():
-    memory_1000 = 1
-    memory_1024 = 2
-    memory_full = 3
-    bw = 4
-    freq = 5
-    no_units = 6
-    name = 7
-    boolean = 8
-    
-class definitionsClass():
-    user = 'n2'
-    password = 'n2'
-    extrict_hugepages_allocation = True
-    processor_possible_features = ['64b','iommu','lps','tlbps','hwsv','dioc','ht']
-    processor_possible_manufacturers = ['Intel','AMD']
-    processor_possible_families = ['Xeon']
-    processor_possible_versions = ['Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz', 'Intel(R) Xeon(R) CPU E5-2680 0 @ 2.70GHz','Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz']
-    memory_possible_types = ['DDR2','DDR3']
-    memory_possible_form_factors = ['DIMM']
-    hypervisor_possible_types = ['QEMU']
-    hypervisor_possible_domain_types = ['kvm'] #['qemu', 'kvm']
-    os_possible_id = ['Red Hat Enterprise Linux Server release 6.4 (Santiago)',
-                      'Red Hat Enterprise Linux Server release 6.5 (Santiago)',
-                      'Red Hat Enterprise Linux Server release 6.6 (Santiago)',
-                      'CentOS release 6.5 (Final)',
-                      'CentOS release 6.6 (Final)',
-                      'Red Hat Enterprise Linux Server release 7.0 (Maipo)',
-                      'Red Hat Enterprise Linux Server release 7.1 (Maipo)',
-                    ]
-    os_possible_types = ['GNU/Linux']
-    os_possible_architectures = ['x86_64']
-    hypervisor_possible_composed_versions = ['QEMU-kvm']
-    units = dict() 
-    units[Units.bw] = ['Gbps', 'Mbps', 'kbps', 'bps']
-    units[Units.freq] = ['GHz', 'MHz', 'KHz', 'Hz']
-    units[Units.memory_1000] = ['GB', 'MB', 'KB', 'B']
-    units[Units.memory_1024] = ['GiB', 'MiB', 'KiB', 'B']
-    units[Units.memory_full] = ['GB', 'MB', 'KB', 'GiB', 'MiB', 'KiB', 'B']
-    valid_hugepage_sz = [1073741824, 2097152] #In bytes
-    valid_VNFC_iface_types = ['mgmt','data']
-    
-    def __init__(self):
-        return
-        
diff --git a/dhcp_thread.py b/dhcp_thread.py
deleted file mode 100644 (file)
index da7176b..0000000
+++ /dev/null
@@ -1,296 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-This is thread that interact with the dhcp server to get the IP addresses 
-'''
-__author__="Pablo Montes, Alfonso Tierno"
-__date__ ="$4-Jan-2016 12:07:15$"
-
-
-
-import threading
-import time
-import Queue
-import paramiko
-import random
-import subprocess
-import logging
-
-#TODO: insert a logging system
-
-class dhcp_thread(threading.Thread):
-    def __init__(self, dhcp_params, db, db_lock, test, dhcp_nets, logger_name=None, debug=None):
-        '''Init a thread.
-        Arguments: thread_info must be a dictionary with:
-            'dhcp_params' dhcp server parameters with the following keys:
-                mandatory : user, host, port, key, ifaces(interface name list of the one managed by the dhcp)
-                optional:  password, key, port(22)
-            'db' 'db_lock': database class and lock for accessing it
-            'test': in test mode no acces to a server is done, and ip is invented
-        '''
-        threading.Thread.__init__(self)
-        self.dhcp_params = dhcp_params
-        self.db = db
-        self.db_lock = db_lock
-        self.test = test
-        self.dhcp_nets = dhcp_nets
-        self.ssh_conn = None
-        if logger_name:
-            self.logger_name = logger_name
-        else:
-            self.logger_name = "openvim.dhcp"
-        self.logger = logging.getLogger(self.logger_name)
-        if debug:
-            self.logger.setLevel(getattr(logging, debug))
-
-        self.mac_status ={} #dictionary of mac_address to retrieve information
-            #ip: None
-            #retries: 
-            #next_reading: time for the next trying to check ACTIVE status or IP
-            #created: time when it was added 
-            #active: time when the VM becomes into ACTIVE status
-            
-        
-        self.queueLock = threading.Lock()
-        self.taskQueue = Queue.Queue(2000)
-        
-    def ssh_connect(self):
-        try:
-            #Connect SSH
-            self.ssh_conn = paramiko.SSHClient()
-            self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-            self.ssh_conn.load_system_host_keys()
-            self.ssh_conn.connect(self.dhcp_params["host"], port=self.dhcp_params.get("port", 22),
-                                  username=self.dhcp_params["user"], password=self.dhcp_params.get("password"),
-                                  key_filename=self.dhcp_params.get("key"), timeout=2)
-        except paramiko.ssh_exception.SSHException as e:
-            self.logger.error("ssh_connect ssh Exception " + str(e))
-        
-    def load_mac_from_db(self):
-        #TODO get macs to follow from the database
-        self.logger.debug("load macs from db")
-        self.db_lock.acquire()
-        r,c = self.db.get_table(SELECT=('mac','ip_address','nets.uuid as net_id', ),
-                                FROM='ports join nets on ports.net_id=nets.uuid', 
-                                WHERE_NOT={'ports.instance_id': None, 'nets.provider': None})
-        self.db_lock.release()
-        now = time.time()
-        self.mac_status ={}
-        if r<0:
-            self.logger.error("Error getting data from database: " + c)
-            return
-        for port in c:
-            if port["net_id"] in self.dhcp_nets:
-                self.mac_status[ port["mac"] ] = {"ip": port["ip_address"], "next_reading": now, "created": now, "retries":0}
-    
-    def insert_task(self, task, *aditional):
-        try:
-            self.queueLock.acquire()
-            task = self.taskQueue.put( (task,) + aditional, timeout=5) 
-            self.queueLock.release()
-            return 1, None
-        except Queue.Full:
-            return -1, "timeout inserting a task over dhcp_thread"
-
-    def run(self):
-        self.logger.debug("starting, nets: " + str(self.dhcp_nets))
-        next_iteration = time.time() + 10
-        while True:
-            self.load_mac_from_db()
-            while True:
-                try:
-                    self.queueLock.acquire()
-                    if not self.taskQueue.empty():
-                        task = self.taskQueue.get()
-                    else:
-                        task = None
-                    self.queueLock.release()
-
-                    if task is None:
-                        now=time.time()
-                        if now >= next_iteration:
-                            next_iteration = self.get_ip_from_dhcp()
-                        else:
-                            time.sleep(1)
-                        continue
-
-                    if task[0] == 'add':
-                        self.logger.debug("processing task add mac " + str(task[1]))
-                        now=time.time()
-                        self.mac_status[task[1] ] = {"ip": None, "next_reading": now, "created": now, "retries":0}
-                        next_iteration = now
-                    elif task[0] == 'del':
-                        self.logger.debug("processing task del mac " + str(task[1]))
-                        if task[1] in self.mac_status:
-                            del self.mac_status[task[1] ]
-                    elif task[0] == 'exit':
-                        self.logger.debug("processing task exit")
-                        self.terminate()
-                        return 0
-                    else:
-                        self.logger.error("unknown task: " + str(task))
-                except Exception as e:
-                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
-          
-    def terminate(self):
-        try:
-            if self.ssh_conn:
-                self.ssh_conn.close()
-        except Exception as e:
-            self.logger.error("terminate Exception: " + str(e))
-        self.logger.debug("exit from dhcp_thread")
-
-    def get_ip_from_dhcp(self):
-        
-        now = time.time()
-        next_iteration= now + 40000 # >10 hores
-        
-        #print self.name, "Iteration" 
-        for mac_address in self.mac_status:
-            if now < self.mac_status[mac_address]["next_reading"]:
-                if self.mac_status[mac_address]["next_reading"] < next_iteration:
-                    next_iteration = self.mac_status[mac_address]["next_reading"]
-                continue
-            
-            if self.mac_status[mac_address].get("active") == None:
-                #check from db if already active
-                self.db_lock.acquire()
-                r,c = self.db.get_table(FROM="ports as p join instances as i on p.instance_id=i.uuid",
-                                        WHERE={"p.mac": mac_address, "i.status": "ACTIVE"})
-                self.db_lock.release()
-                if r>0:
-                    self.mac_status[mac_address]["active"] = now
-                    self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2
-                    self.logger.debug("mac %s VM ACTIVE", mac_address)
-                    self.mac_status[mac_address]["retries"] = 0
-                else:
-                    #print self.name, "mac %s  VM INACTIVE" % (mac_address)
-                    if now - self.mac_status[mac_address]["created"] > 300:
-                        #modify Database to tell openmano that we can not get dhcp from the machine
-                        if not self.mac_status[mac_address].get("ip"):
-                            self.db_lock.acquire()
-                            r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address})
-                            self.db_lock.release()
-                            self.mac_status[mac_address]["ip"] = "0.0.0.0"
-                            self.logger.debug("mac %s >> set to 0.0.0.0 because of timeout", mac_address)
-                        self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60
-                    else:
-                        self.mac_status[mac_address]["next_reading"] = (int(now)/6 +1)* 6
-                if self.mac_status[mac_address]["next_reading"] < next_iteration:
-                    next_iteration = self.mac_status[mac_address]["next_reading"]
-                continue
-            
-
-            if self.test:
-                if self.mac_status[mac_address]["retries"]>random.randint(10,100): #wait between 10 and 100 seconds to produce a fake IP
-                    content = self.get_fake_ip()
-                else:
-                    content = None
-            elif self.dhcp_params["host"]=="localhost":
-                try:
-                    command = ['get_dhcp_lease.sh',  mac_address]
-                    content = subprocess.check_output(command)
-                except Exception as e:
-                    self.logger.error("get_ip_from_dhcp subprocess Exception " + str(e))
-                    content = None
-            else:
-                try:
-                    if not self.ssh_conn:
-                        self.ssh_connect()
-                    command = 'get_dhcp_lease.sh ' +  mac_address
-                    (_, stdout, _) = self.ssh_conn.exec_command(command)
-                    content = stdout.read()
-                except paramiko.ssh_exception.SSHException as e:
-                    self.logger.error("get_ip_from_dhcp: ssh_Exception: " + srt(e))
-                    content = None
-                    self.ssh_conn = None
-                except Exception as e:
-                    self.logger.error("get_ip_from_dhcp: Exception: " + str(e))
-                    content = None
-                    self.ssh_conn = None
-
-            if content:
-                self.mac_status[mac_address]["ip"] = content
-                #modify Database
-                self.db_lock.acquire()
-                r,c = self.db.update_rows("ports", {"ip_address": content}, {"mac": mac_address})
-                self.db_lock.release()
-                if r<0:
-                    self.logger.error("Database update error: " + c)
-                else:
-                    self.mac_status[mac_address]["retries"] = 0
-                    self.mac_status[mac_address]["next_reading"] = (int(now)/3600 +1)* 36000 # 10 hores
-                    if self.mac_status[mac_address]["next_reading"] < next_iteration:
-                        next_iteration = self.mac_status[mac_address]["next_reading"]
-                    self.logger.debug("mac %s >> %s", mac_address, content)
-                    continue
-            #a fail has happen
-            self.mac_status[mac_address]["retries"] +=1
-            #next iteration is every 2sec at the beginning; every 5sec after a minute, every 1min after a 5min
-            if now - self.mac_status[mac_address]["active"] > 120:
-                #modify Database to tell openmano that we can not get dhcp from the machine
-                if not self.mac_status[mac_address].get("ip"):
-                    self.db_lock.acquire()
-                    r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address})
-                    self.db_lock.release()
-                    self.mac_status[mac_address]["ip"] = "0.0.0.0"
-                    self.logger.debug("mac %s >> set to 0.0.0.0 because of timeout", mac_address)
-            
-            if now - self.mac_status[mac_address]["active"] > 60:
-                self.mac_status[mac_address]["next_reading"] = (int(now)/6 +1)* 6
-            elif now - self.mac_status[mac_address]["active"] > 300:
-                self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60
-            else:
-                self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2
-                
-            if self.mac_status[mac_address]["next_reading"] < next_iteration:
-                next_iteration = self.mac_status[mac_address]["next_reading"]
-        return next_iteration    
-    
-    def get_fake_ip(self):
-        fake_ip = "192.168.{}.{}".format(random.randint(1,254), random.randint(1,254) )
-        while True:
-            #check not already provided
-            already_used = False
-            for mac_address in self.mac_status:
-                if self.mac_status[mac_address]["ip"] == fake_ip:
-                    already_used = True
-                    break
-            if not already_used:
-                return fake_ip
-
-
-#EXAMPLE of bash script that must be available at the DHCP server for "isc-dhcp-server" type
-#     $ cat ./get_dhcp_lease.sh
-#     #!/bin/bash
-#     awk '
-#     ($1=="lease" && $3=="{"){ lease=$2; active="no"; found="no" }
-#     ($1=="binding" && $2=="state" && $3=="active;"){ active="yes" }
-#     ($1=="hardware" && $2=="ethernet" && $3==tolower("'$1';")){ found="yes" }
-#     ($1=="client-hostname"){ name=$2 }
-#     ($1=="}"){ if (active=="yes" && found=="yes"){ target_lease=lease; target_name=name}}
-#     END{printf("%s", target_lease)} #print target_name
-#     ' /var/lib/dhcp/dhcpd.leases
-
diff --git a/floodlight.py b/floodlight.py
deleted file mode 100644 (file)
index 826e300..0000000
+++ /dev/null
@@ -1,473 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-"""
-Implement the plugging for floodligth openflow controller
-It creates the class OF_conn to create dataplane connections
-with static rules based on packet destination MAC address
-"""
-
-__author__ = "Pablo Montes, Alfonso Tierno"
-__date__ = "$28-oct-2014 12:07:15$"
-
-import json
-import requests
-import logging
-import openflow_conn
-
-
-class OF_conn(openflow_conn.OpenflowConn):
-    """
-    Openflow Connector for Floodlight.
-    No MAC learning is used
-    version 0.9 or 1.X is autodetected
-    version 1.X is in progress, not finished!!!
-    """
-
-    def __init__(self, params):
-        """
-        Constructor
-        :param self:
-        :param params: dictionay with the following keys:
-               of_dpid:     DPID to use for this controller
-               of_ip:       controller IP address
-               of_port:     controller TCP port
-               of_version:  version, can be "0.9" or "1.X". By default it is autodetected
-               of_debug:    debug level for logging. Default to ERROR
-               other keys are ignored
-        :return:  Raise an ValueError exception if same parameter is missing or wrong
-        """
-        # check params
-        if "of_ip" not in params or params["of_ip"] == None or "of_port" not in params or params["of_port"] == None:
-            raise ValueError("IP address and port must be provided")
-
-        openflow_conn.OpenflowConn.__init__(self, params)
-
-        self.name = "Floodlight"
-        self.dpid = str(params["of_dpid"])
-        self.url = "http://%s:%s" % (str(params["of_ip"]), str(params["of_port"]))
-
-        self.pp2ofi = {}  # From Physical Port to OpenFlow Index
-        self.ofi2pp = {}  # From OpenFlow Index to Physical Port
-        self.headers = {'content-type': 'application/json', 'Accept': 'application/json'}
-        self.version = None
-        self.logger = logging.getLogger('vim.OF.FL')
-        self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))
-        self._set_version(params.get("of_version"))
-
-    def _set_version(self, version):
-        """
-        set up a version of the controller.
-         Depending on the version it fills the self.ver_names with the naming used in this version
-        :param version: Openflow controller version
-        :return: Raise an ValueError exception if same parameter is missing or wrong
-        """
-        # static version names
-        if version == None:
-            self.version = None
-        elif version == "0.9":
-            self.version = version
-            self.name = "Floodlightv0.9"
-            self.ver_names = {
-                "dpid": "dpid",
-                "URLmodifier": "staticflowentrypusher",
-                "destmac": "dst-mac",
-                "vlanid": "vlan-id",
-                "inport": "ingress-port",
-                "setvlan": "set-vlan-id",
-                "stripvlan": "strip-vlan",
-            }
-        elif version[0] == "1":  # version 1.X
-            self.version = version
-            self.name = "Floodlightv1.X"
-            self.ver_names = {
-                "dpid": "switchDPID",
-                "URLmodifier": "staticflowpusher",
-                "destmac": "eth_dst",
-                "vlanid": "eth_vlan_vid",
-                "inport": "in_port",
-                "setvlan": "set_vlan_vid",
-                "stripvlan": "strip_vlan",
-            }
-        else:
-            raise ValueError("Invalid version for floodlight controller")
-
-    def get_of_switches(self):
-        """
-        Obtain a a list of switches or DPID detected by this controller
-        :return: list where each element a tuple pair (DPID, IP address)
-                      Raise an OpenflowconnConnectionException or OpenflowconnConnectionException exception if same
-                      parameter is missing or wrong
-        """
-        try:
-            of_response = requests.get(self.url + "/wm/core/controller/switches/json", headers=self.headers)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200:
-                self.logger.warning("get_of_switches " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("get_of_switches " + error_text)
-            info = of_response.json()
-            if type(info) != list and type(info) != tuple:
-                self.logger.error("get_of_switches. Unexpected response not a list %s", str(type(info)))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, not a list. Wrong version?")
-            if len(info) == 0:
-                return info
-            # autodiscover version
-            if self.version == None:
-                if 'dpid' in info[0] and 'inetAddress' in info[0]:
-                    self._set_version("0.9")
-                elif 'switchDPID' in info[0] and 'inetAddress' in info[0]:
-                    self._set_version("1.X")
-                else:
-                    self.logger.error(
-                        "get_of_switches. Unexpected response, not found 'dpid' or 'switchDPID' field: %s",
-                        str(info[0]))
-                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, not found 'dpid' or "
-                                                                       "'switchDPID' field. Wrong version?")
-
-            switch_list = []
-            for switch in info:
-                switch_list.append((switch[self.ver_names["dpid"]], switch['inetAddress']))
-            return switch_list
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_switches " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-        except ValueError as e:
-            # ValueError in the case that JSON can not be decoded
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_switches " + error_text)
-            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-    def get_of_rules(self, translate_of_ports=True):
-        """
-        Obtain the rules inserted at openflow controller
-        :param translate_of_ports: if True it translates ports from openflow index to physical switch name
-        :return: dict if ok: with the rule name as key and value is another dictionary with the following content:
-                    priority: rule priority
-                    name:         rule name (present also as the master dict key)
-                    ingress_port: match input port of the rule
-                    dst_mac:      match destination mac address of the rule, can be missing or None if not apply
-                    vlan_id:      match vlan tag of the rule, can be missing or None if not apply
-                    actions:      list of actions, composed by a pair tuples:
-                        (vlan, None/int): for stripping/setting a vlan tag
-                        (out, port):      send to this port
-                    switch:       DPID, all
-                Raise an openflowconnUnexpectedResponse exception if fails with text_error
-        """
-
-        try:
-            # get translation, autodiscover version
-            if len(self.ofi2pp) == 0:
-                self.obtain_port_correspondence()
-
-            of_response = requests.get(self.url + "/wm/%s/list/%s/json" % (self.ver_names["URLmodifier"], self.dpid),
-                                       headers=self.headers)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200:
-                self.logger.warning("get_of_rules " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("get_of_rules " + error_text)
-            info = of_response.json()
-            if type(info) != dict:
-                self.logger.error("get_of_rules. Unexpected response not a dict %s", str(type(info)))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
-            rule_dict = {}
-            for switch, switch_info in info.iteritems():
-                if switch_info == None:
-                    continue
-                if str(switch) != self.dpid:
-                    continue
-                for name, details in switch_info.iteritems():
-                    rule = {}
-                    rule["switch"] = str(switch)
-                    # rule["active"] = "true"
-                    rule["priority"] = int(details["priority"])
-                    if self.version[0] == "0":
-                        if translate_of_ports:
-                            rule["ingress_port"] = self.ofi2pp[details["match"]["inputPort"]]
-                        else:
-                            rule["ingress_port"] = str(details["match"]["inputPort"])
-                        dst_mac = details["match"]["dataLayerDestination"]
-                        if dst_mac != "00:00:00:00:00:00":
-                            rule["dst_mac"] = dst_mac
-                        vlan = details["match"]["dataLayerVirtualLan"]
-                        if vlan != -1:
-                            rule["vlan_id"] = vlan
-                        actionlist = []
-                        for action in details["actions"]:
-                            if action["type"] == "OUTPUT":
-                                if translate_of_ports:
-                                    port = self.ofi2pp[action["port"]]
-                                else:
-                                    port = action["port"]
-                                actionlist.append(("out", port))
-                            elif action["type"] == "STRIP_VLAN":
-                                actionlist.append(("vlan", None))
-                            elif action["type"] == "SET_VLAN_ID":
-                                actionlist.append(("vlan", action["virtualLanIdentifier"]))
-                            else:
-                                actionlist.append((action["type"], str(action)))
-                                self.logger.warning("get_of_rules() Unknown action in rule %s: %s", rule["name"],
-                                                    str(action))
-                            rule["actions"] = actionlist
-                    elif self.version[0] == "1":
-                        if translate_of_ports:
-                            rule["ingress_port"] = self.ofi2pp[details["match"]["in_port"]]
-                        else:
-                            rule["ingress_port"] = details["match"]["in_port"]
-                        if "eth_dst" in details["match"]:
-                            dst_mac = details["match"]["eth_dst"]
-                            if dst_mac != "00:00:00:00:00:00":
-                                rule["dst_mac"] = dst_mac
-                        if "eth_vlan_vid" in details["match"]:
-                            vlan = int(details["match"]["eth_vlan_vid"], 16) & 0xFFF
-                            rule["vlan_id"] = str(vlan)
-                        actionlist = []
-                        for action in details["instructions"]["instruction_apply_actions"]:
-                            if action == "output":
-                                if translate_of_ports:
-                                    port = self.ofi2pp[details["instructions"]["instruction_apply_actions"]["output"]]
-                                else:
-                                    port = details["instructions"]["instruction_apply_actions"]["output"]
-                                actionlist.append(("out", port))
-                            elif action == "strip_vlan":
-                                actionlist.append(("vlan", None))
-                            elif action == "set_vlan_vid":
-                                actionlist.append(
-                                    ("vlan", details["instructions"]["instruction_apply_actions"]["set_vlan_vid"]))
-                            else:
-                                self.logger.error("get_of_rules Unknown action in rule %s: %s", rule["name"],
-                                                  str(action))
-                                # actionlist.append( (action, str(details["instructions"]["instruction_apply_actions"]) ))
-                    rule_dict[str(name)] = rule
-            return rule_dict
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_rules " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-        except ValueError as e:
-            # ValueError in the case that JSON can not be decoded
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_rules " + error_text)
-            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-    def obtain_port_correspondence(self):
-        """
-        Obtain the correspondence between physical and openflow port names
-        :return: dictionary: with physical name as key, openflow name as value
-                 Raise an openflowconnUnexpectedResponse exception if fails with text_error
-        """
-        try:
-            of_response = requests.get(self.url + "/wm/core/controller/switches/json", headers=self.headers)
-            # print vim_response.status_code
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200:
-                self.logger.warning("obtain_port_correspondence " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("obtain_port_correspondence " + error_text)
-            info = of_response.json()
-
-            if type(info) != list and type(info) != tuple:
-                raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, not a list. "
-                                                                   "Wrong version?")
-
-            index = -1
-            if len(info) > 0:
-                # autodiscover version
-                if self.version == None:
-                    if 'dpid' in info[0] and 'ports' in info[0]:
-                        self._set_version("0.9")
-                    elif 'switchDPID' in info[0]:
-                        self._set_version("1.X")
-                    else:
-                        raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, "
-                                                                           "Wrong version?")
-
-            for i in range(0, len(info)):
-                if info[i][self.ver_names["dpid"]] == self.dpid:
-                    index = i
-                    break
-            if index == -1:
-                text = "DPID '" + self.dpid + "' not present in controller " + self.url
-                # print self.name, ": get_of_controller_info ERROR", text
-                raise openflow_conn.OpenflowconnUnexpectedResponse(text)
-            else:
-                if self.version[0] == "0":
-                    ports = info[index]["ports"]
-                else:  # version 1.X
-                    of_response = requests.get(self.url + "/wm/core/switch/%s/port-desc/json" % self.dpid,
-                                               headers=self.headers)
-                    # print vim_response.status_code
-                    error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-                    if of_response.status_code != 200:
-                        self.logger.warning("obtain_port_correspondence " + error_text)
-                        raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-                    self.logger.debug("obtain_port_correspondence " + error_text)
-                    info = of_response.json()
-                    if type(info) != dict:
-                        raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow port-desc response, "
-                                                                           "not a dict. Wrong version?")
-                    if "portDesc" not in info:
-                        raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow port-desc response, "
-                                                                           "'portDesc' not found. Wrong version?")
-                    if type(info["portDesc"]) != list and type(info["portDesc"]) != tuple:
-                        raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow port-desc response at "
-                                                                           "'portDesc', not a list. Wrong version?")
-                    ports = info["portDesc"]
-                for port in ports:
-                    self.pp2ofi[str(port["name"])] = str(port["portNumber"])
-                    self.ofi2pp[port["portNumber"]] = str(port["name"])
-                    # print self.name, ": get_of_controller_info ports:", self.pp2ofi
-            return self.pp2ofi
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("obtain_port_correspondence " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-        except ValueError as e:
-            # ValueError in the case that JSON can not be decoded
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("obtain_port_correspondence " + error_text)
-            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-    def del_flow(self, flow_name):
-        """
-        Delete an existing rule
-        :param flow_name: this is the rule name
-        :return: None if ok
-                 Raise an openflowconnUnexpectedResponse exception if fails with text_error
-        """
-        try:
-
-            # Raise an openflowconnUnexpectedResponse exception if fails with text_error
-            # autodiscover version
-
-            if self.version == None:
-                self.get_of_switches()
-
-            of_response = requests.delete(self.url + "/wm/%s/json" % self.ver_names["URLmodifier"],
-                                          headers=self.headers,
-                                          data='{"switch":"%s","name":"%s"}' % (self.dpid, flow_name)
-                                          )
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200:
-                self.logger.warning("del_flow " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("del_flow OK " + error_text)
-            return None
-
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("del_flow " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-
-    def new_flow(self, data):
-        """
-        Insert a new static rule
-        :param data: dictionary with the following content:
-                        priority:     rule priority
-                        name:         rule name
-                        ingress_port: match input port of the rule
-                        dst_mac:      match destination mac address of the rule, missing or None if not apply
-                        vlan_id:      match vlan tag of the rule, missing or None if not apply
-                        actions:      list of actions, composed by a pair tuples with these posibilities:
-                            ('vlan', None/int): for stripping/setting a vlan tag
-                            ('out', port):      send to this port
-        :return: None if ok
-                 Raise an openflowconnUnexpectedResponse exception if fails with text_error
-        """
-        # get translation, autodiscover version
-        if len(self.pp2ofi) == 0:
-            self.obtain_port_correspondence()
-
-        try:
-            # We have to build the data for the floodlight call from the generic data
-            sdata = {'active': "true", "name": data["name"]}
-            if data.get("priority"):
-                sdata["priority"] = str(data["priority"])
-            if data.get("vlan_id"):
-                sdata[self.ver_names["vlanid"]] = data["vlan_id"]
-            if data.get("dst_mac"):
-                sdata[self.ver_names["destmac"]] = data["dst_mac"]
-            sdata['switch'] = self.dpid
-            if not data['ingress_port'] in self.pp2ofi:
-                error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch'
-                self.logger.warning("new_flow " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-            sdata[self.ver_names["inport"]] = self.pp2ofi[data['ingress_port']]
-            sdata['actions'] = ""
-
-            for action in data['actions']:
-                if len(sdata['actions']) > 0:
-                    sdata['actions'] += ','
-                if action[0] == "vlan":
-                    if action[1] == None:
-                        sdata['actions'] += self.ver_names["stripvlan"]
-                    else:
-                        sdata['actions'] += self.ver_names["setvlan"] + "=" + str(action[1])
-                elif action[0] == 'out':
-                    sdata['actions'] += "output=" + self.pp2ofi[action[1]]
-
-            of_response = requests.post(self.url + "/wm/%s/json" % self.ver_names["URLmodifier"],
-                                        headers=self.headers, data=json.dumps(sdata))
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200:
-                self.logger.warning("new_flow " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("new_flow OK" + error_text)
-            return None
-
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("new_flow " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-
-    def clear_all_flows(self):
-        """
-        Delete all existing rules
-        :return: None if ok
-                 Raise an openflowconnUnexpectedResponse exception if fails with text_error
-        """
-
-        try:
-            # autodiscover version
-            if self.version == None:
-                sw_list = self.get_of_switches()
-                if len(sw_list) == 0:  # empty
-                    return None
-
-            url = self.url + "/wm/%s/clear/%s/json" % (self.ver_names["URLmodifier"], self.dpid)
-            of_response = requests.get(url)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code < 200 or of_response.status_code >= 300:
-                self.logger.warning("clear_all_flows " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("clear_all_flows OK " + error_text)
-            return None
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("clear_all_flows " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-
diff --git a/host_thread.py b/host_thread.py
deleted file mode 100644 (file)
index d8bca2e..0000000
+++ /dev/null
@@ -1,2274 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
'''
This is the thread that interacts with the host and with libvirt to manage VMs.
One thread is launched per host.
'''
-__author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
-__date__ = "$10-jul-2014 12:07:15$"
-
-import json
-import yaml
-import threading
-import time
-import Queue
-import paramiko
-from jsonschema import validate as js_v, exceptions as js_e
-#import libvirt
-import imp
-from vim_schema import localinfo_schema, hostinfo_schema
-import random
-import os
-
-#TODO: insert a logging system
-
-# from logging import Logger
-# import auxiliary_functions as af
-
-# TODO: insert a logging system
-
-
-class host_thread(threading.Thread):
-    lvirt_module = None
-
    def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
                 develop_bridge_iface):
        '''Init a thread.
        Arguments:
            'name': name of thread
            'host','user': host ip or name to manage and the ssh user
            'db', 'db_lock': database class and lock to use it in exclusion
            'test': when True no real host is contacted (dry-run mode)
            'image_path': remote directory where images and state files live
            'host_id': database uuid of the host this thread manages
            'version': openvim version string
            'develop_mode', 'develop_bridge_iface': relax hugepages/pinning and
                use this bridge interface when developing without real hardware
        '''
        threading.Thread.__init__(self)
        self.name = name
        self.host = host
        self.user = user
        self.db = db
        self.db_lock = db_lock
        self.test = test

        # Lazy, class-wide import of python-libvirt: done once, only when a
        # real (non-test) host thread is created, so test mode needs no libvirt.
        if not test and not host_thread.lvirt_module:
            try:
                module_info = imp.find_module("libvirt")
                host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
            except (IOError, ImportError) as e:
                raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))


        self.develop_mode = develop_mode
        self.develop_bridge_iface = develop_bridge_iface
        self.image_path = image_path
        self.host_id = host_id
        self.version = version
        
        # current indentation depth used by the XML generation helpers
        self.xml_level = 0
        #self.pending ={}
        
        self.server_status = {} #dictionary with pairs server_uuid:server_status 
        self.pending_terminate_server =[] #list  with pairs (time,server_uuid) time to send a terminate for a server being destroyed
        self.next_update_server_status = 0 #time when must be check servers status
        
        # host description loaded from <image_path>/hostinfo.yaml (None until loaded)
        self.hostinfo = None 
        
        # taskQueue holds pending tasks; queueLock serializes access from
        # producer (insert_task) and consumer (run)
        self.queueLock = threading.Lock()
        self.taskQueue = Queue.Queue(2000)
        self.ssh_conn = None
-
    def ssh_connect(self):
        """Open (or reopen) the paramiko SSH session to self.host as self.user.

        The client is stored in self.ssh_conn. On SSHException the error is only
        printed; callers must cope with a possibly unusable connection.
        """
        try:
            #Connect SSH
            self.ssh_conn = paramiko.SSHClient()
            # auto-accept unknown host keys (hosts are provisioned by openvim)
            self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self.ssh_conn.load_system_host_keys()
            self.ssh_conn.connect(self.host, username=self.user, timeout=10) #, None)
        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            print self.name, ": ssh_connect ssh Exception:", text
-        
    def load_localinfo(self):
        """Load the per-host state file '<image_path>/.openvim.yaml' over SSH.

        The content is yaml-parsed, validated against localinfo_schema and kept
        in self.localinfo. On any failure (ssh, libvirt, yaml, schema, other) a
        default empty structure is installed instead; the dirty flag is left
        False, so the default is NOT written back automatically.
        In test mode nothing is read and the default structure is used directly.
        """
        if not self.test:
            try:
                #Connect SSH
                self.ssh_connect()
    
                # make sure the image directory exists before reading from it
                command = 'mkdir -p ' +  self.image_path
                #print self.name, ': command:', command
                (_, stdout, stderr) = self.ssh_conn.exec_command(command)
                content = stderr.read()
                if len(content) > 0:
                    print self.name, ': command:', command, "stderr:", content

                command = 'cat ' +  self.image_path + '/.openvim.yaml'
                #print self.name, ': command:', command
                (_, stdout, stderr) = self.ssh_conn.exec_command(command)
                content = stdout.read()
                if len(content) == 0:
                    # empty output means the file is missing or unreadable
                    print self.name, ': command:', command, "stderr:", stderr.read()
                    raise paramiko.ssh_exception.SSHException("Error empty file ")
                self.localinfo = yaml.load(content)
                js_v(self.localinfo, localinfo_schema)
                self.localinfo_dirty=False
                if 'server_files' not in self.localinfo:
                    self.localinfo['server_files'] = {}
                print self.name, ': localinfo load from host'
                return
    
            except paramiko.ssh_exception.SSHException as e:
                text = e.args[0]
                print self.name, ": load_localinfo ssh Exception:", text
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                print self.name, ": load_localinfo libvirt Exception:", text
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                print self.name, ": load_localinfo yaml format Exception", text
            except js_e.ValidationError as e:
                text = ""
                if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
                print self.name, ": load_localinfo format Exception:", text, e.message 
            except Exception as e:
                text = str(e)
                print self.name, ": load_localinfo Exception:", text
        
        #not loaded, insert a default data and force saving by activating dirty flag
        self.localinfo = {'files':{}, 'server_files':{} } 
        #self.localinfo_dirty=True
        self.localinfo_dirty=False
-
    def load_hostinfo(self):
        """Load the host description file '<image_path>/hostinfo.yaml' over SSH.

        The content is yaml-parsed, validated against hostinfo_schema and kept
        in self.hostinfo. On any failure self.hostinfo is reset to None, which
        other methods (e.g. get_local_iface_name) treat as 'no host info'.
        In test mode the method is a no-op.
        """
        if self.test:
            return;
        try:
            #Connect SSH
            self.ssh_connect()


            command = 'cat ' +  self.image_path + '/hostinfo.yaml'
            #print self.name, ': command:', command
            (_, stdout, stderr) = self.ssh_conn.exec_command(command)
            content = stdout.read()
            if len(content) == 0:
                # empty output means the file is missing or unreadable
                print self.name, ': command:', command, "stderr:", stderr.read()
                raise paramiko.ssh_exception.SSHException("Error empty file ")
            self.hostinfo = yaml.load(content)
            js_v(self.hostinfo, hostinfo_schema)
            print self.name, ': hostlinfo load from host', self.hostinfo
            return

        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            print self.name, ": load_hostinfo ssh Exception:", text
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            print self.name, ": load_hostinfo libvirt Exception:", text
        except yaml.YAMLError as exc:
            text = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
            print self.name, ": load_hostinfo yaml format Exception", text
        except js_e.ValidationError as e:
            text = ""
            if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
            print self.name, ": load_hostinfo format Exception:", text, e.message 
        except Exception as e:
            text = str(e)
            print self.name, ": load_hostinfo Exception:", text
        
        #not loaded, insert a default data 
        self.hostinfo = None 
-        
    def save_localinfo(self, tries=3):
        """Write self.localinfo back to '<image_path>/.openvim.yaml' over SSH.

        Retries up to 'tries' times; on an 'SSH session not active' error the
        connection is re-established before the next attempt. On success (or in
        test mode) the dirty flag is cleared.
        :param tries: number of attempts before giving up silently
        """
        if self.test:
            self.localinfo_dirty = False
            return
        
        while tries>=0:
            tries-=1
            
            try:
                # stream the yaml straight into the remote file via 'cat >'
                command = 'cat > ' +  self.image_path + '/.openvim.yaml'
                print self.name, ': command:', command
                (stdin, _, _) = self.ssh_conn.exec_command(command)
                yaml.safe_dump(self.localinfo, stdin, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
                self.localinfo_dirty = False
                break #while tries
    
            except paramiko.ssh_exception.SSHException as e:
                text = e.args[0]
                print self.name, ": save_localinfo ssh Exception:", text
                if "SSH session not active" in text:
                    # stale session: reconnect and let the retry loop run again
                    self.ssh_connect()
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                print self.name, ": save_localinfo libvirt Exception:", text
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                print self.name, ": save_localinfo yaml format Exception", text
            except Exception as e:
                text = str(e)
                print self.name, ": save_localinfo Exception:", text
-
    def load_servers_from_db(self):
        """Refresh self.server_status from the 'instances' database table.

        Reads uuid/status/image_id of every instance assigned to this host and
        also migrates the legacy 'inc_files' localinfo layout into the current
        'server_files' layout, marking localinfo dirty if a migration happened.
        """
        self.db_lock.acquire()
        r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
        self.db_lock.release()

        self.server_status = {}
        # db.get_table returns a negative code on error, with c as error text
        if r<0:
            print self.name, ": Error getting data from database:", c
            return
        for server in c:
            self.server_status[ server['uuid'] ] = server['status']
            
            #convert from old version to new one
            if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
                server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0],  'file format':'raw' }
                # infer qcow2 format from the file extension
                if server_files_dict['source file'][-5:] == 'qcow2':
                    server_files_dict['file format'] = 'qcow2'
                    
                self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
        if 'inc_files' in self.localinfo:
            del self.localinfo['inc_files']
            self.localinfo_dirty = True
-    
    def delete_unused_files(self):
        '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
        Deletes unused entries at self.localinfo and the corresponding local files.
        The only reason for this mismatch is the manual deletion of instances (VM) at database
        No-op in test mode.
        ''' 
        if self.test:
            return
        # NOTE: deleting from the dict while looping is safe here because
        # Python 2 .items() returns a list copy, not a view.
        for uuid,images in self.localinfo['server_files'].items():
            if uuid not in self.server_status:
                for localfile in images.values():
                    try:
                        print self.name, ": deleting file '%s' of unused server '%s'" %(localfile['source file'], uuid)
                        self.delete_file(localfile['source file'])
                    except paramiko.ssh_exception.SSHException as e:
                        # best effort: log and keep going, still drop the entry below
                        print self.name, ": Exception deleting file '%s': %s" %(localfile['source file'], str(e))
                del self.localinfo['server_files'][uuid]
                self.localinfo_dirty = True
-   
-    def insert_task(self, task, *aditional):
-        try:
-            self.queueLock.acquire()
-            task = self.taskQueue.put( (task,) + aditional, timeout=5) 
-            self.queueLock.release()
-            return 1, None
-        except Queue.Full:
-            return -1, "timeout inserting a task over host " + self.name
-
    def run(self):
        """Thread main loop.

        Outer loop: (re)load local/host state and the server list from the db;
        re-entered only on a 'reload' task. Inner loop: pop one task from the
        queue and dispatch on its name; when the queue is empty, do housekeeping
        (flush dirty localinfo, refresh server status every 5s, fire pending
        forced terminations) or sleep 1s. An 'exit' task ends the thread.
        """
        while True:
            self.load_localinfo()
            self.load_hostinfo()
            self.load_servers_from_db()
            self.delete_unused_files()
            while True:
                # pop a task (or None) under the queue lock
                self.queueLock.acquire()
                if not self.taskQueue.empty():
                    task = self.taskQueue.get()
                else:
                    task = None
                self.queueLock.release()
    
                if task is None:
                    # idle housekeeping, at most one action per pass
                    now=time.time()
                    if self.localinfo_dirty:
                        self.save_localinfo()
                    elif self.next_update_server_status < now:
                        self.update_servers_status()
                        self.next_update_server_status = now + 5
                    elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
                        self.server_forceoff()
                    else:
                        time.sleep(1)
                    continue        
    
                # task dispatch: task[0] is the name, task[1:] its arguments
                if task[0] == 'instance':
                    print self.name, ": processing task instance", task[1]['action']
                    # retry once; the second attempt passes retry==2 so
                    # action_on_server knows it is the last chance
                    retry=0
                    while retry <2:
                        retry += 1
                        r=self.action_on_server(task[1], retry==2)
                        if r>=0: 
                            break
                elif task[0] == 'image':
                    pass
                elif task[0] == 'exit':
                    print self.name, ": processing task exit"
                    self.terminate()
                    return 0
                elif task[0] == 'reload':
                    # break to the outer loop to re-read all state
                    print self.name, ": processing task reload terminating and relaunching"
                    self.terminate()
                    break
                elif task[0] == 'edit-iface':
                    print self.name, ": processing task edit-iface port=%s, old_net=%s, new_net=%s" % (task[1], task[2], task[3])
                    self.edit_iface(task[1], task[2], task[3])
                elif task[0] == 'restore-iface':
                    print self.name, ": processing task restore-iface %s mac=%s" % (task[1], task[2])
                    self.restore_iface(task[1], task[2])
                elif task[0] == 'new-ovsbridge':
                    print self.name, ": Creating compute OVS bridge"
                    self.create_ovs_bridge()
                elif task[0] == 'new-vxlan':
                    print self.name, ": Creating vxlan tunnel=" + task[1] + ", remote ip=" + task[2]
                    self.create_ovs_vxlan_tunnel(task[1], task[2])
                elif task[0] == 'del-ovsbridge':
                    print self.name, ": Deleting OVS bridge"
                    self.delete_ovs_bridge()
                elif task[0] == 'del-vxlan':
                    print self.name, ": Deleting vxlan " + task[1] + " tunnel"
                    self.delete_ovs_vxlan_tunnel(task[1])
                elif task[0] == 'create-ovs-bridge-port':
                    print self.name, ": Adding port ovim-" + task[1] + " to OVS bridge"
                    self.create_ovs_bridge_port(task[1])
                elif task[0] == 'del-ovs-port':
                    print self.name, ": Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2])
                    self.delete_bridge_port_attached_to_ovs(task[1], task[2])
                else:
                    print self.name, ": unknown task", task
-                
-    def server_forceoff(self, wait_until_finished=False):
-        while len(self.pending_terminate_server)>0:
-            now = time.time()
-            if self.pending_terminate_server[0][0]>now:
-                if wait_until_finished:
-                    time.sleep(1)
-                    continue
-                else:
-                    return
-            req={'uuid':self.pending_terminate_server[0][1],
-                'action':{'terminate':'force'},
-                'status': None
-            }
-            self.action_on_server(req)
-            self.pending_terminate_server.pop(0)
-    
    def terminate(self):
        """Orderly shutdown of the thread's resources.

        Waits for all pending forced terminations, flushes dirty localinfo and
        closes the SSH connection (when not in test mode). Exceptions are only
        printed so shutdown always completes.
        """
        try:
            self.server_forceoff(True)
            if self.localinfo_dirty:
                self.save_localinfo()
            if not self.test:
                self.ssh_conn.close()
        except Exception as e:
            text = str(e)
            print self.name, ": terminate Exception:", text
        print self.name, ": exit from host_thread" 
-
-    def get_local_iface_name(self, generic_name):
-        if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
-            return self.hostinfo["iface_names"][generic_name]
-        return generic_name
-        
-    def create_xml_server(self, server, dev_list, server_metadata={}):
-        """Function that implements the generation of the VM XML definition.
-        Additional devices are in dev_list list
-        The main disk is upon dev_list[0]"""
-        
-    #get if operating system is Windows        
-        windows_os = False
-        os_type = server_metadata.get('os_type', None)
-        if os_type == None and 'metadata' in dev_list[0]:
-            os_type = dev_list[0]['metadata'].get('os_type', None)
-        if os_type != None and os_type.lower() == "windows":
-            windows_os = True
-    #get type of hard disk bus  
-        bus_ide = True if windows_os else False   
-        bus = server_metadata.get('bus', None)
-        if bus == None and 'metadata' in dev_list[0]:
-            bus = dev_list[0]['metadata'].get('bus', None)
-        if bus != None:
-            bus_ide = True if bus=='ide' else False
-            
-        self.xml_level = 0
-
-        text = "<domain type='kvm'>"
-    #get topology
-        topo = server_metadata.get('topology', None)
-        if topo == None and 'metadata' in dev_list[0]:
-            topo = dev_list[0]['metadata'].get('topology', None)
-    #name
-        name = server.get('name','') + "_" + server['uuid']
-        name = name[:58]  #qemu impose a length  limit of 59 chars or not start. Using 58
-        text += self.inc_tab() + "<name>" + name+ "</name>"
-    #uuid
-        text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>" 
-        
-        numa={}
-        if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
-            numa = server['extended']['numas'][0]
-    #memory
-        use_huge = False
-        memory = int(numa.get('memory',0))*1024*1024 #in KiB
-        if memory==0:
-            memory = int(server['ram'])*1024;
-        else:
-            if not self.develop_mode:
-                use_huge = True
-        if memory==0:
-            return -1, 'No memory assigned to instance'
-        memory = str(memory)
-        text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>" 
-        text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
-        if use_huge:
-            text += self.tab()+'<memoryBacking>'+ \
-                self.inc_tab() + '<hugepages/>'+ \
-                self.dec_tab()+ '</memoryBacking>'
-
-    #cpu
-        use_cpu_pinning=False
-        vcpus = int(server.get("vcpus",0))
-        cpu_pinning = []
-        if 'cores-source' in numa:
-            use_cpu_pinning=True
-            for index in range(0, len(numa['cores-source'])):
-                cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
-                vcpus += 1
-        if 'threads-source' in numa:
-            use_cpu_pinning=True
-            for index in range(0, len(numa['threads-source'])):
-                cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
-                vcpus += 1
-        if 'paired-threads-source' in numa:
-            use_cpu_pinning=True
-            for index in range(0, len(numa['paired-threads-source'])):
-                cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
-                cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
-                vcpus += 2
-        
-        if use_cpu_pinning and not self.develop_mode:
-            text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
-                self.tab()+'<cputune>'
-            self.xml_level += 1
-            for i in range(0, len(cpu_pinning)):
-                text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
-            text += self.dec_tab()+'</cputune>'+ \
-                self.tab() + '<numatune>' +\
-                self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
-                self.dec_tab() + '</numatune>'
-        else:
-            if vcpus==0:
-                return -1, "Instance without number of cpus"
-            text += self.tab()+"<vcpu>" + str(vcpus)  + "</vcpu>"
-
-    #boot
-        boot_cdrom = False
-        for dev in dev_list:
-            if dev['type']=='cdrom' :
-                boot_cdrom = True
-                break
-        text += self.tab()+ '<os>' + \
-            self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
-        if boot_cdrom:
-            text +=  self.tab() + "<boot dev='cdrom'/>" 
-        text +=  self.tab() + "<boot dev='hd'/>" + \
-            self.dec_tab()+'</os>'
-    #features
-        text += self.tab()+'<features>'+\
-            self.inc_tab()+'<acpi/>' +\
-            self.tab()+'<apic/>' +\
-            self.tab()+'<pae/>'+ \
-            self.dec_tab() +'</features>'
-        if topo == "oneSocket:hyperthreading":
-            if vcpus % 2 != 0:
-                return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
-            text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % vcpus/2
-        elif windows_os or topo == "oneSocket":
-            text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
-        else:
-            text += self.tab() + "<cpu mode='host-model'></cpu>"
-        text += self.tab() + "<clock offset='utc'/>" +\
-            self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
-            self.tab() + "<on_reboot>restart</on_reboot>" + \
-            self.tab() + "<on_crash>restart</on_crash>"
-        text += self.tab() + "<devices>" + \
-            self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
-            self.tab() + "<serial type='pty'>" +\
-            self.inc_tab() + "<target port='0'/>" + \
-            self.dec_tab() + "</serial>" +\
-            self.tab() + "<console type='pty'>" + \
-            self.inc_tab()+ "<target type='serial' port='0'/>" + \
-            self.dec_tab()+'</console>'
-        if windows_os:
-            text += self.tab() + "<controller type='usb' index='0'/>" + \
-                self.tab() + "<controller type='ide' index='0'/>" + \
-                self.tab() + "<input type='mouse' bus='ps2'/>" + \
-                self.tab() + "<sound model='ich6'/>" + \
-                self.tab() + "<video>" + \
-                self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
-                self.dec_tab() + "</video>" + \
-                self.tab() + "<memballoon model='virtio'/>" + \
-                self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
-
-#>             self.tab()+'<alias name=\'hostdev0\'/>\n' +\
-#>             self.dec_tab()+'</hostdev>\n' +\
-#>             self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
-        if windows_os:
-            text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
-        else:
-            #If image contains 'GRAPH' include graphics
-            #if 'GRAPH' in image:
-            text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
-                self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
-                self.dec_tab() + "</graphics>"
-
-        vd_index = 'a'
-        for dev in dev_list:
-            bus_ide_dev = bus_ide
-            if dev['type']=='cdrom' or dev['type']=='disk':
-                if dev['type']=='cdrom':
-                    bus_ide_dev = True
-                text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
-                if 'file format' in dev:
-                    text += self.inc_tab() + "<driver name='qemu' type='"  +dev['file format']+ "' cache='writethrough'/>"
-                if 'source file' in dev:
-                    text += self.tab() + "<source file='" +dev['source file']+ "'/>"
-                #elif v['type'] == 'block':
-                #    text += self.tab() + "<source dev='" + v['source'] + "'/>"
-                #else:
-                #    return -1, 'Unknown disk type ' + v['type']
-                vpci = dev.get('vpci',None)
-                if vpci == None:
-                    vpci = dev['metadata'].get('vpci',None)
-                text += self.pci2xml(vpci)
-               
-                if bus_ide_dev:
-                    text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>"   #TODO allows several type of disks
-                else:
-                    text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>" 
-                text += self.dec_tab() + '</disk>'
-                vd_index = chr(ord(vd_index)+1)
-            elif dev['type']=='xml':
-                dev_text = dev['xml']
-                if 'vpci' in dev:
-                    dev_text = dev_text.replace('__vpci__', dev['vpci'])
-                if 'source file' in dev:
-                    dev_text = dev_text.replace('__file__', dev['source file'])
-                if 'file format' in dev:
-                    dev_text = dev_text.replace('__format__', dev['source file'])
-                if '__dev__' in dev_text:
-                    dev_text = dev_text.replace('__dev__', vd_index)
-                    vd_index = chr(ord(vd_index)+1)
-                text += dev_text
-            else:
-                return -1, 'Unknown device type ' + dev['type']
-
-        net_nb=0
-        bridge_interfaces = server.get('networks', [])
-        for v in bridge_interfaces:
-            #Get the brifge name
-            self.db_lock.acquire()
-            result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
-            self.db_lock.release()
-            if result <= 0:
-                print "create_xml_server ERROR getting nets",result, content
-                return -1, content
-            #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
-            #I know it is not secure    
-            #for v in sorted(desc['network interfaces'].itervalues()):
-            model = v.get("model", None)
-            if content[0]['provider']=='default':
-                text += self.tab() + "<interface type='network'>" + \
-                    self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
-            elif content[0]['provider'][0:7]=='macvtap':
-                text += self.tab()+"<interface type='direct'>" + \
-                    self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
-                    self.tab() + "<target dev='macvtap0'/>"
-                if windows_os:
-                    text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
-                elif model==None:
-                    model = "virtio"
-            elif content[0]['provider'][0:6]=='bridge':
-                text += self.tab() + "<interface type='bridge'>" +  \
-                    self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
-                if windows_os:
-                    text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
-                        self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
-                elif model==None:
-                    model = "virtio"
-            elif content[0]['provider'][0:3] == "OVS":
-                vlan = content[0]['provider'].replace('OVS:', '')
-                text += self.tab() + "<interface type='bridge'>" + \
-                        self.inc_tab() + "<source bridge='ovim-" + vlan + "'/>"
-            else:
-                return -1, 'Unknown Bridge net provider ' + content[0]['provider']
-            if model!=None:
-                text += self.tab() + "<model type='" +model+ "'/>"
-            if v.get('mac_address', None) != None:
-                text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
-            text += self.pci2xml(v.get('vpci',None))
-            text += self.dec_tab()+'</interface>'
-            
-            net_nb += 1
-
-        interfaces = numa.get('interfaces', [])
-
-        net_nb=0
-        for v in interfaces:
-            if self.develop_mode: #map these interfaces to bridges
-                text += self.tab() + "<interface type='bridge'>" +  \
-                    self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
-                if windows_os:
-                    text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
-                        self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
-                else:
-                    text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
-                if v.get('mac_address', None) != None:
-                    text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
-                text += self.pci2xml(v.get('vpci',None))
-                text += self.dec_tab()+'</interface>'
-                continue
-                
-            if v['dedicated'] == 'yes':  #passthrought
-                text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
-                    self.inc_tab() + "<source>"
-                self.inc_tab()
-                text += self.pci2xml(v['source'])
-                text += self.dec_tab()+'</source>'
-                text += self.pci2xml(v.get('vpci',None))
-                if windows_os:
-                    text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
-                text += self.dec_tab()+'</hostdev>'
-                net_nb += 1
-            else:        #sriov_interfaces
-                #skip not connected interfaces
-                if v.get("net_id") == None:
-                    continue
-                text += self.tab() + "<interface type='hostdev' managed='yes'>"
-                self.inc_tab()
-                if v.get('mac_address', None) != None:
-                    text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
-                text+= self.tab()+'<source>'
-                self.inc_tab()
-                text += self.pci2xml(v['source'])
-                text += self.dec_tab()+'</source>'
-                if v.get('vlan',None) != None:
-                    text += self.tab() + "<vlan>   <tag id='" + str(v['vlan']) + "'/>   </vlan>"
-                text += self.pci2xml(v.get('vpci',None))
-                if windows_os:
-                    text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
-                text += self.dec_tab()+'</interface>'
-
-            
-        text += self.dec_tab()+'</devices>'+\
-        self.dec_tab()+'</domain>'
-        return 0, text
-    
-    def pci2xml(self, pci):
-        '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
-        alows an empty pci text'''
-        if pci is None:
-            return ""
-        # pci layout is "domain:bus:slot.function"; split on ':' then '.' to get the four hex fields
-        first_part = pci.split(':')
-        second_part = first_part[2].split('.')
-        return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
-                    "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
-                    "' function='0x" + second_part[1] + "'/>" 
-    
-    def tab(self):
-        """Return indentation according to xml_level"""
-        # a newline followed by two spaces per current nesting level
-        return "\n" + ('  '*self.xml_level)
-    
-    def inc_tab(self):
-        """Increment and return indentation according to xml_level"""
-        # deepen one XML nesting level before emitting the indent
-        self.xml_level += 1
-        return self.tab()
-    
-    def dec_tab(self):
-        """Decrement and return indentation according to xml_level"""
-        # close one XML nesting level before emitting the indent
-        self.xml_level -= 1
-        return self.tab()
-
-    def create_ovs_bridge(self):
-        """
-        Create a bridge in compute OVS to allocate VMs
-        :return: True if success
-        """
-        # test mode: no real compute behind this thread, nothing to do (returns None)
-        if self.test:
-            return
-        command = 'sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true'
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        # ovs-vsctl prints nothing on success; any stdout output is treated as failure
-        # NOTE(review): errors usually go to stderr, which is ignored here — confirm
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def delete_port_to_ovs_bridge(self, vlan, net_uuid):
-        """
-        Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
-        :param vlan: vlan port id
-        :param net_uuid: network id (currently unused here; callers check port usage beforehand)
-        :return:
-        """
-
-        if self.test:
-            return
-
-        port_name = 'ovim-' + vlan
-        command = 'sudo ovs-vsctl del-port br-int ' + port_name
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        # empty stdout is taken as success (ovs-vsctl is silent when the port is removed)
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
-        """
-        Delete dhcp server process living in namespace
-        :param vlan: segmentation id
-        :param net_uuid: network uuid
-        :param dhcp_path: conf file path that lives in namespace side
-        :return:
-        """
-        if self.test:
-            return
-        # keep the server alive while any instance port still uses the net
-        if not self.is_dhcp_port_free(vlan, net_uuid):
-            return True
-
-        net_namespace = 'ovim-' + vlan
-        dhcp_path = os.path.join(dhcp_path, net_namespace)
-        pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
-
-        # read dnsmasq pid from its pid file inside the namespace
-        command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_file
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        # NOTE(review): if the pid file is missing, 'content' is empty and the kill
-        # command below is malformed ("kill -9 " with no pid) — confirm
-        command = 'sudo ip netns exec ' + net_namespace + ' kill -9 ' + content
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        # if len(content) == 0:
-        #     return True
-        # else:
-        #     return False
-
-    def is_dhcp_port_free(self, host_id, net_uuid):
-        """
-        Check if any port attached to the a net in a vxlan mesh across computes nodes
-        :param host_id: host id (NOTE(review): currently unused by the query — confirm)
-        :param net_uuid: network id
-        :return: True if no 'instance:ovs' port uses the net (i.e. the dhcp port is free)
-        """
-        self.db_lock.acquire()
-        result, content = self.db.get_table(
-            FROM='ports',
-            WHERE={'p.type': 'instance:ovs', 'p.net_id': net_uuid}
-        )
-        self.db_lock.release()
-
-        if len(content) > 0:
-            return False
-        else:
-            return True
-
-    def is_port_free(self, host_id, net_uuid):
-        """
-        Check if there not ovs ports of a network in a compute host.
-        :param host_id:  host id (NOTE(review): the query filters by self.host_id, not this parameter — confirm)
-        :param net_uuid: network id
-        :return: True if no 'instance:ovs' port of the net exists on this host (i.e. free)
-        """
-
-        self.db_lock.acquire()
-        result, content = self.db.get_table(
-            FROM='ports as p join instances as i on p.instance_id=i.uuid',
-            WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
-        )
-        self.db_lock.release()
-
-        if len(content) > 0:
-            return False
-        else:
-            return True
-
-    def add_port_to_ovs_bridge(self, vlan):
-        """
-        Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
-        :param vlan: vlan port id (string; also used as the OVS access-port tag)
-        :return: True if success
-        """
-
-        if self.test:
-            return
-
-        port_name = 'ovim-' + vlan
-        command = 'sudo ovs-vsctl add-port br-int ' + port_name + ' tag=' + vlan
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        # empty stdout is taken as success (ovs-vsctl is silent when the port is added)
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def delete_dhcp_port(self, vlan, net_uuid):
-        """
-        Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
-        :param vlan: segmentation id
-        :param net_uuid: network id
-        :return: True if success
-        """
-
-        if self.test:
-            return
-
-        # leave the dhcp interfaces in place while any instance port still uses the net
-        if not self.is_dhcp_port_free(vlan, net_uuid):
-            return True
-        self.delete_dhcp_interfaces(vlan)
-        return True
-
-    def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
-        """
-        Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
-        :param vlan: segmentation id
-        :param net_uuid: network id
-        :return: True if success
-        """
-        if self.test:
-            return
-
-        # only remove when no instance port of the net remains on this host
-        if not self.is_port_free(vlan, net_uuid):
-            return True
-        self.delete_port_to_ovs_bridge(vlan, net_uuid)
-        self.delete_linux_bridge(vlan)
-        return True
-
-    def delete_linux_bridge(self, vlan):
-        """
-        Delete a linux bridge in a scpecific compute.
-        :param vlan: vlan port id
-        :return: True if success
-        """
-
-        if self.test:
-            return
-
-        port_name = 'ovim-' + vlan
-        # bring down the veth end first; its result is deliberately ignored below
-        command = 'sudo ip link set dev veth0-' + vlan + ' down'
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        #
-        # if len(content) != 0:
-        #     return False
-
-        # bring the bridge down and remove it; empty stdout is taken as success
-        command = 'sudo ifconfig ' + port_name + ' down &&  sudo brctl delbr ' + port_name
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def create_ovs_bridge_port(self, vlan):
-        """
-        Generate a linux bridge and attache the port to a OVS bridge
-        :param vlan: vlan port id
-        :return:
-        """
-        if self.test:
-            return
-        # create the linux bridge first, then plug it into br-int with the vlan tag
-        self.create_linux_bridge(vlan)
-        self.add_port_to_ovs_bridge(vlan)
-
-    def create_linux_bridge(self, vlan):
-        """
-        Create a linux bridge with STP active
-        :param vlan: netowrk vlan id
-        :return: True if the final 'ip link set ... up' produced no stdout output
-        """
-
-        if self.test:
-            return
-
-        port_name = 'ovim-' + vlan
-        # check whether the bridge already exists (result currently unused, see note below)
-        command = 'sudo brctl show | grep ' + port_name
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        # if exist nothing to create
-        # if len(content) == 0:
-        #     return False
-
-        command = 'sudo brctl addbr ' + port_name
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        # if len(content) == 0:
-        #     return True
-        # else:
-        #     return False
-
-        # enable spanning tree on the new bridge
-        command = 'sudo brctl stp ' + port_name + ' on'
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        # if len(content) == 0:
-        #     return True
-        # else:
-        #     return False
-        command = 'sudo ip link set dev ' + port_name + ' up'
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def set_mac_dhcp_server(self, ip, mac, vlan, netmask, dhcp_path):
-        """
-        Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
-        :param ip: IP address asigned to a VM
-        :param mac: VM vnic mac to be macthed with the IP received
-        :param vlan: Segmentation id
-        :param netmask: netmask value (currently unused in this method)
-        :param dhcp_path: dhcp conf file path that live in namespace side
-        :return: True if success
-        """
-
-        if self.test:
-            return
-
-        net_namespace = 'ovim-' + vlan
-        # NOTE(review): net_namespace is joined twice here (dhcp_path already ends in it),
-        # so the hostsdir is <dhcp_path>/<ns>/<ns>; delete_mac_dhcp_server does the same —
-        # confirm this layout is intended
-        dhcp_path = os.path.join(dhcp_path, net_namespace)
-        dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)
-
-        if not ip:
-            return False
-
-        # dnsmasq host entry format: "MAC,IP"
-        ip_data = mac.upper() + ',' + ip
-
-        command = 'sudo  ip netns exec ' + net_namespace + ' touch ' + dhcp_hostsdir
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        command = 'sudo  ip netns exec ' + net_namespace + ' sudo bash -ec "echo ' + ip_data + ' >> ' + dhcp_hostsdir + '"'
-
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
-        """
-        Delete into dhcp conf file the ip  assigned to a specific MAC address
-
-        :param ip: IP address asigned to a VM
-        :param mac:  VM vnic mac to be macthed with the IP received
-        :param vlan:  Segmentation id
-        :param dhcp_path: dhcp conf file path that live in namespace side
-        :return:
-        """
-
-        if self.test:
-            return
-
-        net_namespace = 'ovim-' + vlan
-        # same double-join of the namespace name as in set_mac_dhcp_server, so both
-        # methods address the same hosts file
-        dhcp_path = os.path.join(dhcp_path, net_namespace)
-        dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)
-
-        if not ip:
-            return False
-
-        ip_data = mac.upper() + ',' + ip
-
-        # remove the "MAC,IP" line in place with sed
-        command = 'sudo  ip netns exec ' + net_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway):
-        """
-        Launch a dnsmasq dhcp server inside the net namespace of the given vlan
-        :param self:
-        :param vlan: Segmentation id
-        :param ip_range: IP dhcp range (two-element sequence: first and last address)
-        :param netmask: network netmask
-        :param dhcp_path: dhcp conf file path that live in namespace side
-        :param gateway: Gateway address for dhcp net
-        :return: True if success
-        """
-
-        if self.test:
-            return
-
-        interface = 'tap-' + vlan
-        net_namespace = 'ovim-' + vlan
-        dhcp_path = os.path.join(dhcp_path, net_namespace)
-        leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
-        pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
-
-        dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask
-
-        command = 'sudo ip netns exec ' + net_namespace + ' mkdir -p ' + dhcp_path
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        # read a previously stored dnsmasq pid, if any
-        pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
-        command = 'sudo  ip netns exec ' + net_namespace + ' cat ' + pid_path
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        # check if pid is runing
-        pid_status_path = content
-        if content:
-            command = "ps aux | awk '{print $2 }' | grep " + pid_status_path
-            print self.name, ': command:', command
-            (_, stdout, _) = self.ssh_conn.exec_command(command)
-            content = stdout.read()
-        if not content:
-            command = 'sudo  ip netns exec ' + net_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
-              '--interface=' + interface + ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
-              ' --dhcp-range ' + dhcp_range + ' --pid-file=' + pid_file + ' --dhcp-leasefile=' + leases_path + \
-              '  --listen-address ' + gateway
-
-        # NOTE(review): this exec runs unconditionally, so when dnsmasq is already running
-        # it re-executes the last ps/grep command instead of the dnsmasq launch — confirm
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.readline()
-
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def delete_dhcp_interfaces(self, vlan):
-        """
-        Tear down the dhcp veth/tap interfaces of a net namespace: remove the OVS side
-        port and bring both veth ends down
-        :param vlan: netowrk vlan id
-        :return:
-        """
-
-        if self.test:
-            return
-
-        net_namespace = 'ovim-' + vlan
-        # detach the OVS-side veth end from br-int
-        command = 'sudo ovs-vsctl del-port br-int ovs-tap-' + vlan
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        # bring down the namespace-side tap
-        command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' down'
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        # bring down the host-side veth end
-        command = 'sudo ip link set dev ovs-tap-' + vlan + ' down'
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-    def create_dhcp_interfaces(self, vlan, ip, netmask):
-        """
-        Create the net namespace for the vlan and a veth pair: one end attached to
-        br-int (tagged with the vlan), the other moved inside the namespace and
-        configured with the given ip/netmask
-        :param vlan: segmentation id
-        :param ip: Ip included in the dhcp range for the tap interface living in namesapce side
-        :param netmask: dhcp net CIDR
-        :return: True if success
-        """
-
-        if self.test:
-            return
-
-        net_namespace = 'ovim-' + vlan
-        namespace_interface = 'tap-' + vlan
-
-        command = 'sudo ip netns add ' + net_namespace
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        # veth pair: tap-<vlan> (namespace side) <-> ovs-tap-<vlan> (host/OVS side)
-        command = 'sudo ip link add tap-' + vlan + ' type veth peer name ovs-tap-' + vlan
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        command = 'sudo ovs-vsctl add-port br-int ovs-tap-' + vlan + ' tag=' + vlan
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        command = 'sudo ip link set tap-' + vlan + ' netns ' + net_namespace
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' up'
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        command = 'sudo ip link set dev ovs-tap-' + vlan + ' up'
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        command = 'sudo  ip netns exec ' + net_namespace + ' ' + ' ifconfig  ' + namespace_interface \
-                  + ' ' + ip + ' netmask ' + netmask
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-
-        # only the last command's stdout decides success
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
-        """
-        Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
-        :param vxlan_interface: vlxan inteface name.
-        :param remote_ip: tunnel endpoint remote compute ip.
-        :return: True if ovs-vsctl produced no stdout output
-        """
-        if self.test:
-            return
-        # add the vxlan port to br-int and raise its stp path cost so mesh links are deprioritized
-        command = 'sudo ovs-vsctl add-port br-int ' + vxlan_interface + \
-                  ' -- set Interface ' + vxlan_interface + '  type=vxlan options:remote_ip=' + remote_ip + \
-                  ' -- set Port ' + vxlan_interface + ' other_config:stp-path-cost=10'
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        print content
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def delete_ovs_vxlan_tunnel(self, vxlan_interface):
-        """
-        Delete a vlxan tunnel  port from a OVS brdige.
-        :param vxlan_interface: vlxan name to be delete it.
-        :return: True if success.
-        """
-        if self.test:
-            return
-        command = 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        print content
-        # empty stdout is taken as success
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def delete_ovs_bridge(self):
-        """
-        Delete a OVS bridge from  a compute.
-        :return: True if success
-        """
-        if self.test:
-            return
-        command = 'sudo ovs-vsctl del-br br-int'
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        # empty stdout is taken as success
-        if len(content) == 0:
-            return True
-        else:
-            return False
-
-    def get_file_info(self, path):
-        """Return the 'ls -lL' fields of a remote file split on spaces,
-        or None when the file does not exist (ls prints nothing to stdout)."""
-        command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
-        print self.name, ': command:', command
-        (_, stdout, _) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        if len(content) == 0:
-            return None # file does not exist
-        else:
-            return content.split(" ") #(permission, 1, owner, group, size, date, file)
-
-    def qemu_get_info(self, path):
-        """Run 'qemu-img info' on a remote image path and return its output parsed
-        as a yaml/dict; raises paramiko SSHException on command error or bad yaml."""
-        command = 'qemu-img info ' + path
-        print self.name, ': command:', command
-        (_, stdout, stderr) = self.ssh_conn.exec_command(command)
-        content = stdout.read()
-        if len(content) == 0:
-            error = stderr.read()
-            print self.name, ": get_qemu_info error ", error
-            raise paramiko.ssh_exception.SSHException("Error getting qemu_info: " + error)
-        else:
-            try: 
-                return yaml.load(content)
-            except yaml.YAMLError as exc:
-                text = ""
-                if hasattr(exc, 'problem_mark'):
-                    mark = exc.problem_mark
-                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
-                print self.name, ": get_qemu_info yaml format Exception", text
-                raise paramiko.ssh_exception.SSHException("Error getting qemu_info yaml format" + text)
-
-    def qemu_change_backing(self, inc_file, new_backing_file):
-        """Rebase (unsafe mode, -u) an incremental image onto a new backing file.
-        Returns 0 on success, -1 if qemu-img wrote anything to stderr."""
-        command = 'qemu-img rebase -u -b ' + new_backing_file + ' ' + inc_file 
-        print self.name, ': command:', command
-        (_, _, stderr) = self.ssh_conn.exec_command(command)
-        content = stderr.read()
-        if len(content) == 0:
-            return 0
-        else:
-            print self.name, ": qemu_change_backing error: ", content
-            return -1
-    
-    def get_notused_filename(self, proposed_name, suffix=''):
-        '''Look for a non existing file_name in the host
-            proposed_name: proposed file name, includes path
-            suffix: suffix to be added to the name, before the extention
-        '''
-        # locate the extension dot; a dot inside a directory name does not count
-        extension = proposed_name.rfind(".")
-        slash = proposed_name.rfind("/")
-        if extension < 0 or extension < slash: # no extension
-            extension = len(proposed_name)
-        target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
-        info = self.get_file_info(target_name)
-        if info is None:
-            return target_name
-        
-        # append "-0", "-1", ... until a free name is found on the host
-        index=0
-        while info is not None:
-            target_name = proposed_name[:extension] + suffix +  "-" + str(index) + proposed_name[extension:]
-            index+=1
-            info = self.get_file_info(target_name) 
-        return target_name
-    
-    def get_notused_path(self, proposed_path, suffix=''):
-        '''Look for a non existing path at database for images
-            proposed_path: proposed file name, includes path
-            suffix: suffix to be added to the name, before the extention
-        '''
-        extension = proposed_path.rfind(".")
-        if extension < 0:
-            extension = len(proposed_path)
-        # NOTE(review): if suffix is None, target_path is never assigned and the loop
-        # below raises NameError — confirm callers never pass suffix=None
-        if suffix != None:
-            target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
-        index=0
-        while True:
-            # keep appending "-<index>" until no image row uses the path
-            r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
-            if r<=0:
-                return target_path
-            target_path = proposed_path[:extension] + suffix +  "-" + str(index) + proposed_path[extension:]
-            index+=1
-
-    
-    def delete_file(self, file_name):
-        """Remove a file on the remote host; raises paramiko SSHException
-        if rm writes anything to stderr."""
-        command = 'rm -f '+file_name
-        print self.name, ': command:', command
-        (_, _, stderr) = self.ssh_conn.exec_command(command)
-        error_msg = stderr.read()
-        if len(error_msg) > 0:
-            raise paramiko.ssh_exception.SSHException("Error deleting file: " + error_msg)
-
-    def copy_file(self, source, destination, perserve_time=True):
-        """Copy 'source' (http url or local path on the remote host) to 'destination'
-        on the remote host; raises paramiko SSHException if the command writes to stderr."""
-        if source[0:4]=="http":
-            # download with wget; stderr is captured to a ".result" file and re-emitted only on failure
-            command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
-                dst=destination, src=source, dst_result=destination + ".result" )
-        else:
-            command = 'cp --no-preserve=mode'
-            if perserve_time:
-                command += ' --preserve=timestamps'
-            command +=  " '{}' '{}'".format(source, destination)
-        print self.name, ': command:', command
-        (_, _, stderr) = self.ssh_conn.exec_command(command)
-        error_msg = stderr.read()
-        if len(error_msg) > 0:
-            raise paramiko.ssh_exception.SSHException("Error copying image to local host: " + error_msg)
-
    def copy_remote_file(self, remote_file, use_incremental):
        ''' Copy a file from the repository to local folder and recursively 
            copy the backing files in case the remote file is incremental
            Read and/or modified self.localinfo['files'] that contain the
            unmodified copies of images in the local path
            params:
                remote_file: path of remote file
                use_incremental: None (leave the decision to this function), True, False
            return:
                local_file: name of local file
                qemu_info: dict with qemu information of local file
                use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
        '''
        
        use_incremental_out = use_incremental
        new_backing_file = None
        local_file = None
        file_from_local = True

        #in case incremental use is not decided, take the decision depending on the image
        #avoid the use of incremental if this image is already incremental
        if remote_file[0:4] == "http":
            file_from_local = False
        if file_from_local:
            qemu_remote_info = self.qemu_get_info(remote_file)
        if use_incremental_out==None:
            use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)
        #copy recursivelly the backing files
        if  file_from_local and 'backing file' in qemu_remote_info:
            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)
        
        #check if remote file is present locally
        if use_incremental_out and remote_file in self.localinfo['files']:
            local_file = self.localinfo['files'][remote_file]
            local_file_info =  self.get_file_info(local_file)
            if file_from_local:
                remote_file_info = self.get_file_info(remote_file)
            if local_file_info == None:
                local_file = None
            # indices [4] and [5] of get_file_info presumably hold date and size -- TODO confirm against get_file_info
            elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
                #local copy of file not valid because date or size are different. 
                #TODO DELETE local file if this file is not used by any active virtual machine
                try:
                    self.delete_file(local_file)
                    del self.localinfo['files'][remote_file]
                except Exception:
                    pass
                local_file = None
            else: #check that the local file has the same backing file, or there are not backing at all
                qemu_info = self.qemu_get_info(local_file)
                if new_backing_file != qemu_info.get('backing file'):
                    local_file = None
                

        # file missing or local copy invalidated above: copy it now and register it
        if local_file == None: #copy the file 
            img_name= remote_file.split('/') [-1]
            img_local = self.image_path + '/' + img_name
            local_file = self.get_notused_filename(img_local)
            self.copy_file(remote_file, local_file, use_incremental_out)

            if use_incremental_out:
                self.localinfo['files'][remote_file] = local_file
            if new_backing_file:
                self.qemu_change_backing(local_file, new_backing_file)
            qemu_info = self.qemu_get_info(local_file)
            
        return local_file, qemu_info, use_incremental_out
-            
    def launch_server(self, conn, server, rebuild=False, domain=None):
        """Deploy and start a server (VM) on this host via libvirt.

        Ensures the needed disk images are present on the host (copying them,
        optionally as qcow2 incremental files), builds the domain XML and
        launches it.
        Params:
            conn: open libvirt connection to this host
            server: instance dict with 'uuid', 'image_id', optional 'metadata', 'paused'
            rebuild: True to redeploy from scratch; previous incremental files are deleted
            domain: already-existing libvirt domain; if set and not rebuilding it is just resumed
        Return:
            (0, 'Success') on success, (<0, error_text) on failure
        """
        if self.test:
            time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real
            return 0, 'Success'

        server_id = server['uuid']
        paused = server.get('paused','no')
        try:
            # a suspended domain only needs a resume, not a full deployment
            if domain!=None and rebuild==False:
                domain.resume()
                #self.server_status[server_id] = 'ACTIVE'
                return 0, 'Success'

            self.db_lock.acquire()
            result, server_data = self.db.get_instance(server_id)
            self.db_lock.release()
            if result <= 0:
                print self.name, ": launch_server ERROR getting server from DB",result, server_data
                return result, server_data
    
        #0: get image metadata
            server_metadata = server.get('metadata', {})
            use_incremental = None
             
            if "use_incremental" in server_metadata:
                use_incremental = False if server_metadata["use_incremental"]=="no" else True

            server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
            if rebuild:
                #delete previous incremental files
                for file_ in server_host_files.values():
                    self.delete_file(file_['source file'] )
                server_host_files={}
    
        #1: obtain aditional devices (disks)
            #Put as first device the main disk
            devices = [  {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ] 
            if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
                devices += server_data['extended']['devices']

            for dev in devices:
                if dev['image_id'] == None:
                    continue
                
                self.db_lock.acquire()
                result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
                                                    WHERE={'uuid': dev['image_id']})
                self.db_lock.release()
                if result <= 0:
                    error_text = "ERROR", result, content, "when getting image", dev['image_id']
                    print self.name, ": launch_server", error_text 
                    return -1, error_text
                if content[0]['metadata'] is not None:
                    dev['metadata'] = json.loads(content[0]['metadata'])
                else:
                    dev['metadata'] = {}
                
                # image already copied to this host in a previous launch: reuse it
                if dev['image_id'] in server_host_files:
                    dev['source file'] = server_host_files[ dev['image_id'] ] ['source file'] #local path
                    dev['file format'] = server_host_files[ dev['image_id'] ] ['file format'] # raw or qcow2
                    continue
                
            #2: copy image to host
                remote_file = content[0]['path']
                use_incremental_image = use_incremental
                if dev['metadata'].get("use_incremental") == "no":
                    use_incremental_image = False
                local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)
                
                #create incremental image
                if use_incremental_image:
                    local_file_inc = self.get_notused_filename(local_file, '.inc')
                    command = 'qemu-img create -f qcow2 '+local_file_inc+ ' -o backing_file='+ local_file
                    print 'command:', command
                    (_, _, stderr) = self.ssh_conn.exec_command(command)
                    error_msg = stderr.read()
                    if len(error_msg) > 0:
                        raise paramiko.ssh_exception.SSHException("Error creating incremental file: " + error_msg)
                    local_file = local_file_inc
                    qemu_info = {'file format':'qcow2'}
                
                server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}

                dev['source file'] = local_file 
                dev['file format'] = qemu_info['file format']

            # remember the per-server files so terminate/rebuild can clean them up
            self.localinfo['server_files'][ server['uuid'] ] = server_host_files
            self.localinfo_dirty = True

        #3 Create XML
            result, xml = self.create_xml_server(server_data, devices, server_metadata)  #local_file
            if result <0:
                print self.name, ": create xml server error:", xml
                return -2, xml
            print self.name, ": create xml:", xml
            atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
        #4 Start the domain
            if not rebuild: #ensures that any pending destroying server is done
                self.server_forceoff(True)
            #print self.name, ": launching instance" #, xml
            conn.createXML(xml, atribute)
            #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'

            return 0, 'Success'

        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            print self.name, ": launch_server(%s) ssh Exception: %s" %(server_id, text)
            # reconnect so a later retry can succeed on a fresh session
            if "SSH session not active" in text:
                self.ssh_connect()
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            print self.name, ": launch_server(%s) libvirt Exception: %s"  %(server_id, text)
        except Exception as e:
            text = str(e)
            print self.name, ": launch_server(%s) Exception: %s"  %(server_id, text)
        return -1, text
-    
-    def update_servers_status(self):
-                            # # virDomainState
-                            # VIR_DOMAIN_NOSTATE = 0
-                            # VIR_DOMAIN_RUNNING = 1
-                            # VIR_DOMAIN_BLOCKED = 2
-                            # VIR_DOMAIN_PAUSED = 3
-                            # VIR_DOMAIN_SHUTDOWN = 4
-                            # VIR_DOMAIN_SHUTOFF = 5
-                            # VIR_DOMAIN_CRASHED = 6
-                            # VIR_DOMAIN_PMSUSPENDED = 7   #TODO suspended
-    
-        if self.test or len(self.server_status)==0:
-            return            
-        
-        try:
-            conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
-            domains=  conn.listAllDomains() 
-            domain_dict={}
-            for domain in domains:
-                uuid = domain.UUIDString() ;
-                libvirt_status = domain.state()
-                #print libvirt_status
-                if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
-                    new_status = "ACTIVE"
-                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
-                    new_status = "PAUSED"
-                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
-                    new_status = "INACTIVE"
-                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
-                    new_status = "ERROR"
-                else:
-                    new_status = None
-                domain_dict[uuid] = new_status
-            conn.close()
-        except host_thread.lvirt_module.libvirtError as e:
-            print self.name, ": get_state() Exception '", e.get_error_message()
-            return
-
-        for server_id, current_status in self.server_status.iteritems():
-            new_status = None
-            if server_id in domain_dict:
-                new_status = domain_dict[server_id]
-            else:
-                new_status = "INACTIVE"
-                            
-            if new_status == None or new_status == current_status:
-                continue
-            if new_status == 'INACTIVE' and current_status == 'ERROR':
-                continue #keep ERROR status, because obviously this machine is not running
-            #change status
-            print self.name, ": server ", server_id, "status change from ", current_status, "to", new_status
-            STATUS={'progress':100, 'status':new_status}
-            if new_status == 'ERROR':
-                STATUS['last_error'] = 'machine has crashed'
-            self.db_lock.acquire()
-            r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
-            self.db_lock.release()
-            if r>=0:
-                self.server_status[server_id] = new_status
-                        
-    def action_on_server(self, req, last_retry=True):
-        '''Perform an action on a req
-        Attributes:
-            req: dictionary that contain:
-                server properties: 'uuid','name','tenant_id','status'
-                action: 'action'
-                host properties: 'user', 'ip_name'
-        return (error, text)  
-             0: No error. VM is updated to new state,  
-            -1: Invalid action, as trying to pause a PAUSED VM
-            -2: Error accessing host
-            -3: VM nor present
-            -4: Error at DB access
-            -5: Error while trying to perform action. VM is updated to ERROR
-        '''
-        server_id = req['uuid']
-        conn = None
-        new_status = None
-        old_status = req['status']
-        last_error = None
-        
-        if self.test:
-            if 'terminate' in req['action']:
-                new_status = 'deleted'
-            elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
-                if req['status']!='ERROR':
-                    time.sleep(5)
-                    new_status = 'INACTIVE'
-            elif 'start' in req['action']  and req['status']!='ERROR':      new_status = 'ACTIVE'
-            elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE' :     new_status = 'ACTIVE'
-            elif 'pause' in req['action']  and req['status']!='ERROR':      new_status = 'PAUSED'
-            elif 'reboot' in req['action'] and req['status']!='ERROR':     new_status = 'ACTIVE'
-            elif 'rebuild' in req['action']:
-                time.sleep(random.randint(20,150))
-                new_status = 'ACTIVE'
-            elif 'createImage' in req['action']:
-                time.sleep(5)
-                self.create_image(None, req)
-        else:
-            try:
-                conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
-                try:
-                    dom = conn.lookupByUUIDString(server_id)
-                except host_thread.lvirt_module.libvirtError as e:
-                    text = e.get_error_message()
-                    if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
-                        dom = None
-                    else:
-                        print self.name, ": action_on_server(",server_id,") libvirt exception:", text
-                        raise e
-                
-                if 'forceOff' in req['action']:
-                    if dom == None:
-                        print self.name, ": action_on_server(",server_id,") domain not running" 
-                    else:
-                        try:
-                            print self.name, ": sending DESTROY to server", server_id 
-                            dom.destroy()
-                        except Exception as e:
-                            if "domain is not running" not in e.get_error_message():
-                                print self.name, ": action_on_server(",server_id,") Exception while sending force off:", e.get_error_message()
-                                last_error =  'action_on_server Exception while destroy: ' + e.get_error_message()
-                                new_status = 'ERROR'
-                
-                elif 'terminate' in req['action']:
-                    if dom == None:
-                        print self.name, ": action_on_server(",server_id,") domain not running" 
-                        new_status = 'deleted'
-                    else:
-                        try:
-                            if req['action']['terminate'] == 'force':
-                                print self.name, ": sending DESTROY to server", server_id 
-                                dom.destroy()
-                                new_status = 'deleted'
-                            else:
-                                print self.name, ": sending SHUTDOWN to server", server_id 
-                                dom.shutdown()
-                                self.pending_terminate_server.append( (time.time()+10,server_id) )
-                        except Exception as e:
-                            print self.name, ": action_on_server(",server_id,") Exception while destroy:", e.get_error_message() 
-                            last_error =  'action_on_server Exception while destroy: ' + e.get_error_message()
-                            new_status = 'ERROR'
-                            if "domain is not running" in e.get_error_message():
-                                try:
-                                    dom.undefine()
-                                    new_status = 'deleted'
-                                except Exception:
-                                    print self.name, ": action_on_server(",server_id,") Exception while undefine:", e.get_error_message() 
-                                    last_error =  'action_on_server Exception2 while undefine:', e.get_error_message()
-                            #Exception: 'virDomainDetachDevice() failed'
-                    if new_status=='deleted':
-                        if server_id in self.server_status:
-                            del self.server_status[server_id]
-                        if req['uuid'] in self.localinfo['server_files']:
-                            for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
-                                try:
-                                    self.delete_file(file_['source file'])
-                                except Exception:
-                                    pass
-                            del self.localinfo['server_files'][ req['uuid'] ]
-                            self.localinfo_dirty = True
-
-                elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
-                    try:
-                        if dom == None:
-                            print self.name, ": action_on_server(",server_id,") domain not running"
-                        else: 
-                            dom.shutdown()
-#                        new_status = 'INACTIVE'
-                        #TODO: check status for changing at database
-                    except Exception as e:
-                        new_status = 'ERROR'
-                        print self.name, ": action_on_server(",server_id,") Exception while shutdown:", e.get_error_message() 
-                        last_error =  'action_on_server Exception while shutdown: ' + e.get_error_message()
-    
-                elif 'rebuild' in req['action']:
-                    if dom != None:
-                        dom.destroy()
-                    r = self.launch_server(conn, req, True, None)
-                    if r[0] <0:
-                        new_status = 'ERROR'
-                        last_error = r[1]
-                    else:
-                        new_status = 'ACTIVE'
-                elif 'start' in req['action']:
-                    # The instance is only create in DB but not yet at libvirt domain, needs to be create
-                    rebuild = True if req['action']['start'] == 'rebuild'  else False
-                    r = self.launch_server(conn, req, rebuild, dom)
-                    if r[0] <0:
-                        new_status = 'ERROR'
-                        last_error = r[1]
-                    else:
-                        new_status = 'ACTIVE'
-                
-                elif 'resume' in req['action']:
-                    try:
-                        if dom == None:
-                            pass
-                        else:
-                            dom.resume()
-#                            new_status = 'ACTIVE'
-                    except Exception as e:
-                        print self.name, ": action_on_server(",server_id,") Exception while resume:", e.get_error_message() 
-                    
-                elif 'pause' in req['action']:
-                    try: 
-                        if dom == None:
-                            pass
-                        else:
-                            dom.suspend()
-#                            new_status = 'PAUSED'
-                    except Exception as e:
-                        print self.name, ": action_on_server(",server_id,") Exception while pause:", e.get_error_message() 
-    
-                elif 'reboot' in req['action']:
-                    try: 
-                        if dom == None:
-                            pass
-                        else:
-                            dom.reboot()
-                        print self.name, ": action_on_server(",server_id,") reboot:" 
-                        #new_status = 'ACTIVE'
-                    except Exception as e:
-                        print self.name, ": action_on_server(",server_id,") Exception while reboot:", e.get_error_message() 
-                elif 'createImage' in req['action']:
-                    self.create_image(dom, req)
-                        
-        
-                conn.close()    
-            except host_thread.lvirt_module.libvirtError as e:
-                if conn is not None: conn.close()
-                text = e.get_error_message()
-                new_status = "ERROR"
-                last_error = text
-                print self.name, ": action_on_server(",server_id,") Exception '", text
-                if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
-                    print self.name, ": action_on_server(",server_id,") Exception removed from host"
-        #end of if self.test
-        if new_status ==  None:
-            return 1
-
-        print self.name, ": action_on_server(",server_id,") new status", new_status, last_error
-        UPDATE = {'progress':100, 'status':new_status}
-        
-        if new_status=='ERROR':
-            if not last_retry:  #if there will be another retry do not update database 
-                return -1 
-            elif 'terminate' in req['action']:
-                #PUT a log in the database
-                print self.name, ": PANIC deleting server", server_id, last_error
-                self.db_lock.acquire()
-                self.db.new_row('logs', 
-                            {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
-                             'description':'PANIC deleting server from host '+self.name+': '+last_error}
-                        )
-                self.db_lock.release()
-                if server_id in self.server_status:
-                    del self.server_status[server_id]
-                return -1
-            else:
-                UPDATE['last_error'] = last_error
-        if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
-            self.db_lock.acquire()
-            self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
-            self.server_status[server_id] = new_status
-            self.db_lock.release()
-        if new_status == 'ERROR':
-            return -1
-        return 1
-     
-    
-    def restore_iface(self, name, mac, lib_conn=None):
-        ''' make an ifdown, ifup to restore default parameter of na interface
-            Params:
-                mac: mac address of the interface
-                lib_conn: connection to the libvirt, if None a new connection is created
-            Return 0,None if ok, -1,text if fails
-        ''' 
-        conn=None
-        ret = 0
-        error_text=None
-        if self.test:
-            print self.name, ": restore_iface '%s' %s" % (name, mac)
-            return 0, None
-        try:
-            if not lib_conn:
-                conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
-            else:
-                conn = lib_conn
-                
-            #wait to the pending VM deletion
-            #TODO.Revise  self.server_forceoff(True)
-
-            iface = conn.interfaceLookupByMACString(mac)
-            iface.destroy()
-            iface.create()
-            print self.name, ": restore_iface '%s' %s" % (name, mac)
-        except host_thread.lvirt_module.libvirtError as e:
-            error_text = e.get_error_message()
-            print self.name, ": restore_iface '%s' '%s' libvirt exception: %s" %(name, mac, error_text) 
-            ret=-1
-        finally:
-            if lib_conn is None and conn is not None:
-                conn.close()
-        return ret, error_text
-
-        
    def create_image(self,dom, req):
        """Create a new image from an instance disk: copy the instance's
        source file to a destination path and record the result in the
        'images' table.
        Params:
            dom: libvirt domain of the instance (not used in this method)
            req: action request dict; reads req['action']['createImage'],
                req['uuid'] and req['new_image']['uuid']
        """
        if self.test:
            if 'path' in req['action']['createImage']:
                file_dst = req['action']['createImage']['path']
            else:
                createImage=req['action']['createImage']
                img_name= createImage['source']['path']
                index=img_name.rfind('/')
                # NOTE(review): this test branch calls get_notused_path while the
                # real branch below calls get_notused_filename -- confirm both exist
                file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
            image_status='ACTIVE'
        else:
            # one retry, only for a dropped SSH session (reconnect on first failure)
            for retry in (0,1):
                try:
                    server_id = req['uuid']
                    createImage=req['action']['createImage']
                    file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
                    if 'path' in req['action']['createImage']:
                        file_dst = req['action']['createImage']['path']
                    else:
                        img_name= createImage['source']['path']
                        index=img_name.rfind('/')
                        file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')
                          
                    self.copy_file(file_orig, file_dst)
                    qemu_info = self.qemu_get_info(file_orig)
                    if 'backing file' in qemu_info:
                        # re-point the copy at the locally-registered backing file
                        for k,v in self.localinfo['files'].items():
                            if v==qemu_info['backing file']:
                                self.qemu_change_backing(file_dst, k)
                                break
                    image_status='ACTIVE'
                    break
                except paramiko.ssh_exception.SSHException as e:
                    image_status='ERROR'
                    error_text = e.args[0]
                    print self.name, "': create_image(",server_id,") ssh Exception:", error_text
                    if "SSH session not active" in error_text and retry==0:
                        self.ssh_connect()
                except Exception as e:
                    image_status='ERROR'
                    error_text = str(e)
                    print self.name, "': create_image(",server_id,") Exception:", error_text
        
                #TODO insert a last_error at database
        # NOTE(review): file_dst may be unbound here if the copy failed before it
        # was assigned (e.g. missing 'server_files' entry) -- confirm with callers
        self.db_lock.acquire()
        self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst}, 
                {'uuid':req['new_image']['uuid']}, log=True)
        self.db_lock.release()
-  
-    def edit_iface(self, port_id, old_net, new_net):
-        #This action imply remove and insert interface to put proper parameters
-        if self.test:
-            time.sleep(1)
-        else:
-        #get iface details
-            self.db_lock.acquire()
-            r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
-                                    WHERE={'port_id': port_id})
-            self.db_lock.release()
-            if r<0:
-                print self.name, ": edit_iface(",port_id,") DDBB error:", c
-                return
-            elif r==0:
-                print self.name, ": edit_iface(",port_id,") por not found"
-                return
-            port=c[0]
-            if port["model"]!="VF":
-                print self.name, ": edit_iface(",port_id,") ERROR model must be VF"
-                return
-            #create xml detach file
-            xml=[]
-            self.xml_level = 2
-            xml.append("<interface type='hostdev' managed='yes'>")
-            xml.append("  <mac address='" +port['mac']+ "'/>")
-            xml.append("  <source>"+ self.pci2xml(port['pci'])+"\n  </source>")
-            xml.append('</interface>')                
-
-            
-            try:
-                conn=None
-                conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
-                dom = conn.lookupByUUIDString(port["instance_id"])
-                if old_net:
-                    text="\n".join(xml)
-                    print self.name, ": edit_iface detaching SRIOV interface", text
-                    dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
-                if new_net:
-                    xml[-1] ="  <vlan>   <tag id='" + str(port['vlan']) + "'/>   </vlan>"
-                    self.xml_level = 1
-                    xml.append(self.pci2xml(port.get('vpci',None)) )
-                    xml.append('</interface>')                
-                    text="\n".join(xml)
-                    print self.name, ": edit_iface attaching SRIOV interface", text
-                    dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
-                    
-            except host_thread.lvirt_module.libvirtError as e:
-                text = e.get_error_message()
-                print self.name, ": edit_iface(",port["instance_id"],") libvirt exception:", text 
-                
-            finally:
-                if conn is not None: conn.close()
-
-
-def create_server(server, db, db_lock, only_of_ports):
-    #print "server"
-    #print "server"
-    #print server
-    #print "server"
-    #print "server"
-    #try:
-#            host_id = server.get('host_id', None)
-    extended = server.get('extended', None)
-    
-#             print '----------------------'
-#             print json.dumps(extended, indent=4)
-    
-    requirements={}
-    requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
-    requirements['ram'] = server['flavor'].get('ram', 0)
-    if requirements['ram']== None:
-        requirements['ram'] = 0
-    requirements['vcpus'] = server['flavor'].get('vcpus', 0)
-    if requirements['vcpus']== None:
-        requirements['vcpus'] = 0
-    #If extended is not defined get requirements from flavor
-    if extended is None:
-        #If extended is defined in flavor convert to dictionary and use it
-        if 'extended' in server['flavor'] and  server['flavor']['extended'] != None:
-            json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
-            extended = json.loads(json_acceptable_string)
-        else:
-            extended = None
-    #print json.dumps(extended, indent=4)
-    
-    #For simplicity only one numa VM are supported in the initial implementation
-    if extended != None:
-        numas = extended.get('numas', [])
-        if len(numas)>1:
-            return (-2, "Multi-NUMA VMs are not supported yet")
-        #elif len(numas)<1:
-        #    return (-1, "At least one numa must be specified")
-    
-        #a for loop is used in order to be ready to multi-NUMA VMs
-        request = []
-        for numa in numas:
-            numa_req = {}
-            numa_req['memory'] = numa.get('memory', 0)
-            if 'cores' in numa: 
-                numa_req['proc_req_nb'] = numa['cores']                     #number of cores or threads to be reserved
-                numa_req['proc_req_type'] = 'cores'                         #indicates whether cores or threads must be reserved
-                numa_req['proc_req_list'] = numa.get('cores-id', None)      #list of ids to be assigned to the cores or threads
-            elif 'paired-threads' in numa:
-                numa_req['proc_req_nb'] = numa['paired-threads']
-                numa_req['proc_req_type'] = 'paired-threads'
-                numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
-            elif 'threads' in numa:
-                numa_req['proc_req_nb'] = numa['threads']
-                numa_req['proc_req_type'] = 'threads'
-                numa_req['proc_req_list'] = numa.get('threads-id', None)
-            else:
-                numa_req['proc_req_nb'] = 0 # by default
-                numa_req['proc_req_type'] = 'threads'
-
-            
-            
-            #Generate a list of sriov and another for physical interfaces 
-            interfaces = numa.get('interfaces', [])
-            sriov_list = []
-            port_list = []
-            for iface in interfaces:
-                iface['bandwidth'] = int(iface['bandwidth'])
-                if iface['dedicated'][:3]=='yes':
-                    port_list.append(iface)
-                else:
-                    sriov_list.append(iface)
-                    
-            #Save lists ordered from more restrictive to less bw requirements
-            numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
-            numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)
-            
-            
-            request.append(numa_req)
-                
-    #                 print "----------\n"+json.dumps(request[0], indent=4)
-    #                 print '----------\n\n'
-            
-        #Search in db for an appropriate numa for each requested numa
-        #at the moment multi-NUMA VMs are not supported
-        if len(request)>0:
-            requirements['numa'].update(request[0])
-    if requirements['numa']['memory']>0:
-        requirements['ram']=0  #By the moment I make incompatible ask for both Huge and non huge pages memory
-    elif requirements['ram']==0:
-        return (-1, "Memory information not set neither at extended field not at ram")
-    if requirements['numa']['proc_req_nb']>0:
-        requirements['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
-    elif requirements['vcpus']==0:
-        return (-1, "Processor information not set neither at extended field not at vcpus")    
-
-
-    db_lock.acquire()
-    result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
-    db_lock.release()
-    
-    if result == -1:
-        return (-1, content)
-    
-    numa_id = content['numa_id']
-    host_id = content['host_id']
-
-    #obtain threads_id and calculate pinning
-    cpu_pinning = []
-    reserved_threads=[]
-    if requirements['numa']['proc_req_nb']>0:
-        db_lock.acquire()
-        result, content = db.get_table(FROM='resources_core', 
-                                       SELECT=('id','core_id','thread_id'),
-                                       WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} )
-        db_lock.release()
-        if result <= 0:
-            print content
-            return -1, content
-    
-        #convert rows to a dictionary indexed by core_id
-        cores_dict = {}
-        for row in content:
-            if not row['core_id'] in cores_dict:
-                cores_dict[row['core_id']] = []
-            cores_dict[row['core_id']].append([row['thread_id'],row['id']]) 
-           
-        #In case full cores are requested 
-        paired = 'N'
-        if requirements['numa']['proc_req_type'] == 'cores':
-            #Get/create the list of the vcpu_ids
-            vcpu_id_list = requirements['numa']['proc_req_list']
-            if vcpu_id_list == None:
-                vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
-            
-            for threads in cores_dict.itervalues():
-                #we need full cores
-                if len(threads) != 2:
-                    continue
-                
-                #set pinning for the first thread
-                cpu_pinning.append( [ vcpu_id_list.pop(0), threads[0][0], threads[0][1] ] )
-                
-                #reserve so it is not used the second thread
-                reserved_threads.append(threads[1][1])
-                
-                if len(vcpu_id_list) == 0:
-                    break
-                
-        #In case paired threads are requested
-        elif requirements['numa']['proc_req_type'] == 'paired-threads':
-            paired = 'Y'
-            #Get/create the list of the vcpu_ids
-            if requirements['numa']['proc_req_list'] != None:
-                vcpu_id_list = []
-                for pair in requirements['numa']['proc_req_list']:
-                    if len(pair)!=2:
-                        return -1, "Field paired-threads-id not properly specified"
-                        return
-                    vcpu_id_list.append(pair[0])
-                    vcpu_id_list.append(pair[1])
-            else:
-                vcpu_id_list = range(0,2*int(requirements['numa']['proc_req_nb']))
-                
-            for threads in cores_dict.itervalues():
-                #we need full cores
-                if len(threads) != 2:
-                    continue
-                #set pinning for the first thread
-                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
-                
-                #set pinning for the second thread
-                cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
-                
-                if len(vcpu_id_list) == 0:
-                    break    
-        
-        #In case normal threads are requested
-        elif requirements['numa']['proc_req_type'] == 'threads':
-            #Get/create the list of the vcpu_ids
-            vcpu_id_list = requirements['numa']['proc_req_list']
-            if vcpu_id_list == None:
-                vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
-                                
-            for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
-                threads = cores_dict[threads_index]
-                #set pinning for the first thread
-                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
-                
-                #if exists, set pinning for the second thread
-                if len(threads) == 2 and len(vcpu_id_list) != 0:
-                    cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
-                
-                if len(vcpu_id_list) == 0:
-                    break    
-    
-        #Get the source pci addresses for the selected numa
-        used_sriov_ports = []
-        for port in requirements['numa']['sriov_list']:
-            db_lock.acquire()
-            result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
-            db_lock.release()
-            if result <= 0:
-                print content
-                return -1, content
-            for row in content:
-                if row['id'] in used_sriov_ports or row['id']==port['port_id']:
-                    continue
-                port['pci'] = row['pci']
-                if 'mac_address' not in port: 
-                    port['mac_address'] = row['mac']
-                del port['mac']
-                port['port_id']=row['id']
-                port['Mbps_used'] = port['bandwidth']
-                used_sriov_ports.append(row['id'])
-                break
-        
-        for port in requirements['numa']['port_list']:
-            port['Mbps_used'] = None
-            if port['dedicated'] != "yes:sriov":
-                port['mac_address'] = port['mac']
-                del port['mac']
-                continue
-            db_lock.acquire()
-            result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
-            db_lock.release()
-            if result <= 0:
-                print content
-                return -1, content
-            port['Mbps_used'] = content[0]['Mbps']
-            for row in content:
-                if row['id'] in used_sriov_ports or row['id']==port['port_id']:
-                    continue
-                port['pci'] = row['pci']
-                if 'mac_address' not in port: 
-                    port['mac_address'] = row['mac']  # mac cannot be set to passthrough ports 
-                del port['mac']
-                port['port_id']=row['id']
-                used_sriov_ports.append(row['id'])
-                break
-    
-    #             print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
-    #             print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
-        
-    server['host_id'] = host_id
-        
-
-    #Generate dictionary for saving in db the instance resources
-    resources = {}
-    resources['bridged-ifaces'] = []
-    
-    numa_dict = {}
-    numa_dict['interfaces'] = []
-    
-    numa_dict['interfaces'] += requirements['numa']['port_list']
-    numa_dict['interfaces'] += requirements['numa']['sriov_list']
-  
-    #Check bridge information
-    unified_dataplane_iface=[]
-    unified_dataplane_iface += requirements['numa']['port_list']
-    unified_dataplane_iface += requirements['numa']['sriov_list']
-    
-    for control_iface in server.get('networks', []):
-        control_iface['net_id']=control_iface.pop('uuid')
-        #Get the brifge name
-        db_lock.acquire()
-        result, content = db.get_table(FROM='nets',
-                                       SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp',
-                                                 'dhcp_first_ip', 'dhcp_last_ip', 'cidr'),
-                                       WHERE={'uuid': control_iface['net_id']})
-        db_lock.release()
-        if result < 0: 
-            pass
-        elif result==0:
-            return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface['net_id']
-        else:
-            network=content[0]
-            if control_iface.get("type", 'virtual') == 'virtual':
-                if network['type']!='bridge_data' and network['type']!='bridge_man':
-                    return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
-                resources['bridged-ifaces'].append(control_iface)
-                if network.get("provider") and network["provider"][0:3] == "OVS":
-                    control_iface["type"] = "instance:ovs"
-                else:
-                    control_iface["type"] = "instance:bridge"
-                if network.get("vlan"):
-                    control_iface["vlan"] = network["vlan"]
-
-                if network.get("enable_dhcp") == 'true':
-                    control_iface["enable_dhcp"] = network.get("enable_dhcp")
-                    control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
-                    control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
-                    control_iface["cidr"] = network["cidr"]
-            else:
-                if network['type']!='data' and network['type']!='ptp':
-                    return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
-                #dataplane interface, look for it in the numa tree and asign this network
-                iface_found=False
-                for dataplane_iface in numa_dict['interfaces']:
-                    if dataplane_iface['name'] == control_iface.get("name"):
-                        if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
-                            (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
-                            (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared") :
-                                return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
-                                    (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
-                        dataplane_iface['uuid'] = control_iface['net_id']
-                        if dataplane_iface['dedicated'] == "no":
-                            dataplane_iface['vlan'] = network['vlan']
-                        if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
-                            dataplane_iface['mac_address'] = control_iface.get("mac_address")
-                        if control_iface.get("vpci"):
-                            dataplane_iface['vpci'] = control_iface.get("vpci")
-                        iface_found=True
-                        break
-                if not iface_found:
-                    return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface.get("name")
-        
-    resources['host_id'] = host_id
-    resources['image_id'] = server['image_id']
-    resources['flavor_id'] = server['flavor_id']
-    resources['tenant_id'] = server['tenant_id']
-    resources['ram'] = requirements['ram']
-    resources['vcpus'] = requirements['vcpus']
-    resources['status'] = 'CREATING'
-    
-    if 'description' in server: resources['description'] = server['description']
-    if 'name' in server: resources['name'] = server['name']
-    
-    resources['extended'] = {}                          #optional
-    resources['extended']['numas'] = []
-    numa_dict['numa_id'] = numa_id
-    numa_dict['memory'] = requirements['numa']['memory']
-    numa_dict['cores'] = []
-
-    for core in cpu_pinning:
-        numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
-    for core in reserved_threads:
-        numa_dict['cores'].append({'id': core})
-    resources['extended']['numas'].append(numa_dict)
-    if extended!=None and 'devices' in extended:   #TODO allow extra devices without numa
-        resources['extended']['devices'] = extended['devices']
-    
-
-    print '===================================={'
-    print json.dumps(resources, indent=4)
-    print '====================================}'
-    
-    return 0, resources
-
diff --git a/httpserver.py b/httpserver.py
deleted file mode 100644 (file)
index edf1e8d..0000000
+++ /dev/null
@@ -1,2430 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
'''
This is the thread for the http server North API.
Two threads will be launched, with normal and administrative permissions.
'''
-
-__author__="Alfonso Tierno, Gerardo Garcia, Leonardo Mirabal"
-__date__ ="$10-jul-2014 12:07:15$"
-
-import bottle
-import urlparse
-import yaml
-import json
-import threading
-import datetime
-import hashlib
-import os
-import imp
-from netaddr import IPNetwork, IPAddress, all_matching_cidrs
#RADclass is imported only when needed (it is not used in test mode), to allow an easier installation
-from jsonschema import validate as js_v, exceptions as js_e
-import host_thread as ht
-from vim_schema import host_new_schema, host_edit_schema, tenant_new_schema, \
-    tenant_edit_schema, \
-    flavor_new_schema, flavor_update_schema, \
-    image_new_schema, image_update_schema, \
-    server_new_schema, server_action_schema, network_new_schema, network_update_schema, \
-    port_new_schema, port_update_schema, openflow_controller_schema, of_port_map_new_schema
-import ovim
-import logging
-
# Module-level shared state ('global' at module scope is a no-op statement,
# kept here as documentation of which names the functions below rebind)
global my
global url_base
global config_dic
global RADclass_module
RADclass=None  #RADclass module is charged only if not in test mode

# common prefix of every URL route served by this API
url_base="/openvim"

# HTTP status codes used in the replies of this API
HTTP_Bad_Request =          400
HTTP_Unauthorized =         401 
HTTP_Not_Found =            404 
HTTP_Forbidden =            403
HTTP_Method_Not_Allowed =   405 
HTTP_Not_Acceptable =       406
HTTP_Request_Timeout =      408
HTTP_Conflict =             409
HTTP_Service_Unavailable =  503 
HTTP_Internal_Server_Error= 500 
-
def md5(fname):
    """Return the hex MD5 digest of the file at *fname*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, "rb") as stream:
        chunk = stream.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = stream.read(4096)
    return digest.hexdigest()
-
def md5_string(fname):
    """Return the hex MD5 digest of the byte string *fname*."""
    return hashlib.md5(fname).hexdigest()
-
def check_extended(extended, allow_net_attach=False):
    '''Makes an extra checking of extended input that cannot be done using jsonschema
    Attributes: 
        allow_net_attach:  for allowing or not the uuid field at interfaces
        that are allowed for instance, but not for flavors
    Return: (<0, error_text) if error; (0,None) if not error '''
    if "numas" not in extended: return 0, None
    id_s=[]     # all core/thread identifiers over the numas; must end up covering 0..N-1 with no gaps
    numaid=0
    for numa in extended["numas"]:
        nb_formats = 0  # how many of cores/threads/paired-threads were given; only one is allowed
        if "cores" in numa:
            nb_formats += 1
            if "cores-id" in numa:
                if len(numa["cores-id"]) != numa["cores"]:
                    return -HTTP_Bad_Request, "different number of cores-id (%d) than cores (%d) at numa %d" % (len(numa["cores-id"]), numa["cores"],numaid)
                id_s.extend(numa["cores-id"])
        if "threads" in numa:
            nb_formats += 1
            if "threads-id" in numa:
                if len(numa["threads-id"]) != numa["threads"]:
                    return -HTTP_Bad_Request, "different number of threads-id (%d) than threads (%d) at numa %d" % (len(numa["threads-id"]), numa["threads"],numaid) 
                id_s.extend(numa["threads-id"])
        if "paired-threads" in numa:
            nb_formats += 1
            if "paired-threads-id" in numa:
                if len(numa["paired-threads-id"]) != numa["paired-threads"]:
                    return -HTTP_Bad_Request, "different number of paired-threads-id (%d) than paired-threads (%d) at numa %d" % (len(numa["paired-threads-id"]), numa["paired-threads"],numaid) 
                for pair in numa["paired-threads-id"]:
                    if len(pair) != 2:
                        return -HTTP_Bad_Request, "paired-threads-id must contain a list of two elements list at numa %d" % (numaid) 
                    id_s.extend(pair)
        if nb_formats > 1:
            return -HTTP_Service_Unavailable, "only one of cores, threads,  paired-threads are allowed in this version at numa %d" % numaid 
        #check interfaces: names and vpci addresses must be unique within the numa
        if "interfaces" in numa:
            ifaceid=0
            names=[]
            vpcis=[]
            for interface in numa["interfaces"]:
                if "uuid" in interface and not allow_net_attach: 
                    return -HTTP_Bad_Request, "uuid field is not allowed at numa %d interface %s position %d" % (numaid, interface.get("name",""), ifaceid )
                if "mac_address" in interface and interface["dedicated"]=="yes":
                    return -HTTP_Bad_Request, "mac_address can not be set for dedicated (passthrough) at numa %d, interface %s position %d" % (numaid, interface.get("name",""), ifaceid )
                if "name" in interface:
                    if interface["name"] in names:
                        return -HTTP_Bad_Request, "name repeated at numa %d, interface %s position %d" % (numaid, interface.get("name",""), ifaceid )
                    names.append(interface["name"])
                if "vpci" in interface:
                    if interface["vpci"] in vpcis:
                        return -HTTP_Bad_Request, "vpci %s repeated at numa %d, interface %s position %d" % (interface["vpci"], numaid, interface.get("name",""), ifaceid )
                    vpcis.append(interface["vpci"])
                ifaceid+=1
        numaid+=1
    if numaid > 1:
        return -HTTP_Service_Unavailable, "only one numa can be defined in this version " 
    # identifiers must form a contiguous range starting at 0
    for a in range(0,len(id_s)):
        if a not in id_s:
            return -HTTP_Bad_Request, "core/thread identifiers must start at 0 and gaps are not allowed. Missing id number %d" % a 
    
    return 0, None
-
#
# dictionaries that change from HTTP API to database naming
# (keys: field names used by the HTTP North API; values: the corresponding
#  database column names; applied in both directions by change_keys_http2db)
#
http2db_id={'id':'uuid'}
http2db_host={'id':'uuid'}
http2db_tenant={'id':'uuid'}
http2db_flavor={'id':'uuid','imageRef':'image_id'}
http2db_image={'id':'uuid', 'created':'created_at', 'updated':'modified_at', 'public': 'public'}
http2db_server={'id':'uuid','hostId':'host_id','flavorRef':'flavor_id','imageRef':'image_id','created':'created_at'}
http2db_network={'id':'uuid','provider:vlan':'vlan', 'provider:physical': 'provider'}
http2db_ofc = {'id': 'uuid'}
http2db_port={'id':'uuid', 'network_id':'net_id', 'mac_address':'mac', 'device_owner':'type','device_id':'instance_id','binding:switch_port':'switch_port','binding:vlan':'vlan', 'bandwidth':'Mbps'}
-
def remove_extra_items(data, schema):
    '''Recursively removes from 'data' (in place) every dictionary key that is
    not declared in the jsonschema 'schema'.
    Return: the deleted key, a nested structure of deleted keys, or None if
    nothing was removed.
    '''
    deleted=[]
    if type(data) is tuple or type(data) is list:
        for d in data:
            a= remove_extra_items(d, schema['items'])
            if a is not None: deleted.append(a)
    elif type(data) is dict:
        # iterate over a snapshot of the keys: entries are deleted while looping
        # (deleting during iteration of a dict view raises RuntimeError on Python 3)
        for k in list(data.keys()):
            if 'properties' not in schema or k not in schema['properties'].keys():
                del data[k]
                deleted.append(k)
            else:
                a = remove_extra_items(data[k], schema['properties'][k])
                if a is not None:  deleted.append({k:a})
    if len(deleted) == 0: return None
    elif len(deleted) == 1: return deleted[0]
    else: return deleted
-                
def delete_nulls(var):
    '''Recursively removes (in place) every None value and every container that
    becomes empty after the removal.
    Return: True if 'var' itself ends up empty (so the caller should delete it),
    False otherwise.
    '''
    if type(var) is dict:
        # iterate over a snapshot of the keys: entries are deleted while looping
        # (deleting during iteration of a dict view raises RuntimeError on Python 3)
        for k in list(var.keys()):
            if var[k] is None: del var[k]
            elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple: 
                if delete_nulls(var[k]): del var[k]
        if len(var) == 0: return True
    elif type(var) is list or type(var) is tuple:
        for k in var:
            if type(k) is dict: delete_nulls(k)
        if len(var) == 0: return True
    return False
-
-
class httpserver(threading.Thread):
    """Bottle-based HTTP server thread attending the openvim North API.

    Each instance registers itself in the global config_dic['http_threads']
    dictionary under its (possibly uniquified) name.
    """
    def __init__(self, ovim, name="http", host='localhost', port=8080, admin=False, config_=None):
        '''
        Creates a new thread to attend the http connections
        Attributes:
            ovim: ovim instance; its database connection is reused as self.db
            name: name of this thread
            host: ip or name where to listen
            port: port where to listen
            admin: if this has privileges of administrator or not 
            config_: must be provided for the first thread: global dictionary
                shared by all http threads, where this instance registers itself
        '''
        global url_base
        global config_dic
        
        #initialization
        if config_ is not None:
            config_dic = config_
        if 'http_threads' not in config_dic:
            config_dic['http_threads'] = {}
        threading.Thread.__init__(self)
        self.host = host
        self.port = port  
        self.db = ovim.db  #TODO OVIM remove
        self.ovim = ovim
        self.admin = admin
        # derive a unique thread name if one with this name is already registered
        if name in config_dic:
            print "httpserver Warning!!! Onether thread with the same name", name
            n=0
            while name+str(n) in config_dic:
                n +=1
            name +=str(n)
        self.name = name
        self.url_preffix = 'http://' + self.host + ':' + str(self.port) + url_base
        config_dic['http_threads'][name] = self

        #Ensure that when the main program exits the thread will also exit
        self.daemon = True      
        self.setDaemon(True)
        self.logger = logging.getLogger("openvim.http")
         
    def run(self):
        '''Thread entry point: start the (blocking) bottle server loop.'''
        bottle.run(host=self.host, port=self.port, debug=True) #quiet=True
           
    def gethost(self, host_id):
        '''Read host 'host_id' from the database and return it formatted for
        the API; aborts the bottle request on database error or if not found.'''
        result, content = self.db.get_host(host_id)
        if result < 0:
            print "httpserver.gethost error %d %s" % (result, content)
            bottle.abort(-result, content)
        elif result==0:
            print "httpserver.gethost host '%s' not found" % host_id
            bottle.abort(HTTP_Not_Found, content)
        else:
            data={'host' : content}
            # translate database values/keys back to the HTTP API naming
            convert_boolean(content, ('admin_state_up',) )
            change_keys_http2db(content, http2db_host, reverse=True)
            print data['host']
            return format_out(data)
-
@bottle.route(url_base + '/', method='GET')
def http_get():
    '''Handle GET on the API root; plain liveness reply.'''
    print 
    return 'works' #TODO: put links or redirection to /openvim???
-
#
# Util functions
#
-
def change_keys_http2db(data, http_db, reverse=False):
    '''Rename (in place) the keys of 'data' according to the 'http_db' mapping.
    This allows translating between HTTP interface names and database names.
    Attributes:
        data: a dictionary, or a list/tuple of dictionaries, to be modified
        http_db: dictionary with http names as keys and database names as values
        reverse: by default keys change from http API naming to database naming;
            if True the translation is applied the other way around
    Return: None (data is modified in place)'''
    if type(data) is tuple or type(data) is list:
        for element in data:
            change_keys_http2db(element, http_db, reverse)
    elif type(data) is dict or type(data) is bottle.FormsDict:
        for http_name, db_name in http_db.items():
            old_key, new_key = (db_name, http_name) if reverse else (http_name, db_name)
            if old_key in data:
                data[new_key] = data.pop(old_key)
-
-
-
def format_out(data):
    '''Serialize dictionary 'data' for the HTTP response, honoring the request
    Accept header: yaml when requested, json (the default) otherwise.'''
    accepted = bottle.request.headers.get('Accept')
    if 'application/yaml' not in accepted:
        # default representation: pretty-printed json
        bottle.response.content_type='application/json'
        return json.dumps(data, indent=4) + "\n"
    bottle.response.content_type='application/yaml'
    return yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
-
def format_in(schema):
    '''Parse the body of the current bottle request (json or yaml, according to
    the Content-Type header) and validate it against the jsonschema 'schema'.
    Return: the parsed content on success; otherwise the request is aborted
    with an HTTP error via bottle.abort (which raises, so there is no return).
    '''
    try:
        error_text = "Invalid header format "
        format_type = bottle.request.headers.get('Content-Type', 'application/json')
        if 'application/json' in format_type:
            error_text = "Invalid json format "
            #Use the json decoder instead of bottle decoder because it informs about the location of error formats with a ValueError exception
            client_data = json.load(bottle.request.body)
            #client_data = bottle.request.json()
        elif 'application/yaml' in format_type:
            error_text = "Invalid yaml format "
            # NOTE(review): yaml.load on a request body can construct arbitrary
            # objects; yaml.safe_load would be safer - confirm callers are trusted
            client_data = yaml.load(bottle.request.body)
        elif format_type == 'application/xml':
            bottle.abort(501, "Content-Type: application/xml not supported yet.")
        else:
            print "HTTP HEADERS: " + str(bottle.request.headers.items())
            bottle.abort(HTTP_Not_Acceptable, 'Content-Type ' + str(format_type) + ' not supported.')
            return
        #if client_data == None:
        #    bottle.abort(HTTP_Bad_Request, "Content error, empty")
        #    return
        #check needed_items

        #print "HTTP input data: ", str(client_data)
        error_text = "Invalid content "
        js_v(client_data, schema)

        return client_data
    except (ValueError, yaml.YAMLError) as exc:
        error_text += str(exc)
        print error_text 
        bottle.abort(HTTP_Bad_Request, error_text)
    except js_e.ValidationError as exc:
        # report the path inside the document where validation failed
        print "HTTP validate_in error, jsonschema exception ", exc.message, "at", exc.path
        print "  CONTENT: " + str(bottle.request.body.readlines())
        error_pos = ""
        if len(exc.path)>0: error_pos=" at '" +  ":".join(map(str, exc.path)) + "'"
        bottle.abort(HTTP_Bad_Request, error_text + error_pos+": "+exc.message)
    #except:
    #    bottle.abort(HTTP_Bad_Request, "Content error: Failed to parse Content-Type",  error_pos)
    #    raise
-
def filter_query_string(qs, http2db, allowed):
    '''Process query string (qs) checking that contains only valid tokens for avoiding SQL injection
    Attributes:
        'qs': bottle.FormsDict variable to be processed. None or empty is considered valid
        'allowed': list of allowed string tokens (API http naming). All the keys of 'qs' must be one of 'allowed'
        'http2db': dictionary with change from http API naming (dictionary key) to database naming(dictionary value)
    Return: A tuple with the (select,where,limit) to be use in a database query. All of then transformed to the database naming
        select: list of items to retrieve, filtered by query string 'field=token'. If no 'field' is present, allowed list is returned
        where: dictionary with key, value, taken from the query string token=value. Empty if nothing is provided
        limit: limit dictated by user with the query string 'limit'. 100 by default
    abort if not permitted, using bottle.abort
    '''
    where = {}
    limit = 100
    select = []
    if type(qs) is not bottle.FormsDict:
        print '!!!!!!!!!!!!!!invalid query string not a dictionary'
        # bottle.abort(HTTP_Internal_Server_Error, "call programmer")
    else:
        for k in qs:
            if k == 'field':
                # 'field' may appear several times; each occurrence selects a column to return
                select += qs.getall(k)
                for v in select:
                    if v not in allowed:
                        bottle.abort(HTTP_Bad_Request, "Invalid query string at 'field=" + v + "'")
            elif k == 'limit':
                try:
                    limit = int(qs[k])
                except:
                    bottle.abort(HTTP_Bad_Request, "Invalid query string at 'limit=" + qs[k] + "'")
            else:
                if k not in allowed:
                    bottle.abort(HTTP_Bad_Request, "Invalid query string at '" + k + "=" + qs[k] + "'")
                # the literal string "null" selects rows where the column IS NULL
                if qs[k] != "null":
                    where[k] = qs[k]
                else:
                    where[k] = None
    if len(select) == 0: select += allowed
    # change from http api to database naming
    for i in range(0, len(select)):
        k = select[i]
        if k in http2db:
            select[i] = http2db[k]
    change_keys_http2db(where, http2db)
    # print "filter_query_string", select,where,limit

    return select, where, limit
-
def convert_bandwidth(data, reverse=False):
    '''Check the field bandwidth recursively and when found, it removes units and convert to number 
    It assumes that bandwidth is well formed
    Attributes:
        'data': dictionary bottle.FormsDict variable to be checked. None or empty is considered valid
        'reverse': by default convert form str to int (Mbps), if True it convert from number to units
    Return:
        None (data is modified in place)
    '''
    if type(data) is dict:
        for k in data.keys():
            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
                convert_bandwidth(data[k], reverse)
        if "bandwidth" in data:
            try:
                value=str(data["bandwidth"])
                if not reverse:
                    # locate the "bps" suffix and normalize the value to Mbps
                    pos = value.find("bps")
                    if pos>0:
                        if value[pos-1]=="G": data["bandwidth"] =  int(data["bandwidth"][:pos-1]) * 1000   # Gbps -> Mbps
                        elif value[pos-1]=="k": data["bandwidth"]= int(data["bandwidth"][:pos-1]) / 1000   # kbps -> Mbps (truncating)
                        else: data["bandwidth"]= int(data["bandwidth"][:pos-1])                            # already Mbps
                else:
                    # integer Mbps back to a human readable string
                    value = int(data["bandwidth"])
                    if value % 1000 == 0: data["bandwidth"]=str(value/1000) + " Gbps"
                    else: data["bandwidth"]=str(value) + " Mbps"
            except:
                print "convert_bandwidth exception for type", type(data["bandwidth"]), " data", data["bandwidth"]
                return
    if type(data) is tuple or type(data) is list:
        for k in data:
            if type(k) is dict or type(k) is tuple or type(k) is list:
                convert_bandwidth(k, reverse)
-
def convert_boolean(data, items): #TODO OVIM delete
    '''Recursively walk 'data' and turn the string values "true"/"false" into real
    booleans for every key listed in 'items'. Other values are left untouched.
    Attributes:
        'data': dictionary bottle.FormsDict variable to be checked. None or empty is considered valid
        'items': tuple of keys to convert
    Return:
        None
    '''
    if type(data) is dict:
        for key in data.keys():
            child = data[key]
            if type(child) is dict or type(child) is tuple or type(child) is list:
                convert_boolean(child, items)
            if key in items and type(data[key]) is str:
                if data[key] == "false":
                    data[key] = False
                elif data[key] == "true":
                    data[key] = True
    if type(data) is tuple or type(data) is list:
        for element in data:
            if type(element) is dict or type(element) is tuple or type(element) is list:
                convert_boolean(element, items)
-
def convert_datetime2str(var):
    '''Converts every datetime value found in 'var' to a '%Y-%m-%dT%H:%M:%S' string,
    recursing into nested dicts, lists and tuples.
    Returns True when 'var' is an (already) empty dict; otherwise returns None.
    '''
    if type(var) is dict:
        for key, value in var.items():
            if type(value) is datetime.datetime:
                var[key] = value.strftime('%Y-%m-%dT%H:%M:%S')
            elif type(value) is dict or type(value) is list or type(value) is tuple:
                convert_datetime2str(value)
        if len(var) == 0:
            return True
    elif type(var) is list or type(var) is tuple:
        for item in var:
            convert_datetime2str(item)
-
def check_valid_tenant(my, tenant_id):
    '''Validate that 'tenant_id' can be used by this client.
    Returns (0, None) on success, or (http_error_code, error_text) otherwise.
    '''
    if tenant_id == 'any':
        # the 'any' wildcard is reserved for administrators
        if not my.admin:
            return HTTP_Unauthorized, "Needed admin privileges"
        return 0, None
    result, _ = my.db.get_table(FROM='tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id})
    if result <= 0:
        return HTTP_Not_Found, "tenant '%s' not found" % tenant_id
    return 0, None
-
def is_url(url):
    '''
    Check if string value is a well-formed url
    :param url: string url
    :return: True if is a valid url, False if is not well-formed
    '''
    # BUGFIX: the previous version returned the ParseResult object itself, which
    # is always truthy, so every string (even "") was reported as a valid url.
    # A minimally well-formed absolute url must have a scheme and a netloc.
    parsed_url = urlparse.urlparse(url)
    return bool(parsed_url.scheme and parsed_url.netloc)
-
-
@bottle.error(400)
@bottle.error(401)
@bottle.error(404)
@bottle.error(403)
@bottle.error(405)
@bottle.error(406)
@bottle.error(408)
@bottle.error(409)
@bottle.error(503)
@bottle.error(500)
def error400(error):
    '''Render every registered bottle HTTP error as the standard error envelope.'''
    envelope = {
        "error": {
            "code": error.status_code,
            "type": error.status,
            "description": error.body,
        }
    }
    return format_out(envelope)
-
@bottle.hook('after_request')
def enable_cors():
    '''Add a permissive CORS header to every HTTP response.'''
    #TODO: Alf: Is it needed??
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
-
-#
-# HOSTS
-#
-
@bottle.route(url_base + '/hosts', method='GET')
def http_get_hosts():
    '''HTTP GET /hosts: return the host list formatted as json/yaml.'''
    hosts = get_hosts()
    return format_out(hosts)
-
-
def get_hosts():
    '''Read the host list from the DB, honouring the query-string filters of the
    current bottle request, and return it as {'hosts': [...]}.
    Aborts the HTTP request on DB error.
    '''
    select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_host,
                                                  ('id', 'name', 'description', 'status', 'admin_state_up', 'ip_name'))
    
    myself = config_dic['http_threads'][ threading.current_thread().name ]
    result, content = myself.db.get_table(FROM='hosts', SELECT=select_, WHERE=where_, LIMIT=limit_)
    if result < 0:
        print "http_get_hosts Error", content
        bottle.abort(-result, content)
    else:
        # normalize DB values to the HTTP API representation
        convert_boolean(content, ('admin_state_up',) )
        change_keys_http2db(content, http2db_host, reverse=True)
        for row in content:
            # bookmark link to the host detail resource
            row['links'] = ( {'href': myself.url_preffix + '/hosts/' + str(row['id']), 'rel': 'bookmark'}, )
        data={'hosts' : content}
        return data
-
@bottle.route(url_base + '/hosts/<host_id>', method='GET')
def http_get_host_id(host_id):
    '''HTTP GET /hosts/<host_id>: return the detail of a single host.'''
    this_thread = config_dic['http_threads'][threading.current_thread().name]
    return this_thread.gethost(host_id)
-
-@bottle.route(url_base + '/hosts', method='POST')
-def http_post_hosts():
-    '''insert a host into the database. All resources are got and inserted'''
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check permissions
-    if not my.admin:
-        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
-    
-    #parse input data
-    http_content = format_in( host_new_schema )
-    r = remove_extra_items(http_content, host_new_schema)
-    if r is not None: print "http_post_host_id: Warning: remove extra items ", r
-    change_keys_http2db(http_content['host'], http2db_host)
-
-    host = http_content['host']
-    warning_text=""
-    if 'host-data' in http_content:
-        host.update(http_content['host-data'])
-        ip_name=http_content['host-data']['ip_name']
-        user=http_content['host-data']['user']
-        password=http_content['host-data'].get('password', None)
-    else:
-        ip_name=host['ip_name']
-        user=host['user']
-        password=host.get('password', None)
-        if not RADclass_module:
-            try:
-                RADclass_module = imp.find_module("RADclass")
-            except (IOError, ImportError) as e:
-                raise ImportError("Cannot import RADclass.py Openvim not properly installed" +str(e))
-
-        #fill rad info
-        rad = RADclass_module.RADclass()
-        (return_status, code) = rad.obtain_RAD(user, password, ip_name)
-        
-        #return 
-        if not return_status:
-            print 'http_post_hosts ERROR obtaining RAD', code
-            bottle.abort(HTTP_Bad_Request, code)
-            return
-        warning_text=code
-        rad_structure = yaml.load(rad.to_text())
-        print 'rad_structure\n---------------------'
-        print json.dumps(rad_structure, indent=4)
-        print '---------------------'
-        #return
-        WHERE_={"family":rad_structure['processor']['family'], 'manufacturer':rad_structure['processor']['manufacturer'], 'version':rad_structure['processor']['version']} 
-        result, content = my.db.get_table(FROM='host_ranking', 
-                    SELECT=('ranking',),
-                    WHERE=WHERE_)
-        if result > 0:
-            host['ranking'] = content[0]['ranking']
-        else:
-            #error_text= "Host " + str(WHERE_)+ " not found in ranking table. Not valid for VIM management"
-            #bottle.abort(HTTP_Bad_Request, error_text)
-            #return
-            warning_text += "Host " + str(WHERE_)+ " not found in ranking table. Assuming lowest value 100\n"
-            host['ranking'] = 100 #TODO: as not used in this version, set the lowest value
-    
-        features = rad_structure['processor'].get('features', ())
-        host['features'] = ",".join(features)
-        host['numas'] = [] 
-        
-        for node in (rad_structure['resource topology']['nodes'] or {}).itervalues():
-            interfaces= []
-            cores = []
-            eligible_cores=[]
-            count = 0
-            for core in node['cpu']['eligible_cores']:
-                eligible_cores.extend(core)
-            for core in node['cpu']['cores']:
-                for thread_id in core:
-                    c={'core_id': count, 'thread_id': thread_id}
-                    if thread_id not in eligible_cores: c['status'] = 'noteligible'
-                    cores.append(c)
-                count = count+1 
-
-            if 'nics' in node:    
-                for port_k, port_v in node['nics']['nic 0']['ports'].iteritems():
-                    if port_v['virtual']:
-                        continue
-                    else:
-                        sriovs = []
-                        for port_k2, port_v2 in node['nics']['nic 0']['ports'].iteritems():
-                            if port_v2['virtual'] and port_v2['PF_pci_id']==port_k:
-                                sriovs.append({'pci':port_k2, 'mac':port_v2['mac'], 'source_name':port_v2['source_name']})
-                        if len(sriovs)>0:
-                            #sort sriov according to pci and rename them to the vf number
-                            new_sriovs = sorted(sriovs, key=lambda k: k['pci'])
-                            index=0 
-                            for sriov in new_sriovs:
-                                sriov['source_name'] = index
-                                index += 1
-                            interfaces.append  ({'pci':str(port_k), 'Mbps': port_v['speed']/1000000, 'sriovs': new_sriovs, 'mac':port_v['mac'], 'source_name':port_v['source_name']})
-            memory=node['memory']['node_size'] / (1024*1024*1024)
-            #memory=get_next_2pow(node['memory']['hugepage_nr'])
-            host['numas'].append( {'numa_socket': node['id'], 'hugepages': node['memory']['hugepage_nr'], 'memory':memory, 'interfaces': interfaces, 'cores': cores } )
-    print json.dumps(host, indent=4)
-    #return
-    #
-    #insert in data base
-    result, content = my.db.new_host(host)
-    if result >= 0:
-        if content['admin_state_up']:
-            #create thread
-            host_test_mode = True if config_dic['mode']=='test' or config_dic['mode']=="OF only" else False
-            host_develop_mode = True if config_dic['mode']=='development' else False
-            host_develop_bridge_iface = config_dic.get('development_bridge', None)
-            thread = ht.host_thread(name=host.get('name',ip_name), user=user, host=ip_name, db=config_dic['db'], db_lock=config_dic['db_lock'], 
-                test=host_test_mode, image_path=config_dic['image_path'],
-                version=config_dic['version'], host_id=content['uuid'],
-                develop_mode=host_develop_mode, develop_bridge_iface=host_develop_bridge_iface   )
-            thread.start()
-            config_dic['host_threads'][ content['uuid'] ] = thread
-
-            if config_dic['network_type'] == 'ovs':
-                # create bridge
-                create_dhcp_ovs_bridge()
-                config_dic['host_threads'][content['uuid']].insert_task("new-ovsbridge")
-                # check if more host exist
-                create_vxlan_mesh(content['uuid'])
-
-        #return host data
-        change_keys_http2db(content, http2db_host, reverse=True)
-        if len(warning_text)>0:
-            content["warning"]= warning_text
-        data={'host' : content}
-        return format_out(data)
-    else:
-        bottle.abort(HTTP_Bad_Request, content)
-        return
-
-
def delete_dhcp_ovs_bridge(vlan, net_uuid):
    """
    Delete bridges and port created during dhcp launching at openvim controller
    :param vlan: net vlan id
    :param net_uuid: network identifier
    :return:
    """
    dhcp_path = config_dic['ovs_controller_file_path']
    controller_thread = config_dic['http_threads'][threading.current_thread().name]
    dhcp_server = controller_thread.ovim.get_dhcp_controller()
    # remove first the port, then the per-net dhcp server artifacts
    dhcp_server.delete_dhcp_port(vlan, net_uuid)
    dhcp_server.delete_dhcp_server(vlan, net_uuid, dhcp_path)
-
-
def create_dhcp_ovs_bridge():
    """
    Initialize the ovs bridge that hosts the dhcp server at the openvim controller
    :return:
    """
    controller_thread = config_dic['http_threads'][threading.current_thread().name]
    dhcp_server = controller_thread.ovim.get_dhcp_controller()
    dhcp_server.create_ovs_bridge()
-
-
def set_mac_dhcp(vm_ip, vlan, first_ip, last_ip, cidr, mac):
    """
    Register a mac/ip pair in the dnsmasq dhcp server attached to the net (by vlan id)
    across the openvim computes.
    :param vm_ip: IP address asigned to a VM
    :param vlan: Segmentation id
    :param first_ip: First dhcp range ip
    :param last_ip: Last dhcp range ip
    :param cidr: net cidr
    :param mac: VM vnic mac to be macthed with the IP received
    """
    if not vm_ip:
        return
    net = IPNetwork(cidr)
    dhcp_netmask = str(net.netmask)
    dhcp_path = config_dic['ovs_controller_file_path']

    # only keep the ip if it falls inside the dhcp range subnet
    range_cidr = [first_ip + '/' + str(net.prefixlen)]
    if not all_matching_cidrs(vm_ip, range_cidr):
        vm_ip = None

    controller_thread = config_dic['http_threads'][threading.current_thread().name]
    dhcp_server = controller_thread.ovim.get_dhcp_controller()
    dhcp_server.set_mac_dhcp_server(vm_ip, mac, vlan, dhcp_netmask, dhcp_path)
-
-
def delete_mac_dhcp(vm_ip, vlan, mac):
    """
    Delete into dhcp conf file the ip  assigned to a specific MAC address
    :param vm_ip: IP address asigned to a VM
    :param vlan: Segmentation id
    :param mac:  VM vnic mac to be macthed with the IP received
    :return:
    """
    dhcp_path = config_dic['ovs_controller_file_path']
    controller_thread = config_dic['http_threads'][threading.current_thread().name]
    dhcp_server = controller_thread.ovim.get_dhcp_controller()
    dhcp_server.delete_mac_dhcp_server(vm_ip, mac, vlan, dhcp_path)
-
-
def create_vxlan_mesh(host_id):
    """
    Create vxlan mesh across all openvimc controller and computes.
    :param host_id: host identifier (currently unused: the mesh is rebuilt from
                    the full host list returned by get_hosts())
    :return:
    """
    dhcp_compute_name = get_vxlan_interface("dhcp")
    existing_hosts = get_hosts()
    if len(existing_hosts['hosts']) > 0:
        # vlxan mesh creation between openvim controller and computes
        computes_available = existing_hosts['hosts']

        http_controller = config_dic['http_threads'][threading.current_thread().name]
        dhcp_controller = http_controller.ovim.get_dhcp_controller()

        for compute in computes_available:
            vxlan_interface_name = get_vxlan_interface(compute['id'][:8])
            config_dic['host_threads'][compute['id']].insert_task("new-vxlan", dhcp_compute_name, dhcp_controller.host)
            dhcp_controller.create_ovs_vxlan_tunnel(vxlan_interface_name, compute['ip_name'])

        # vlxan mesh creation between openvim computes
        for count, compute_owner in enumerate(computes_available):
            for compute in computes_available:
                if compute_owner['id'] == compute['id']:
                    pass
                else:
                    vxlan_interface_name = get_vxlan_interface(compute_owner['id'][:8])
                    # NOTE(review): this creates the tunnel on the CONTROLLER towards
                    # compute_owner for every peer pair (same call repeated); it looks
                    # like it was meant to run on 'compute' instead -- confirm intent.
                    dhcp_controller.create_ovs_vxlan_tunnel(vxlan_interface_name, compute_owner['ip_name'])
                    config_dic['host_threads'][compute['id']].insert_task("new-vxlan",
                                                                          vxlan_interface_name,
                                                                          compute_owner['ip_name'])
-
-
def delete_vxlan_mesh(host_id):
    """
    Create a task for remove a specific compute of the vlxan mesh
    :param host_id: host id to be deleted.
    """
    existing_hosts = get_hosts()
    computes_available = existing_hosts['hosts']
    #
    vxlan_interface_name = get_vxlan_interface(host_id[:8])

    http_controller = config_dic['http_threads'][threading.current_thread().name]
    dhcp_host = http_controller.ovim.get_dhcp_controller()

    dhcp_host.delete_ovs_vxlan_tunnel(vxlan_interface_name)
    # remove bridge from openvim controller if no more computes exist
    # NOTE(review): 'existing_hosts' is the dict {'hosts': [...]}, so len() is
    # always 1 and this branch always runs; it presumably should test
    # len(computes_available) (or its emptiness) -- confirm intent.
    if len(existing_hosts):
        dhcp_host.delete_ovs_bridge()
    # Remove vxlan mesh
    for compute in computes_available:
        if host_id == compute['id']:
            pass
        else:
            # NOTE(review): this deletes the same controller-side tunnel (the one
            # named after host_id) once per remaining compute; a per-compute
            # interface name was probably intended -- confirm.
            dhcp_host.delete_ovs_vxlan_tunnel(vxlan_interface_name)
            config_dic['host_threads'][compute['id']].insert_task("del-vxlan", vxlan_interface_name)
-
-
def get_vxlan_interface(local_uuid):
    """
    Generate a vxlan interface name from a host identifier.
    :param local_uuid: host id
    :return: 'vxlan-' followed by at most the first 8 characters of the id
    """
    return "vxlan-{}".format(local_uuid[:8])
-
-
@bottle.route(url_base + '/hosts/<host_id>', method='PUT')
def http_put_host_id(host_id):
    '''modify a host into the database. All resources are got and inserted'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check permissions
    if not my.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
    
    #parse input data
    http_content = format_in( host_edit_schema )
    r = remove_extra_items(http_content, host_edit_schema)
    # NOTE(review): the log label says 'http_post_host_id' but this is the PUT handler
    if r is not None: print "http_post_host_id: Warning: remove extra items ", r
    change_keys_http2db(http_content['host'], http2db_host)

    #insert in data base
    result, content = my.db.edit_host(host_id, http_content['host'])
    if result >= 0:
        convert_boolean(content, ('admin_state_up',) )
        change_keys_http2db(content, http2db_host, reverse=True)
        data={'host' : content}

        if config_dic['network_type'] == 'ovs':
            # tear down the ovs vxlan mesh/bridge before reloading the host thread
            delete_vxlan_mesh(host_id)
            config_dic['host_threads'][host_id].insert_task("del-ovsbridge")

        #reload thread with the (possibly new) connection data
        config_dic['host_threads'][host_id].name = content.get('name',content['ip_name'])
        config_dic['host_threads'][host_id].user = content['user']
        config_dic['host_threads'][host_id].host = content['ip_name']
        config_dic['host_threads'][host_id].insert_task("reload")

        if config_dic['network_type'] == 'ovs':
            # create mesh with new host data
            config_dic['host_threads'][host_id].insert_task("new-ovsbridge")
            create_vxlan_mesh(host_id)

        #print data
        return format_out(data)
    else:
        bottle.abort(HTTP_Bad_Request, content)
        return
-
-
-
@bottle.route(url_base + '/hosts/<host_id>', method='DELETE')
def http_delete_host_id(host_id):
    '''delete a host from the database, tearing down its thread and ovs artifacts (admin only)'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check permissions
    if not my.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
    result, content = my.db.delete_row('hosts', host_id)
    if result == 0:
        bottle.abort(HTTP_Not_Found, content)
    elif result > 0:
        if config_dic['network_type'] == 'ovs':
            # remove this host from the vxlan mesh
            delete_vxlan_mesh(host_id)
        # terminate thread
        if host_id in config_dic['host_threads']:
            if config_dic['network_type'] == 'ovs':
                config_dic['host_threads'][host_id].insert_task("del-ovsbridge")
            config_dic['host_threads'][host_id].insert_task("exit")
        #return data
        data={'result' : content}
        return format_out(data)
    else:
        print "http_delete_host_id error",result, content
        bottle.abort(-result, content)
        return
-#
-# TENANTS
-#
-
-
@bottle.route(url_base + '/tenants', method='GET')
def http_get_tenants():
    """
    List tenants from the DB, honouring the request query-string filters.
    :return: formatted {'tenants': [...]} payload
    """
    my = config_dic['http_threads'][threading.current_thread().name]
    try:
        allowed_fields = ('id', 'name', 'description', 'enabled')
        select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_tenant, allowed_fields)
        tenants = my.ovim.get_tenants(select_, where_)
        delete_nulls(tenants)
        change_keys_http2db(tenants, http2db_tenant, reverse=True)
        return format_out({'tenants': tenants})
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
-
-
@bottle.route(url_base + '/tenants/<tenant_id>', method='GET')
def http_get_tenant_id(tenant_id):
    """
    Fetch a single tenant from the DB by its id.
    :param tenant_id: tenant id
    :return: formatted {'tenant': {...}} payload
    """
    my = config_dic['http_threads'][threading.current_thread().name]
    try:
        tenant = my.ovim.show_tenant_id(tenant_id)
        delete_nulls(tenant)
        change_keys_http2db(tenant, http2db_tenant, reverse=True)
        return format_out({'tenant': tenant})
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
-
-
@bottle.route(url_base + '/tenants', method='POST')
def http_post_tenants():
    """
    Create a tenant from the request body and return the stored record.
    :return: formatted {'tenant': {...}} payload
    """
    my = config_dic['http_threads'][threading.current_thread().name]
    try:
        http_content = format_in(tenant_new_schema)
        extra = remove_extra_items(http_content, tenant_new_schema)
        if extra is not None:
            my.logger.error("http_post_tenants: Warning: remove extra items " + str(extra), exc_info=True)
        # insert in data base
        tenant_id = my.ovim.new_tentant(http_content['tenant'])
        tenant = my.ovim.show_tenant_id(tenant_id)
        change_keys_http2db(tenant, http2db_tenant, reverse=True)
        delete_nulls(tenant)
        return format_out({'tenant': tenant})
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
-
-    
@bottle.route(url_base + '/tenants/<tenant_id>', method='PUT')
def http_put_tenant_id(tenant_id):
    """
    Update a tenant into DB.
    :param tenant_id: tenant id
    :return: formatted {'tenant': {...}} payload with the updated record
    """

    my = config_dic['http_threads'][threading.current_thread().name]
    try:
        # parse input data
        http_content = format_in(tenant_edit_schema)
        r = remove_extra_items(http_content, tenant_edit_schema)
        if r is not None:
            # consistency fix: log through the thread logger like the other tenant
            # handlers do, instead of printing to stdout
            my.logger.error("http_put_tenant_id: Warning: remove extra items " + str(r), exc_info=True)
        change_keys_http2db(http_content['tenant'], http2db_tenant)
        # insert in data base
        my.ovim.edit_tenant(tenant_id, http_content['tenant'])
        tenant = my.ovim.show_tenant_id(tenant_id)
        change_keys_http2db(tenant, http2db_tenant, reverse=True)
        # NOTE(review): sibling handlers also call delete_nulls(tenant) before
        # returning -- confirm whether null fields should be stripped here too.
        data = {'tenant': tenant}
        return format_out(data)
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
-
-
@bottle.route(url_base + '/tenants/<tenant_id>', method='DELETE')
def http_delete_tenant_id(tenant_id):
    """
    Remove a tenant from the database and return the deletion result.
    :param tenant_id: tenant id
    :return: formatted {'result': ...} payload
    """
    my = config_dic['http_threads'][threading.current_thread().name]
    try:
        deletion_result = my.ovim.delete_tentant(tenant_id)
        return format_out({'result': deletion_result})
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
-#
-# FLAVORS
-#
-
-
@bottle.route(url_base + '/<tenant_id>/flavors', method='GET')
def http_get_flavors(tenant_id):
    '''list the flavors attached to a tenant; tenant_id 'any' (admin only) lists every flavor'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #obtain data
    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_flavor,
            ('id','name','description','public') )
    if tenant_id=='any':
        from_  ='flavors'
    else:
        # restrict to the flavors attached to this tenant
        from_  ='tenants_flavors inner join flavors on tenants_flavors.flavor_id=flavors.uuid'
        where_['tenant_id'] = tenant_id
    result, content = my.db.get_table(FROM=from_, SELECT=select_, WHERE=where_, LIMIT=limit_)
    if result < 0:
        print "http_get_flavors Error", content
        bottle.abort(-result, content)
    else:
        change_keys_http2db(content, http2db_flavor, reverse=True)
        for row in content:
            # bookmark link to the flavor detail resource
            row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'flavors', str(row['id']) ) ), 'rel':'bookmark' } ]
        data={'flavors' : content}
        return format_out(data)
-
@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='GET')
def http_get_flavor_id(tenant_id, flavor_id):
    '''return the detail of one flavor of a tenant (or of any tenant when tenant_id is 'any')'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #obtain data
    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_flavor,
            ('id','name','description','ram', 'vcpus', 'extended', 'disk', 'public') )
    if tenant_id=='any':
        from_  ='flavors'
    else:
        from_  ='tenants_flavors as tf inner join flavors as f on tf.flavor_id=f.uuid'
        where_['tenant_id'] = tenant_id
    where_['uuid'] = flavor_id
    result, content = my.db.get_table(SELECT=select_, FROM=from_, WHERE=where_, LIMIT=limit_)

    if result < 0:
        print "http_get_flavor_id error %d %s" % (result, content)
        bottle.abort(-result, content)
    elif result==0:
        print "http_get_flavors_id flavor '%s' not found" % str(flavor_id)
        bottle.abort(HTTP_Not_Found, 'flavor %s not found' % flavor_id)
    else:
        change_keys_http2db(content, http2db_flavor, reverse=True)
        # the 'extended' column is stored as a json string; decode it for the response
        if 'extended' in content[0] and content[0]['extended'] is not None:
            extended = json.loads(content[0]['extended'])
            if 'devices' in extended: 
                change_keys_http2db(extended['devices'], http2db_flavor, reverse=True)
            content[0]['extended']=extended
        convert_bandwidth(content[0], reverse=True)
        content[0]['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'flavors', str(content[0]['id']) ) ), 'rel':'bookmark' } ]
        data={'flavor' : content[0]}
        #data['tenants_links'] = dict([('tenant', row['id']) for row in content])
        return format_out(data)
-
-
-@bottle.route(url_base + '/<tenant_id>/flavors', method='POST')
-def http_post_flavors(tenant_id):
-    '''insert a flavor into the database, and attach to tenant.'''
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check valid tenant_id
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-    http_content = format_in( flavor_new_schema )
-    r = remove_extra_items(http_content, flavor_new_schema)
-    if r is not None: print "http_post_flavors: Warning: remove extra items ", r
-    change_keys_http2db(http_content['flavor'], http2db_flavor)
-    extended_dict = http_content['flavor'].pop('extended', None)
-    if extended_dict is not None: 
-        result, content = check_extended(extended_dict)
-        if result<0:
-            print "http_post_flavors wrong input extended error %d %s" % (result, content)
-            bottle.abort(-result, content)
-            return
-        convert_bandwidth(extended_dict)
-        if 'devices' in extended_dict: change_keys_http2db(extended_dict['devices'], http2db_flavor)
-        http_content['flavor']['extended'] = json.dumps(extended_dict)
-    #insert in data base
-    result, content = my.db.new_flavor(http_content['flavor'], tenant_id)
-    if result >= 0:
-        return http_get_flavor_id(tenant_id, content)
-    else:
-        print "http_psot_flavors error %d %s" % (result, content)
-        bottle.abort(-result, content)
-        return
-    
-@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='DELETE')
-def http_delete_flavor_id(tenant_id, flavor_id):
-    '''Deletes the flavor_id of a tenant. IT removes from tenants_flavors table.'''
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check valid tenant_id
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-        return
-    result, content = my.db.delete_image_flavor('flavor', flavor_id, tenant_id)
-    if result == 0:
-        bottle.abort(HTTP_Not_Found, content)
-    elif result >0:
-        data={'result' : content}
-        return format_out(data)
-    else:
-        print "http_delete_flavor_id error",result, content
-        bottle.abort(-result, content)
-        return
-
@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>/<action>', method='POST')
def http_attach_detach_flavors(tenant_id, flavor_id, action):
    '''attach/detach an existing flavor in this tenant. That is insert/remove at tenants_flavors table.'''
    #TODO alf:  not tested at all!!!
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    if tenant_id=='any':
        bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command")
    #check valid action
    if action!='attach' and action != 'detach':
        bottle.abort(HTTP_Method_Not_Allowed, "actions can be attach or detach")
        return

    #Ensure that flavor exist 
    # right join: the flavor row appears even when it is attached to no tenant
    from_  ='tenants_flavors as tf right join flavors as f on tf.flavor_id=f.uuid'
    where_={'uuid': flavor_id}
    result, content = my.db.get_table(SELECT=('public','tenant_id'), FROM=from_, WHERE=where_)
    if result==0:
        if action=='attach':
            text_error="Flavor '%s' not found" % flavor_id
        else:
            text_error="Flavor '%s' not found for tenant '%s'" % (flavor_id, tenant_id)
        bottle.abort(HTTP_Not_Found, text_error)
        return
    elif result>0:
        flavor=content[0]
        if action=='attach':
            # refuse double attach; only public flavors can be attached by non-admins
            if flavor['tenant_id']!=None:
                bottle.abort(HTTP_Conflict, "Flavor '%s' already attached to tenant '%s'" % (flavor_id, tenant_id))
            if flavor['public']=='no' and not my.admin:
                #allow only attaching public flavors
                bottle.abort(HTTP_Unauthorized, "Needed admin rights to attach a private flavor")
                return
            #insert in data base
            result, content = my.db.new_row('tenants_flavors', {'flavor_id':flavor_id, 'tenant_id': tenant_id})
            if result >= 0:
                return http_get_flavor_id(tenant_id, flavor_id)
        else: #detach
            if flavor['tenant_id']==None:
                bottle.abort(HTTP_Not_Found, "Flavor '%s' not attached to tenant '%s'" % (flavor_id, tenant_id))
            result, content = my.db.delete_row_by_dict(FROM='tenants_flavors', WHERE={'flavor_id':flavor_id, 'tenant_id':tenant_id})
            if result>=0:
                if flavor['public']=='no':
                    #try to delete the flavor completely to avoid orphan flavors, IGNORE error
                    my.db.delete_row_by_dict(FROM='flavors', WHERE={'uuid':flavor_id})
                data={'result' : "flavor detached"}
                return format_out(data)
    
    #if get here is because an error
    print "http_attach_detach_flavors error %d %s" % (result, content)
    bottle.abort(-result, content)
    return
-
-@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='PUT')
-def http_put_flavor_id(tenant_id, flavor_id):
-    '''update a flavor_id into the database.'''
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check valid tenant_id
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-    #parse input data
-    http_content = format_in( flavor_update_schema )
-    r = remove_extra_items(http_content, flavor_update_schema)
-    if r is not None: print "http_put_flavor_id: Warning: remove extra items ", r
-    change_keys_http2db(http_content['flavor'], http2db_flavor)
-    extended_dict = http_content['flavor'].pop('extended', None)
-    if extended_dict is not None: 
-        result, content = check_extended(extended_dict)
-        if result<0:
-            print "http_put_flavor_id wrong input extended error %d %s" % (result, content)
-            bottle.abort(-result, content)
-            return
-        convert_bandwidth(extended_dict)
-        if 'devices' in extended_dict: change_keys_http2db(extended_dict['devices'], http2db_flavor)
-        http_content['flavor']['extended'] = json.dumps(extended_dict)
-    #Ensure that flavor exist 
-    where_={'uuid': flavor_id}
-    if tenant_id=='any':
-        from_  ='flavors'
-    else:
-        from_  ='tenants_flavors as ti inner join flavors as i on ti.flavor_id=i.uuid'
-        where_['tenant_id'] = tenant_id
-    result, content = my.db.get_table(SELECT=('public',), FROM=from_, WHERE=where_)
-    if result==0:
-        text_error="Flavor '%s' not found" % flavor_id
-        if tenant_id!='any':
-            text_error +=" for tenant '%s'" % flavor_id
-        bottle.abort(HTTP_Not_Found, text_error)
-        return
-    elif result>0:
-        if content[0]['public']=='yes' and not my.admin:
-            #allow only modifications over private flavors
-            bottle.abort(HTTP_Unauthorized, "Needed admin rights to edit a public flavor")
-            return
-        #insert in data base
-        result, content = my.db.update_rows('flavors', http_content['flavor'], {'uuid': flavor_id})
-
-    if result < 0:
-        print "http_put_flavor_id error %d %s" % (result, content)
-        bottle.abort(-result, content)
-        return
-    else:
-        return http_get_flavor_id(tenant_id, flavor_id)
-
-
-
-#
-# IMAGES
-#
-
-@bottle.route(url_base + '/<tenant_id>/images', method='GET')
-def http_get_images(tenant_id):
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check valid tenant_id
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-    #obtain data
-    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_image,
-            ('id','name','checksum','description','path','public') )
-    if tenant_id=='any':
-        from_  ='images'
-        where_or_ = None
-    else:
-        from_  ='tenants_images right join images on tenants_images.image_id=images.uuid'
-        where_or_ = {'tenant_id': tenant_id, 'public': 'yes'}
-    result, content = my.db.get_table(SELECT=select_, DISTINCT=True, FROM=from_, WHERE=where_, WHERE_OR=where_or_, WHERE_AND_OR="AND", LIMIT=limit_)
-    if result < 0:
-        print "http_get_images Error", content
-        bottle.abort(-result, content)
-    else:
-        change_keys_http2db(content, http2db_image, reverse=True)
-        #for row in content: row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'images', str(row['id']) ) ), 'rel':'bookmark' } ]
-        data={'images' : content}
-        return format_out(data)
-
-@bottle.route(url_base + '/<tenant_id>/images/<image_id>', method='GET')
-def http_get_image_id(tenant_id, image_id):
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check valid tenant_id
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-    #obtain data
-    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_image,
-            ('id','name','checksum','description','progress', 'status','path', 'created', 'updated','public') )
-    if tenant_id=='any':
-        from_  ='images'
-        where_or_ = None
-    else:
-        from_  ='tenants_images as ti right join images as i on ti.image_id=i.uuid'
-        where_or_ = {'tenant_id': tenant_id, 'public': "yes"}
-    where_['uuid'] = image_id
-    result, content = my.db.get_table(SELECT=select_, DISTINCT=True, FROM=from_, WHERE=where_, WHERE_OR=where_or_, WHERE_AND_OR="AND", LIMIT=limit_)
-
-    if result < 0:
-        print "http_get_images error %d %s" % (result, content)
-        bottle.abort(-result, content)
-    elif result==0:
-        print "http_get_images image '%s' not found" % str(image_id)
-        bottle.abort(HTTP_Not_Found, 'image %s not found' % image_id)
-    else:
-        convert_datetime2str(content)
-        change_keys_http2db(content, http2db_image, reverse=True)
-        if 'metadata' in content[0] and content[0]['metadata'] is not None:
-            metadata = json.loads(content[0]['metadata'])
-            content[0]['metadata']=metadata
-        content[0]['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'images', str(content[0]['id']) ) ), 'rel':'bookmark' } ]
-        data={'image' : content[0]}
-        #data['tenants_links'] = dict([('tenant', row['id']) for row in content])
-        return format_out(data)
-
-@bottle.route(url_base + '/<tenant_id>/images', method='POST')
-def http_post_images(tenant_id):
-    '''insert a image into the database, and attach to tenant.'''
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check valid tenant_id
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-    http_content = format_in(image_new_schema)
-    r = remove_extra_items(http_content, image_new_schema)
-    if r is not None: print "http_post_images: Warning: remove extra items ", r
-    change_keys_http2db(http_content['image'], http2db_image)
-    metadata_dict = http_content['image'].pop('metadata', None)
-    if metadata_dict is not None: 
-        http_content['image']['metadata'] = json.dumps(metadata_dict)
-    #calculate checksum
-    try:
-        image_file = http_content['image'].get('path',None)
-        parsed_url = urlparse.urlparse(image_file)
-        if parsed_url.scheme == "" and parsed_url.netloc == "":
-            # The path is a local file
-            if os.path.exists(image_file):
-                http_content['image']['checksum'] = md5(image_file)
-        else:
-            # The path is a URL. Code should be added to download the image and calculate the checksum
-            #http_content['image']['checksum'] = md5(downloaded_image)
-            pass
-        # Finally, only if we are in test mode and checksum has not been calculated, we calculate it from the path
-        host_test_mode = True if config_dic['mode']=='test' or config_dic['mode']=="OF only" else False
-        if host_test_mode:
-            if 'checksum' not in http_content['image']:
-                http_content['image']['checksum'] = md5_string(image_file)
-        else:
-            # At this point, if the path is a local file and no chechsum has been obtained yet, an error is sent back.
-            # If it is a URL, no error is sent. Checksum will be an empty string
-            if parsed_url.scheme == "" and parsed_url.netloc == "" and 'checksum' not in http_content['image']:
-                content = "Image file not found"
-                print "http_post_images error: %d %s" % (HTTP_Bad_Request, content)
-                bottle.abort(HTTP_Bad_Request, content)
-    except Exception as e:
-        print "ERROR. Unexpected exception: %s" % (str(e))
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
-    #insert in data base
-    result, content = my.db.new_image(http_content['image'], tenant_id)
-    if result >= 0:
-        return http_get_image_id(tenant_id, content)
-    else:
-        print "http_post_images error %d %s" % (result, content)
-        bottle.abort(-result, content)
-        return
-    
-@bottle.route(url_base + '/<tenant_id>/images/<image_id>', method='DELETE')
-def http_delete_image_id(tenant_id, image_id):
-    '''Deletes the image_id of a tenant. IT removes from tenants_images table.'''
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check valid tenant_id
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-    result, content = my.db.delete_image_flavor('image', image_id, tenant_id)
-    if result == 0:
-        bottle.abort(HTTP_Not_Found, content)
-    elif result >0:
-        data={'result' : content}
-        return format_out(data)
-    else:
-        print "http_delete_image_id error",result, content
-        bottle.abort(-result, content)
-        return
-
-@bottle.route(url_base + '/<tenant_id>/images/<image_id>/<action>', method='POST')
-def http_attach_detach_images(tenant_id, image_id, action):
-    '''attach/detach an existing image in this tenant. That is insert/remove at tenants_images table.'''
-    #TODO alf:  not tested at all!!!
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check valid tenant_id
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-    if tenant_id=='any':
-        bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command")
-    #check valid action
-    if action!='attach' and action != 'detach':
-        bottle.abort(HTTP_Method_Not_Allowed, "actions can be attach or detach")
-        return
-
-    #Ensure that image exist 
-    from_  ='tenants_images as ti right join images as i on ti.image_id=i.uuid'
-    where_={'uuid': image_id}
-    result, content = my.db.get_table(SELECT=('public','tenant_id'), FROM=from_, WHERE=where_)
-    if result==0:
-        if action=='attach':
-            text_error="Image '%s' not found" % image_id
-        else:
-            text_error="Image '%s' not found for tenant '%s'" % (image_id, tenant_id)
-        bottle.abort(HTTP_Not_Found, text_error)
-        return
-    elif result>0:
-        image=content[0]
-        if action=='attach':
-            if image['tenant_id']!=None:
-                bottle.abort(HTTP_Conflict, "Image '%s' already attached to tenant '%s'" % (image_id, tenant_id))
-            if image['public']=='no' and not my.admin:
-                #allow only attaching public images
-                bottle.abort(HTTP_Unauthorized, "Needed admin rights to attach a private image")
-                return
-            #insert in data base
-            result, content = my.db.new_row('tenants_images', {'image_id':image_id, 'tenant_id': tenant_id})
-            if result >= 0:
-                return http_get_image_id(tenant_id, image_id)
-        else: #detach
-            if image['tenant_id']==None:
-                bottle.abort(HTTP_Not_Found, "Image '%s' not attached to tenant '%s'" % (image_id, tenant_id))
-            result, content = my.db.delete_row_by_dict(FROM='tenants_images', WHERE={'image_id':image_id, 'tenant_id':tenant_id})
-            if result>=0:
-                if image['public']=='no':
-                    #try to delete the image completely to avoid orphan images, IGNORE error
-                    my.db.delete_row_by_dict(FROM='images', WHERE={'uuid':image_id})
-                data={'result' : "image detached"}
-                return format_out(data)
-    
-    #if get here is because an error
-    print "http_attach_detach_images error %d %s" % (result, content)
-    bottle.abort(-result, content)
-    return
-
-@bottle.route(url_base + '/<tenant_id>/images/<image_id>', method='PUT')
-def http_put_image_id(tenant_id, image_id):
-    '''update a image_id into the database.'''
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check valid tenant_id
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-    #parse input data
-    http_content = format_in( image_update_schema )
-    r = remove_extra_items(http_content, image_update_schema)
-    if r is not None: print "http_put_image_id: Warning: remove extra items ", r
-    change_keys_http2db(http_content['image'], http2db_image)
-    metadata_dict = http_content['image'].pop('metadata', None)
-    if metadata_dict is not None: 
-        http_content['image']['metadata'] = json.dumps(metadata_dict)
-    #Ensure that image exist 
-    where_={'uuid': image_id}
-    if tenant_id=='any':
-        from_  ='images'
-        where_or_ = None
-    else:
-        from_  ='tenants_images as ti right join images as i on ti.image_id=i.uuid'
-        where_or_ = {'tenant_id': tenant_id, 'public': 'yes'}
-    result, content = my.db.get_table(SELECT=('public',), DISTINCT=True, FROM=from_, WHERE=where_, WHERE_OR=where_or_, WHERE_AND_OR="AND")
-    if result==0:
-        text_error="Image '%s' not found" % image_id
-        if tenant_id!='any':
-            text_error +=" for tenant '%s'" % image_id
-        bottle.abort(HTTP_Not_Found, text_error)
-        return
-    elif result>0:
-        if content[0]['public']=='yes' and not my.admin:
-            #allow only modifications over private images
-            bottle.abort(HTTP_Unauthorized, "Needed admin rights to edit a public image")
-            return
-        #insert in data base
-        result, content = my.db.update_rows('images', http_content['image'], {'uuid': image_id})
-
-    if result < 0:
-        print "http_put_image_id error %d %s" % (result, content)
-        bottle.abort(-result, content)
-        return
-    else:
-        return http_get_image_id(tenant_id, image_id)
-
-
-#
-# SERVERS
-#
-
-@bottle.route(url_base + '/<tenant_id>/servers', method='GET')
-def http_get_servers(tenant_id):
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-        return
-    #obtain data
-    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_server,
-            ('id','name','description','hostId','imageRef','flavorRef','status', 'tenant_id') )
-    if tenant_id!='any':
-        where_['tenant_id'] = tenant_id
-    result, content = my.db.get_table(SELECT=select_, FROM='instances', WHERE=where_, LIMIT=limit_)
-    if result < 0:
-        print "http_get_servers Error", content
-        bottle.abort(-result, content)
-    else:
-        change_keys_http2db(content, http2db_server, reverse=True)
-        for row in content:
-            tenant_id = row.pop('tenant_id')
-            row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'servers', str(row['id']) ) ), 'rel':'bookmark' } ]
-        data={'servers' : content}
-        return format_out(data)
-
@bottle.route(url_base + '/<tenant_id>/servers/<server_id>', method='GET')
def http_get_server_id(tenant_id, server_id):
    """Return the full detail of one server instance."""
    my = config_dic['http_threads'][threading.current_thread().name]
    # reject requests for unknown tenants
    result, content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
        return
    result, content = my.db.get_instance(server_id)
    if result == 0:
        bottle.abort(HTTP_Not_Found, content)
    elif result > 0:
        # translate DB fields to the HTTP representation
        convert_bandwidth(content, reverse=True)
        convert_datetime2str(content)
        if content["ram"] == 0:
            del content["ram"]
        if content["vcpus"] == 0:
            del content["vcpus"]
        # replace flavor_id/image_id by nested objects carrying bookmark links
        for field, collection in (('flavor', 'flavors'), ('image', 'images')):
            id_key = field + '_id'
            if id_key in content:
                if content[id_key] is not None:
                    href = "/".join((my.url_preffix, content['tenant_id'], collection,
                                     str(content[id_key])))
                    content[field] = {'id': content[id_key],
                                      'links': [{'href': href, 'rel': 'bookmark'}]}
                del content[id_key]
        change_keys_http2db(content, http2db_server, reverse=True)
        if 'extended' in content:
            if 'devices' in content['extended']:
                change_keys_http2db(content['extended']['devices'], http2db_server, reverse=True)

        return format_out({'server': content})
    else:
        bottle.abort(-result, content)
        return
-
-@bottle.route(url_base + '/<tenant_id>/servers', method='POST')
-def http_post_server_id(tenant_id):
-    '''deploys a new server'''
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    #check valid tenant_id
-    result,content = check_valid_tenant(my, tenant_id)
-    if result != 0:
-        bottle.abort(result, content)
-        return
-    if tenant_id=='any':
-        bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command")
-    #chek input
-    http_content = format_in( server_new_schema )
-    r = remove_extra_items(http_content, server_new_schema)
-    if r is not None: print "http_post_serves: Warning: remove extra items ", r
-    change_keys_http2db(http_content['server'], http2db_server)
-    extended_dict = http_content['server'].get('extended', None)
-    if extended_dict is not None:
-        result, content = check_extended(extended_dict, True)
-        if result<0:
-            print "http_post_servers wrong input extended error %d %s" % (result, content)
-            bottle.abort(-result, content)
-            return
-        convert_bandwidth(extended_dict)
-        if 'devices' in extended_dict: change_keys_http2db(extended_dict['devices'], http2db_server)
-
-    server = http_content['server']
-    server_start = server.get('start', 'yes')
-    server['tenant_id'] = tenant_id
-    #check flavor valid and take info
-    result, content = my.db.get_table(FROM='tenants_flavors as tf join flavors as f on tf.flavor_id=f.uuid',
-             SELECT=('ram','vcpus','extended'), WHERE={'uuid':server['flavor_id'], 'tenant_id':tenant_id})
-    if result<=0:
-        bottle.abort(HTTP_Not_Found, 'flavor_id %s not found' % server['flavor_id'])
-        return
-    server['flavor']=content[0]
-    #check image valid and take info
-    result, content = my.db.get_table(FROM='tenants_images as ti right join images as i on ti.image_id=i.uuid',
-                                      SELECT=('path', 'metadata', 'image_id'),
-                                      WHERE={'uuid':server['image_id'], "status":"ACTIVE"},
-                                      WHERE_OR={'tenant_id':tenant_id, 'public': 'yes'},
-                                      WHERE_AND_OR="AND",
-                                      DISTINCT=True)
-    if result<=0:
-        bottle.abort(HTTP_Not_Found, 'image_id %s not found or not ACTIVE' % server['image_id'])
-        return
-    for image_dict in content:
-        if image_dict.get("image_id"):
-            break
-    else:
-        # insert in data base tenants_images
-        r2, c2 = my.db.new_row('tenants_images', {'image_id': server['image_id'], 'tenant_id': tenant_id})
-        if r2<=0:
-            bottle.abort(HTTP_Not_Found, 'image_id %s cannot be used. Error %s' % (server['image_id'], c2))
-            return
-    server['image']={"path": content[0]["path"], "metadata": content[0]["metadata"]}
-    if "hosts_id" in server:
-        result, content = my.db.get_table(FROM='hosts', SELECT=('uuid',), WHERE={'uuid': server['host_id']})
-        if result<=0:
-            bottle.abort(HTTP_Not_Found, 'hostId %s not found' % server['host_id'])
-            return
-    #print json.dumps(server, indent=4)
-     
-    result, content = ht.create_server(server, config_dic['db'], config_dic['db_lock'], config_dic['mode']=='normal')
-
-    if result >= 0:
-    #Insert instance to database
-        nets=[]
-        print
-        print "inserting at DB"
-        print
-        if server_start == 'no':
-            content['status'] = 'INACTIVE'
-        dhcp_nets_id = []
-        for net in http_content['server']['networks']:
-            if net['type'] == 'instance:ovs':
-                dhcp_nets_id.append(get_network_id(net['net_id']))
-
-        ports_to_free=[]
-        new_instance_result, new_instance = my.db.new_instance(content, nets, ports_to_free)
-        if new_instance_result < 0:
-            print "Error http_post_servers() :", new_instance_result, new_instance
-            bottle.abort(-new_instance_result, new_instance)
-            return
-        print
-        print "inserted at DB"
-        print
-        for port in ports_to_free:
-            r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'restore-iface',*port )
-            if r < 0:
-                print ' http_post_servers ERROR RESTORE IFACE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' +  c
-        # update nets
-        for net_id in nets:
-            try:
-                my.ovim.net_update_ofc_thread(net_id)
-            except ovim.ovimException as e:
-                my.logger.error("http_post_servers, Error updating network with id '{}', '{}'".format(net_id, str(e)))
-
-        # look for dhcp ip address
-        r2, c2 = my.db.get_table(FROM="ports", SELECT=["mac", "ip_address", "net_id"], WHERE={"instance_id": new_instance})
-        if r2 >0:
-            for iface in c2:
-                if config_dic.get("dhcp_server") and iface["net_id"] in config_dic["dhcp_nets"]:
-                    #print "dhcp insert add task"
-                    r,c = config_dic['dhcp_thread'].insert_task("add", iface["mac"])
-                    if r < 0:
-                        print ':http_post_servers ERROR UPDATING dhcp_server !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' +  c
-
-                #ensure compute contain the bridge for ovs networks:
-                server_net = get_network_id(iface['net_id'])
-                if server_net["network"].get('provider:physical', "")[:3] == 'OVS':
-                    vlan = str(server_net['network']['provider:vlan'])
-                    dhcp_enable = bool(server_net['network']['enable_dhcp'])
-                    if dhcp_enable:
-                        dhcp_firt_ip = str(server_net['network']['dhcp_first_ip'])
-                        dhcp_last_ip = str(server_net['network']['dhcp_last_ip'])
-                        dhcp_cidr = str(server_net['network']['cidr'])
-                        gateway = str(server_net['network']['gateway'])
-                        vm_dhcp_ip = c2[0]["ip_address"]
-                        config_dic['host_threads'][server['host_id']].insert_task("create-ovs-bridge-port", vlan)
-
-                        set_mac_dhcp(vm_dhcp_ip, vlan, dhcp_firt_ip, dhcp_last_ip, dhcp_cidr, c2[0]['mac'])
-                        http_controller = config_dic['http_threads'][threading.current_thread().name]
-                        http_controller.ovim.launch_dhcp_server(vlan, dhcp_firt_ip, dhcp_last_ip, dhcp_cidr, gateway)
-
-        #Start server
-        server['uuid'] = new_instance
-        server_start = server.get('start', 'yes')
-
-        if server_start != 'no':
-            server['paused'] = True if server_start == 'paused' else False
-            server['action'] = {"start":None}
-            server['status'] = "CREATING"
-            #Program task
-            r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'instance',server )
-            if r<0:
-                my.db.update_rows('instances', {'status':"ERROR"}, {'uuid':server['uuid'], 'last_error':c}, log=True)
-        
-        return http_get_server_id(tenant_id, new_instance)
-    else:
-        bottle.abort(HTTP_Bad_Request, content)
-        return
-
-def http_server_action(server_id, tenant_id, action):
-    '''Perform actions over a server as resume, reboot, terminate, ...'''
-    my = config_dic['http_threads'][ threading.current_thread().name ]
-    server={"uuid": server_id, "action":action}
-    where={'uuid': server_id}
-    if tenant_id!='any':
-        where['tenant_id']= tenant_id
-    result, content = my.db.get_table(FROM='instances', WHERE=where)
-    if result == 0:
-        bottle.abort(HTTP_Not_Found, "server %s not found" % server_id)
-        return
-    if result < 0:
-        print "http_post_server_action error getting data %d %s" % (result, content)
-        bottle.abort(HTTP_Internal_Server_Error, content)
-        return
-    server.update(content[0])
-    tenant_id = server["tenant_id"]
-
-    #TODO check a right content
-    new_status = None
-    if 'terminate' in action:
-        new_status='DELETING'
-    elif server['status'] == 'ERROR': #or server['status'] == 'CREATING':
-        if 'terminate' not in action and 'rebuild' not in action:
-            bottle.abort(HTTP_Method_Not_Allowed, "Server is in ERROR status, must be rebuit or deleted ")
-            return
-#     elif server['status'] == 'INACTIVE':
-#         if 'start' not in action and 'createImage' not in action:
-#             bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'INACTIVE' status is 'start'")
-#             return
-#         if 'start' in action:
-#             new_status='CREATING'
-#             server['paused']='no'
-#     elif server['status'] == 'PAUSED':
-#         if 'resume' not in action:
-#             bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'PAUSED' status is 'resume'")
-#             return
-#     elif server['status'] == 'ACTIVE':
-#         if 'pause' not in action and 'reboot'not in action and 'shutoff'not in action:
-#             bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'ACTIVE' status is 'pause','reboot' or 'shutoff'")
-#             return
-
-    if 'start' in action or 'createImage' in action or 'rebuild' in action:
-        #check image valid and take info
-        image_id = server['image_id']
-        if 'createImage' in action:
-            if 'imageRef' in action['createImage']:
-                image_id = action['createImage']['imageRef']
-            elif 'disk' in action['createImage']:
-                result, content = my.db.get_table(FROM='instance_devices',
-                    SELECT=('image_id','dev'), WHERE={'instance_id':server['uuid'],"type":"disk"})
-                if result<=0:
-                    bottle.abort(HTTP_Not_Found, 'disk not found for server')
-                    return
-                elif result>1:
-                    disk_id=None
-                    if action['createImage']['imageRef']['disk'] != None:
-                        for disk in content:
-                            if disk['dev'] == action['createImage']['imageRef']['disk']:
-                                disk_id = disk['image_id']
-                                break
-                        if disk_id == None:
-                            bottle.abort(HTTP_Not_Found, 'disk %s not found for server' % action['createImage']['imageRef']['disk'])
-                            return
-                    else:
-                        bottle.abort(HTTP_Not_Found, 'more than one disk found for server' )
-                        return
-                    image_id = disk_id    
-                else: #result==1
-                    image_id = content[0]['image_id']    
-                
-        result, content = my.db.get_table(FROM='tenants_images as ti right join images as i on ti.image_id=i.uuid',
-            SELECT=('path','metadata'), WHERE={'uuid':image_id, "status":"ACTIVE"},
-            WHERE_OR={'tenant_id':tenant_id, 'public': 'yes'}, WHERE_AND_OR="AND", DISTINCT=True)
-        if result<=0:
-            bottle.abort(HTTP_Not_Found, 'image_id %s not found or not ACTIVE' % image_id)
-            return
-        if content[0]['metadata'] is not None:
-            try:
-                metadata = json.loads(content[0]['metadata'])
-            except:
-                return -HTTP_Internal_Server_Error, "Can not decode image metadata"
-            content[0]['metadata']=metadata
-        else:
-            content[0]['metadata'] = {}
-        server['image']=content[0]
-        if 'createImage' in action:
-            action['createImage']['source'] = {'image_id': image_id, 'path': content[0]['path']}
-    if 'createImage' in action:
-        #Create an entry in Database for the new image
-        new_image={'status':'BUILD', 'progress': 0 }
-        new_image_metadata=content[0]
-        if 'metadata' in server['image'] and server['image']['metadata'] != None:
-            new_image_metadata.update(server['image']['metadata'])
-        new_image_metadata = {"use_incremental":"no"}
-        if 'metadata' in action['createImage']:
-            new_image_metadata.update(action['createImage']['metadata'])
-        new_image['metadata'] = json.dumps(new_image_metadata)
-        new_image['name'] = action['createImage'].get('name', None)
-        new_image['description'] = action['createImage'].get('description', None)
-        new_image['uuid']=my.db.new_uuid()
-        if 'path' in action['createImage']:
-            new_image['path'] = action['createImage']['path']
-        else:
-            new_image['path']="/provisional/path/" + new_image['uuid']
-        result, image_uuid = my.db.new_image(new_image, tenant_id)
-        if result<=0:
-            bottle.abort(HTTP_Bad_Request, 'Error: ' + image_uuid)
-            return
-        server['new_image'] = new_image
-
-                
-    #Program task
-    r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'instance',server )
-    if r<0:
-        print "Task queue full at host ", server['host_id']
-        bottle.abort(HTTP_Request_Timeout, c)
-    if 'createImage' in action and result >= 0:
-        return http_get_image_id(tenant_id, image_uuid)
-    
-    #Update DB only for CREATING or DELETING status
-    data={'result' : 'deleting in process'}
-    warn_text=""
-    if new_status != None and new_status == 'DELETING':
-        nets=[]
-        ports_to_free=[]
-
-        net_ovs_list = []
-        #look for dhcp ip address
-        r2, c2 = my.db.get_table(FROM="ports", SELECT=["mac", "net_id"], WHERE={"instance_id": server_id})
-        r, c = my.db.delete_instance(server_id, tenant_id, nets, ports_to_free, net_ovs_list, "requested by http")
-        for port in ports_to_free:
-            r1,c1 = config_dic['host_threads'][ server['host_id'] ].insert_task( 'restore-iface',*port )
-            if r1 < 0:
-                my.logger.error("http_post_server_action server deletion ERROR at resore-iface!!!! " + c1)
-                warn_text += "; Error iface '{}' cannot be restored '{}'".format(str(port), str(e))
-        for net_id in nets:
-            try:
-                my.ovim.net_update_ofc_thread(net_id)
-            except ovim.ovimException as e:
-                my.logger.error("http_server_action, Error updating network with id '{}', '{}'".format(net_id, str(e)))
-                warn_text += "; Error openflow rules of network '{}' cannot be restore '{}'".format(net_id, str (e))
-
-        # look for dhcp ip address
-        if r2 >0 and config_dic.get("dhcp_server"):
-            for iface in c2:
-                if iface["net_id"] in config_dic["dhcp_nets"]:
-                    r,c = config_dic['dhcp_thread'].insert_task("del", iface["mac"])
-                    #print "dhcp insert del task"
-                    if r < 0:
-                        print ':http_post_servers ERROR UPDATING dhcp_server !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' +  c 
-        # delete ovs-port and linux bridge, contains a list of tuple (net_id,vlan)
-        for net in net_ovs_list:
-            mac = str(net[3])
-            vm_ip = str(net[2])
-            vlan = str(net[1])
-            net_id = net[0]
-            delete_dhcp_ovs_bridge(vlan, net_id)
-            delete_mac_dhcp(vm_ip, vlan, mac)
-            config_dic['host_threads'][server['host_id']].insert_task('del-ovs-port', vlan, net_id)
-    return format_out(data + warn_text)
-
-
-
@bottle.route(url_base + '/<tenant_id>/servers/<server_id>', method='DELETE')
def http_delete_server_id(tenant_id, server_id):
    """Terminate (delete) a server instance owned by the given tenant."""
    worker = config_dic['http_threads'][threading.current_thread().name]
    # Reject the request early if the tenant is unknown or invalid.
    code, message = check_valid_tenant(worker, tenant_id)
    if code:
        bottle.abort(code, message)
        return
    # Deleting a server is implemented as the generic "terminate" action.
    return http_server_action(server_id, tenant_id, {"terminate": None})
-
-    
@bottle.route(url_base + '/<tenant_id>/servers/<server_id>/action', method='POST')
def http_post_server_action(tenant_id, server_id):
    """Apply an action (start, stop, rebuild, ...) to a tenant's server."""
    worker = config_dic['http_threads'][threading.current_thread().name]
    # The tenant must exist and be valid before any action is attempted.
    code, message = check_valid_tenant(worker, tenant_id)
    if code:
        bottle.abort(code, message)
        return
    # Parse and validate the request body against the action schema.
    action = format_in(server_action_schema)
    return http_server_action(server_id, tenant_id, action)
-
-#
-# NETWORKS
-#
-
-
@bottle.route(url_base + '/networks', method='GET')
def http_get_networks():
    """List every network, honoring the query-string filters."""
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        allowed = ('id', 'name', 'tenant_id', 'type',
                   'shared', 'provider:vlan', 'status', 'last_error',
                   'admin_state_up', 'provider:physical')
        select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_network, allowed)
        # Networks are listed globally: never filter them by tenant.
        where_.pop("tenant_id", None)

        networks = worker.ovim.get_networks(select_, where_, limit_)

        delete_nulls(networks)
        change_keys_http2db(networks, http2db_network, reverse=True)
        return format_out({'networks': networks})

    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/networks/<network_id>', method='GET')
def http_get_network_id(network_id):
    """HTTP entry point: return the network identified by network_id.

    :param network_id: network uuid or name
    """
    return format_out(get_network_id(network_id))
-
-
def get_network_id(network_id):
    """Fetch one network row from the DB and map it to the HTTP schema.

    :param network_id: network Id
    :return: dict with a single 'network' entry
    """
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        # Any extra query-string parameters act as additional filters.
        net = worker.ovim.show_network(network_id, bottle.request.query)

        change_keys_http2db(net, http2db_network, reverse=True)
        delete_nulls(net)
        return {'network': net}
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/networks', method='POST')
def http_post_networks():
    """
    Insert a network into the database.
    :return: the newly created network, formatted for the HTTP API
    """
    my = config_dic['http_threads'][threading.current_thread().name]

    try:
        # parse input data
        http_content = format_in(network_new_schema)
        r = remove_extra_items(http_content, network_new_schema)
        if r is not None:
            # FIX: use the thread logger (as the other handlers do) instead of a
            # bare Python-2 print, so the warning reaches the daemon log
            my.logger.warning("http_post_networks: Warning: remove extra items %s", str(r))
        change_keys_http2db(http_content['network'], http2db_network)
        network = http_content['network']
        content = my.ovim.new_network(network)
        # re-read the stored record so DB-filled defaults are returned
        return format_out(get_network_id(content))
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
-
-
@bottle.route(url_base + '/networks/<network_id>', method='PUT')
def http_put_network_id(network_id):
    """Update the network identified by network_id with the request body.

    :param network_id: network id
    """
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        # Validate the body and translate HTTP field names to DB columns.
        body = format_in(network_update_schema)
        change_keys_http2db(body['network'], http2db_network)
        updated = worker.ovim.edit_network(network_id, body['network'])
        return format_out(updated)

    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/networks/<network_id>', method='DELETE')
def http_delete_network_id(network_id):
    """Remove the network identified by network_id from the database.

    :param network_id: Network id
    """
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        outcome = worker.ovim.delete_network(network_id)
        return format_out({'result': outcome})

    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-#
-# OPENFLOW
-#
-
-
@bottle.route(url_base + '/openflow/controller', method='GET')
def http_get_openflow_controller():
    """List the openflow controllers stored in the database."""
    # TODO check if show a proper list
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        fields = ('id', 'name', 'dpid', 'ip', 'port', 'type',
                  'version', 'user', 'password')
        select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_ofc, fields)

        ofcs = worker.ovim.get_of_controllers(select_, where_)
        delete_nulls(ofcs)
        change_keys_http2db(ofcs, http2db_ofc, reverse=True)
        return format_out({'ofcs': ofcs})
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/openflow/controller/<uuid>', method='GET')
def http_get_openflow_controller_id(uuid):
    """Return one openflow controller, looked up by its uuid."""
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        ofc = worker.ovim.show_of_controller(uuid)
        delete_nulls(ofc)
        change_keys_http2db(ofc, http2db_ofc, reverse=True)
        return format_out({'ofc': ofc})
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/openflow/controller/', method='POST')
def http_post_openflow_controller():
    """Create a new openflow controller and echo the stored record back."""
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        body = format_in(openflow_controller_schema)
        new_uuid = worker.ovim.new_of_controller(body['ofc'])
        # Re-read the stored record so defaults filled by the DB are returned.
        ofc = worker.ovim.show_of_controller(new_uuid)
        delete_nulls(ofc)
        change_keys_http2db(ofc, http2db_ofc, reverse=True)
        return format_out({'ofc': ofc})
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/openflow/controller/<of_controller_id>', method='PUT')
def http_put_openflow_controller_by_id(of_controller_id):
    """Update an existing openflow controller entry.

    :param of_controller_id: openflow controller id
    """
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        body = format_in(openflow_controller_schema)
        ofc = worker.ovim.edit_of_controller(of_controller_id, body['ofc'])
        delete_nulls(ofc)
        change_keys_http2db(ofc, http2db_ofc, reverse=True)
        return format_out({'ofc': ofc})
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/openflow/controller/<of_controller_id>', method='DELETE')
def http_delete_openflow_controller(of_controller_id):
    """Delete an openflow controller from the database.

    :param of_controller_id: openflow controller id
    """
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        outcome = worker.ovim.delete_of_controller(of_controller_id)
        return format_out({'result': outcome})
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/networks/<network_id>/openflow', method='GET')
def http_get_openflow_id(network_id):
    """List the openflow rules of a network ('all' selects every network).

    :param network_id: network id, or the literal 'all'
    """
    worker = config_dic['http_threads'][threading.current_thread().name]

    # 'all' is a wildcard meaning "no network filter"
    target = None if network_id == 'all' else network_id
    try:
        rules = worker.ovim.get_openflow_rules(target)
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))

    return format_out({'openflow-rules': rules})
-
-
@bottle.route(url_base + '/networks/<network_id>/openflow', method='PUT')
def http_put_openflow_id(network_id):
    """Reinstall the openflow rules of one network (or 'all' of them).

    :param network_id: network id, or the literal 'all'
    """
    worker = config_dic['http_threads'][threading.current_thread().name]

    # Only administrators may rewrite openflow rules.
    if not worker.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")

    target = None if network_id == 'all' else network_id

    try:
        updated = worker.ovim.edit_openflow_rules(target)
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))

    return format_out({'result': str(updated) + " nets updates"})
-
@bottle.route(url_base + '/networks/clear/openflow/<ofc_id>', method='DELETE')
@bottle.route(url_base + '/networks/clear/openflow', method='DELETE')
def http_clear_openflow_rules(ofc_id=None):
    """Delete ALL openflow rules, optionally restricted to one controller.

    :param ofc_id: openflow controller id, or None for every controller
    """
    worker = config_dic['http_threads'][threading.current_thread().name]

    # Admin privileges are required to wipe dataplane rules.
    if not worker.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
    try:
        worker.ovim.delete_openflow_rules(ofc_id)
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))

    return format_out({'result': " Clearing openflow rules in process"})
-
@bottle.route(url_base + '/networks/openflow/ports/<ofc_id>', method='GET')
@bottle.route(url_base + '/networks/openflow/ports', method='GET')
def http_get_openflow_ports(ofc_id=None):
    """Report the switch port names known to an openflow controller.

    :param ofc_id: openflow controller id, or None for the default one
    """
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        switch_ports = worker.ovim.get_openflow_ports(ofc_id)
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))

    return format_out({'ports': switch_ports})
-#
-# PORTS
-#
-
-
@bottle.route(url_base + '/ports', method='GET')
def http_get_ports():
    """List ports, honoring the query-string filters."""
    worker = config_dic['http_threads'][threading.current_thread().name]
    # Filter parsing stays outside the try block, as in the other list handlers.
    columns = ('id', 'name', 'tenant_id', 'network_id', 'vpci', 'mac_address', 'device_owner', 'device_id',
               'binding:switch_port', 'binding:vlan', 'bandwidth', 'status', 'admin_state_up', 'ip_address')
    select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_port, columns)
    try:
        ports = worker.ovim.get_ports(columns=select_, filter=where_, limit=limit_)
        delete_nulls(ports)
        change_keys_http2db(ports, http2db_port, reverse=True)
        return format_out({'ports': ports})
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
@bottle.route(url_base + '/ports/<port_id>', method='GET')
def http_get_port_id(port_id):
    """Return a single port, looked up by uuid."""
    worker = config_dic['http_threads'][threading.current_thread().name]
    try:
        matches = worker.ovim.get_ports(filter={"uuid": port_id})
        if not matches:
            bottle.abort(HTTP_Not_Found, 'port %s not found' % port_id)
            return
        delete_nulls(matches)
        change_keys_http2db(matches, http2db_port, reverse=True)
        return format_out({'port': matches[0]})
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
@bottle.route(url_base + '/ports', method='POST')
def http_post_ports():
    """Insert an external port into the database (admin only).

    :return: the stored port, re-read from the DB and formatted for HTTP
    """
    my = config_dic['http_threads'][threading.current_thread().name]
    if not my.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
    # parse input data
    http_content = format_in(port_new_schema)
    r = remove_extra_items(http_content, port_new_schema)
    if r is not None:
        # FIX: use the thread logger (as the other handlers do) instead of a
        # bare Python-2 print, so the warning reaches the daemon log
        my.logger.warning("http_post_ports: Warning: remove extra items %s", str(r))
    change_keys_http2db(http_content['port'], http2db_port)
    port = http_content['port']
    try:
        port_id = my.ovim.new_port(port)
        # re-read so DB-filled defaults are returned to the caller
        ports = my.ovim.get_ports(filter={"uuid": port_id})
        if not ports:
            bottle.abort(HTTP_Internal_Server_Error, "port '{}' inserted but not found at database".format(port_id))
            return
        delete_nulls(ports)
        change_keys_http2db(ports, http2db_port, reverse=True)
        data = {'port': ports[0]}
        return format_out(data)
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
-
@bottle.route(url_base + '/ports/<port_id>', method='PUT')
def http_put_port_id(port_id):
    """Update a port; dataplane-affecting fields are admin-only."""
    worker = config_dic['http_threads'][threading.current_thread().name]
    # parse input data
    body = format_in(port_update_schema)
    change_keys_http2db(body['port'], http2db_port)
    port_dict = body['port']

    # Only admins may touch fields that affect the dataplane wiring.
    for restricted in ('vlan', 'switch_port', 'mac_address', 'tenant_id'):
        if restricted in port_dict and not worker.admin:
            bottle.abort(HTTP_Unauthorized, "Needed admin privileges for changing " + restricted)
            return
    try:
        port_id = worker.ovim.edit_port(port_id, port_dict, worker.admin)
        edited = worker.ovim.get_ports(filter={"uuid": port_id})
        if not edited:
            bottle.abort(HTTP_Internal_Server_Error, "port '{}' edited but not found at database".format(port_id))
            return
        delete_nulls(edited)
        change_keys_http2db(edited, http2db_port, reverse=True)
        return format_out({'port': edited[0]})
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/ports/<port_id>', method='DELETE')
def http_delete_port_id(port_id):
    """Delete a port from the database (admin only)."""
    worker = config_dic['http_threads'][threading.current_thread().name]
    if not worker.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
        return
    try:
        outcome = worker.ovim.delete_port(port_id)
        return format_out({'result': outcome})
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/openflow/mapping', method='POST')
def http_of_port_mapping():
    """
    Create new compute port mapping entries.
    :return: the stored mappings, formatted for the HTTP API
    """
    my = config_dic['http_threads'][threading.current_thread().name]

    try:
        http_content = format_in(of_port_map_new_schema)
        r = remove_extra_items(http_content, of_port_map_new_schema)
        if r is not None:
            # FIX: warning level (not error) and no exc_info — there is no active
            # exception here, so exc_info=True would log a spurious traceback line
            my.logger.warning("http_of_port_mapping: Warning: remove extra items " + str(r))

        # insert in data base
        port_mapping = my.ovim.set_of_port_mapping(http_content['of_port_mapings'])
        change_keys_http2db(port_mapping, http2db_id, reverse=True)
        delete_nulls(port_mapping)
        data = {'of_port_mappings': port_mapping}
        return format_out(data)
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
-
-
@bottle.route(url_base + '/openflow/mapping', method='GET')
def get_of_port_mapping():
    """List the compute-node to switch-port mappings."""
    worker = config_dic['http_threads'][threading.current_thread().name]

    try:
        fields = ('id', 'ofc_id', 'region', 'compute_node', 'pci',
                  'switch_dpid', 'switch_port', 'switch_mac')
        select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_id, fields)
        # read from the data base
        port_mapping = worker.ovim.get_of_port_mappings(select_, where_)
        change_keys_http2db(port_mapping, http2db_id, reverse=True)
        delete_nulls(port_mapping)
        return format_out({'of_port_mappings': port_mapping})
    except ovim.ovimException as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(exc.http_code, str(exc))
    except Exception as exc:
        worker.logger.error(str(exc), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(exc))
-
-
@bottle.route(url_base + '/openflow/mapping/<region>', method='DELETE')
def delete_of_port_mapping(region):
    """
    Delete every compute port mapping belonging to the given region.

    FIX: the original docstring wrongly said "Insert a tenant into the database".
    :param region: region whose port mappings are removed
    :return: formatted result of the delete operation
    """
    my = config_dic['http_threads'][threading.current_thread().name]

    try:
        # delete from the data base
        db_filter = {'region': region}
        result = my.ovim.clear_of_port_mapping(db_filter)
        data = {'result': result}
        return format_out(data)
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
-
diff --git a/onos.py b/onos.py
deleted file mode 100644 (file)
index 338412f..0000000
--- a/onos.py
+++ /dev/null
@@ -1,470 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2016, I2T Research Group (UPV/EHU)
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: alaitz.mendiola@ehu.eus or alaitz.mendiola@gmail.com
-##
-
'''
Implements the plugin for the Open Network Operating System (ONOS) openflow
controller. It creates the class OF_conn to create dataplane connections
with static rules based on packet destination MAC address.
'''
-
-__author__="Alaitz Mendiola"
-__date__ ="$22-nov-2016$"
-
-
-import json
-import requests
-import base64
-import logging
-import openflow_conn
-
-
-class OF_conn(openflow_conn.OpenflowConn):
-    """
-    ONOS connector. No MAC learning is used
-    """
-    def __init__(self, params):
-        """ Constructor.
-            Params: dictionary with the following keys:
-                of_dpid:     DPID to use for this controller ?? Does a controller have a dpid?
-                of_ip:       controller IP address
-                of_port:     controller TCP port
-                of_user:     user credentials, can be missing or None
-                of_password: password credentials
-                of_debug:    debug level for logging. Default to ERROR
-                other keys are ignored
-            Raise an exception if same parameter is missing or wrong
-        """
-
-        openflow_conn.OpenflowConn.__init__(self, params)
-
-        # check params
-        if "of_ip" not in params or params["of_ip"]==None or "of_port" not in params or params["of_port"]==None:
-            raise ValueError("IP address and port must be provided")
-        #internal variables
-        self.name = "onos"
-        self.headers = {'content-type':'application/json','accept':'application/json',}
-
-        self.auth="None"
-        self.pp2ofi={}  # From Physical Port to OpenFlow Index
-        self.ofi2pp={}  # From OpenFlow Index to Physical Port
-
-        self.dpid = str(params["of_dpid"])
-        self.id = 'of:'+str(self.dpid.replace(':', ''))
-        self.url = "http://%s:%s/onos/v1/" %( str(params["of_ip"]), str(params["of_port"] ) )
-
-        # TODO This may not be straightforward
-        if "of_user" in params and params["of_user"]!=None:
-            if not params.get("of_password"):
-                of_password=""
-            else:
-                of_password=str(params["of_password"])
-            self.auth = base64.b64encode(str(params["of_user"])+":"+of_password)
-            self.headers['authorization'] = 'Basic ' + self.auth
-
-        self.logger = logging.getLogger('vim.OF.onos')
-        self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR")) )
-        self.ip_address = None
-
-    def get_of_switches(self):
-        """
-        Obtain a a list of switches or DPID detected by this controller
-        :return: list where each element a tuple pair (DPID, IP address)
-                 Raise a openflowconnUnexpectedResponse expection in case of failure
-        """
-        try:
-            self.headers['content-type'] = 'text/plain'
-            of_response = requests.get(self.url + "devices", headers=self.headers)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 200:
-                self.logger.warning("get_of_switches " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-            self.logger.debug("get_of_switches " + error_text)
-            info = of_response.json()
-
-            if type(info) != dict:
-                self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
-
-            node_list = info.get('devices')
-
-            if type(node_list) is not list:
-                self.logger.error(
-                    "get_of_switches. Unexpected response, at 'devices', not found or not a list: %s",
-                    str(type(node_list)))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, at 'devices', not found "
-                                                                   "or not a list. Wrong version?")
-
-            switch_list = []
-            for node in node_list:
-                node_id = node.get('id')
-                if node_id is None:
-                    self.logger.error("get_of_switches. Unexpected response at 'device':'id', not found: %s",
-                                      str(node))
-                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'device':'id', "
-                                                                       "not found . Wrong version?")
-
-                node_ip_address = node.get('annotations').get('managementAddress')
-                if node_ip_address is None:
-                    self.logger.error(
-                        "get_of_switches. Unexpected response at 'device':'managementAddress', not found: %s",
-                        str(node))
-                    raise openflow_conn.OpenflowconnUnexpectedResponse(
-                        "Unexpected response at 'device':'managementAddress', not found. Wrong version?")
-
-                node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)
-
-                switch_list.append(
-                    (':'.join(a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])), node_ip_address))
-            raise switch_list
-
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_switches " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-        except ValueError as e:
-            # ValueError in the case that JSON can not be decoded
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_switches " + error_text)
-            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
    def obtain_port_correspondence(self):
        """
        Obtain the correspondence between physical and openflow port names.

        Queries the ONOS REST API ("devices/<id>/ports") and, as a side effect,
        fills the self.pp2ofi / self.ofi2pp translation maps and stores the
        switch management IP in self.ip_address.

        :return: dictionary with physical name as key, openflow name as value
                 Raise an OpenflowconnUnexpectedResponse exception in case of failure
        """
        try:
            self.headers['content-type'] = 'text/plain'
            of_response = requests.get(self.url + "devices/" + self.id + "/ports", headers=self.headers)
            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
            if of_response.status_code != 200:
                self.logger.warning("obtain_port_correspondence " + error_text)
                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)

            self.logger.debug("obtain_port_correspondence " + error_text)
            info = of_response.json()

            node_connector_list = info.get('ports')
            if type(node_connector_list) is not list:
                self.logger.error(
                    "obtain_port_correspondence. Unexpected response at 'ports', not found or not a list: %s",
                    str(node_connector_list))
                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'ports', not found  or not "
                                                                   "a list. Wrong version?")

            for node_connector in node_connector_list:
                # the 'local' entry is the switch CPU port, not a dataplane port
                if node_connector['port'] != "local":
                    self.pp2ofi[str(node_connector['annotations']['portName'])] = str(node_connector['port'])
                    self.ofi2pp[str(node_connector['port'])] = str(node_connector['annotations']['portName'])

            node_ip_address = info['annotations']['managementAddress']
            if node_ip_address is None:
                self.logger.error(
                    "obtain_port_correspondence. Unexpected response at 'managementAddress', not found: %s",
                    str(self.id))
                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'managementAddress', "
                                                                   "not found. Wrong version?")
            self.ip_address = node_ip_address

            # print self.name, ": obtain_port_correspondence ports:", self.pp2ofi
            return self.pp2ofi
        except requests.exceptions.RequestException as e:
            error_text = type(e).__name__ + ": " + str(e)
            self.logger.error("obtain_port_correspondence " + error_text)
            raise openflow_conn.OpenflowconnConnectionException(error_text)
        except ValueError as e:
            # ValueError in the case that JSON can not be decoded
            error_text = type(e).__name__ + ": " + str(e)
            self.logger.error("obtain_port_correspondence " + error_text)
            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-    def get_of_rules(self, translate_of_ports=True):
-        """
-        Obtain the rules inserted at openflow controller
-        :param translate_of_ports: if True it translates ports from openflow index to physical switch name
-        :return: dict if ok: with the rule name as key and value is another dictionary with the following content:
-                    priority: rule priority
-                    name:         rule name (present also as the master dict key)
-                    ingress_port: match input port of the rule
-                    dst_mac:      match destination mac address of the rule, can be missing or None if not apply
-                    vlan_id:      match vlan tag of the rule, can be missing or None if not apply
-                    actions:      list of actions, composed by a pair tuples:
-                        (vlan, None/int): for stripping/setting a vlan tag
-                        (out, port):      send to this port
-                    switch:       DPID, all
-                 Raise a openflowconnUnexpectedResponse expection in case of failure
-        """
-
-        try:
-
-            if len(self.ofi2pp) == 0:
-                self.obtain_port_correspondence()
-
-            # get rules
-            self.headers['content-type'] = 'text/plain'
-            of_response = requests.get(self.url + "flows/" + self.id, headers=self.headers)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-
-            # The configured page does not exist if there are no rules installed. In that case we return an empty dict
-            if of_response.status_code == 404:
-                return {}
-
-            elif of_response.status_code != 200:
-                self.logger.warning("get_of_rules " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-            self.logger.debug("get_of_rules " + error_text)
-
-            info = of_response.json()
-
-            if type(info) != dict:
-                self.logger.error("get_of_rules. Unexpected response, not a dict: %s", str(info))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected openflow response, not a dict. "
-                                                                   "Wrong version?")
-
-            flow_list = info.get('flows')
-
-            if flow_list is None:
-                return {}
-
-            if type(flow_list) is not list:
-                self.logger.error(
-                    "get_of_rules. Unexpected response at 'flows', not a list: %s",
-                    str(type(flow_list)))
-                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'flows', not a list. "
-                                                                   "Wrong version?")
-
-            rules = dict() # Response dictionary
-
-            for flow in flow_list:
-                if not ('id' in flow and 'selector' in flow and 'treatment' in flow and \
-                                    'instructions' in flow['treatment'] and 'criteria' in \
-                                    flow['selector']):
-                    raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, one or more "
-                                                                       "elements are missing. Wrong version?")
-
-                rule = dict()
-                rule['switch'] = self.dpid
-                rule['priority'] = flow.get('priority')
-                rule['name'] = flow['id']
-
-                for criteria in flow['selector']['criteria']:
-                    if criteria['type'] == 'IN_PORT':
-                        in_port = str(criteria['port'])
-                        if in_port != "CONTROLLER":
-                            if not in_port in self.ofi2pp:
-                                raise openflow_conn.OpenflowconnUnexpectedResponse("Error: Ingress port {} is not "
-                                                                                   "in switch port list".format(in_port))
-                            if translate_of_ports:
-                                in_port = self.ofi2pp[in_port]
-                        rule['ingress_port'] = in_port
-
-                    elif criteria['type'] == 'VLAN_VID':
-                        rule['vlan_id'] = criteria['vlanId']
-
-                    elif criteria['type'] == 'ETH_DST':
-                        rule['dst_mac'] = str(criteria['mac']).lower()
-
-                actions = []
-                for instruction in flow['treatment']['instructions']:
-                    if instruction['type'] == "OUTPUT":
-                        out_port = str(instruction['port'])
-                        if out_port != "CONTROLLER":
-                            if not out_port in self.ofi2pp:
-                                raise openflow_conn.OpenflowconnUnexpectedResponse("Error: Output port {} is not in "
-                                                                                   "switch port list".format(out_port))
-
-                            if translate_of_ports:
-                                out_port = self.ofi2pp[out_port]
-
-                        actions.append( ('out', out_port) )
-
-                    if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_POP":
-                        actions.append( ('vlan', 'None') )
-                    if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_ID":
-                        actions.append( ('vlan', instruction['vlanId']) )
-
-                rule['actions'] = actions
-                rules[flow['id']] = dict(rule)
-            return rules
-
-        except requests.exceptions.RequestException as e:
-            # ValueError in the case that JSON can not be decoded
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_rules " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-        except ValueError as e:
-            # ValueError in the case that JSON can not be decoded
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("get_of_rules " + error_text)
-            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-    def del_flow(self, flow_name):
-        """
-        Delete an existing rule
-        :param flow_name:
-        :return: Raise a openflowconnUnexpectedResponse expection in case of failure
-        """
-
-        try:
-            self.headers['content-type'] = None
-            of_response = requests.delete(self.url + "flows/" + self.id + "/" + flow_name, headers=self.headers)
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-
-            if of_response.status_code != 204:
-                self.logger.warning("del_flow " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-            self.logger.debug("del_flow OK " + error_text)
-            return None
-
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("del_flow " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-
-    def new_flow(self, data):
-        """
-        Insert a new static rule
-        :param data: dictionary with the following content:
-                priority:     rule priority
-                name:         rule name
-                ingress_port: match input port of the rule
-                dst_mac:      match destination mac address of the rule, missing or None if not apply
-                vlan_id:      match vlan tag of the rule, missing or None if not apply
-                actions:      list of actions, composed by a pair tuples with these posibilities:
-                    ('vlan', None/int): for stripping/setting a vlan tag
-                    ('out', port):      send to this port
-        :return: Raise a openflowconnUnexpectedResponse expection in case of failure
-        """
-        try:
-
-            if len(self.pp2ofi) == 0:
-                self.obtain_port_correspondence()
-
-            # Build the dictionary with the flow rule information for ONOS
-            flow = dict()
-            #flow['id'] = data['name']
-            flow['tableId'] = 0
-            flow['priority'] = data.get('priority')
-            flow['timeout'] = 0
-            flow['isPermanent'] = "true"
-            flow['appId'] = 10 # FIXME We should create an appId for OSM
-            flow['selector'] = dict()
-            flow['selector']['criteria'] = list()
-
-            # Flow rule matching criteria
-            if not data['ingress_port'] in self.pp2ofi:
-                error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch'
-                self.logger.warning("new_flow " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-            ingress_port_criteria = dict()
-            ingress_port_criteria['type'] = "IN_PORT"
-            ingress_port_criteria['port'] = self.pp2ofi[data['ingress_port']]
-            flow['selector']['criteria'].append(ingress_port_criteria)
-
-            if 'dst_mac' in data:
-                dst_mac_criteria = dict()
-                dst_mac_criteria["type"] = "ETH_DST"
-                dst_mac_criteria["mac"] = data['dst_mac']
-                flow['selector']['criteria'].append(dst_mac_criteria)
-
-            if data.get('vlan_id'):
-                vlan_criteria = dict()
-                vlan_criteria["type"] = "VLAN_VID"
-                vlan_criteria["vlanId"] = int(data['vlan_id'])
-                flow['selector']['criteria'].append(vlan_criteria)
-
-            # Flow rule treatment
-            flow['treatment'] = dict()
-            flow['treatment']['instructions'] = list()
-            flow['treatment']['deferred'] = list()
-
-            for action in data['actions']:
-                new_action = dict()
-                if  action[0] == "vlan":
-                    new_action['type'] = "L2MODIFICATION"
-                    if action[1] == None:
-                        new_action['subtype'] = "VLAN_POP"
-                    else:
-                        new_action['subtype'] = "VLAN_ID"
-                        new_action['vlanId'] = int(action[1])
-                elif action[0] == 'out':
-                    new_action['type'] = "OUTPUT"
-                    if not action[1] in self.pp2ofi:
-                        error_msj = 'Port '+ action[1] + ' is not present in the switch'
-                        raise openflow_conn.OpenflowconnUnexpectedResponse(error_msj)
-                    new_action['port'] = self.pp2ofi[action[1]]
-                else:
-                    error_msj = "Unknown item '%s' in action list" % action[0]
-                    self.logger.error("new_flow " + error_msj)
-                    raise openflow_conn.OpenflowconnUnexpectedResponse(error_msj)
-
-                flow['treatment']['instructions'].append(new_action)
-
-            self.headers['content-type'] = 'application/json'
-            path = self.url + "flows/" + self.id
-            of_response = requests.post(path, headers=self.headers, data=json.dumps(flow) )
-
-            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
-            if of_response.status_code != 201:
-                self.logger.warning("new_flow " + error_text)
-                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
-
-            flowId = of_response.headers['location'][path.__len__() + 1:]
-
-            data['name'] = flowId
-
-            self.logger.debug("new_flow OK " + error_text)
-            return None
-
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("new_flow " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-
-    def clear_all_flows(self):
-        """
-        Delete all existing rules
-        :return: Raise a openflowconnUnexpectedResponse expection in case of failure
-        """
-        try:
-            rules = self.get_of_rules(True)
-
-            for rule in rules:
-                self.del_flow(rule)
-
-            self.logger.debug("clear_all_flows OK ")
-            return None
-
-        except requests.exceptions.RequestException as e:
-            error_text = type(e).__name__ + ": " + str(e)
-            self.logger.error("clear_all_flows " + error_text)
-            raise openflow_conn.OpenflowconnConnectionException(error_text)
-
-
-
-
-
index 4df9338..07261df 100755 (executable)
--- a/openflow
+++ b/openflow
@@ -40,8 +40,8 @@ import imp
 import yaml
 import requests
 import logging
-import openflow_conn
-from openflow_thread import change_db2of, FlowBadFormat
+import osm_openvim.openflow_conn as openflow_conn
+from osm_openvim.openflow_thread import change_db2of, FlowBadFormat
 
 
 def of_switches(args):
diff --git a/openflow_conn.py b/openflow_conn.py
deleted file mode 100644 (file)
index f42f4dc..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openmano
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-import logging
-import base64
-
-"""
-vimconn implement an Abstract class for the vim connector plugins
- with the definition of the method to be implemented.
-"""
-__author__ = "Alfonso Tierno, Leonardo Mirabal"
-__date__ = "$16-oct-2015 11:09:29$"
-
-
-
-# Error variables
-HTTP_Bad_Request = 400
-HTTP_Unauthorized = 401
-HTTP_Not_Found = 404
-HTTP_Method_Not_Allowed = 405
-HTTP_Request_Timeout = 408
-HTTP_Conflict = 409
-HTTP_Not_Implemented = 501
-HTTP_Service_Unavailable = 503
-HTTP_Internal_Server_Error = 500
-
-
-class OpenflowconnException(Exception):
-    """Common and base class Exception for all vimconnector exceptions"""
-    def __init__(self, message, http_code=HTTP_Bad_Request):
-        Exception.__init__(self, message)
-        self.http_code = http_code
-
-
-class OpenflowconnConnectionException(OpenflowconnException):
-    """Connectivity error with the VIM"""
-    def __init__(self, message, http_code=HTTP_Service_Unavailable):
-        OpenflowconnException.__init__(self, message, http_code)
-
-
-class OpenflowconnUnexpectedResponse(OpenflowconnException):
-    """Get an wrong response from VIM"""
-    def __init__(self, message, http_code=HTTP_Internal_Server_Error):
-        OpenflowconnException.__init__(self, message, http_code)
-
-
-class OpenflowconnAuthException(OpenflowconnException):
-    """Invalid credentials or authorization to perform this action over the VIM"""
-    def __init__(self, message, http_code=HTTP_Unauthorized):
-        OpenflowconnException.__init__(self, message, http_code)
-
-
-class OpenflowconnNotFoundException(OpenflowconnException):
-    """The item is not found at VIM"""
-    def __init__(self, message, http_code=HTTP_Not_Found):
-        OpenflowconnException.__init__(self, message, http_code)
-
-
-class OpenflowconnConflictException(OpenflowconnException):
-    """There is a conflict, e.g. more item found than one"""
-    def __init__(self, message, http_code=HTTP_Conflict):
-        OpenflowconnException.__init__(self, message, http_code)
-
-
-class OpenflowconnNotSupportedException(OpenflowconnException):
-    """The request is not supported by connector"""
-    def __init__(self, message, http_code=HTTP_Service_Unavailable):
-        OpenflowconnException.__init__(self, message, http_code)
-
-
-class OpenflowconnNotImplemented(OpenflowconnException):
-    """The method is not implemented by the connected"""
-    def __init__(self, message, http_code=HTTP_Not_Implemented):
-        OpenflowconnException.__init__(self, message, http_code)
-
-
-class OpenflowConn:
-    """
-    Openflow controller connector abstract implementeation.
-    """
-    def __init__(self, params):
-        self.name = "openflow_conector"
-        self.headers = {'content-type': 'application/json', 'Accept': 'application/json'}
-        self.auth = None
-        self.pp2ofi = {}  # From Physical Port to OpenFlow Index
-        self.ofi2pp = {}  # From OpenFlow Index to Physical Port
-        self.dpid = '00:01:02:03:04:05:06:07'
-        self.id = 'openflow:00:01:02:03:04:05:06:07'
-        self.rules = {}
-        self.url = "http://%s:%s" % ('localhost', str(8081))
-        self.auth = base64.b64encode('of_user:of_password')
-        self.headers['Authorization'] = 'Basic ' + self.auth
-        self.logger = logging.getLogger('openflow_conn')
-        self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))
-        self.ip_address = None
-
-    def get_of_switches(self):
-        """"
-        Obtain a a list of switches or DPID detected by this controller
-        :return: list length, and a list where each element a tuple pair (DPID, IP address), text_error: if fails
-        """
-        raise OpenflowconnNotImplemented("Should have implemented this")
-
-    def obtain_port_correspondence(self):
-        """
-        Obtain the correspondence between physical and openflow port names
-        :return: dictionary: with physical name as key, openflow name as value, error_text: if fails
-        """
-        raise OpenflowconnNotImplemented("Should have implemented this")
-
-    def get_of_rules(self, translate_of_ports=True):
-        """
-        Obtain the rules inserted at openflow controller
-        :param translate_of_ports: if True it translates ports from openflow index to physical switch name
-        :return: dict if ok: with the rule name as key and value is another dictionary with the following content:
-                    priority: rule priority
-                    name:         rule name (present also as the master dict key)
-                    ingress_port: match input port of the rule
-                    dst_mac:      match destination mac address of the rule, can be missing or None if not apply
-                    vlan_id:      match vlan tag of the rule, can be missing or None if not apply
-                    actions:      list of actions, composed by a pair tuples:
-                        (vlan, None/int): for stripping/setting a vlan tag
-                        (out, port):      send to this port
-                    switch:       DPID, all
-                 text_error if fails
-        """
-        raise OpenflowconnNotImplemented("Should have implemented this")
-
-    def del_flow(self, flow_name):
-        """
-        Delete all existing rules
-        :param flow_name: flow_name, this is the rule name
-        :return: None if ok, text_error if fails
-        """
-        raise OpenflowconnNotImplemented("Should have implemented this")
-
-    def new_flow(self, data):
-        """
-        Insert a new static rule
-        :param data: dictionary with the following content:
-                priority:     rule priority
-                name:         rule name
-                ingress_port: match input port of the rule
-                dst_mac:      match destination mac address of the rule, missing or None if not apply
-                vlan_id:      match vlan tag of the rule, missing or None if not apply
-                actions:      list of actions, composed by a pair tuples with these posibilities:
-                    ('vlan', None/int): for stripping/setting a vlan tag
-                    ('out', port):      send to this port
-        :return: None if ok, text_error if fails
-        """
-        raise OpenflowconnNotImplemented("Should have implemented this")
-
-    def clear_all_flows(self):
-        """"
-        Delete all existing rules
-        :return: None if ok, text_error if fails
-        """
-        raise OpenflowconnNotImplemented("Should have implemented this")
-
-
-class OfTestConnector(OpenflowConn):
-    """
-    This is a fake openflow connector for testing.
-    It does nothing and it is used for running openvim without an openflow controller
-    """
-
-    def __init__(self, params):
-        OpenflowConn.__init__(self, params)
-
-        name = params.get("name", "test-ofc")
-        self.name = name
-        self.dpid = params.get("dpid")
-        self.rules = {}
-        self.logger = logging.getLogger('vim.OF.TEST')
-        self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))
-        self.pp2ofi = {}
-
-    def get_of_switches(self):
-        return ()
-
-    def obtain_port_correspondence(self):
-        return ()
-
-    def del_flow(self, flow_name):
-        if flow_name in self.rules:
-            self.logger.debug("del_flow OK")
-            del self.rules[flow_name]
-            return None
-        else:
-            self.logger.warning("del_flow not found")
-            raise OpenflowconnUnexpectedResponse("flow {} not found".format(flow_name))
-
-    def new_flow(self, data):
-        self.rules[data["name"]] = data
-        self.logger.debug("new_flow OK")
-        return None
-
-    def get_of_rules(self, translate_of_ports=True):
-        return self.rules
-
-    def clear_all_flows(self):
-        self.logger.debug("clear_all_flows OK")
-        self.rules = {}
-        return None
diff --git a/openflow_thread.py b/openflow_thread.py
deleted file mode 100644 (file)
index cd873e7..0000000
+++ /dev/null
@@ -1,598 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-This thread interacts with a openflow controller to create dataplane connections
-'''
-
-__author__="Pablo Montes, Alfonso Tierno"
-__date__ ="17-jul-2015"
-
-
-#import json
-import threading
-import time
-import Queue
-import requests
-import logging
-import openflow_conn
-
-OFC_STATUS_ACTIVE = 'ACTIVE'
-OFC_STATUS_INACTIVE = 'INACTIVE'
-OFC_STATUS_ERROR = 'ERROR'
-
-class FlowBadFormat(Exception):
-    '''raise when a bad format of flow is found''' 
-
-def change_of2db(flow):
-    '''Change 'flow' dictionary from openflow format to database format
-    Basically the change consist of changing 'flow[actions] from a list of
-    double tuple to a string
-    from [(A,B),(C,D),..] to "A=B,C=D" '''
-    action_str_list=[]
-    if type(flow)!=dict or "actions" not in flow:
-        raise FlowBadFormat("Bad input parameters, expect dictionary with 'actions' as key")
-    try:
-        for action in flow['actions']:
-            action_str_list.append( action[0] + "=" + str(action[1]) )
-        flow['actions'] = ",".join(action_str_list)
-    except:
-        raise FlowBadFormat("Unexpected format at 'actions'")
-
-def change_db2of(flow):
-    '''Change 'flow' dictionary from database format to openflow format
-    Basically the change consist of changing 'flow[actions]' from a string to 
-    a double tuple list
-    from "A=B,C=D,..." to [(A,B),(C,D),..] 
-    raise FlowBadFormat '''
-    actions=[]
-    if type(flow)!=dict or "actions" not in flow or type(flow["actions"])!=str:
-        raise FlowBadFormat("Bad input parameters, expect dictionary with 'actions' as key")
-    action_list = flow['actions'].split(",")
-    for action_item in action_list:
-        action_tuple = action_item.split("=")
-        if len(action_tuple) != 2:
-            raise FlowBadFormat("Expected key=value format at 'actions'")
-        if action_tuple[0].strip().lower()=="vlan":
-            if action_tuple[1].strip().lower() in ("none", "strip"):
-                actions.append( ("vlan",None) )
-            else:
-                try:
-                    actions.append( ("vlan", int(action_tuple[1])) )
-                except:
-                    raise FlowBadFormat("Expected integer after vlan= at 'actions'")
-        elif action_tuple[0].strip().lower()=="out":
-            actions.append( ("out", str(action_tuple[1])) )
-        else:
-            raise FlowBadFormat("Unexpected '%s' at 'actions'"%action_tuple[0])
-    flow['actions'] = actions
-
-
-class openflow_thread(threading.Thread):
-    """
-    This thread interacts with a openflow controller to create dataplane connections
-    """
-    def __init__(self, of_uuid, of_connector, db, db_lock, of_test, pmp_with_same_vlan=False, debug='ERROR'):
-        threading.Thread.__init__(self)
-        self.of_uuid = of_uuid
-        self.db = db
-        self.pmp_with_same_vlan = pmp_with_same_vlan
-        self.name = "openflow"
-        self.test = of_test
-        self.db_lock = db_lock
-        self.OF_connector = of_connector
-        self.logger = logging.getLogger('vim.OF-' + of_uuid)
-        self.logger.setLevel(getattr(logging, debug))
-        self.logger.name = of_connector.name + " " + self.OF_connector.dpid
-        self.queueLock = threading.Lock()
-        self.taskQueue = Queue.Queue(2000)
-        
-    def insert_task(self, task, *aditional):
-        try:
-            self.queueLock.acquire()
-            task = self.taskQueue.put( (task,) + aditional, timeout=5) 
-            self.queueLock.release()
-            return 1, None
-        except Queue.Full:
-            return -1, "timeout inserting a task over openflow thread " + self.name
-
-    def run(self):
-        self.logger.debug("Start openflow thread")
-        self.set_openflow_controller_status(OFC_STATUS_ACTIVE)
-
-        while True:
-            try:
-                self.queueLock.acquire()
-                if not self.taskQueue.empty():
-                    task = self.taskQueue.get()
-                else:
-                    task = None
-                self.queueLock.release()
-
-                if task is None:
-                    time.sleep(1)
-                    continue
-
-                if task[0] == 'update-net':
-                    r,c = self.update_of_flows(task[1])
-                    # update database status
-                    if r<0:
-                        UPDATE={'status':'ERROR', 'last_error': str(c)}
-                        self.logger.error("processing task 'update-net' %s: %s", str(task[1]), c)
-                        self.set_openflow_controller_status(OFC_STATUS_ERROR, "Error updating net {}".format(task[1]))
-                    else:
-                        UPDATE={'status':'ACTIVE', 'last_error': None}
-                        self.logger.debug("processing task 'update-net' %s: OK", str(task[1]))
-                        self.set_openflow_controller_status(OFC_STATUS_ACTIVE)
-                    self.db_lock.acquire()
-                    self.db.update_rows('nets', UPDATE, WHERE={'uuid': task[1]})
-                    self.db_lock.release()
-
-                elif task[0] == 'clear-all':
-                    r,c = self.clear_all_flows()
-                    if r<0:
-                        self.logger.error("processing task 'clear-all': %s", c)
-                        self.set_openflow_controller_status(OFC_STATUS_ERROR, "Error deleting all flows")
-                    else:
-                        self.set_openflow_controller_status(OFC_STATUS_ACTIVE)
-                        self.logger.debug("processing task 'clear-all': OK")
-                elif task[0] == 'exit':
-                    self.logger.debug("exit from openflow_thread")
-                    self.terminate()
-                    self.set_openflow_controller_status(OFC_STATUS_INACTIVE, "Ofc with thread killed")
-                    return 0
-                else:
-                    self.logger.error("unknown task %s", str(task))
-            except openflow_conn.OpenflowconnException as e:
-                self.set_openflow_controller_status(OFC_STATUS_ERROR, str(e))
-
-    def terminate(self):
-        pass
-        # print self.name, ": exit from openflow_thread"
-
-    def update_of_flows(self, net_id):
-        ports=()
-        self.db_lock.acquire()
-        select_= ('type','admin_state_up', 'vlan', 'provider', 'bind_net','bind_type','uuid')
-        result, nets = self.db.get_table(FROM='nets', SELECT=select_, WHERE={'uuid':net_id} )
-        #get all the networks binding to this
-        if result > 0:
-            if nets[0]['bind_net']:
-                bind_id = nets[0]['bind_net']
-            else:
-                bind_id = net_id
-            #get our net and all bind_nets
-            result, nets = self.db.get_table(FROM='nets', SELECT=select_,
-                                                WHERE_OR={'bind_net':bind_id, 'uuid':bind_id} )
-            
-        self.db_lock.release()
-        if result < 0:
-            return -1, "DB error getting net: " + nets
-        #elif result==0:
-            #net has been deleted
-        ifaces_nb = 0
-        database_flows = []
-        for net in nets:
-            net_id = net["uuid"]
-            if net['admin_state_up'] == 'false':
-                net['ports'] = ()
-            else:
-                self.db_lock.acquire()
-                nb_ports, net_ports = self.db.get_table(
-                        FROM='ports',
-                        SELECT=('switch_port','vlan','uuid','mac','type','model'),
-                        WHERE={'net_id':net_id, 'admin_state_up':'true', 'status':'ACTIVE'} )
-                self.db_lock.release()
-                if nb_ports < 0:
-
-                    #print self.name, ": update_of_flows() ERROR getting ports", ports
-                    return -1, "DB error getting ports from net '%s': %s" % (net_id, net_ports)
-                
-                #add the binding as an external port
-                if net['provider'] and net['provider'][:9]=="openflow:":
-                    external_port={"type":"external","mac":None}
-                    external_port['uuid'] = net_id + ".1" #fake uuid
-                    if net['provider'][-5:]==":vlan":
-                        external_port["vlan"] = net["vlan"]
-                        external_port["switch_port"] = net['provider'][9:-5]
-                    else:
-                        external_port["vlan"] = None
-                        external_port["switch_port"] = net['provider'][9:]
-                    net_ports = net_ports + (external_port,)
-                    nb_ports += 1
-                net['ports'] = net_ports
-                ifaces_nb += nb_ports
-        
-            # Get the name of flows that will be affected by this NET 
-            self.db_lock.acquire()
-            result, database_net_flows = self.db.get_table(FROM='of_flows', WHERE={'net_id':net_id})
-            self.db_lock.release()
-            if result < 0:
-                error_msg = "DB error getting flows from net '{}': {}".format(net_id, database_net_flows)
-                # print self.name, ": update_of_flows() ERROR getting flows from database", database_flows
-                return -1, error_msg
-            database_flows += database_net_flows
-        # Get the name of flows where net_id==NULL that means net deleted (At DB foreign key: On delete set null)
-        self.db_lock.acquire()
-        result, database_net_flows = self.db.get_table(FROM='of_flows', WHERE={'net_id':None})
-        self.db_lock.release()
-        if result < 0:
-            error_msg = "DB error getting flows from net 'null': {}".format(database_net_flows)
-            # print self.name, ": update_of_flows() ERROR getting flows from database", database_flows
-            return -1, error_msg
-        database_flows += database_net_flows
-
-        # Get the existing flows at openflow controller
-        try:
-            of_flows = self.OF_connector.get_of_rules()
-            # print self.name, ": update_of_flows() ERROR getting flows from controller", of_flows
-        except openflow_conn.OpenflowconnException as e:
-            # self.set_openflow_controller_status(OFC_STATUS_ERROR, "OF error {} getting flows".format(str(e)))
-            return -1, "OF error {} getting flows".format(str(e))
-
-        if ifaces_nb < 2:
-            pass
-        elif net['type'] == 'ptp':
-            if ifaces_nb > 2:
-                #print self.name, 'Error, network '+str(net_id)+' has been defined as ptp but it has '+\
-                #                 str(ifaces_nb)+' interfaces.'
-                return -1, "'ptp' type network cannot connect %d interfaces, only 2" % ifaces_nb
-        elif net['type'] == 'data':
-            if ifaces_nb > 2 and self.pmp_with_same_vlan:
-                # check all ports are VLAN (tagged) or none
-                vlan_tag = None
-                for port in ports:
-                    if port["type"]=="external":
-                        if port["vlan"] != None:
-                            if port["vlan"]!=net["vlan"]:
-                                text="External port vlan-tag and net vlan-tag must be the same when flag 'of_controller_nets_with_same_vlan' is True"
-                                #print self.name, "Error", text
-                                return -1, text
-                            if vlan_tag == None:
-                                vlan_tag=True
-                            elif vlan_tag==False:
-                                text="Passthrough and external port vlan-tagged cannot be connected when flag 'of_controller_nets_with_same_vlan' is True"
-                                #print self.name, "Error", text
-                                return -1, text
-                        else:
-                            if vlan_tag == None:
-                                vlan_tag=False
-                            elif vlan_tag == True:
-                                text="SR-IOV and external port not vlan-tagged cannot be connected when flag 'of_controller_nets_with_same_vlan' is True"
-                                #print self.name, "Error", text
-                                return -1, text
-                    elif port["model"]=="PF" or port["model"]=="VFnotShared":
-                        if vlan_tag == None:
-                            vlan_tag=False
-                        elif vlan_tag==True:
-                            text="Passthrough and SR-IOV ports cannot be connected when flag 'of_controller_nets_with_same_vlan' is True"
-                            #print self.name, "Error", text
-                            return -1, text
-                    elif port["model"] == "VF":
-                        if vlan_tag == None:
-                            vlan_tag=True
-                        elif vlan_tag==False:
-                            text="Passthrough and SR-IOV ports cannot be connected when flag 'of_controller_nets_with_same_vlan' is True"
-                            #print self.name, "Error", text
-                            return -1, text
-        else:
-            return -1, 'Only ptp and data networks are supported for openflow'
-            
-        # calculate new flows to be inserted
-        result, new_flows = self._compute_net_flows(nets)
-        if result < 0:
-            return result, new_flows
-
-        #modify database flows format and get the used names
-        used_names=[]
-        for flow in database_flows:
-            try:
-                change_db2of(flow)
-            except FlowBadFormat as e:
-                self.logger.error("Exception FlowBadFormat: '%s', flow: '%s'",str(e), str(flow))
-                continue
-            used_names.append(flow['name'])
-        name_index=0
-        # insert at database the new flows, change actions to human text
-        for flow in new_flows:
-            # 1 check if an equal flow is already present
-            index = self._check_flow_already_present(flow, database_flows)
-            if index>=0:
-                database_flows[index]["not delete"]=True
-                self.logger.debug("Skipping already present flow %s", str(flow))
-                continue
-            # 2 look for a non used name
-            flow_name=flow["net_id"]+"."+str(name_index)
-            while flow_name in used_names or flow_name in of_flows:         
-                name_index += 1   
-                flow_name=flow["net_id"]+"."+str(name_index)
-            used_names.append(flow_name)
-            flow['name'] = flow_name
-            # 3 insert at openflow
-
-            try:
-                self.OF_connector.new_flow(flow)
-            except openflow_conn.OpenflowconnException as e:
-                return -1, "Error creating new flow {}".format(str(e))
-
-            # 4 insert at database
-            try:
-                change_of2db(flow)
-            except FlowBadFormat as e:
-                # print self.name, ": Error Exception FlowBadFormat '%s'" % str(e), flow
-                return -1, str(e)
-            self.db_lock.acquire()
-            result, content = self.db.new_row('of_flows', flow)
-            self.db_lock.release()
-            if result < 0:
-                # print self.name, ": Error '%s' at database insertion" % content, flow
-                return -1, content
-
-        #delete not needed old flows from openflow and from DDBB, 
-        #check that the needed flows at DDBB are present in controller or insert them otherwise
-        for flow in database_flows:
-            if "not delete" in flow:
-                if flow["name"] not in of_flows:
-                    # not in controller, insert it
-                    try:
-                        self.OF_connector.new_flow(flow)
-                    except openflow_conn.OpenflowconnException as e:
-                        return -1, "Error creating new flow {}".format(str(e))
-
-                continue
-            # Delete flow
-            if flow["name"] in of_flows:
-                try:
-                    self.OF_connector.del_flow(flow['name'])
-                except openflow_conn.OpenflowconnException as e:
-                    self.logger.error("cannot delete flow '%s' from OF: %s", flow['name'], str(e))
-                    # skip deletion from database
-                    continue
-
-            # delete from database
-            self.db_lock.acquire()
-            result, content = self.db.delete_row_by_key('of_flows', 'id', flow['id'])
-            self.db_lock.release()
-            if result<0:
-                self.logger.error("cannot delete flow '%s' from DB: %s", flow['name'], content )
-        
-        return 0, 'Success'
-
-    def clear_all_flows(self):
-        try:
-            if not self.test:
-                self.OF_connector.clear_all_flows()
-
-            # remove from database
-            self.db_lock.acquire()
-            self.db.delete_row_by_key('of_flows', None, None) #this will delete all lines
-            self.db_lock.release()
-            return 0, None
-        except openflow_conn.OpenflowconnException as e:
-            return -1, self.logger.error("Error deleting all flows {}", str(e))
-
-    flow_fields = ('priority', 'vlan', 'ingress_port', 'actions', 'dst_mac', 'src_mac', 'net_id')
-
-    def _check_flow_already_present(self, new_flow, flow_list):
-        '''check if the same flow is already present in the flow list
-        The flow is repeated if all the fields, apart from name, are equal
-        Return the index of matching flow, -1 if not match'''
-        index=0
-        for flow in flow_list:
-            equal=True
-            for f in self.flow_fields:
-                if flow.get(f) != new_flow.get(f):
-                    equal=False
-                    break
-            if equal:
-                return index
-            index += 1
-        return -1
-        
    def _compute_net_flows(self, nets):
        """Compute the full list of openflow rules needed to interconnect the
        ports of the given networks (a network together with its bound nets).

        :param nets: list of network dicts; each carries 'uuid', 'bind_net',
            optionally 'bind_type' ("vlan:<tag>"), and a 'ports' sequence whose
            items have 'uuid', 'switch_port', 'vlan' and 'mac'
        :return: (0, flow_list) on success or (-1, error_text) on failure. Each
            flow is a dict with 'priority', 'net_id', 'ingress_port', optional
            'vlan_id'/'dst_mac', and 'actions' as a list of
            ('vlan', tag_or_None) / ('out', switch_port) tuples.
        """
        new_flows=[]
        new_broadcast_flows={}
        nb_ports = 0

        # Check switch_port information is right
        self.logger.debug("_compute_net_flows nets: %s", str(nets))
        for net in nets:
            for port in net['ports']:
                nb_ports += 1
                if not self.test and str(port['switch_port']) not in self.OF_connector.pp2ofi:
                    error_text= "switch port name '%s' is not valid for the openflow controller" % str(port['switch_port'])
                    # print self.name, ": ERROR " + error_text
                    return -1, error_text

        # Build unicast (and collect broadcast) rules for every ordered pair of
        # networks that are either the same net or bound to each other.
        for net_src in nets:
            net_id = net_src["uuid"]
            for net_dst in nets:
                vlan_net_in  = None
                vlan_net_out = None
                if net_src == net_dst:
                    #intra net rules    
                    priority = 1000
                elif net_src['bind_net'] == net_dst['uuid']:
                    # traffic leaving net_src towards its bound net may need a vlan push
                    if net_src.get('bind_type') and net_src['bind_type'][0:5] == "vlan:":
                        vlan_net_out = int(net_src['bind_type'][5:])
                    priority = 1100
                elif net_dst['bind_net'] == net_src['uuid']:
                    # traffic entering from the bound net may arrive vlan-tagged
                    if net_dst.get('bind_type') and net_dst['bind_type'][0:5] == "vlan:":
                        vlan_net_in = int(net_dst['bind_type'][5:])
                    priority = 1100
                else:
                    #nets not binding
                    continue
                for src_port in net_src['ports']:
                    vlan_in  = vlan_net_in
                    if vlan_in == None  and src_port['vlan'] != None:
                        vlan_in  = src_port['vlan']
                    elif vlan_in != None  and src_port['vlan'] != None:
                        #TODO this is something that we cannot do. It requires a double VLAN check
                        #outer VLAN should be src_port['vlan'] and inner VLAN should be vlan_in
                        continue

                    # BROADCAST:
                    # one broadcast flow per (ingress port, ingress vlan); its
                    # actions are accumulated while visiting the dst ports below
                    broadcast_key = src_port['uuid'] + "." + str(vlan_in)
                    if broadcast_key in new_broadcast_flows:
                        flow_broadcast = new_broadcast_flows[broadcast_key]
                    else:
                        flow_broadcast = {'priority': priority,
                            'net_id':  net_id,
                            'dst_mac': 'ff:ff:ff:ff:ff:ff',
                            "ingress_port": str(src_port['switch_port']),
                            'actions': [] 
                        }
                        new_broadcast_flows[broadcast_key] = flow_broadcast
                        if vlan_in is not None:
                            flow_broadcast['vlan_id'] = str(vlan_in)

                    for dst_port in net_dst['ports']:
                        vlan_out = vlan_net_out 
                        if vlan_out == None and dst_port['vlan'] != None:
                            vlan_out = dst_port['vlan']
                        elif vlan_out != None and dst_port['vlan'] != None:
                            #TODO this is something that we cannot do. It requires a double VLAN set
                            #outer VLAN should be dst_port['vlan'] and inner VLAN should be vlan_out
                            continue
                        #if src_port == dst_port:
                        #    continue
                        # skip hairpin to the very same port+vlan
                        if src_port['switch_port'] == dst_port['switch_port'] and vlan_in == vlan_out:
                            continue
                        flow = {
                            "priority": priority,
                            'net_id':  net_id,
                            "ingress_port": str(src_port['switch_port']),
                            'actions': []
                        }
                        if vlan_in is not None:
                            flow['vlan_id'] = str(vlan_in)
                        # allow that one port have no mac
                        if dst_port['mac'] is None or nb_ports==2:  # point to point or nets with 2 elements
                            flow['priority'] = priority-5  # less priority
                        else:
                            flow['dst_mac'] = str(dst_port['mac'])
            
                        # ('vlan', None) pops the tag; ('vlan', tag) sets it
                        if vlan_out == None:
                            if vlan_in != None:
                                flow['actions'].append( ('vlan',None) )
                        else:
                            flow['actions'].append( ('vlan', vlan_out ) )
                        flow['actions'].append( ('out', str(dst_port['switch_port'])) )
            
                        if self._check_flow_already_present(flow, new_flows) >= 0:
                            self.logger.debug("Skipping repeated flow '%s'", str(flow))
                            continue
                        
                        new_flows.append(flow)
                    
                        # BROADCAST:
                        if nb_ports <= 2:  # point to multipoint or nets with more than 2 elements
                            continue
                        out = (vlan_out, str(dst_port['switch_port']))
                        if out not in flow_broadcast['actions']:
                            flow_broadcast['actions'].append( out )

        #BROADCAST
        # Turn the accumulated (vlan, port) pairs into an ordered action list,
        # emitting a ('vlan', x) action only when the tag changes between outputs.
        for flow_broadcast in new_broadcast_flows.values():      
            if len(flow_broadcast['actions'])==0:
                continue #nothing to do, skip
            flow_broadcast['actions'].sort()
            if 'vlan_id' in flow_broadcast:
                previous_vlan = 0  # indicates that a packet contains a vlan, and the vlan
            else:
                previous_vlan = None
            final_actions=[]
            action_number = 0
            for action in flow_broadcast['actions']:
                if action[0] != previous_vlan:
                    final_actions.append( ('vlan', action[0]) )
                    previous_vlan = action[0]
                    if self.pmp_with_same_vlan and action_number:
                        return -1, "Cannot interconnect different vlan tags in a network when flag 'of_controller_nets_with_same_vlan' is True."
                    action_number += 1
                final_actions.append( ('out', action[1]) )
            flow_broadcast['actions'] = final_actions

            if self._check_flow_already_present(flow_broadcast, new_flows) >= 0:
                self.logger.debug("Skipping repeated flow '%s'", str(flow_broadcast))
                continue
            
            new_flows.append(flow_broadcast)        
        
        #UNIFY openflow rules with the same input port and vlan and the same output actions
        #These flows differ at the dst_mac; and they are unified by not filtering by dst_mac
        #this can happen if there is only two ports. It is converted to a point to point connection
        flow_dict={} # use as key vlan_id+ingress_port and as value the list of flows matching these values
        for flow in new_flows:
            key = str(flow.get("vlan_id"))+":"+flow["ingress_port"]
            if key in flow_dict:
                flow_dict[key].append(flow)
            else:
                flow_dict[key]=[ flow ]
        new_flows2=[]
        for flow_list in flow_dict.values():
            convert2ptp=False
            if len (flow_list)>=2:
                convert2ptp=True
                for f in flow_list:
                    if f['actions'] != flow_list[0]['actions']:
                        convert2ptp=False
                        break
            if convert2ptp: # add only one unified rule without dst_mac
                self.logger.debug("Convert flow rules to NON mac dst_address " + str(flow_list) )
                flow_list[0].pop('dst_mac')
                flow_list[0]["priority"] -= 5
                new_flows2.append(flow_list[0])
            else:  # add all the rules
                new_flows2 += flow_list
        return 0, new_flows2
-
-    def set_openflow_controller_status(self, status, error_text=None):
-        """
-        Set openflow controller last operation status in DB
-        :param status: ofc status ('ACTIVE','INACTIVE','ERROR')
-        :param error_text: error text
-        :return:
-        """
-        if self.of_uuid == "Default":
-            return True
-
-        ofc = {}
-        ofc['status'] = status
-        ofc['last_error'] = error_text
-        self.db_lock.acquire()
-        result, content = self.db.update_rows('ofcs', ofc, WHERE={'uuid': self.of_uuid}, log=False)
-        self.db_lock.release()
-        if result >= 0:
-            return True
-        else:
-            return False
-
-
-
-
-
-
-
diff --git a/openvimd b/openvimd
new file mode 100755 (executable)
index 0000000..0578ba5
--- /dev/null
+++ b/openvimd
@@ -0,0 +1,279 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+This is the main program of openvim, it reads the configuration 
+and launches the rest of threads: http clients, openflow controller
+and host controllers  
+'''
+
+__author__ = "Alfonso Tierno"
+__date__ = "$10-jul-2014 12:07:15$"
+
# stdlib imports
import sys
import getopt
import time
import os
import socket
import logging
import logging.handlers as log_handlers

# third-party imports
import yaml
from jsonschema import validate as js_v, exceptions as js_e

# local package imports
import osm_openvim.httpserver as httpserver
import osm_openvim.auxiliary_functions as af
import osm_openvim.ovim as ovim
# Fix: vim_schema now lives inside the osm_openvim package (this commit moved
# it there); the bare "from vim_schema import ..." only worked when running
# from the old source tree layout and breaks on an installed package.
from osm_openvim.vim_schema import config_schema

# NOTE: the original "global config_dic"/"global logger" statements were
# dropped: "global" only has an effect inside a function and is a no-op at
# module level.
logger = logging.getLogger('vim')
+
+
class LoadConfigurationException(Exception):
    """Raised when openvimd cannot load or apply its configuration."""
+
def load_configuration(configuration_file):
    """Read, parse and validate the openvimd YAML configuration file.

    The content is validated against vim_schema.config_schema and missing
    optional tokens are filled in with defaults.

    :param configuration_file: path of the YAML configuration file
    :return: (True, config_dict) on success, (False, error_text) on failure
    """
    default_tokens = {'http_port': 9080,
                      'http_host': 'localhost',
                      'of_controller_nets_with_same_vlan': True,
                      'image_path': '/opt/VNF/images',
                      'network_vlan_range_start': 1000,
                      'network_vlan_range_end': 4096,
                      'log_level': "DEBUG",
                      'log_level_db': "ERROR",
                      'log_level_of': 'ERROR',
                      'bridge_ifaces': {},
                      'network_type': 'ovs',
                      'ovs_controller_user': 'osm_dhcp',
                      'ovs_controller_file_path': '/var/lib/',
                      }
    try:
        # First load configuration from configuration file
        # Check config file exists
        if not os.path.isfile(configuration_file):
            return False, "Configuration file '" + configuration_file + "' does not exist"

        # Read and parse file
        (return_status, code) = af.read_file(configuration_file)
        if not return_status:
            return return_status, "Error loading configuration file '" + configuration_file + "': " + code
        try:
            # NOTE(review): yaml.load can instantiate arbitrary python objects.
            # The config file is operator-provided, but consider yaml.safe_load
            # if it could ever come from an untrusted source.
            config = yaml.load(code)
        except yaml.YAMLError as exc:
            error_pos = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                error_pos = " at position: (%s:%s)" % (mark.line + 1, mark.column + 1)
            return (False, "Error loading configuration file '" + configuration_file + "'" + error_pos +
                    ": content format error: Failed to parse yaml format")

        # Validate against the configuration json-schema
        try:
            js_v(config, config_schema)
        except js_e.ValidationError as exc:
            error_pos = ""
            if len(exc.path) > 0:
                error_pos = " at '" + ":".join(map(str, exc.path)) + "'"
            return False, "Error loading configuration file '" + configuration_file + "'" + error_pos + ": " + exc.message

        # Fill in the default value of absent tokens
        for k, v in default_tokens.items():
            if k not in config:
                config[k] = v
        # Check vlan range has at least 10 usable elements
        if config["network_vlan_range_start"] + 10 >= config["network_vlan_range_end"]:
            return False, "Error invalid network_vlan_range less than 10 elements"

    except Exception as e:
        return False, "Error loading configuration file '" + configuration_file + "': " + str(e)
    return True, config
+
def usage():
    """Print command line help for openvimd."""
    print("Usage: " + sys.argv[0] + " [options]")
    print("      -v|--version: prints current version")
    print("      -c|--config FILE: loads the configuration file (default: openvimd.cfg)")
    print("      -h|--help: shows this help")
    # Fix: the default http port is 9080 (see load_configuration defaults), not 908
    print("      -p|--port PORT: changes port number and overrides the port number in the configuration file (default: 9080)")
    print("      -P|--adminport PORT: changes admin port number and overrides the port number in the configuration file (default: not listen)")
    print("      --dbname NAME: changes db_name and overrides the db_name in the configuration file")
    # print("      --log-socket-host HOST: send logs to this host")
    # print("      --log-socket-port PORT: send logs using this port (default: 9022)")
    print("      --log-file FILE: send logs to this file")
    return
+
+
if __name__ == "__main__":
    # Entry point: parse the command line, load the configuration, start the
    # ovim engine and the http server thread(s), then wait on a tiny console
    # until the operator types 'exit'.
    hostname = socket.gethostname()
    # complete formatter, kept for when logs are sent to a central collector
    log_formatter_complete = logging.Formatter(
        '%(asctime)s.%(msecs)03d00Z[{host}@openmanod] %(filename)s:%(lineno)s severity:%(levelname)s logger:%(name)s log:%(message)s'.format(host=hostname),
        datefmt='%Y-%m-%dT%H:%M:%S',
    )
    log_format_simple = "%(asctime)s %(levelname)s  %(name)s %(filename)s:%(lineno)s %(message)s"
    log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S')
    logging.basicConfig(format=log_format_simple, level=logging.DEBUG)
    logger = logging.getLogger('openvim')
    logger.setLevel(logging.DEBUG)
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hvc:p:P:",
                                   ["config=", "help", "version", "port=", "adminport=", "log-file=", "dbname="])
    except getopt.GetoptError as err:
        # will print something like "option -a not recognized"
        logger.error("%s. Type -h for help", err)
        sys.exit(-2)

    port = None
    port_admin = None
    config_file = 'openvimd.cfg'
    log_file = None
    db_name = None

    for o, a in opts:
        if o in ("-v", "--version"):
            print("openvimd version " + ovim.ovim.get_version() + " " + ovim.ovim.get_version_date())
            print("(c) Copyright Telefonica")
            sys.exit(0)
        elif o in ("-h", "--help"):
            usage()
            sys.exit(0)
        elif o in ("-c", "--config"):
            config_file = a
        elif o in ("-p", "--port"):
            port = a
        elif o in ("-P", "--adminport"):
            port_admin = a
        elif o == "--dbname":
            # Fix: this branch previously also matched "-P" (copy-paste from
            # the adminport option above), so "-P" could never reach here and
            # the duplicate short option was misleading.
            db_name = a
        elif o == "--log-file":
            log_file = a
        else:
            assert False, "Unhandled option"

    engine = None
    http_thread = None
    http_thread_admin = None

    try:
        # Load configuration file
        r, config_dic = load_configuration(config_file)
        if not r:
            logger.error(config_dic)
            config_dic = {}
            exit(-1)
        if log_file:
            try:
                file_handler = log_handlers.RotatingFileHandler(log_file, maxBytes=100e6, backupCount=9, delay=0)
                file_handler.setFormatter(log_formatter_simple)
                logger.addHandler(file_handler)
                # remove initial stream handler so logs go only to the file
                logging.root.removeHandler(logging.root.handlers[0])
                print("logging on '{}'".format(log_file))
            except IOError as e:
                raise LoadConfigurationException(
                    "Cannot open logging file '{}': {}. Check folder exist and permissions".format(log_file, str(e)))

        logger.setLevel(getattr(logging, config_dic['log_level']))
        logger.critical("Starting openvim server command: '%s'", sys.argv[0])
        # override parameters obtained by command line
        if port:
            config_dic['http_port'] = port
        if port_admin:
            config_dic['http_admin_port'] = port_admin
        if db_name:
            config_dic['db_name'] = db_name

        # check mode
        if 'mode' not in config_dic:
            config_dic['mode'] = 'normal'
            # allow backward compatibility of test_mode option
            if 'test_mode' in config_dic and config_dic['test_mode'] == True:
                config_dic['mode'] = 'test'
        if config_dic['mode'] == 'development' and config_dic['network_type'] == 'bridge' and \
                ('development_bridge' not in config_dic or
                 config_dic['development_bridge'] not in config_dic.get("bridge_ifaces", None)):
            logger.error("'%s' is not a valid 'development_bridge', not one of the 'bridge_ifaces'", config_file)
            exit(-1)

        if config_dic['mode'] != 'normal':
            print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
            print("!! Warning, openvimd in TEST mode '%s'" % config_dic['mode'])
            print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        config_dic['version'] = ovim.ovim.get_version()
        config_dic["logger_name"] = "openvim"

        engine = ovim.ovim(config_dic)
        engine.start_service()

        # Create thread to listen to web requests
        http_thread = httpserver.httpserver(engine, 'http', config_dic['http_host'], config_dic['http_port'],
                                            False, config_dic)
        http_thread.start()

        if 'http_admin_port' in config_dic:
            # a second engine/server pair serves the privileged admin API
            engine2 = ovim.ovim(config_dic)
            http_thread_admin = httpserver.httpserver(engine2, 'http-admin', config_dic['http_host'],
                                                      config_dic['http_admin_port'], True)
            http_thread_admin.start()
        else:
            http_thread_admin = None
        time.sleep(1)
        logger.info('Waiting for http clients')
        print('openvimd ready')
        print('====================')
        sys.stdout.flush()

        # TODO: Interactive console would be nice here instead of join or sleep

        r = "help"  # force print help at the beginning
        while True:
            if r == 'exit':
                break
            elif r != '':
                print("type 'exit' for terminate")
            r = raw_input('> ')

    except (KeyboardInterrupt, SystemExit):
        # Fix: a second, unreachable "except SystemExit" branch was removed;
        # SystemExit is already caught here.
        pass
    except getopt.GetoptError as e:
        logger.critical(str(e))
        exit(-1)
    except LoadConfigurationException as e:
        logger.critical(str(e))
        exit(-1)
    except ovim.ovimException as e:
        logger.critical(str(e))
        exit(-1)

    # orderly shutdown of the engine and server threads
    logger.info('Exiting openvimd')
    if engine:
        engine.stop_service()
    if http_thread:
        http_thread.join(1)
    if http_thread_admin:
        http_thread_admin.join(1)

    logger.debug("bye!")
    exit()
diff --git a/openvimd.cfg b/openvimd.cfg
deleted file mode 100644 (file)
index 9641938..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-
-
-#Miscellaneous
-#Option to test openvim without the needed infrastructure, possible values are
-#    "normal"      by default, Openflow controller (OFC), switch and real host are needed
-#    "test"        Used for testing http API and database without connecting to host or to OFC
-#    "host only"   Used when neither OFC nor OF switch are provided. 
-#                  Dataplane network connection must be done manually.
-#    "OF only"     Used for testing of new openflow controllers support. No real VM deployments will be done but
-#                  OFC will be used as in real mode
-#    "development" Forced a cloud-type deployment, nomal memory instead of hugepages is used, 
-#                  without cpu pinning, and using a bridge network instead of a real OFC dataplane networks.
-#                  The same 'development_bridge' (see below) is used for all dataplane networks
-mode: test
-
-#Openflow controller information
-of_controller:      floodlight                   # Type of controller to be used.
-                                                 # Valid controllers are 'opendaylight', 'floodlight' or <custom>
-#of_controller_module: module                    # Only needed for <custom>.  Python module that implement
-                                                 # this controller. By default a file with the name  <custom>.py is used 
-#of_<other>:           value                     # Other parameters required by <custom> controller. Consumed by __init__
-of_user:            user credentials             # User credentials for the controller if needed
-of_password:        passwd credentials           # Password credentials for the controller if needed
-of_controller_ip:   127.0.0.1                    # IP address where the Openflow controller is listening
-of_controller_port: 7070                         # TCP port where the Openflow controller is listening (REST API server)
-of_controller_dpid: '00:01:02:03:04:05:06:07'    # Openflow Switch identifier (put here the right number)
-
-#This option is used for those openflow switch that cannot deliver one packet to several output with different vlan tags
-#When set to true, it fails when trying to attach different vlan tagged ports to the same net
-of_controller_nets_with_same_vlan: false         # (by default, true)
-
-#Server parameters
-http_host:       0.0.0.0             # IP address where openvim is listening (by default, localhost)
-http_port:       9080                # General port where openvim is listening (by default, 9080)
-http_admin_port: 9085                # Admin port where openvim is listening (when missing, no administration server is launched)
-
-#database parameters
-db_host:   localhost                   # by default localhost
-db_user:   vim                       # DB user
-db_passwd: vimpw                     # DB password
-db_name:   vim_db                    # Name of the VIM DB
-
-#host paremeters
-image_path: "/opt/VNF/images"        # Folder, same for every host, where the VNF images will be copied
-
-#testing parameters (used by ./test/test_openvim.py)
-tenant_id: fc7b43b6-6bfa-11e4-84d2-5254006d6777   # Default tenant identifier for testing
-
-#VLAN ranges used for the dataplane networks (ptp, data)
-#When a network is created an unused value in this range is used
-network_vlan_range_start: 3000
-network_vlan_range_end:   4000
-
-# Overlay network implementation. Options are:
-# - ovs :   (by default) Use a vlxan mesh between computes to handle the network overlay.
-# - bridge: Use pre-populated linux bridges with L2 conectivity at compte nodes.
-network_type : ovs
-ovs_controller_ip   :   localhost                   # dhcp controller IP address, must be change in order to
-ovs_controller_user :   "osm_dhcp"                  # User for the dchp controller for OVS networks
-ovs_controller_file_path  :   "/var/lib/openvim"    # Path for dhcp daemon configuration, by default '/var/lib/openvim'
-
-
-#host bridge interfaces for networks
-# Apply only for 'network_type: bridge'
-# Indicates the bridges at compute nodes to be used for the overlay networks
-# Bridge networks need to be pre-provisioned on each host and Openvim uses those pre-provisioned bridge networks.
-# Openvim assumes that the following bridge interfaces have been created on each host, appropriately associated to a physical port.
-# The following information needs to be provided:
-#    - Name of the bridge (identical in all hosts)
-#    - VLAN tag associated to each bridge interface
-#    - The speed of the physical port in Gbps, where that bridge interface was created
-# For instance, next example assumes that 10 bridges have been created on each host
-# using vlans 2001 to 2010, associated to a 1Gbps physical port 
-#bridge_ifaces:
-#   #name:      [vlan, speed in Gbps]
-#   virbrMan1:  [2001, 1]
-#   virbrMan2:  [2002, 1]
-#   virbrMan3:  [2003, 1]
-#   virbrMan4:  [2004, 1]
-#   virbrMan5:  [2005, 1]
-#   virbrMan6:  [2006, 1]
-#   virbrMan7:  [2007, 1]
-#   virbrMan8:  [2008, 1]
-#   virbrMan9:  [2009, 1]
-#   virbrMan10: [2010, 1]
-
-#Used only when 'mode' is at development'. Indicates which 'bridge_ifaces' is used for dataplane networks
-#development_bridge: virbrMan10
-
-#DHCP SERVER PARAMETERS. 
-#In case some of the previous 'bridge_ifaces' are connected to an EXTERNAL dhcp server, provide 
-#   the server parameters to allow openvim getting the allocated IP addresses of virtual machines
-#   connected to the indicated 'bridge_ifaces' and or 'nets'. Openvim will connect to the dhcp server by ssh.
-#DHCP server must contain a shell script "get_dhcp_lease.sh" included in the path, that accepts a mac address as 
-#   parameter and return empty or the allocated IP address. See an example at the end of the file 
-#   ./openvim/dhcp_thread.py 
-#COMMENT all lines in case you do not have a DHCP server in 'normal', 'development'  or 'host only' modes.
-#   For 'test' or 'OF only' modes you can leave then uncommented, because in these modes fake IP 
-#   address are generated instead of connecting with a real DHCP server.
-dhcp_server:
-   host:     host-ip-or-name  
-   #port:     22               #ssh port, by default 22
-   provider: isc-dhcp-server  #dhcp-server type
-   user:     user
-   #provide password, or key if needed
-   password: passwd           
-   #key:     ssh-access-key
-   #list of the previous bridge interfaces attached to this dhcp server
-   bridge_ifaces:   [ virbrMan1, virbrMan2 ] 
-   #list of the networks attached to this dhcp server
-   nets: [default]
-
-
-#logging parameters       # DEBUG, INFO, WARNING, ERROR, CRITICAL
-log_level:       ERROR
-log_level_db:    DEBUG
-log_level_of:    DEBUG
-
-
diff --git a/openvimd.py b/openvimd.py
deleted file mode 100755 (executable)
index b9d9d82..0000000
+++ /dev/null
@@ -1,278 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-This is the main program of openvim, it reads the configuration 
-and launches the rest of threads: http clients, openflow controller
-and host controllers  
-'''
-
-__author__ = "Alfonso Tierno"
-__date__ = "$10-jul-2014 12:07:15$"
-
-
-import httpserver
-import auxiliary_functions as af
-import sys
-import getopt
-import time
-import yaml
-import os
-from jsonschema import validate as js_v, exceptions as js_e
-from vim_schema import config_schema
-import logging
-import logging.handlers as log_handlers
-import socket
-import ovim
-
-global config_dic
-global logger
-logger = logging.getLogger('vim')
-
-class LoadConfigurationException(Exception):
-    pass
-
-def load_configuration(configuration_file):
-    default_tokens ={'http_port':9080, 'http_host':'localhost', 
-                     'of_controller_nets_with_same_vlan':True,
-                     'image_path':'/opt/VNF/images',
-                     'network_vlan_range_start':1000,
-                     'network_vlan_range_end': 4096,
-                     'log_level': "DEBUG",
-                     'log_level_db': "ERROR",
-                     'log_level_of': 'ERROR',
-                     'bridge_ifaces': {},
-                     'network_type': 'ovs',
-                     'ovs_controller_user': 'osm_dhcp',
-                     'ovs_controller_file_path': '/var/lib/',
-            }
-    try:
-        #First load configuration from configuration file
-        #Check config file exists
-        if not os.path.isfile(configuration_file):
-            return (False, "Configuration file '"+configuration_file+"' does not exists")
-            
-        #Read and parse file
-        (return_status, code) = af.read_file(configuration_file)
-        if not return_status:
-            return (return_status, "Error loading configuration file '"+configuration_file+"': "+code)
-        try:
-            config = yaml.load(code)
-        except yaml.YAMLError, exc:
-            error_pos = ""
-            if hasattr(exc, 'problem_mark'):
-                mark = exc.problem_mark
-                error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
-            return (False, "Error loading configuration file '"+configuration_file+"'"+error_pos+": content format error: Failed to parse yaml format")
-        
-        
-        try:
-            js_v(config, config_schema)
-        except js_e.ValidationError, exc:
-            error_pos = ""
-            if len(exc.path)>0: error_pos=" at '" + ":".join(map(str, exc.path))+"'"
-            return False, "Error loading configuration file '"+configuration_file+"'"+error_pos+": "+exc.message 
-        
-        
-        #Check default values tokens
-        for k,v in default_tokens.items():
-            if k not in config: config[k]=v
-        #Check vlan ranges
-        if config["network_vlan_range_start"]+10 >= config["network_vlan_range_end"]:
-            return False, "Error invalid network_vlan_range less than 10 elements"
-    
-    except Exception,e:
-        return (False, "Error loading configuration file '"+configuration_file+"': "+str(e))
-    return (True, config)
-
-def usage():
-    print "Usage: ", sys.argv[0], "[options]"
-    print "      -v|--version: prints current version"
-    print "      -c|--config FILE: loads the configuration file (default: openvimd.cfg)"
-    print "      -h|--help: shows this help"
-    print "      -p|--port PORT: changes port number and overrides the port number in the configuration file (default: 908)"
-    print "      -P|--adminport PORT: changes admin port number and overrides the port number in the configuration file (default: not listen)"
-    print "      --dbname NAME: changes db_name and overrides the db_name in the configuration file"
-    #print( "      --log-socket-host HOST: send logs to this host")
-    #print( "      --log-socket-port PORT: send logs using this port (default: 9022)")
-    print( "      --log-file FILE: send logs to this file")
-    return
-
-
-if __name__=="__main__":
-    hostname = socket.gethostname()
-    #streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
-    log_formatter_complete = logging.Formatter(
-        '%(asctime)s.%(msecs)03d00Z[{host}@openmanod] %(filename)s:%(lineno)s severity:%(levelname)s logger:%(name)s log:%(message)s'.format(host=hostname),
-        datefmt='%Y-%m-%dT%H:%M:%S',
-    )
-    log_format_simple =  "%(asctime)s %(levelname)s  %(name)s %(filename)s:%(lineno)s %(message)s"
-    log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S')
-    logging.basicConfig(format=log_format_simple, level= logging.DEBUG)
-    logger = logging.getLogger('openvim')
-    logger.setLevel(logging.DEBUG)
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "hvc:p:P:", ["config=", "help", "version", "port=", "adminport=", "log-file=", "dbname="])
-    except getopt.GetoptError, err:
-        # print help information and exit:
-        logger.error("%s. Type -h for help", err) # will print something like "option -a not recognized"
-        #usage()
-        sys.exit(-2)
-
-    port=None
-    port_admin = None
-    config_file = 'openvimd.cfg'
-    log_file = None
-    db_name = None
-
-    for o, a in opts:
-        if o in ("-v", "--version"):
-            print "openvimd version", ovim.ovim.get_version(), ovim.ovim.get_version_date()
-            print "(c) Copyright Telefonica"
-            sys.exit(0)
-        elif o in ("-h", "--help"):
-            usage()
-            sys.exit(0)
-        elif o in ("-c", "--config"):
-            config_file = a
-        elif o in ("-p", "--port"):
-            port = a
-        elif o in ("-P", "--adminport"):
-            port_admin = a
-        elif o in ("-P", "--dbname"):
-            db_name = a
-        elif o == "--log-file":
-            log_file = a
-        else:
-            assert False, "Unhandled option"
-
-    
-    engine = None
-    http_thread = None
-    http_thread_admin = None
-
-    try:
-        #Load configuration file
-        r, config_dic = load_configuration(config_file)
-        #print config_dic
-        if not r:
-            logger.error(config_dic)
-            config_dic={}
-            exit(-1)
-        if log_file:
-            try:
-                file_handler= logging.handlers.RotatingFileHandler(log_file, maxBytes=100e6, backupCount=9, delay=0)
-                file_handler.setFormatter(log_formatter_simple)
-                logger.addHandler(file_handler)
-                #logger.debug("moving logs to '%s'", global_config["log_file"])
-                #remove initial stream handler
-                logging.root.removeHandler(logging.root.handlers[0])
-                print ("logging on '{}'".format(log_file))
-            except IOError as e:
-                raise LoadConfigurationException("Cannot open logging file '{}': {}. Check folder exist and permissions".format(log_file, str(e)) ) 
-
-        logger.setLevel(getattr(logging, config_dic['log_level']))
-        logger.critical("Starting openvim server command: '%s'", sys.argv[0])
-        #override parameters obtained by command line
-        if port: 
-            config_dic['http_port'] = port
-        if port_admin:
-            config_dic['http_admin_port'] = port_admin
-        if db_name: 
-            config_dic['db_name'] = db_name
-        
-        #check mode
-        if 'mode' not in config_dic:
-            config_dic['mode'] = 'normal'
-            #allow backward compatibility of test_mode option
-            if 'test_mode' in config_dic and config_dic['test_mode']==True:
-                config_dic['mode'] = 'test' 
-        if config_dic['mode'] == 'development' and config_dic['network_type'] == 'bridge' and \
-                ( 'development_bridge' not in config_dic or config_dic['development_bridge'] not in config_dic.get("bridge_ifaces",None) ):
-            logger.error("'%s' is not a valid 'development_bridge', not one of the 'bridge_ifaces'", config_file)
-            exit(-1)
-
-        if config_dic['mode'] != 'normal':
-            print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
-            print "!! Warning, openvimd in TEST mode '%s'" % config_dic['mode']
-            print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
-        config_dic['version'] = ovim.ovim.get_version()
-        config_dic["logger_name"] = "openvim"
-
-        engine = ovim.ovim(config_dic)
-        engine.start_service()
-
-        
-    #Create thread to listen to web requests
-        http_thread = httpserver.httpserver(engine, 'http', config_dic['http_host'], config_dic['http_port'], False, config_dic)
-        http_thread.start()
-        
-        if 'http_admin_port' in config_dic:
-            engine2 = ovim.ovim(config_dic)
-            http_thread_admin = httpserver.httpserver(engine2, 'http-admin', config_dic['http_host'], config_dic['http_admin_port'], True)
-            http_thread_admin.start()
-        else:
-            http_thread_admin = None
-        time.sleep(1)      
-        logger.info('Waiting for http clients')
-        print ('openvimd ready')
-        print ('====================')
-        sys.stdout.flush()
-        
-        #TODO: Interactive console would be nice here instead of join or sleep
-        
-        r="help" #force print help at the beginning
-        while True:
-            if r=='exit':
-                break      
-            elif r!='':
-                print "type 'exit' for terminate"
-            r = raw_input('> ')
-
-    except (KeyboardInterrupt, SystemExit):
-        pass
-    except SystemExit:
-        pass
-    except getopt.GetoptError as e:
-        logger.critical(str(e)) # will print something like "option -a not recognized"
-        #usage()
-        exit(-1)
-    except LoadConfigurationException as e:
-        logger.critical(str(e))
-        exit(-1)
-    except ovim.ovimException as e:
-        logger.critical(str(e))
-        exit(-1)
-
-    logger.info('Exiting openvimd')
-    if engine:
-        engine.stop_service()
-    if http_thread:
-        http_thread.join(1)
-    if http_thread_admin:
-        http_thread_admin.join(1)
-
-    logger.debug( "bye!")
-    exit()
-
diff --git a/osm_openvim/ODL.py b/osm_openvim/ODL.py
new file mode 100644 (file)
index 0000000..588409e
--- /dev/null
@@ -0,0 +1,553 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+Implement the plugin for the OpenDaylight openflow controller.
+It creates the class OF_conn to create dataplane connections
+with static rules based on packet destination MAC address
+"""
+
+__author__="Pablo Montes, Alfonso Tierno"
+__date__ ="$28-oct-2014 12:07:15$"
+
+
+import json
+import requests
+import base64
+import logging
+import openflow_conn
+
+
+class OF_conn(openflow_conn.OpenflowConn):
+    """OpenDayLight connector. No MAC learning is used"""
+
+    def __init__(self, params):
+        """ Constructor.
+            Params: dictionary with the following keys:
+                of_dpid:     DPID to use for this controller
+                of_ip:       controller IP address
+                of_port:     controller TCP port
+                of_user:     user credentials, can be missing or None
+                of_password: password credentials
+                of_debug:    debug level for logging. Default to ERROR
+                other keys are ignored
+            Raise an exception if some parameter is missing or wrong
+        """
+
+        # check params
+        if "of_ip" not in params or params["of_ip"]==None or "of_port" not in params or params["of_port"]==None:
+            raise ValueError("IP address and port must be provided")
+
+        openflow_conn.OpenflowConn.__init__(self, params)
+        # internal variables
+        self.name = "OpenDayLight"
+        self.headers = {'content-type': 'application/json', 'Accept': 'application/json'}
+        self.auth=None
+        self.pp2ofi={}  # From Physical Port to OpenFlow Index
+        self.ofi2pp={}  # From OpenFlow Index to Physical Port
+
+        self.dpid = str(params["of_dpid"])
+        self.id = 'openflow:'+str(int(self.dpid.replace(':', ''), 16))  # ODL inventory node id: 'openflow:' + DPID as decimal
+        self.url = "http://%s:%s" %( str(params["of_ip"]), str(params["of_port"] ) )
+        if "of_user" in params and params["of_user"]!=None:
+            if not params.get("of_password"):
+                of_password=""
+            else:
+                of_password=str(params["of_password"])
+            self.auth = base64.b64encode(str(params["of_user"])+":"+of_password)
+            self.headers['Authorization'] = 'Basic '+self.auth
+
+        self.logger = logging.getLogger('vim.OF.ODL')
+        self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR")) )
+
+    def get_of_switches(self):
+        """
+        Obtain a a list of switches or DPID detected by this controller
+        :return: list length, and a list where each element a tuple pair (DPID, IP address)
+                 Raise an OpenflowconnConnectionException exception if fails with text_error
+        """
+        try:
+            of_response = requests.get(self.url+"/restconf/operational/opendaylight-inventory:nodes",
+                                       headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("get_of_switches " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Error get_of_switches " + error_text)
+
+            self.logger.debug("get_of_switches " + error_text)
+            info = of_response.json()
+
+            if type(info) != dict:
+                self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
+
+            nodes = info.get('nodes')
+            if type(nodes) is not dict:
+                self.logger.error("get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s", str(type(info)))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes', not found or "
+                                                                   "not a dict. Wrong version?")
+
+            node_list = nodes.get('node')
+            if type(node_list) is not list:
+                self.logger.error("get_of_switches. Unexpected response, at 'nodes':'node', "
+                                  "not found or not a list: %s", str(type(node_list)))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found "
+                                                                   "or not a list. Wrong version?")
+
+            switch_list=[]
+            for node in node_list:
+                node_id = node.get('id')
+                if node_id is None:
+                    self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s", str(node))
+                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', "
+                                                                       "not found . Wrong version?")
+
+                if node_id == 'controller-config':
+                    continue
+
+                node_ip_address = node.get('flow-node-inventory:ip-address')
+                if node_ip_address is None:
+                    self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:"
+                                      "ip-address', not found: %s", str(node))
+                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:"
+                                                                       "'flow-node-inventory:ip-address', "
+                                                                       "not found. Wrong version?")
+
+                node_id_hex=hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)
+                switch_list.append( (':'.join(a+b for a,b in zip(node_id_hex[::2], node_id_hex[1::2])), node_ip_address))
+
+            return len(switch_list), switch_list
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_switches " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+        except ValueError as e:
+            # ValueError in the case that JSON can not be decoded
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_switches " + error_text)
+            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+    def obtain_port_correspondence(self):
+        """
+        Obtain the correspondence between physical and openflow port names
+        :return: dictionary: with physical name as key, openflow name as value,
+                 Raise an OpenflowconnConnectionException exception in case of failure
+        """
+        try:
+            of_response = requests.get(self.url+"/restconf/operational/opendaylight-inventory:nodes",
+                                       headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("obtain_port_correspondence " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("obtain_port_correspondence " + error_text)
+            info = of_response.json()
+
+            if type(info) != dict:
+                self.logger.error("obtain_port_correspondence. Unexpected response not a dict: %s", str(info))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected openflow response, not a dict. "
+                                                                   "Wrong version?")
+
+            nodes = info.get('nodes')
+            if type(nodes) is not dict:
+                self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes', "
+                                  "not found or not a dict: %s", str(type(nodes)))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes',not found or not a dict. Wrong version?")
+
+            node_list = nodes.get('node')
+            if type(node_list) is not list:
+                self.logger.error("obtain_port_correspondence. Unexpected response, at 'nodes':'node', "
+                                  "not found or not a list: %s", str(type(node_list)))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, at 'nodes':'node', "
+                                                                   "not found or not a list. Wrong version?")
+
+            for node in node_list:
+                node_id = node.get('id')
+                if node_id is None:
+                    self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:'id', "
+                                      "not found: %s", str(node))
+                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', "
+                                                                       "not found . Wrong version?")
+
+                if node_id == 'controller-config':
+                    continue
+
+                # Figure out if this is the appropriate switch. The 'id' is 'openflow:' plus the decimal value
+                # of the dpid
+                #  In case this is not the desired switch, continue
+                if self.id != node_id:
+                    continue
+
+                node_connector_list = node.get('node-connector')
+                if type(node_connector_list) is not list:
+                    self.logger.error("obtain_port_correspondence. Unexpected response at "
+                                      "'nodes':'node'[]:'node-connector', not found or not a list: %s", str(node))
+                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:"
+                                                                       "'node-connector', not found  or not a list. "
+                                                                       "Wrong version?")
+
+                for node_connector in node_connector_list:
+                    self.pp2ofi[ str(node_connector['flow-node-inventory:name']) ] = str(node_connector['id'] )
+                    self.ofi2pp[ node_connector['id'] ] =  str(node_connector['flow-node-inventory:name'])
+
+                node_ip_address = node.get('flow-node-inventory:ip-address')
+                if node_ip_address is None:
+                    self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:"
+                                      "'flow-node-inventory:ip-address', not found: %s", str(node))
+                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:"
+                                                                       "'flow-node-inventory:ip-address', not found. Wrong version?")
+                self.ip_address = node_ip_address
+
+                # If we found the appropriate dpid no need to continue in the for loop
+                break
+
+            # print self.name, ": obtain_port_correspondence ports:", self.pp2ofi
+            return self.pp2ofi
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("obtain_port_correspondence " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+        except ValueError as e:
+            # ValueError in the case that JSON can not be decoded
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("obtain_port_correspondence " + error_text)
+            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+    def get_of_rules(self, translate_of_ports=True):
+        """
+        Obtain the rules inserted at openflow controller
+        :param translate_of_ports:
+        :return: dict if ok: with the rule name as key and value is another dictionary with the following content:
+                    priority: rule priority
+                    name:         rule name (present also as the master dict key)
+                    ingress_port: match input port of the rule
+                    dst_mac:      match destination mac address of the rule, can be missing or None if not applicable
+                    vlan_id:      match vlan tag of the rule, can be missing or None if not applicable
+                    actions:      list of actions, composed by a pair tuples:
+                        (vlan, None/int): for stripping/setting a vlan tag
+                        (out, port):      send to this port
+                    switch:       DPID, all
+                    Raise an OpenflowconnConnectionException exception in case of failure
+
+        """
+
+        try:
+            # get rules
+            if len(self.ofi2pp) == 0:
+                self.obtain_port_correspondence()
+
+            of_response = requests.get(self.url+"/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
+                                          "/table/0", headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+
+            # The configured page does not exist if there are no rules installed. In that case we return an empty dict
+            if of_response.status_code == 404:
+                return {}
+
+            elif of_response.status_code != 200:
+                self.logger.warning("get_of_rules " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+            self.logger.debug("get_of_rules " + error_text)
+
+            info = of_response.json()
+
+            if type(info) != dict:
+                self.logger.error("get_of_rules. Unexpected response not a dict: %s", str(info))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected openflow response, not a dict. "
+                                                                   "Wrong version?")
+
+            table = info.get('flow-node-inventory:table')
+            if type(table) is not list:
+                self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table', "
+                                  "not a list: %s", str(type(table)))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table',"
+                                                                   " not a list. Wrong version?")
+
+            flow_list = table[0].get('flow')
+            if flow_list is None:
+                return {}
+
+            if type(flow_list) is not list:
+                self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table'[0]:'flow', not a list: %s", str(type(flow_list)))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'flow-node-inventory:"
+                                                                   "table'[0]:'flow', not a list. Wrong version?")
+
+            # TODO translate ports according to translate_of_ports parameter
+
+            rules = dict()
+            for flow in flow_list:
+                if not ('id' in flow and 'match' in flow and 'instructions' in flow and
+                                'instruction' in flow['instructions'] and
+                                'apply-actions' in flow['instructions']['instruction'][0] and
+                                'action' in flow['instructions']['instruction'][0]['apply-actions']):
+                    raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, one or more "
+                                                                       "elements are missing. Wrong version?")
+
+                flow['instructions']['instruction'][0]['apply-actions']['action']
+
+                rule = dict()
+                rule['switch'] = self.dpid
+                rule['priority'] = flow.get('priority')
+                # rule['name'] = flow['id']
+                # rule['cookie'] = flow['cookie']
+                if 'in-port' in flow['match']:
+                    in_port = flow['match']['in-port']
+                    if not in_port in self.ofi2pp:
+                        raise openflow_conn.OpenflowconnUnexpectedResponse("Error: Ingress port " + in_port +
+                                                                           " is not in switch port list")
+
+                    if translate_of_ports:
+                        in_port = self.ofi2pp[in_port]
+
+                    rule['ingress_port'] = in_port
+
+                    if 'vlan-match' in flow['match'] and 'vlan-id' in flow['match']['vlan-match'] and \
+                                'vlan-id' in flow['match']['vlan-match']['vlan-id'] and \
+                                'vlan-id-present' in flow['match']['vlan-match']['vlan-id'] and \
+                                flow['match']['vlan-match']['vlan-id']['vlan-id-present'] == True:
+                        rule['vlan_id'] = flow['match']['vlan-match']['vlan-id']['vlan-id']
+
+                    if 'ethernet-match' in flow['match'] and 'ethernet-destination' in flow['match']['ethernet-match'] and \
+                        'address' in flow['match']['ethernet-match']['ethernet-destination']:
+                        rule['dst_mac'] = flow['match']['ethernet-match']['ethernet-destination']['address']
+
+                instructions=flow['instructions']['instruction'][0]['apply-actions']['action']
+
+                max_index=0
+                for instruction in instructions:
+                    if instruction['order'] > max_index:
+                        max_index = instruction['order']
+
+                actions=[None]*(max_index+1)
+                for instruction in instructions:
+                    if 'output-action' in instruction:
+                        if not 'output-node-connector' in instruction['output-action']:
+                            raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, one or "
+                                                                               "more elementa are missing. "
+                                                                               "Wrong version?")
+
+                        out_port = instruction['output-action']['output-node-connector']
+                        if not out_port in self.ofi2pp:
+                            raise openflow_conn.OpenflowconnUnexpectedResponse("Error: Output port " + out_port +
+                                                                               " is not in switch port list")
+
+                        if translate_of_ports:
+                            out_port = self.ofi2pp[out_port]
+
+                        actions[instruction['order']] = ('out',out_port)
+
+                    elif 'strip-vlan-action' in instruction:
+                        actions[instruction['order']] = ('vlan', None)
+
+                    elif 'set-field' in instruction:
+                        if not ('vlan-match' in instruction['set-field'] and 'vlan-id' in  instruction['set-field']['vlan-match'] and 'vlan-id' in instruction['set-field']['vlan-match']['vlan-id']):
+                            raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, one or "
+                                                                               "more elements are missing. "
+                                                                               "Wrong version?")
+
+                        actions[instruction['order']] = ('vlan', instruction['set-field']['vlan-match']['vlan-id']['vlan-id'])
+
+                actions = [x for x in actions if x != None]
+
+                rule['actions'] = list(actions)
+                rules[flow['id']] = dict(rule)
+
+                #flow['id']
+                #flow['priority']
+                #flow['cookie']
+                #flow['match']['in-port']
+                #flow['match']['vlan-match']['vlan-id']['vlan-id']
+                # match -> in-port
+                #      -> vlan-match -> vlan-id -> vlan-id
+                #flow['match']['vlan-match']['vlan-id']['vlan-id-present']
+                #TODO we assume it is not using rules with vlan-id-present:false
+                #instructions -> instruction -> apply-actions -> action
+                #instructions=flow['instructions']['instruction'][0]['apply-actions']['action']
+                #It is a list. Possible elements:
+                #max_index=0
+                #for instruction in instructions:
+                #  if instruction['order'] > max_index:
+                #    max_index = instruction['order']
+                #actions=[None]*(max_index+1)
+                #for instruction in instructions:
+                #   if 'output-action' in instruction:
+                #     actions[instruction['order']] = ('out',instruction['output-action']['output-node-connector'])
+                #   elif 'strip-vlan-action' in instruction:
+                #     actions[instruction['order']] = ('vlan', None)
+                #   elif 'set-field' in instruction:
+                #     actions[instruction['order']] = ('vlan', instruction['set-field']['vlan-match']['vlan-id']['vlan-id'])
+                #
+                #actions = [x for x in actions if x != None]
+                #                                                       -> output-action -> output-node-connector
+                #                                                       -> pop-vlan-action
+            return rules
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_rules " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+        except ValueError as e:
+            # ValueError in the case that JSON can not be decoded
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_rules " + error_text)
+            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+    def del_flow(self, flow_name):
+        """
+        Delete an existing rule
+        :param flow_name: flow_name, this is the rule name
+        :return: Raise an OpenflowconnConnectionException exception in case of failure
+        """
+
+        try:
+            of_response = requests.delete(self.url+"/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
+                                          "/table/0/flow/"+flow_name, headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("del_flow " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("del_flow OK " + error_text)
+            return None
+        except requests.exceptions.RequestException as e:
+            # raise an exception in case of connection error
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("del_flow " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+
+    def new_flow(self, data):
+        """
+        Insert a new static rule
+        :param data: dictionary with the following content:
+                priority:     rule priority
+                name:         rule name
+                ingress_port: match input port of the rule
+                dst_mac:      match destination mac address of the rule, missing or None if not applicable
+                vlan_id:      match vlan tag of the rule, missing or None if not applicable
+                actions:      list of actions, composed of pair tuples with these possibilities:
+                    ('vlan', None/int): for stripping/setting a vlan tag
+                    ('out', port):      send to this port
+        :return: Raise an OpenflowconnConnectionException exception in case of failure
+        """
+
+        try:
+
+            if len(self.pp2ofi) == 0:
+                self.obtain_port_correspondence()
+
+            # We have to build the data for the opendaylight call from the generic data
+            sdata = dict()
+            sdata['flow-node-inventory:flow'] = list()
+            sdata['flow-node-inventory:flow'].append(dict())
+            flow = sdata['flow-node-inventory:flow'][0]
+            flow['id'] = data['name']
+            flow['flow-name'] = data['name']
+            flow['idle-timeout'] = 0
+            flow['hard-timeout'] = 0
+            flow['table_id'] = 0
+            flow['priority'] = data.get('priority')
+            flow['match'] = dict()
+            if not data['ingress_port'] in self.pp2ofi:
+                error_text = 'Error. Port '+data['ingress_port']+' is not present in the switch'
+                self.logger.warning("new_flow " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            flow['match']['in-port'] = self.pp2ofi[data['ingress_port']]
+            if 'dst_mac' in data:
+                flow['match']['ethernet-match'] = dict()
+                flow['match']['ethernet-match']['ethernet-destination'] = dict()
+                flow['match']['ethernet-match']['ethernet-destination']['address'] = data['dst_mac']
+            if data.get('vlan_id'):
+                flow['match']['vlan-match'] = dict()
+                flow['match']['vlan-match']['vlan-id'] = dict()
+                flow['match']['vlan-match']['vlan-id']['vlan-id-present'] = True
+                flow['match']['vlan-match']['vlan-id']['vlan-id'] = int(data['vlan_id'])
+            flow['instructions'] = dict()
+            flow['instructions']['instruction'] = list()
+            flow['instructions']['instruction'].append(dict())
+            flow['instructions']['instruction'][0]['order'] = 1
+            flow['instructions']['instruction'][0]['apply-actions'] = dict()
+            flow['instructions']['instruction'][0]['apply-actions']['action'] = list()
+            actions = flow['instructions']['instruction'][0]['apply-actions']['action']
+
+            order = 0
+            for action in data['actions']:
+                new_action = { 'order': order }
+                if  action[0] == "vlan":
+                    if action[1] == None:
+                        # strip vlan
+                        new_action['strip-vlan-action'] = dict()
+                    else:
+                        new_action['set-field'] = dict()
+                        new_action['set-field']['vlan-match'] = dict()
+                        new_action['set-field']['vlan-match']['vlan-id'] = dict()
+                        new_action['set-field']['vlan-match']['vlan-id']['vlan-id-present'] = True
+                        new_action['set-field']['vlan-match']['vlan-id']['vlan-id'] = int(action[1])
+                elif action[0] == 'out':
+                    new_action['output-action'] = dict()
+                    if not action[1] in self.pp2ofi:
+                        error_msj = 'Port '+action[1]+' is not present in the switch'
+                        raise openflow_conn.OpenflowconnUnexpectedResponse(error_msj)
+
+                    new_action['output-action']['output-node-connector'] = self.pp2ofi[ action[1] ]
+                else:
+                    error_msj = "Unknown item '%s' in action list" % action[0]
+                    self.logger.error("new_flow " + error_msj)
+                    raise openflow_conn.OpenflowconnUnexpectedResponse(error_msj)
+
+                actions.append(new_action)
+                order += 1
+
+            # print json.dumps(sdata)
+            of_response = requests.put(self.url+"/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
+                          "/table/0/flow/" + data['name'],
+                                headers=self.headers, data=json.dumps(sdata) )
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("new_flow " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("new_flow OK " + error_text)
+            return None
+
+        except requests.exceptions.RequestException as e:
+            # raise an exception in case of connection error
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("new_flow " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+
+    def clear_all_flows(self):
+        """
+        Delete all existing rules
+        :return: Raise an OpenflowconnConnectionException exception in case of failure
+        """
+        try:
+            of_response = requests.delete(self.url+"/restconf/config/opendaylight-inventory:nodes/node/" + self.id +
+                                      "/table/0", headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200 and of_response.status_code != 404: #HTTP_Not_Found
+                self.logger.warning("clear_all_flows " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("clear_all_flows OK " + error_text)
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("clear_all_flows " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
diff --git a/osm_openvim/RADclass.py b/osm_openvim/RADclass.py
new file mode 100644 (file)
index 0000000..a4c10ec
--- /dev/null
@@ -0,0 +1,1618 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+Implement the logic for obtaining compute nodes information 
+Resource Availability Descriptor 
+'''
+__author__="Pablo Montes"
+
+#TODO: remove warnings, remove unused things 
+
+from definitionsClass import definitionsClass
+from auxiliary_functions import get_ssh_connection
+import libvirt
+from xml.etree import ElementTree
+import paramiko 
+import re
+import yaml
+
+
+def getCredentials(creds, data):
+    """Used as a backup for libvirt.openAuth in order to provide password that came with data,
+    not used by the moment
+    """
+    print "RADclass:getCredentials", creds, data
+    for cred in creds:
+        print cred[1] + ": ",
+        if cred[0] == libvirt.VIR_CRED_AUTHNAME:
+            cred[4] = data
+        elif cred[0] == libvirt.VIR_CRED_PASSPHRASE:
+            cred[4] = data
+        else:
+            return -1
+    return 0
+
+class RADclass():
+    def __init__(self):
+        self.name = None
+        self.machine = None
+        self.user = None
+        self.password = None
+        self.nodes = dict()                 #Dictionary of nodes. Keys are the node id, values are Node() elements
+        self.nr_processors = None           #Integer. Number of processors in the system 
+        self.processor_family = None        #If all nodes have the same value equal them, otherwise keep as None
+        self.processor_manufacturer = None  #If all nodes have the same value equal them, otherwise keep as None
+        self.processor_version = None       #If all nodes have the same value equal them, otherwise keep as None
+        self.processor_features = None      #If all nodes have the same value equal them, otherwise keep as None
+        self.memory_type = None             #If all nodes have the same value equal them, otherwise keep as None
+        self.memory_freq = None             #If all nodes have the same value equal them, otherwise keep as None
+        self.memory_nr_channels = None      #If all nodes have the same value equal them, otherwise keep as None
+        self.memory_size = None             #Integer. Sum of the memory in all nodes
+        self.memory_hugepage_sz = None
+        self.hypervisor = Hypervisor()      #Hypervisor information
+        self.os = OpSys()                   #Operating system information
+        self.ports_list = list()            #List containing all network ports in the node. This is used to avoid having defined multiple times the same port in the system
+    
+    
+    def obtain_RAD(self, user, password, machine):
+        """This function obtains the RAD information from the remote server.
+        It uses both a ssh and a libvirt connection. 
+        It is desirable in future versions to get rid of the ssh connection, but currently 
+        libvirt does not provide all the needed information. 
+        Returns (True, Warning) in case of success and (False, <error>) in case of error"""
+        warning_text=""
+        try:
+            #Get virsh and ssh connection
+            (return_status, code) = get_ssh_connection(machine, user, password)
+            if not return_status:
+                print 'RADclass.obtain_RAD() error:', code
+                return (return_status, code)
+            ssh_conn = code
+            
+            self.connection_IP = machine
+            #print "libvirt open pre"
+            virsh_conn=libvirt.open("qemu+ssh://"+user+'@'+machine+"/system")
+            #virsh_conn=libvirt.openAuth("qemu+ssh://"+user+'@'+machine+"/system", 
+            #        [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE, libvirt.VIR_CRED_USERNAME], getCredentials, password],
+            #        0)
+            #print "libvirt open after"
+            
+    #         #Set connection information
+    #         (return_status, code) = self.set_connection_info(machine, user, password)
+    #         if not return_status:
+    #             return (return_status, 'Error in '+machine+': '+code)
+            
+            #Set server name
+            machine_name = get_hostname(virsh_conn)
+            (return_status, code) = self.set_name(machine_name)
+            if not return_status:
+                return (return_status, 'Error at self.set_name in '+machine+': '+code)
+            warning_text += code
+            
+            #Get the server processors information
+            processors = dict()
+            (return_status, code) = get_processor_information(ssh_conn, virsh_conn, processors)
+            if not return_status:
+                return (return_status, 'Error at get_processor_information in '+machine+': '+code)
+            warning_text += code
+            
+            #Get the server memory information
+            memory_nodes = dict()
+            (return_status, code) = get_memory_information(ssh_conn, virsh_conn, memory_nodes)
+            if not return_status:
+                return (return_status, 'Error at get_memory_information in '+machine+': '+code)
+            warning_text += code
+            
+            #Get nics information
+            nic_topology = dict()
+    #         (return_status, code) = get_nic_information_old(ssh_conn, nic_topology)
+            (return_status, code) = get_nic_information(ssh_conn, virsh_conn, nic_topology)
+            if not return_status:
+                return (return_status, 'Error at get_nic_information in '+machine+': '+code)
+            warning_text += code
+            
+            #Pack each processor, memory node  and nics in a node element
+            #and add the node to the RAD element
+            for socket_id, processor in processors.iteritems():
+                node = Node()
+                if not socket_id in nic_topology:
+                    nic_topology[socket_id] = list()
+                    
+                (return_status, code) = node.set(processor, memory_nodes[socket_id], nic_topology[socket_id])
+    #             else:
+    #                 (return_status, code) = node.set(processor, memory_nodes[socket_id])
+                if not return_status:
+                    return (return_status, 'Error at node.set in '+machine+': '+code)
+                warning_text += code
+                (return_status, code) = self.insert_node(node)
+                if not return_status:
+                    return (return_status, 'Error at self.insert_node in '+machine+': '+code)
+                if code not in warning_text:
+                    warning_text += code
+            
+            #Fill os data
+            os = OpSys()
+            (return_status, code) = get_os_information(ssh_conn, os)
+            if not return_status:
+                return (return_status, 'Error at get_os_information in '+machine+': '+code)
+            warning_text += code
+            (return_status, code) = self.set_os(os)
+            if not return_status:
+                return (return_status, 'Error at self.set_os in '+machine+': '+code)
+            warning_text += code
+            
+            #Fill hypervisor data
+            hypervisor = Hypervisor()
+            (return_status, code) = get_hypervisor_information(virsh_conn, hypervisor)
+            if not return_status:
+                return (return_status, 'Error at get_hypervisor_information in '+machine+': '+code)
+            warning_text += code
+            (return_status, code) = self.set_hypervisor(hypervisor)
+            if not return_status:
+                return (return_status, 'Error at self.set_hypervisor in '+machine+': '+code)
+            warning_text += code
+            ssh_conn.close()
+                
+            return (True, warning_text)
+        except libvirt.libvirtError, e:
+            text = e.get_error_message()
+            print 'RADclass.obtain_RAD() exception:', text
+            return (False, text)
+        except paramiko.ssh_exception.SSHException, e:
+            text = e.args[0]
+            print  "obtain_RAD ssh Exception:", text
+            return False, text
+
+    def set_name(self,name):
+        """Sets the machine name. 
+        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
+        if not isinstance(name,str):
+            return (False, 'The variable \'name\' must be text')
+        self.name = name
+        return (True, "")
+    
+    def set_connection_info(self, machine, user, password):
+        """Sets the connection information. 
+        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
+        if not isinstance(machine,str):
+            return (False, 'The variable \'machine\' must be text')
+        if not isinstance(user,str):
+            return (False, 'The variable \'user\' must be text')
+#         if not isinstance(password,str):
+#             return (False, 'The variable \'password\' must be text')
+        (self.machine, self.user, self.password) = (machine, user, password)
+        return (True, "")
+        
+    def insert_node(self,node):
+        """Inserts a new node and updates class variables. 
+        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
+        if not isinstance(node,Node):
+            return (False, 'The variable \'node\' must be a Node element')
+        
+        if node.id_ in self.nodes:
+            return (False, 'The node is already present in the nodes list.')
+        
+        #Check if network ports have not been inserted previously as part of another node
+        for port_key in node.ports_list:
+            if port_key in self.ports_list:
+                return (False, 'Network port '+port_key+' defined multiple times in the system')
+            self.ports_list.append(port_key)
+        
+        #Insert the new node
+        self.nodes[node.id_] = node
+        
+        #update variables
+        self.update_variables()
+        
+        return (True, "")
+    
+    def update_variables(self):
+        """Updates class variables. 
+        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
+        warning_text=""
+        #The number of processors and nodes is the same
+        self.nr_processors = len(self.nodes)
+        
+        #If all processors are the same get the values. Otherwise keep them as none
+        prev_processor_family = prev_processor_manufacturer = prev_processor_version = prev_processor_features = None
+        different_processor_family = different_processor_manufacturer = different_processor_version = different_processor_features = False
+        for node in self.nodes.itervalues():
+            (self.processor_family, self.processor_manufacturer, self.processor_version, self.processor_features) = node.get_processor_info()
+            if prev_processor_family != None and self.processor_family != prev_processor_family:
+                different_processor_family = True
+            if prev_processor_manufacturer != None and self.processor_manufacturer != prev_processor_manufacturer:
+                different_processor_manufacturer = True
+            if prev_processor_version != None and self.processor_version != prev_processor_version:
+                different_processor_version = True
+            if prev_processor_features != None and self.processor_features != prev_processor_features:
+                different_processor_features = True
+            (prev_processor_family, prev_processor_manufacturer, prev_processor_version, prev_processor_features) = (self.processor_family, self.processor_manufacturer, self.processor_version, self.processor_features)
+
+        if different_processor_family:
+            self.processor_family = None
+        if different_processor_features:
+            self.processor_features = None
+        if different_processor_manufacturer:
+            self.processor_manufacturer = None
+        if different_processor_version:
+            self.processor_version = None
+            
+        #If all memory nodes are the same get the values. Otherwise keep them as none
+        #Sum the total memory
+        self.memory_size = 0
+        different_memory_freq = different_memory_nr_channels = different_memory_type = different_memory_hugepage_sz = False
+        prev_memory_freq = prev_memory_nr_channels = prev_memory_type = prev_memory_hugepage_sz = None
+        for node in self.nodes.itervalues():
+            (self.memory_freq, self.memory_nr_channels, self.memory_type, memory_size, self.memory_hugepage_sz) = node.get_memory_info()
+            self.memory_size += memory_size 
+            if prev_memory_freq != None and self.memory_freq != prev_memory_freq:
+                different_memory_freq = True
+            if prev_memory_nr_channels != None and self.memory_nr_channels != prev_memory_nr_channels:
+                different_memory_nr_channels = True
+            if prev_memory_type != None and self.memory_type != prev_memory_type:
+                different_memory_type = True
+            if prev_memory_hugepage_sz != None and self.memory_hugepage_sz != prev_memory_hugepage_sz:
+                different_memory_hugepage_sz = True
+            (prev_memory_freq, prev_memory_nr_channels, prev_memory_type, prev_memory_hugepage_sz) = (self.memory_freq, self.memory_nr_channels, self.memory_type, self.memory_hugepage_sz)
+            
+        if different_memory_freq:
+            self.memory_freq = None
+        if different_memory_nr_channels:
+            self.memory_nr_channels = None
+        if different_memory_type:
+            self.memory_type = None
+        if different_memory_hugepage_sz:
+            warning_text += 'Detected different hugepages size in different sockets\n'
+            
+        return (True, warning_text)
+        
+    def set_hypervisor(self,hypervisor):
+        """Sets the hypervisor. 
+        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
+        if not isinstance(hypervisor,Hypervisor):
+            return (False, 'The variable \'hypervisor\' must be of class Hypervisor')
+        
+        self.hypervisor.assign(hypervisor) 
+        return (True, "")
+    
+    def set_os(self,os):
+        """Sets the operating system. 
+        Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
+        if not isinstance(os,OpSys):
+            return (False, 'The variable \'os\' must be of class OpSys')
+        
+        self.os.assign(os)
+        return (True, "")
+    
+    def to_text(self):
+        text= 'name: '+str(self.name)+'\n'
+        text+= 'processor:\n'
+        text+= '    nr_processors: '+str(self.nr_processors)+'\n' 
+        text+= '    family: '+str(self.processor_family)+'\n'
+        text+= '    manufacturer: '+str(self.processor_manufacturer)+'\n'
+        text+= '    version: '+str(self.processor_version)+'\n'
+        text+= '    features: '+str(self.processor_features)+'\n'
+        text+= 'memory:\n'
+        text+= '    type: '+str(self.memory_type)+'\n'
+        text+= '    freq: '+str(self.memory_freq)+'\n'
+        text+= '    nr_channels: '+str(self.memory_nr_channels)+'\n'
+        text+= '    size: '+str(self.memory_size)+'\n'
+        text+= 'hypervisor:\n'
+        text+= self.hypervisor.to_text()
+        text+= 'os:\n'
+        text+= self.os.to_text()
+        text+= 'resource topology:\n'
+        text+= '    nr_nodes: '+ str(len(self.nodes))+'\n'
+        text+= '    nodes:\n'
+        for node_k, node_v in self.nodes.iteritems():
+            text+= '        node'+str(node_k)+':\n'
+            text+= node_v.to_text()
+        return text
+    
+    def to_yaml(self):
+        return yaml.load(self.to_text())
+    
class Node():
    """A NUMA node of a compute host: one processor socket, the memory local
    to it, and the NICs attached to it."""

    def __init__(self):
        self.id_ = None                     # Integer. Node id. Unique in the system
        self.processor = ProcessorNode()    # Information about the processor in the node
        self.memory = MemoryNode()          # Information about the memory in the node
        self.nic_list = list()              # List of Nic() containing information about the nics associated to the node
        self.ports_list = list()            # All network port keys in the node, used to detect a port defined twice

    def get_processor_info(self):
        """Gets the processor information. Returns (processor_family, processor_manufacturer, processor_version, processor_features)"""
        return self.processor.get_info()

    def get_memory_info(self):
        """Gets the memory information. Returns (memory_freq, memory_nr_channels, memory_type, memory_size, memory_hugepage_sz)"""
        return self.memory.get_info()

    def set(self, processor, memory, nic_list):
        """Sets the node information from its components.
        Returns (True, Warning) in case of success and (False, <error description>) in case of error"""
        (status, return_code) = self.processor.assign(processor)
        if not status:
            return (status, return_code)

        self.id_ = processor.id_

        (status, return_code) = self.memory.assign(memory)
        if not status:
            return (status, return_code)

        for nic in nic_list:
            if not isinstance(nic, Nic):
                return (False, 'The nics must be of type Nic')
            self.nic_list.append(nic)
            for port_key in nic.ports.iterkeys():
                if port_key in self.ports_list:
                    # Bug fix: the message was missing the space before 'defined'
                    return (False, 'Network port ' + port_key + ' defined multiple times in the same node')
                self.ports_list.append(port_key)

        return (True, "")

    def assign(self, node):
        """Copies the node information from another Node.
        Returns (True, Warning) in case of success and (False, <error description>) in case of error"""
        warning_text = ""
        processor = node.processor
        memory = node.memory
        nic_list = node.nic_list
        (status, return_code) = self.processor.assign(processor)
        if not status:
            return (status, return_code)

        self.id_ = processor.id_

        (status, return_code) = self.memory.assign(memory)
        if not status:
            return (status, return_code)
        # Bug fix: this line read 'warning_text += code', where 'code' is an
        # undefined name (NameError at runtime); the intended value is the
        # warning string returned by memory.assign()
        warning_text += return_code

        for nic in nic_list:
            if not isinstance(nic, Nic):
                return (False, 'The nics must be of type Nic')
            self.nic_list.append(nic)
            for port_key in nic.ports.iterkeys():
                if port_key in self.ports_list:
                    # Bug fix: the message was missing the space before 'defined'
                    return (False, 'Network port ' + port_key + ' defined multiple times in the same node')
                self.ports_list.append(port_key)

        return (True, warning_text)

    def to_text(self):
        """Render the node description (cpu, memory and nics) as indented text."""
        text = '            id: ' + str(self.id_) + '\n'
        text += '            cpu:\n'
        text += self.processor.to_text()
        text += '            memory:\n'
        text += self.memory.to_text()
        if len(self.nic_list) > 0:
            text += '            nics:\n'
            for nic_index, nic in enumerate(self.nic_list):
                text += '                nic ' + str(nic_index) + ':\n'
                text += nic.to_text()
        return text
+    
class ProcessorNode():
    """Processor socket of a NUMA node: identity, feature set and core layout."""
    # Allowed values for the processor fields, taken from definitionsClass
    possible_features = definitionsClass.processor_possible_features
    possible_manufacturers = definitionsClass.processor_possible_manufacturers
    possible_families = definitionsClass.processor_possible_families
    possible_versions = definitionsClass.processor_possible_versions

    def __init__(self):
        self.id_ = None              # Integer. Numeric identifier of the socket
        self.family = None           # Text. Family name of the processor
        self.manufacturer = None     # Text. Manufacturer of the processor
        self.version = None          # Text. Model version of the processor
        self.features = list()       # list. Features offered by the processor
        self.cores = list()          # list. Cores in the processor; with hyperthreading the coupled cores appear as [a, b]
        self.eligible_cores = list() # list. Cores that can be used

    def assign(self, processor):
        """Copies the processor information from another ProcessorNode.
        Returns (True, Warning) in case of success and (False, <error description>) in case of error"""
        if not isinstance(processor, ProcessorNode):
            return (False, 'The variable \'processor\' must be of class ProcessorNode')

        self.id_ = processor.id_
        self.family = processor.family
        self.manufacturer = processor.manufacturer
        self.version = processor.version
        self.features = processor.features
        self.cores = processor.cores
        self.eligible_cores = processor.eligible_cores

        return (True, "")

    def set(self, id_, family, manufacturer, version, features, cores):
        """Validates and stores the processor information.
        Returns (True, Warning) in case of success and (False, <error description>) in case of error"""
        warning_text = ""

        if not isinstance(id_, int):
            return (False, 'The processor id_ must be of type int')
        if not isinstance(family, str):
            return (False, 'The processor family must be of type str')
        if not isinstance(manufacturer, str):
            return (False, 'The processor manufacturer must be of type str')
        if not isinstance(version, str):
            return (False, 'The processor version must be of type str')
        if not isinstance(features, list):
            return (False, 'The processor features must be of type list')
        if not isinstance(cores, list):
            return (False, 'The processor cores must be of type list')
        (self.id_, self.family, self.manufacturer, self.version) = (id_, family, manufacturer, version)
        if not manufacturer in self.possible_manufacturers:
            warning_text += "processor manufacturer '%s' not among: %s\n" % (manufacturer, str(self.possible_manufacturers))
        if not family in self.possible_families:
            warning_text += "family '%s' not among: %s\n" % (family, str(self.possible_families))
        # The version value is accepted without checking it against possible_versions

        for feature in features:
            if not feature in self.possible_features:
                # Bug fix: the warning interpolated possible_versions instead
                # of the feature list being checked against
                warning_text += "processor feature '%s' not among: %s\n" % (feature, str(self.possible_features))
            self.features.append(feature)

        for core_pair in sorted(cores):
            if not isinstance(core_pair, list) or not all(isinstance(x, int) for x in core_pair):
                return (False, 'The cores list must be in the form of [[a,b],[c,d],...] where a,b,c,d are of type int')
            self.cores.append(core_pair)

        self.set_eligible_cores()

        return (True, warning_text)

    def set_eligible_cores(self):
        """Set the default eligible cores: every core except the first one,
        which is left for the host operating system."""
        not_first = False
        for core_pair in self.cores:
            if not_first:
                self.eligible_cores.append(core_pair)
            else:
                not_first = True
        return

    def get_info(self):
        """Returns processor parameters (self.family, self.manufacturer, self.version, self.features)"""
        return (self.family, self.manufacturer, self.version, self.features)

    def to_text(self):
        """Render the processor description as indented plain text."""
        text = '                id: ' + str(self.id_) + '\n'
        text += '                family: ' + self.family + '\n'
        text += '                manufacturer: ' + self.manufacturer + '\n'
        text += '                version: ' + self.version + '\n'
        text += '                features: ' + str(self.features) + '\n'
        text += '                cores: ' + str(self.cores) + '\n'
        text += '                eligible_cores: ' + str(self.eligible_cores) + '\n'
        return text
+    
class MemoryNode():
    """Memory attached to one NUMA node, aggregated from its DIMM modules.

    Per-module properties (type, frequency, size, form factor) are kept only
    when all modules agree; otherwise they are left as None.
    """

    def __init__(self):
        self.modules = list()               # List of MemoryModule() installed in the node
        self.nr_channels = None             # Integer. Number of modules installed in the node
        self.node_size = None               # Integer. Total size in KiB of memory installed in the node
        self.eligible_memory = None         # Integer. Size in KiB of eligible memory in the node
        self.hugepage_sz = None             # Integer. Size in KiB of hugepages
        self.hugepage_nr = None             # Integer. Number of hugepages allocated in the module
        self.eligible_hugepage_nr = None    # Integer. Number of eligible hugepages in the node
        self.type_ = None                   # Text. Type of memory modules; None if modules differ
        self.freq = None                    # Integer. Frequency of the modules in MHz; None if modules differ
        self.module_size = None             # Integer. Size of the modules in KiB; None if modules differ
        self.form_factor = None             # Text. Form factor of the modules; None if modules differ

    def assign(self, memory_node):
        """Copy the memory information from another MemoryNode."""
        return self.set(memory_node.modules, memory_node.hugepage_sz, memory_node.hugepage_nr)

    def set(self, modules, hugepage_sz, hugepage_nr):
        """Set the memory node information. hugepage_sz must be expressed in KiB.
        Returns (True, Warning) in case of success and (False, <error description>) in case of error"""
        if not isinstance(modules, list):
            return (False, 'The modules must be a list of elements of class MemoryModule')
        if not isinstance(hugepage_sz, int):
            return (False, 'The hugepage_sz variable must be an int expressing the size in KiB')
        if not isinstance(hugepage_nr, int):
            return (False, 'The hugepage_nr variable must be of type int')

        (self.hugepage_sz, self.hugepage_nr) = (hugepage_sz, hugepage_nr)
        self.node_size = self.nr_channels = 0

        # Track whether any module disagrees with the previous one on each property
        different_type = different_freq = different_module_size = different_form_factor = False
        prev_type = prev_freq = prev_module_size = prev_form_factor = None
        for module in modules:
            if not isinstance(module, MemoryModule):
                return (False, 'The modules must be a list of elements of class MemoryModule')
            self.modules.append(module)
            (self.type_, self.freq, self.module_size, self.form_factor) = (module.type_, module.freq, module.size, module.form_factor)
            self.node_size += self.module_size
            self.nr_channels += 1
            if prev_type != None and prev_type != self.type_:
                different_type = True
            if prev_freq != None and prev_freq != self.freq:
                different_freq = True
            if prev_module_size != None and prev_module_size != self.module_size:
                different_module_size = True
            if prev_form_factor != None and prev_form_factor != self.form_factor:
                different_form_factor = True
            (prev_type, prev_freq, prev_module_size, prev_form_factor) = (self.type_, self.freq, self.module_size, self.form_factor)

        # Properties that differ between modules are reported as unknown
        if different_type:
            self.type_ = None
        if different_freq:
            self.freq = None
        if different_module_size:
            self.module_size = None
        if different_form_factor:
            self.form_factor = None

        (return_value, error_code) = self.set_eligible_memory()
        if not return_value:
            return (return_value, error_code)

        return (True, "")

    def set_eligible_memory(self):
        """Sets the default eligible_memory and eligible_hugepage_nr. This is all memory but 2GiB and all hugepages"""
        self.eligible_memory = self.node_size - 2*1024*1024
        if self.eligible_memory < 0:
            return (False, "There is less than 2GiB of memory in the module")

        self.eligible_hugepage_nr = self.hugepage_nr
        return (True, "")

    def get_info(self):
        """Return memory information (self.freq, self.nr_channels, self.type_, self.node_size, self.hugepage_sz)"""
        return (self.freq, self.nr_channels, self.type_, self.node_size, self.hugepage_sz)

    def to_text(self):
        """Render the memory description as indented plain text."""
        # Bug fix: type_ and form_factor are wrapped in str() because both may
        # legitimately be None (set() clears them when modules differ), and
        # concatenating None raised TypeError
        text = '                node_size: ' + str(self.node_size) + '\n'
        text += '                nr_channels: ' + str(self.nr_channels) + '\n'
        text += '                eligible_memory: ' + str(self.eligible_memory) + '\n'
        text += '                hugepage_sz: ' + str(self.hugepage_sz) + '\n'
        text += '                hugepage_nr: ' + str(self.hugepage_nr) + '\n'
        text += '                eligible_hugepage_nr: ' + str(self.eligible_hugepage_nr) + '\n'
        text += '                type: ' + str(self.type_) + '\n'
        text += '                freq: ' + str(self.freq) + '\n'
        text += '                module_size: ' + str(self.module_size) + '\n'
        text += '                form_factor: ' + str(self.form_factor) + '\n'
        text += '                modules details:\n'
        for module in self.modules:
            text += module.to_text()
        return text
+        
class MemoryModule():
    """One physical memory module (DIMM) and its properties."""
    # Allowed values for the module fields, taken from definitionsClass
    possible_types = definitionsClass.memory_possible_types
    possible_form_factors = definitionsClass.memory_possible_form_factors

    def __init__(self):
        self.locator = None      # Text. Name of the memory module
        self.type_ = None        # Text. Type of memory module
        self.freq = None         # Integer. Frequency of the module in MHz
        self.size = None         # Integer. Size of the module in KiB
        self.form_factor = None  # Text. Form factor of the module

    def set(self, locator, type_, freq, size, form_factor):
        """Sets the memory module information.
        Frequency must be expressed in MHz and size in KiB.
        Returns (True, Warning) in case of success and (False, <error description>) in case of error"""
        # Type-check every argument, in the same order as before
        for value, required, name in ((locator, str, 'locator'),
                                      (type_, str, 'type_'),
                                      (form_factor, str, 'form_factor'),
                                      (freq, int, 'freq'),
                                      (size, int, 'size')):
            if not isinstance(value, required):
                return (False, "The type of the variable %s must be %s" % (name, required.__name__))

        warning_text = ""
        if form_factor not in self.possible_form_factors:
            warning_text += "memory form_factor '%s' not among: %s\n" % (form_factor, str(self.possible_form_factors))
        if type_ not in self.possible_types:
            warning_text += "memory type '%s' not among: %s\n" % (type_, str(self.possible_types))

        self.locator = locator
        self.type_ = type_
        self.freq = freq
        self.size = size
        self.form_factor = form_factor
        return (True, warning_text)

    def to_text(self):
        """Render the module description as indented plain text."""
        return ''.join(['                    ' + self.locator + ':\n',
                        '                        type: ' + self.type_ + '\n',
                        '                        freq: ' + str(self.freq) + '\n',
                        '                        size: ' + str(self.size) + '\n',
                        '                        form factor: ' + self.form_factor + '\n'])
+         
class Nic():
    """A network interface card holding one or more ports."""

    def __init__(self):
        self.model = None    # Text. Model of the nic
        self.ports = dict()  # Maps port key (PCI address) -> Port()

    def set_model(self, model):
        """Sets the model of the nic. Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
        if isinstance(model, str):
            self.model = model
            return (True, "")
        return (False, 'The \'model\' must be of type str')

    def add_port(self, port):
        """Adds a port to the nic. Returns (True,Warning) in case of success and ('False',<error description>) in case of error"""
        if not isinstance(port, Port):
            return (False, 'The \'port\' must be of class Port')
        # The full PCI address is used as the unique key of the port
        port_id = port.pci_device_id
        if port_id in self.ports:
            return (False, 'The \'port\' ' + port.pci_device_id + ' is duplicated in the nic')
        self.ports[port_id] = port
        return (True, "")

    def to_text(self):
        """Render the nic description and all its ports as indented text."""
        lines = ['                    model: ' + str(self.model) + '\n',
                 '                    ports: ' + '\n']
        for key, port in self.ports.iteritems():
            lines.append('                        "' + key + '":' + '\n')
            lines.append(port.to_text())
        return ''.join(lines)
+               
class Port():
    """A single network port (physical or virtual function) of a Nic."""

    def __init__(self):
        self.name = None                 # Text. Port name
        self.virtual = None              # Boolean. True when the port is a virtual function
        self.enabled = None              # Boolean. True when the port is enabled
        self.eligible = None             # Boolean. True when the port is eligible
        self.speed = None                # Integer. Speed in Mbps
        self.available_bw = None         # Integer. Available bandwidth in Mbps
        self.mac = None                  # list. MAC address of the port as ['XX','XX','XX','XX','XX','XX']
        self.pci_device_id_split = None  # list. PCI address of the port as ['XXXX','XX','XX','X']
        self.pci_device_id = None        # Full PCI address of the port
        self.PF_pci_device_id = None     # PCI address of the physical function (set for virtual functions)

    def to_text(self):
        """Render the port description as indented plain text; the PF pci id
        line is only emitted for virtual functions."""
        lines = ['                            pci: "' + str(self.pci_device_id) + '"\n',
                 '                            virtual: ' + str(self.virtual) + '\n']
        if self.virtual:
            lines.append('                            PF_pci_id: "' + self.PF_pci_device_id + '"\n')
        lines.append('                            eligible: ' + str(self.eligible) + '\n')
        lines.append('                            enabled: ' + str(self.enabled) + '\n')
        lines.append('                            speed: ' + str(self.speed) + '\n')
        lines.append('                            available bw: ' + str(self.available_bw) + '\n')
        lines.append('                            mac: ' + str(self.mac) + '\n')
        lines.append('                            source_name: ' + str(self.name) + '\n')
        return ''.join(lines)
+    
class Hypervisor():
    """Hypervisor description of a host: type, version, libvirt version and
    the virtualization domains it supports."""
    # Allowed values for the hypervisor fields, taken from definitionsClass
    possible_types = definitionsClass.hypervisor_possible_types
    possible_domain_types = definitionsClass.hypervisor_possible_domain_types

    def __init__(self):
        self.type_ = None           # Text. Hypervisor type
        self.version = None         # int. Hypervisor version
        self.lib_version = None     # int. Libvirt version used to compile hypervisor
        self.domains = list()       # list. List of all the available domains

    def set(self, hypervisor, version, lib_version, domains):
        """Validates and stores the hypervisor information.
        Returns (True, Warning) in case of success and (False, <error description>) in case of error"""
        warning_text = ""
        if not isinstance(hypervisor, str):
            return (False, 'The variable type_ must be of type str')
        if not isinstance(version, int):
            return (False, 'The variable version must be of type int')
        if not isinstance(lib_version, int):
            return (False, 'The library version must be of type int')
        if not isinstance(domains, list):
            return (False, 'Domains must be a list of the possible domains as str')

        if not hypervisor in self.possible_types:
            # Bug fix: the warning message misspelled 'Hypervisor' as 'Hyperpivor'
            warning_text += "Hypervisor '%s' not among: %s\n" % (hypervisor, str(self.possible_types))

        # Keep only the domains known to be valid; warn when none qualifies
        valid_domain_found = False
        for domain in domains:
            if not isinstance(domain, str):
                return (False, 'Domains must be a list of the possible domains as str')
            if domain in self.possible_domain_types:
                valid_domain_found = True
                self.domains.append(domain)

        if not valid_domain_found:
            warning_text += 'No valid domain found among: %s\n' % str(self.possible_domain_types)

        (self.version, self.lib_version, self.type_) = (version, lib_version, hypervisor)
        return (True, warning_text)

    def assign(self, hypervisor):
        """Copies the hypervisor information from another Hypervisor instance."""
        (self.version, self.lib_version, self.type_) = (hypervisor.version, hypervisor.lib_version, hypervisor.type_)
        for domain in hypervisor.domains:
            self.domains.append(domain)
        return

    def to_text(self):
        """Render the hypervisor description as indented plain text."""
        text = '    type: ' + self.type_ + '\n'
        text += '    version: ' + str(self.version) + '\n'
        text += '    libvirt version: ' + str(self.lib_version) + '\n'
        text += '    domains: ' + str(self.domains) + '\n'
        return text
+        
class OpSys():
    """Operating system description of a host."""
    # Allowed values for the OS fields, taken from definitionsClass
    possible_id = definitionsClass.os_possible_id
    possible_types = definitionsClass.os_possible_types
    possible_architectures = definitionsClass.os_possible_architectures

    def __init__(self):
        self.id_ = None               # Text. OS id as <Distibutor ID>-<Release>-<Codename>; in linux obtained with lsb_release -a
        self.type_ = None             # Text. Type of operating system
        self.bit_architecture = None  # Integer. Architecture

    def set(self, id_, type_, bit_architecture):
        """Validate and store the OS information.
        Returns (True, Warning) in case of success and (False, <error description>) in case of error"""
        if not isinstance(type_, str):
            return (False, 'The variable type_ must be of type str')
        if not isinstance(id_, str):
            return (False, 'The variable id_ must be of type str')
        if not isinstance(bit_architecture, str):
            return (False, 'The variable bit_architecture must be of type str')

        warning_text = ""
        if type_ not in self.possible_types:
            warning_text += "os type '%s' not among: %s\n" % (type_, str(self.possible_types))
        if id_ not in self.possible_id:
            warning_text += "os release '%s' not among: %s\n" % (id_, str(self.possible_id))
        if bit_architecture not in self.possible_architectures:
            warning_text += "os bit_architecture '%s' not among: %s\n" % (bit_architecture, str(self.possible_architectures))

        self.id_ = id_
        self.type_ = type_
        self.bit_architecture = bit_architecture
        return (True, warning_text)

    def assign(self, os):
        """Copy the OS information from another OpSys instance."""
        self.id_ = os.id_
        self.type_ = os.type_
        self.bit_architecture = os.bit_architecture
        return

    def to_text(self):
        """Render the OS description as indented plain text."""
        return ('    id: ' + self.id_ + '\n'
                + '    type: ' + self.type_ + '\n'
                + '    bit_architecture: ' + self.bit_architecture + '\n')
+     
def get_hostname(virsh_conn):
    """Return the hostname reported by the libvirt connection, stripped of
    any trailing newline."""
    hostname = virsh_conn.getHostname()
    return hostname.rstrip('\n')
+
def get_hugepage_size(ssh_conn):
    """Return the hugepage size reported by 'hugeadm --page-sizes' on the
    remote host, or 0 when the command produces no output.

    Raises paramiko.ssh_exception.SSHException when the command writes to
    stderr.  Assumes ssh_conn is an open paramiko-like SSH connection —
    TODO confirm against callers.
    """
    command = 'sudo hugeadm --page-sizes'
    _, out_stream, err_stream = ssh_conn.exec_command(command)
    err_text = err_stream.read()
    if len(err_text) > 0:
        raise paramiko.ssh_exception.SSHException(command + ' : ' + err_text)
    out_text = out_stream.read()
    return 0 if out_text == "" else int(out_text)
+
def get_hugepage_nr(ssh_conn, hugepage_sz, node_id):
    """Return the number of hugepages of size hugepage_sz (bytes) allocated
    on NUMA node node_id of the remote host, or 0 when the counter cannot
    be read or parsed.

    ssh_conn is assumed to be an open paramiko-like SSH connection — TODO
    confirm against callers.
    """
    # sysfs exposes per-node hugepage counters; the directory name uses KiB.
    # Floor division keeps the value an int under both Python 2 and 3
    # (plain '/' would produce e.g. '2048.0' in the path under Python 3).
    command = 'cat /sys/devices/system/node/node' + str(node_id) + \
              '/hugepages/hugepages-' + str(hugepage_sz // 1024) + 'kB/nr_hugepages'
    (_, stdout, _) = ssh_conn.exec_command(command)
    try:
        value = int(stdout.read())
    except Exception:
        # Missing sysfs file or non-numeric output: report zero hugepages.
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit.
        value = 0
    return value
+
def get_memory_information(ssh_conn, virsh_conn, memory_nodes):
    """Discover the memory modules of each NUMA node and fill *memory_nodes*.

    Parses the DMI data returned by libvirt getSysinfo() to build one
    MemoryModule per detected "memory_device" entry, grouped per NUMA node,
    then queries the host over SSH for the hugepage size and per-node
    hugepage count to create one MemoryNode per node.

    :param ssh_conn: open paramiko SSH connection to the host
    :param virsh_conn: open libvirt connection to the host
    :param memory_nodes: output dict; keys are node ids, values MemoryNode
    :return: (True, warning_text) on success, (False, error_text) on failure
    """
    warning_text=""
    tree=ElementTree.fromstring(virsh_conn.getSysinfo(0))
    memory_dict = dict()
    node_id = 0 #TODO revise. Added for allowing VM as compute hosts 
    for target in tree.findall("memory_device"):
        # Flags record which of the expected DMI fields were found for this module
        locator_f = size_f = freq_f = type_f = formfactor_f = False
        locator_f = True #TODO revise. Added for allowing VM as compute hosts
        module_form_factor = ""
        for entry in target.findall("entry"):
            if entry.get("name") == 'size':
                size_f = True
                size_split = entry.text.split(' ')
                if size_split[1] == 'MB':
                    module_size = int(size_split[0]) * 1024 * 1024
                elif size_split[1] == 'GB':
                    module_size = int(size_split[0]) * 1024 * 1024 * 1024
                elif size_split[1] == 'KB':
                    module_size = int(size_split[0]) * 1024
                else:
                    module_size = int(size_split[0])
                
            elif entry.get("name") == 'speed':
                freq_f = True
                freq_split = entry.text.split(' ')
                # NOTE(review): frequencies are scaled with 1024 multipliers
                # rather than 1000 for Hz units — confirm this is intended.
                if freq_split[1] == 'MHz':
                    module_freq = int(freq_split[0]) * 1024 * 1024
                elif freq_split[1] == 'GHz':
                    module_freq = int(freq_split[0]) * 1024 * 1024 * 1024
                elif freq_split[1] == 'KHz':
                    module_freq = int(freq_split[0]) * 1024
            
            elif entry.get("name") == 'type':
                type_f = True
                module_type = entry.text
                   
            elif entry.get("name") == 'form_factor':
                formfactor_f = True
                module_form_factor = entry.text  
            #TODO revise. Commented for allowing VM as compute hosts
            # elif entry.get("name") == 'locator' and not locator_f:
            #     # other case, it is obtained by bank_locator that we give priority to
            #     locator = entry.text
            #     pos = locator.find(module_form_factor)
            #     if module_form_factor == locator[0:len(module_form_factor) ]:
            #         pos = len(module_form_factor) +1 
            #     else:
            #         pos = 0
            #     if locator[pos] in "ABCDEFGH":  
            #         locator_f = True
            #         node_id = ord(locator[pos])-ord('A')
            #         #print entry.text, node_id
            # 
            # elif entry.get("name") == 'bank_locator':
            #     locator = entry.text
            #     pos = locator.find("NODE ")
            #     if pos >= 0 and len(locator)>pos+5:
            #         if locator[pos+5] in ("01234567"): #len("NODE ") is 5
            #             node_id = int(locator[pos+5])
            #             locator_f = True
            #  

        #When all module fields have been found add a new module to the list 
        if locator_f and size_f and freq_f and type_f and formfactor_f:
            #If the memory node has not yet been created create it
            if node_id not in memory_dict:
                memory_dict[node_id] = []
                
            #Add a new module to the memory node
            module = MemoryModule()
            #TODO revise. Changed for allowing VM as compute hosts
            (return_status, code) = module.set('NODE %d' % node_id, module_type, module_freq, module_size, module_form_factor)
            #(return_status, code) = module.set(locator, module_type, module_freq, module_size, module_form_factor)
            if not return_status:
                return (return_status, code)
            memory_dict[node_id].append(module)
            if code not in warning_text:
                warning_text += code
            # NOTE(review): incrementing per parsed module assumes one DIMM per
            # NUMA node; two modules on node 0 would land on nodes 0 and 1 —
            # confirm against real hardware (TODO below acknowledges this).
            node_id += 1 #TODO revise. Added for allowing VM as compute hosts
    
    #Fill memory nodes
    #Hugepage size is constant for all nodes
    hugepage_sz = get_hugepage_size(ssh_conn)
    for node_id, modules in memory_dict.iteritems():
        memory_node = MemoryNode()
        memory_node.set(modules, hugepage_sz, get_hugepage_nr(ssh_conn,hugepage_sz, node_id))
        memory_nodes[node_id] = memory_node
        
    return (True, warning_text)
+
def get_cpu_topology_ht(ssh_conn, topology):
    """Fill *topology* from /proc/cpuinfo when hyperthreading is active.

    On success *topology* maps each physical (socket) id to a list of
    hyperthread sibling lists, one per core id, and (True, "") is returned;
    on a malformed /proc/cpuinfo a (False, <error>) tuple is returned.
    Raises SSHException when the remote command writes to stderr.
    """
    command = 'cat /proc/cpuinfo'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error) > 0:
        raise paramiko.ssh_exception.SSHException(command + ' : ' + error)

    # Parse the per-logical-cpu stanzas (blank-line separated key: value blocks).
    # Note a trailing stanza without a closing blank line is intentionally dropped,
    # matching /proc/cpuinfo's format.
    entries = []
    current = {}
    for raw_line in stdout.readlines():
        if len(raw_line.strip()) != 0:
            key, value = raw_line.split(":", 1)
            current[key.strip()] = value.strip()
        else:
            entries.append(current)
            current = {}

    socket_ids = []
    core_ids = []
    thread_map = {}
    for entry in entries:
        for field in ("processor", "core id", "physical id"):
            if field not in entry:
                return (False, 'Error getting ' + field + ' value from /proc/cpuinfo')
            entry[field] = int(entry[field])

        if entry["core id"] not in core_ids:
            core_ids.append(entry["core id"])
        if entry["physical id"] not in socket_ids:
            socket_ids.append(entry["physical id"])
        thread_map.setdefault((entry["physical id"], entry["core id"]), []).append(entry["processor"])

    # One sibling list per core, in first-seen core-id order, per socket.
    for sock in socket_ids:
        topology[sock] = [thread_map[(sock, core)] for core in core_ids]

    return (True, "")
+
def get_processor_information(ssh_conn, vish_conn, processors):
    """Discover the CPU sockets of the host and fill *processors*.

    Combines libvirt capabilities (features, arch, NUMA topology),
    /proc/cpuinfo flags and dmesg (IOMMU detection) to build one
    ProcessorNode per detected socket.

    :param ssh_conn: open paramiko SSH connection to the host
    :param vish_conn: open libvirt connection (name kept as-is for callers)
    :param processors: output dict; keys are socket ids, values ProcessorNode
    :return: (True, warning_text) on success, (False, error_text) on failure
    """
    warning_text=""
    #Processor features are the same for all processors
    #TODO (at least using virsh capabilities)nr_numa_nodes
    capabilities = list()
    tree=ElementTree.fromstring(vish_conn.getCapabilities())
    # Map libvirt feature names to internal capability tags:
    # pdpe1gb -> 'lps' (1GB large pages), dca -> 'dioc', vmx/svm -> 'hwsv'
    # (hardware virtualization), ht -> 'ht' (hyperthreading)
    for target in tree.findall("host/cpu/feature"):
        if target.get("name") == 'pdpe1gb':
            capabilities.append('lps')
        elif target.get("name") == 'dca':
            capabilities.append('dioc')  
        elif target.get("name") == 'vmx' or target.get("name") == 'svm':
            capabilities.append('hwsv')
        elif target.get("name") == 'ht':
            capabilities.append('ht')
        
    target = tree.find("host/cpu/arch")
    if target.text == 'x86_64' or target.text == 'amd64':
        capabilities.append('64b')
      
    command = 'cat /proc/cpuinfo | grep flags'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error)>0:
        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
    line = stdout.readline()
    # ept (Intel) / npt (AMD) flags: nested/extended page tables -> 'tlbps'
    if 'ept' in line or 'npt' in line:
        capabilities.append('tlbps')
    
    #Find out if IOMMU is enabled
    command = 'dmesg |grep -e Intel-IOMMU'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error)>0:
        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
    if 'enabled' in stdout.read():
        capabilities.append('iommu')
      
    #Equivalent for AMD
    command = 'dmesg |grep -e AMD-Vi'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error)>0:
        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
    if len(stdout.read()) > 0:
        capabilities.append('iommu')
    
    #-----------------------------------------------------------
    topology = dict()
    #In case hyperthreading is active it is necessary to determine cpu topology using /proc/cpuinfo
    if 'ht' in capabilities:
        (return_status, code) = get_cpu_topology_ht(ssh_conn, topology)
        if not return_status:
            return (return_status, code)
        warning_text += code

    #Otherwise it is possible to do it using virsh capabilities
    else:
        for target in tree.findall("host/topology/cells/cell"):
            socket_id = int(target.get("id"))
            topology[socket_id] = list()
            for cpu in target.findall("cpus/cpu"):
                topology[socket_id].append(int(cpu.get("id")))
    
    #-----------------------------------------------------------         
    #Create a dictionary with the information of all processors
    #p_fam = p_man = p_ver = None
    tree=ElementTree.fromstring(vish_conn.getSysinfo(0))
    #print vish_conn.getSysinfo(0)
    #return (False, 'forces error for debuging')
    not_populated=False
    socket_id = -1     #in case we can not determine the socket_id we assume incremental order, starting by 0
    for target in tree.findall("processor"):
        count = 0
        socket_id += 1
        #Get processor id, family, manufacturer and version
        for entry in target.findall("entry"):
            if entry.get("name") == "status":
                if entry.text[0:11] == "Unpopulated":
                    not_populated=True
            elif entry.get("name") == 'socket_destination':
                # A DMI socket label of the form "CPU<n>" (n in 1..8)
                # overrides the incremental socket_id with n-1
                socket_text = entry.text
                if socket_text.startswith('CPU'):
                    socket_text = socket_text.strip('CPU')
                    socket_text = socket_text.strip() #removes trailing spaces
                    if socket_text.isdigit() and int(socket_text)<9 and int(socket_text)>0:
                        socket_id = int(socket_text) - 1
              
            elif entry.get("name") == 'family':
                family = entry.text
                count += 1
            elif entry.get("name") == 'manufacturer':
                manufacturer = entry.text
                count += 1
            elif entry.get("name") == 'version':
                version = entry.text.strip()
                count += 1
        if count != 3:
            return (False, 'Error. Not all expected fields could be found in processor')
        
        #Create and fill processor structure
        # NOTE(review): 'not_populated' is never reset, so once one empty
        # socket is seen all later sockets are skipped too — confirm intended.
        if not_populated:
            continue  #avoid inconsistence of some machines where more socket detected than 
        processor = ProcessorNode()
        (return_status, code) = processor.set(socket_id, family, manufacturer, version, capabilities, topology[socket_id])
        if not return_status:
            return (return_status, code)
        if code not in warning_text:
            warning_text += code

        #Add processor to the processors dictionary
        processors[socket_id] = processor
    
    return (True, warning_text)
+
def get_nic_information(ssh_conn, virsh_conn, nic_topology):   
    """Discover the network devices of the host and fill *nic_topology*.

    Walks the libvirt 'net' node devices, resolves each one to its parent PCI
    device, classifies it as PF (slot 0) or VF, finds its NUMA node and — for
    physical ports — queries speed and link state via ethtool. All ports of a
    node are attached to a single Nic object (see TODO below). Finally VFs are
    marked enabled only when their PF is enabled.

    :param ssh_conn: open paramiko SSH connection to the host (for ethtool)
    :param virsh_conn: open libvirt connection to the host
    :param nic_topology: output dict; keys are node ids, values list of Nic
    :return: (True, warning_text) on success, (False, error_text) on failure
    """
    warning_text=""
    #Get list of net devices
    net_devices = virsh_conn.listDevices('net',0)
    # NOTE(review): debug print left in; consider removing in production code
    print virsh_conn.listDevices('net',0)
    for device in net_devices:
        try:
            #Get the XML descriptor of the device:
            net_XML = ElementTree.fromstring(virsh_conn.nodeDeviceLookupByName(device).XMLDesc(0))
            #print "net_XML:" , net_XML
            #obtain the parent
            parent = net_XML.find('parent')
            if parent == None:
                print 'No parent was found in XML for device '+device
                #Error. continue?-------------------------------------------------------------
                continue
            if parent.text == 'computer':
                continue
            if not parent.text.startswith('pci_'):
                print device + ' parent is neither computer nor pci'
                #Error. continue?-------------------------------------------------------------
                continue
            interface = net_XML.find('capability/interface').text
            mac = net_XML.find('capability/address').text
            
            #Get the pci XML
            pci_XML = ElementTree.fromstring(virsh_conn.nodeDeviceLookupByName(parent.text).XMLDesc(0))
            #print pci_XML
            #Get pci
            name = pci_XML.find('name').text.split('_')
            pci = name[1]+':'+name[2]+':'+name[3]+'.'+name[4]
            
            #If slot == 0 it is a PF, otherwise it is a VF
            capability = pci_XML.find('capability')
            if capability.get('type') != 'pci':
                print device + 'Capability is not of type pci in '+parent.text
                #Error. continue?-------------------------------------------------------------
                continue
            slot = capability.find('slot').text
            bus = capability.find('bus').text
            node_id = None
            numa_ = capability.find('numa')
            if numa_ != None:
                node_id = numa_.get('node');
                if node_id != None: node_id =int(node_id)
            if slot == None or bus == None:
                print device + 'Bus and slot not detected in '+parent.text
                #Error. continue?-------------------------------------------------------------
                continue
            if slot != '0':
    #             print ElementTree.tostring(pci_XML)
                virtual = True
                capability_pf = capability.find('capability')
                if capability_pf.get('type') != 'phys_function':
                    print 'physical_function not found in VF '+parent.text
                    #Error. continue?-------------------------------------------------------------
                    continue
                PF_pci = capability_pf.find('address').attrib
                PF_pci_text = PF_pci['domain'].split('x')[1]+':'+PF_pci['bus'].split('x')[1]+':'+PF_pci['slot'].split('x')[1]+'.'+PF_pci['function'].split('x')[1]
                
            else:
                virtual = False
            
            #Obtain node for the port
            # Fallback when libvirt gives no <numa>: derive node from bus number
            if node_id == None:
                node_id = int(bus)>>6
            #print "node_id:", node_id
            
            #Only for non virtual interfaces: Obtain speed and if link is detected (this must be done using ethtool)
            if not virtual:
                command = 'sudo ethtool '+interface+' | grep -e Speed -e "Link detected"'
                (_, stdout, stderr) = ssh_conn.exec_command(command)
                error = stderr.read()
                if len(error) >0:
                    print 'Error running '+command+'\n'+error
                    #Error. continue?-------------------------------------------------------------
                    continue
                for line in stdout.readlines():
                    line = line.strip().rstrip('\n').split(': ')
                    if line[0] == 'Speed':
                        if line[1].endswith('Mb/s'):
                            speed = int(line[1].split('M')[0])*int(1e6)
                        elif line[1].endswith('Gb/s'):
                            speed = int(line[1].split('G')[0])*int(1e9)
                        elif line[1].endswith('Kb/s'):
                            speed = int(line[1].split('K')[0])*int(1e3)
                        else:
                            #the interface is listed but won't be used
                            speed = 0
                    elif line[0] == 'Link detected':
                        if line[1] == 'yes':
                            enabled = True
                        else:
                            enabled = False
                    else:
                        print 'Unnexpected output of command '+command+':'
                        print line
                        #Error. continue?-------------------------------------------------------------
                        continue
                
            if not node_id in nic_topology:
                nic_topology[node_id] = list()
                #With this implementation we make the RAD with only one nic per node and this nic has all ports, TODO: change this by including parent information of PF
                nic_topology[node_id].append(Nic())
             
            #Load the appropriate nic    
            nic = nic_topology[node_id][0]
            
            #Create a new port and fill it
            port = Port()
            port.name = interface
            port.virtual = virtual
            # NOTE(review): 'speed' (and 'enabled') are only assigned in the
            # non-virtual branch above; if a VF is processed before any PF this
            # raises NameError, silently absorbed by the broad except below —
            # confirm whether that ordering can happen on real hosts.
            port.speed = speed
            if virtual:
                port.available_bw = 0
                port.PF_pci_device_id = PF_pci_text
            else:
                port.available_bw = speed
                if speed == 0:
                    port.enabled = False
                else:
                    port.enabled = enabled

            port.eligible = virtual  #Only virtual ports are eligible
            port.mac = mac
            port.pci_device_id = pci
            port.pci_device_id_split = name[1:]
            
            #Save the port information
            nic.add_port(port)         
        except Exception,e:
            print 'Error: '+str(e)

    #set in vitual ports if they are enabled
    for nic in nic_topology.itervalues():
        for port in nic[0].ports.itervalues():
#             print port.pci_device_id
            if port.virtual:
                enabled = nic[0].ports.get(port.PF_pci_device_id)
                if enabled == None:
                    return(False, 'The PF '+port.PF_pci_device_id+' (VF '+port.pci_device_id+') is not present in ports dict')
                #Only if the PF is enabled the VF can be enabled
                if nic[0].ports[port.PF_pci_device_id].enabled:
                    port.enabled = True
                else:
                    port.enabled = False
            
    return (True, warning_text)     
+
def get_nic_information_old(ssh_conn, nic_topology):
    """Legacy NIC discovery based on 'lstopo-no-graphics --of xml' + ethtool.

    Parses the hwloc XML topology: for every NUMANode object it collects the
    Bridge children as NICs and their PCIDev children as ports, using ethtool
    over SSH for speed and link state. Kept for reference; superseded by
    get_nic_information().

    :param ssh_conn: open paramiko SSH connection to the host
    :param nic_topology: output dict; keys are node ids, values list of Nic
    :return: (True, "") on success, (False, error_text) on failure
    """
    command = 'lstopo-no-graphics --of xml'
    (_, stdout, stderr) = ssh_conn.exec_command(command)
    error = stderr.read()
    if len(error)>0:
        raise paramiko.ssh_exception.SSHException(command +' : '+ error)
    tree=ElementTree.fromstring(stdout.read())
    for target in tree.findall("object/object"):
        #Find numa nodes
        if target.get("type") != "NUMANode":
            continue
        node_id = int(target.get("os_index"))
        nic_topology[node_id] = list()
        
        #find nics in numa node
        for entry in target.findall("object/object"):
            if entry.get("type") != 'Bridge':
                continue
            nic_name = entry.get("name")
            model = None
            nic = Nic()
            
            #find ports in nic
            for pcidev in entry.findall("object"):
                if pcidev.get("type") != 'PCIDev':
                    continue
                enabled = speed = mac = pci_busid = None
                port = Port()
                model = pcidev.get("name")
                virtual = False
                if 'Virtual' in model:
                    virtual = True
                pci_busid = pcidev.get("pci_busid")
                for osdev in pcidev.findall("object"):
                    name = osdev.get("name")
                    for info in osdev.findall("info"):
                        if info.get("name") != 'Address':
                            continue
                        mac = info.get("value")
                        #get the port speed and status
                        command = 'sudo ethtool '+name
                        (_, stdout, stderr) = ssh_conn.exec_command(command)
                        error = stderr.read()
                        if len(error)>0:
                            return (False, 'Error obtaining '+name+' information: '+error)
                        ethtool = stdout.read()
                        if '10000baseT/Full' in ethtool:
                            speed = 10e9
                        elif '1000baseT/Full' in ethtool:
                            speed = 1e9
                        elif '100baseT/Full' in ethtool:
                            speed = 100e6
                        elif '10baseT/Full' in ethtool:
                            speed = 10e6
                        else:
                            return (False, 'Speed not detected in '+name)

                    # NOTE(review): 'ethtool' is only assigned inside the info
                    # loop above; an osdev without an 'Address' info entry would
                    # make this line raise NameError — confirm input guarantees.
                    enabled = False
                    if 'Link detected: yes' in ethtool:
                        enabled = True
                    
                    if speed != None and mac != None and pci_busid != None:
                        mac = mac.split(':')
                        pci_busid_split = re.split(':|\.', pci_busid)
                        #Fill the port information
                        port.set(name, virtual, enabled, speed, mac, pci_busid, pci_busid_split)
                        nic.add_port(port)
              
            if len(nic.ports) > 0:  
                #Fill the nic model
                if model != None:
                    nic.set_model(model)
                else:
                    nic.set_model(nic_name)
                
                #Add it to the topology
                nic_topology[node_id].append(nic)
                
    return (True, "")
+
def get_os_information(ssh_conn, os):
    """Fill the *os* (OpSys) object with the host distro id, OS type and arch.

    The distro id is taken from /etc/redhat-release when present, otherwise
    from 'lsb_release -d -s' (Ubuntu/Debian). OS type and bit architecture
    come from 'uname -o' / 'uname -i'.

    :param ssh_conn: open paramiko SSH connection to the host
    :param os: OpSys-like object whose set() method receives the values
    :return: (True, warning_text) on success, (False, error_text) on failure
    """
    warning_text = ""

    def _run_checked(command):
        # Execute a command and raise if anything is written to stderr.
        (_, stdout, stderr) = ssh_conn.exec_command(command)
        error = stderr.read()
        if len(error) > 0:
            raise paramiko.ssh_exception.SSHException(command + ' : ' + error)
        return stdout.read()

    # Distro identification: RedHat family first, then Ubuntu/Debian fallback.
    (_, stdout, _) = ssh_conn.exec_command('cat /etc/redhat-release')
    release_text = stdout.read()
    if len(release_text) == 0:
        (_, stdout, _) = ssh_conn.exec_command('lsb_release -d -s')
        release_text = stdout.read()
    if len(release_text) == 0:
        raise paramiko.ssh_exception.SSHException("Can not determinte release neither with 'lsb_release' nor with 'cat /etc/redhat-release'")
    id_ = release_text.rstrip('\n')

    type_ = _run_checked('uname -o').rstrip('\n')
    bit_architecture = _run_checked('uname -i').rstrip('\n')

    (return_status, code) = os.set(id_, type_, bit_architecture)
    if not return_status:
        return (return_status, code)
    warning_text += code
    return (True, warning_text)
+
def get_hypervisor_information(virsh_conn, hypervisor):
    """Fill the *hypervisor* object from the libvirt connection.

    Collects the hypervisor type, version and libvirt library version, plus
    the list of 64-bit fully-virtualized (hvm) guest domain types supported.

    :param virsh_conn: open libvirt connection to the host
    :param hypervisor: Hypervisor-like object whose set() method receives the values
    :return: (True, code) on success, (False, error_text) on failure
    """
    hv_type = virsh_conn.getType().rstrip('\n')
    hv_version = virsh_conn.getVersion()
    hv_lib_version = virsh_conn.getLibVersion()

    # Only 64-bit hvm (full virtualization) guests are accepted.
    domains = []
    capabilities_tree = ElementTree.fromstring(virsh_conn.getCapabilities())
    for guest in capabilities_tree.findall("guest"):
        if guest.find("os_type").text != 'hvm':
            continue
        if int(guest.find('arch/wordsize').text) == 64:
            domains.extend(domain.get("type") for domain in guest.findall("arch/domain"))

    (return_status, code) = hypervisor.set(hv_type, hv_version, hv_lib_version, domains)
    if not return_status:
        return (return_status, code)
    return (True, code)
+     
class RADavailableResourcesClass(RADclass):
    """Scheduling view of a RAD host: the full server resources copied from a
    RADclass plus the reservations already granted to VNFCs, tracked in
    self.reserved (VNFC name -> RADreservedResources)."""

    def __init__(self, resources):
        """Copy resources from the RADclass (server resources not taking into account resources used by VMs)"""
        #New
        self.reserved = dict()          #Dictionary of reserved resources for a server. Key are VNFC names and values RADreservedResources
        self.cores_consumption = None   #Dictionary of cpu consumption. Key is the cpu and the value is
        
        self.machine = resources.machine
        self.user = resources.user
        self.password = resources.password
        self.name = resources.name
        self.nr_processors = resources.nr_processors 
        self.processor_family = resources.processor_family
        self.processor_manufacturer = resources.processor_manufacturer
        self.processor_version = resources.processor_version
        self.processor_features = resources.processor_features
        self.memory_type = resources.memory_type
        self.memory_freq = resources.memory_freq
        self.memory_nr_channels = resources.memory_nr_channels
        self.memory_size = resources.memory_size
        self.memory_hugepage_sz = resources.memory_hugepage_sz
        self.hypervisor = Hypervisor()
        self.hypervisor.assign(resources.hypervisor)
        self.os = OpSys()
        self.os.assign(resources.os)
        self.nodes = dict()
        for node_k, node_v in resources.nodes.iteritems():
            self.nodes[node_k] = Node()
            self.nodes[node_k].assign(node_v)
        return
    
    def _get_cores_consumption_warnings(self):
        """Returns list of warning strings in case warnings are generated. 
        In case no warnings are generated the return value will be an empty list.
        NOTE(review): on SSH/command failure a (False, error) tuple is returned
        instead of a list — callers must cope with both shapes."""
        warnings = list()
        #Get the cores consumption
        (return_status, code) = get_ssh_connection(self.machine, self.user, self.password)
        if not return_status:
            return (return_status, code)
        ssh_conn = code
        command = 'mpstat -P ALL 1 1 | grep Average | egrep -v CPU\|all'
        (_, stdout, stderr) = ssh_conn.exec_command(command)
        error = stderr.read()
        if len(error) > 0:
            return (False, error)
    
        self.cores_consumption = dict()
        for line in stdout.readlines():
            cpu_usage_split = re.split('\t| *', line.rstrip('\n'))
            # NOTE(review): field [10] is treated as a 0..1 fraction, but mpstat
            # prints %idle in the 0..100 range — confirm the intended threshold.
            usage = 100 *(1 - float(cpu_usage_split[10]))
            if usage > 0:
                self.cores_consumption[int(cpu_usage_split[1])] = usage 
        ssh_conn.close()   
        #Check if any core marked as available in the nodes has cpu_usage > 0
        for _, node_v in self.nodes.iteritems():
            cores = node_v.processor.eligible_cores
            for cpu in cores:
                if len(cpu) > 1:
                    for core in cpu:
                        if core in self.cores_consumption:
                            warnings.append('Warning: Core '+str(core)+' is supposed to be idle but it is consuming '+str(self.cores_consumption[core])+'%')
                else:
                    if cpu in self.cores_consumption:
                        # BUGFIX: this branch reported str(core), a stale or
                        # undefined loop variable from the multi-thread branch
                        # above (NameError when hit first); the single-thread
                        # case must report 'cpu', whose consumption is indexed.
                        warnings.append('Warning: Core '+str(cpu)+' is supposed to be idle but it is consuming '+str(self.cores_consumption[cpu])+'%')
        
        return warnings
    
    def reserved_to_text(self):
        """Return a human readable dump of all per-VNFC reservations."""
        text = str()
        for VNFC_name, VNFC_reserved in self.reserved.iteritems():
            text += '    VNFC: '+str(VNFC_name)+'\n'
            text += VNFC_reserved.to_text()
                    
        return text
    
    def obtain_usage(self):
        """Return a dict with total resources ('RAD') and current consumption
        ('occupation') per NUMA node: cores, memory and physical-port usage."""
        resp = dict()
        #Iterate through nodes to get cores, eligible cores, memory and physical ports (save ports usage for next section)
        nodes = dict()
        ports_usage = dict()
        hugepage_size = dict()
        for node_k, node_v in self.nodes.iteritems():
            node = dict()
            ports_usage[node_k] = dict()
            eligible_cores = list()
            for pair in node_v.processor.eligible_cores:
                if isinstance(pair, list):
                    for element in pair:
                        eligible_cores.append(element)
                else:
                    eligible_cores.append(pair)
            node['cpus'] = {'cores':node_v.processor.cores,'eligible_cores':eligible_cores}
            node['memory'] = {'size':str(node_v.memory.node_size/(1024*1024*1024))+'GB','eligible':str(node_v.memory.eligible_memory/(1024*1024*1024))+'GB'}
            hugepage_size[node_k] = node_v.memory.hugepage_sz
            
            ports = dict()
            for nic in node_v.nic_list:
                for port in nic.ports.itervalues():
                    if port.enabled and not port.virtual: 
                        ports[port.name] = {'speed':str(port.speed/1000000000)+'G'}
                        ports_usage[node_k][port.name] = 100 - int(100*float(port.available_bw)/float(port.speed))
            node['ports'] = ports
            nodes[node_k] = node
        resp['RAD'] = nodes
        
        #Iterate through reserved section to get used cores, used memory and port usage
        cores = dict()
        memory = dict()
        for node_k in self.nodes.iterkeys():
            if not node_k in cores:
                cores[node_k] = list()
                memory[node_k] = 0
            for _, reserved in self.reserved.iteritems():
                if node_k in reserved.node_reserved_resources:
                    node_v = reserved.node_reserved_resources[node_k]
                    cores[node_k].extend(node_v.reserved_cores)
                    memory[node_k] += node_v.reserved_hugepage_nr * hugepage_size[node_k]
                            
        occupation = dict()       
        for node_k in self.nodes.iterkeys():
            ports = dict()
            for name, usage in ports_usage[node_k].iteritems():
                ports[name] = {'occupied':str(usage)+'%'}
            occupation[node_k] = {'cores':cores[node_k],'memory':str(memory[node_k]/(1024*1024*1024))+'GB','ports':ports}
        resp['occupation'] = occupation
        
        return resp            
+    
class RADreservedResources():
    """Resources reserved for one VNFC across the nodes of a RAD host."""

    def __init__(self):
        # key: RAD node id -> NodeReservedResources
        self.node_reserved_resources = dict()
        # pci address of the management interface inside the VNF
        self.mgmt_interface_pci = None
        # path of the VNFC image in the remote machine
        self.image = None

    def update(self, reserved):
        """Merge *reserved* into this object; fail on duplicated node ids."""
        self.image = reserved.image
        self.mgmt_interface_pci = reserved.mgmt_interface_pci
        for node_id, node_resources in reserved.node_reserved_resources.iteritems():
            if node_id in self.node_reserved_resources.keys():
                return (False, 'Duplicated node entry '+str(node_id)+' in reserved resources')
            self.node_reserved_resources[node_id] = node_resources
        return (True, "")

    def to_text(self):
        """Return a human readable dump of the reservation."""
        parts = ['        image: '+str(self.image)+'\n']
        for node_id, node_reserved in self.node_reserved_resources.iteritems():
            parts.append('        Node ID: '+str(node_id)+'\n')
            parts.append(node_reserved.to_text())
        return ''.join(parts)
+
class NodeReservedResources():
    """Per-NUMA-node share of a VNFC reservation: cores, hugepages and ports."""

    def __init__(self):
        # all cores the VNFC uses on this node
        self.reserved_cores = list()
        # number of hugepages needed by the VNFC on this node
        self.reserved_hugepage_nr = 0
        # key: physical port pci -> VNFC port description
        self.reserved_ports = dict()
        self.vlan_tags = dict()
        self.cpu_pinning = None

    def to_text(self):
        """Return a human readable dump of this node's reservation."""
        parts = [
            '            cores: '+str(self.reserved_cores)+'\n',
            '            cpu_pinning: '+str(self.cpu_pinning)+'\n',
            '            hugepages_nr: '+str(self.reserved_hugepage_nr)+'\n',
        ]
        for port_pci, port_description in self.reserved_ports.iteritems():
            parts.append('            port: '+str(port_pci)+'\n')
            parts.append(port_description.to_text())
        return ''.join(parts)
+    
+    
+        
diff --git a/osm_openvim/__init__.py b/osm_openvim/__init__.py
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/osm_openvim/auxiliary_functions.py b/osm_openvim/auxiliary_functions.py
new file mode 100644 (file)
index 0000000..795d84a
--- /dev/null
@@ -0,0 +1,227 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+Common useful functions
+'''
+
+__author__="Alfonso Tierno, Pablo Montes"
+__date__ ="$10-jul-2014 12:07:15$"
+
+
+import yaml
+import paramiko 
+from definitionsClass import definitionsClass
+from definitionsClass import Units
+import random
+from jsonschema import validate as js_v, exceptions as js_e
+
+def check_and_convert_units(value, value_type):
+    """TODO: Update description
+    This method receives a text with 2 fields using a blank as separator and a list of valid units. The first field must represent a number
+    and the second one units. 
+    In case the second field is not one of valid_units (False, <error description>) is returned.
+    In case the second field is a valid unit the first number is converted in the following way:
+    Gbps, Mbps, kbps -> Mbps
+    GB,MB,KB,B,GiB,MiB,KiB -> B
+    GHz,MHz,KHz,Hz -> Hz
+    If conversion is done successfully (True, <converted value>) is returned"""
+    try:
+        if value_type == Units.no_units:
+            if not isinstance(value,int) and not isinstance(value,float):
+                return (False, 'When no units are used only an integer or float must be used')
+        elif value_type == Units.name:
+            if not isinstance(value,str):
+                return (False, 'For names str must be used')
+        elif value_type == Units.boolean:
+            if not isinstance(value,bool):
+                return (False, 'A boolean or Yes/No mut be used')
+        else:
+            splitted  = value.split(' ')
+            if len(splitted) != 2:
+                return (False, 'Expected format: <value> <units>')
+            (value, units) = splitted 
+            if ',' in value or '.' in value:
+                return (False, 'Use integers to represent numeric values')
+                
+            value = int(value)
+            
+#            if not isinstance(value_type, Units):
+#                return (False, 'Not valid value_type')
+            
+            valid_units = definitionsClass.units[value_type]
+            
+            #Convert everything to upper in order to make comparations easier
+            units = units.upper()
+            for i in range(0, len(valid_units)):
+                valid_units[i] = valid_units[i].upper()
+            
+            #Check the used units are valid ones
+            if units not in valid_units:
+                return (False, 'Valid units are: '+', '.join(valid_units))
+
+            if units.startswith('GI'):
+                value = value *1024*1024*1024
+            elif units.startswith('MI'):
+                value = value *1024*1024
+            elif units.startswith('KI'):
+                value = value *1024
+            elif units.startswith('G'):
+                value = value *1000000000
+            elif units.startswith('M'):
+                value = value *1000000
+            elif units.startswith('K'):
+                value = value *1000
+    except Exception,e:
+        return (False, 'Unexpected error in auxiliary_functions.py - check_and_convert_units:\n'+str(e))
+
+    return (True, value)
+        
+def get_ssh_connection(machine, user=None, password=None):
+    """Establishes an ssh connection to the remote server. Returns (True, paramiko_ssh) in case of success or (False, <error message>) in case of error"""
+    try:
+        s = paramiko.SSHClient()
+        s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        s.load_system_host_keys()
+        s.connect(machine, 22, user, password, timeout=10)
+    except Exception,e:
+        return (False, 'It was not possible to connect to '+machine+str(e))
+        
+    return (True, s)
+
+def run_in_remote_server(s,command):
+    """Runs in the remote server the specified command. Returns (True, stdout) in case of success or (False, <error message>) in case of error"""
+    try:
+        (_, stdout, stderr) = s.exec_command(command)
+        error_msg = stderr.read()
+        if len(error_msg) > 0:
+            return (False, error_msg)
+    except Exception,e:
+        return (False, str(e))
+    
+    return (True, stdout)
+
+def read_file(file_):
+    """Reads a file specified by 'file' and returns (True,<its content as a string>) in case of success or (False, <error message>) in case of failure"""
+    try:
+        f = open(file_, 'r')
+        read_data = f.read()
+        f.close()
+    except Exception,e:
+        return (False, str(e))
+      
+    return (True, read_data)
+
+def check_contains(element, keywords):
+    """Auxiliary function used to check if a yaml structure contains or not
+    an specific field. Returns a bool"""
+    for key in keywords:
+        if not key in element:
+            return False      
+    return True
+
+def check_contains_(element, keywords):
+    """Auxiliary function used to check if a yaml structure contains or not
+    an specific field. Returns a bool,missing_variables"""
+    for key in keywords:
+        if not key in element:
+            return False, key      
+    return True, None
+
+def write_file(file_, content):
+    """Generates a file specified by 'file' and fills it using 'content'"""
+    f = open(file_, 'w')
+    f.write(content)
+    f.close()
+
+def nice_print(yaml_element):
+    """Print a yaml structure. Used mainly for debugging"""
+    print(yaml.dump(yaml_element, default_flow_style=False))
+    
+def new_random_mac():
+    mac = (0xE2, random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff) )
+    return ':'.join(map(lambda x: "%02X" % x, mac)) 
+
+def parse_dict(var, template):
+    if type(var) is not dict: return -1, 'not a dictionary'
+    for _,tv in template.items():
+        if type(tv) is list:
+            return
+    
+def delete_nulls(var):
+    if type(var) is dict:
+        for k in var.keys():
+            if var[k] is None: del var[k]
+            elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple: 
+                if delete_nulls(var[k]): del var[k]
+        if len(var) == 0: return True
+    elif type(var) is list or type(var) is tuple:
+        for k in var:
+            if type(k) is dict: delete_nulls(k)
+        if len(var) == 0: return True
+    return False
+
+def get_next_2pow(var):
+    if var==0: return 0
+    v=1
+    while v<var: v=v*2
+    return v        
+
+def check_valid_uuid(uuid):
+    id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+    try:
+        js_v(uuid, id_schema)
+        return True
+    except js_e.ValidationError:
+        return False
+
+def DeleteNone(var):
+    '''Removes recursively empty dictionaries or lists
+    return True if var is an empty dict or list '''
+    if type(var) is dict:
+        for k in var.keys():
+            if var[k] is None: del var[k]
+            elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple: 
+                if DeleteNone(var[k]): del var[k]
+        if len(var) == 0: return True
+    elif type(var) is list or type(var) is tuple:
+        for k in var:
+            if type(k) is dict: DeleteNone(k)
+        if len(var) == 0: return True
+    return False
+    
+def gen_random_mac():
+    '''generates a random mac address. Avoid multicast, broadcast, etc
+    '''
+    mac = (
+        #52,54,00,
+        #2 + 4*random.randint(0x00, 0x3f), #4 multiple, unicast local mac address
+        0x52,
+        random.randint(0x00, 0xff),
+        random.randint(0x00, 0xff),
+        random.randint(0x00, 0xff),
+        random.randint(0x00, 0xff),
+        random.randint(0x00, 0xff) 
+    )
+    return ':'.join(map(lambda x: "%02x" % x, mac))
+
diff --git a/osm_openvim/definitionsClass.py b/osm_openvim/definitionsClass.py
new file mode 100644 (file)
index 0000000..70168e8
--- /dev/null
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+Definitions of classes for the Host operating server, ...  
+'''
+
+__author__="Pablo Montes"
+
+
+class Units():
+    memory_1000 = 1
+    memory_1024 = 2
+    memory_full = 3
+    bw = 4
+    freq = 5
+    no_units = 6
+    name = 7
+    boolean = 8
+    
+class definitionsClass():
+    user = 'n2'
+    password = 'n2'
+    extrict_hugepages_allocation = True
+    processor_possible_features = ['64b','iommu','lps','tlbps','hwsv','dioc','ht']
+    processor_possible_manufacturers = ['Intel','AMD']
+    processor_possible_families = ['Xeon']
+    processor_possible_versions = ['Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz', 'Intel(R) Xeon(R) CPU E5-2680 0 @ 2.70GHz','Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz']
+    memory_possible_types = ['DDR2','DDR3']
+    memory_possible_form_factors = ['DIMM']
+    hypervisor_possible_types = ['QEMU']
+    hypervisor_possible_domain_types = ['kvm'] #['qemu', 'kvm']
+    os_possible_id = ['Red Hat Enterprise Linux Server release 6.4 (Santiago)',
+                      'Red Hat Enterprise Linux Server release 6.5 (Santiago)',
+                      'Red Hat Enterprise Linux Server release 6.6 (Santiago)',
+                      'CentOS release 6.5 (Final)',
+                      'CentOS release 6.6 (Final)',
+                      'Red Hat Enterprise Linux Server release 7.0 (Maipo)',
+                      'Red Hat Enterprise Linux Server release 7.1 (Maipo)',
+                    ]
+    os_possible_types = ['GNU/Linux']
+    os_possible_architectures = ['x86_64']
+    hypervisor_possible_composed_versions = ['QEMU-kvm']
+    units = dict() 
+    units[Units.bw] = ['Gbps', 'Mbps', 'kbps', 'bps']
+    units[Units.freq] = ['GHz', 'MHz', 'KHz', 'Hz']
+    units[Units.memory_1000] = ['GB', 'MB', 'KB', 'B']
+    units[Units.memory_1024] = ['GiB', 'MiB', 'KiB', 'B']
+    units[Units.memory_full] = ['GB', 'MB', 'KB', 'GiB', 'MiB', 'KiB', 'B']
+    valid_hugepage_sz = [1073741824, 2097152] #In bytes
+    valid_VNFC_iface_types = ['mgmt','data']
+    
+    def __init__(self):
+        return
+        
diff --git a/osm_openvim/dhcp_thread.py b/osm_openvim/dhcp_thread.py
new file mode 100644 (file)
index 0000000..da7176b
--- /dev/null
@@ -0,0 +1,296 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+This is a thread that interacts with the dhcp server to get the IP addresses
+'''
+__author__="Pablo Montes, Alfonso Tierno"
+__date__ ="$4-Jan-2016 12:07:15$"
+
+
+
+import threading
+import time
+import Queue
+import paramiko
+import random
+import subprocess
+import logging
+
+#TODO: insert a logging system
+
+class dhcp_thread(threading.Thread):
+    def __init__(self, dhcp_params, db, db_lock, test, dhcp_nets, logger_name=None, debug=None):
+        '''Init a thread.
+        Arguments: thread_info must be a dictionary with:
+            'dhcp_params' dhcp server parameters with the following keys:
+                mandatory : user, host, port, key, ifaces(interface name list of the one managed by the dhcp)
+                optional:  password, key, port(22)
+            'db' 'db_lock': database class and lock for accessing it
+            'test': in test mode no access to a server is done, and ip is invented
+        '''
+        threading.Thread.__init__(self)
+        self.dhcp_params = dhcp_params
+        self.db = db
+        self.db_lock = db_lock
+        self.test = test
+        self.dhcp_nets = dhcp_nets
+        self.ssh_conn = None
+        if logger_name:
+            self.logger_name = logger_name
+        else:
+            self.logger_name = "openvim.dhcp"
+        self.logger = logging.getLogger(self.logger_name)
+        if debug:
+            self.logger.setLevel(getattr(logging, debug))
+
+        self.mac_status ={} #dictionary of mac_address to retrieve information
+            #ip: None
+            #retries: 
+            #next_reading: time for the next trying to check ACTIVE status or IP
+            #created: time when it was added 
+            #active: time when the VM becomes into ACTIVE status
+            
+        
+        self.queueLock = threading.Lock()
+        self.taskQueue = Queue.Queue(2000)
+        
+    def ssh_connect(self):
+        try:
+            #Connect SSH
+            self.ssh_conn = paramiko.SSHClient()
+            self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+            self.ssh_conn.load_system_host_keys()
+            self.ssh_conn.connect(self.dhcp_params["host"], port=self.dhcp_params.get("port", 22),
+                                  username=self.dhcp_params["user"], password=self.dhcp_params.get("password"),
+                                  key_filename=self.dhcp_params.get("key"), timeout=2)
+        except paramiko.ssh_exception.SSHException as e:
+            self.logger.error("ssh_connect ssh Exception " + str(e))
+        
+    def load_mac_from_db(self):
+        #TODO get macs to follow from the database
+        self.logger.debug("load macs from db")
+        self.db_lock.acquire()
+        r,c = self.db.get_table(SELECT=('mac','ip_address','nets.uuid as net_id', ),
+                                FROM='ports join nets on ports.net_id=nets.uuid', 
+                                WHERE_NOT={'ports.instance_id': None, 'nets.provider': None})
+        self.db_lock.release()
+        now = time.time()
+        self.mac_status ={}
+        if r<0:
+            self.logger.error("Error getting data from database: " + c)
+            return
+        for port in c:
+            if port["net_id"] in self.dhcp_nets:
+                self.mac_status[ port["mac"] ] = {"ip": port["ip_address"], "next_reading": now, "created": now, "retries":0}
+    
+    def insert_task(self, task, *aditional):
+        try:
+            self.queueLock.acquire()
+            task = self.taskQueue.put( (task,) + aditional, timeout=5) 
+            self.queueLock.release()
+            return 1, None
+        except Queue.Full:
+            return -1, "timeout inserting a task over dhcp_thread"
+
+    def run(self):
+        self.logger.debug("starting, nets: " + str(self.dhcp_nets))
+        next_iteration = time.time() + 10
+        while True:
+            self.load_mac_from_db()
+            while True:
+                try:
+                    self.queueLock.acquire()
+                    if not self.taskQueue.empty():
+                        task = self.taskQueue.get()
+                    else:
+                        task = None
+                    self.queueLock.release()
+
+                    if task is None:
+                        now=time.time()
+                        if now >= next_iteration:
+                            next_iteration = self.get_ip_from_dhcp()
+                        else:
+                            time.sleep(1)
+                        continue
+
+                    if task[0] == 'add':
+                        self.logger.debug("processing task add mac " + str(task[1]))
+                        now=time.time()
+                        self.mac_status[task[1] ] = {"ip": None, "next_reading": now, "created": now, "retries":0}
+                        next_iteration = now
+                    elif task[0] == 'del':
+                        self.logger.debug("processing task del mac " + str(task[1]))
+                        if task[1] in self.mac_status:
+                            del self.mac_status[task[1] ]
+                    elif task[0] == 'exit':
+                        self.logger.debug("processing task exit")
+                        self.terminate()
+                        return 0
+                    else:
+                        self.logger.error("unknown task: " + str(task))
+                except Exception as e:
+                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
+          
+    def terminate(self):
+        try:
+            if self.ssh_conn:
+                self.ssh_conn.close()
+        except Exception as e:
+            self.logger.error("terminate Exception: " + str(e))
+        self.logger.debug("exit from dhcp_thread")
+
+    def get_ip_from_dhcp(self):
+        
+        now = time.time()
+        next_iteration= now + 40000 # >10 hours
+        
+        #print self.name, "Iteration" 
+        for mac_address in self.mac_status:
+            if now < self.mac_status[mac_address]["next_reading"]:
+                if self.mac_status[mac_address]["next_reading"] < next_iteration:
+                    next_iteration = self.mac_status[mac_address]["next_reading"]
+                continue
+            
+            if self.mac_status[mac_address].get("active") == None:
+                #check from db if already active
+                self.db_lock.acquire()
+                r,c = self.db.get_table(FROM="ports as p join instances as i on p.instance_id=i.uuid",
+                                        WHERE={"p.mac": mac_address, "i.status": "ACTIVE"})
+                self.db_lock.release()
+                if r>0:
+                    self.mac_status[mac_address]["active"] = now
+                    self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2
+                    self.logger.debug("mac %s VM ACTIVE", mac_address)
+                    self.mac_status[mac_address]["retries"] = 0
+                else:
+                    #print self.name, "mac %s  VM INACTIVE" % (mac_address)
+                    if now - self.mac_status[mac_address]["created"] > 300:
+                        #modify Database to tell openmano that we can not get dhcp from the machine
+                        if not self.mac_status[mac_address].get("ip"):
+                            self.db_lock.acquire()
+                            r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address})
+                            self.db_lock.release()
+                            self.mac_status[mac_address]["ip"] = "0.0.0.0"
+                            self.logger.debug("mac %s >> set to 0.0.0.0 because of timeout", mac_address)
+                        self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60
+                    else:
+                        self.mac_status[mac_address]["next_reading"] = (int(now)/6 +1)* 6
+                if self.mac_status[mac_address]["next_reading"] < next_iteration:
+                    next_iteration = self.mac_status[mac_address]["next_reading"]
+                continue
+            
+
+            if self.test:
+                if self.mac_status[mac_address]["retries"]>random.randint(10,100): #wait between 10 and 100 seconds to produce a fake IP
+                    content = self.get_fake_ip()
+                else:
+                    content = None
+            elif self.dhcp_params["host"]=="localhost":
+                try:
+                    command = ['get_dhcp_lease.sh',  mac_address]
+                    content = subprocess.check_output(command)
+                except Exception as e:
+                    self.logger.error("get_ip_from_dhcp subprocess Exception " + str(e))
+                    content = None
+            else:
+                try:
+                    if not self.ssh_conn:
+                        self.ssh_connect()
+                    command = 'get_dhcp_lease.sh ' +  mac_address
+                    (_, stdout, _) = self.ssh_conn.exec_command(command)
+                    content = stdout.read()
+                except paramiko.ssh_exception.SSHException as e:
+                    self.logger.error("get_ip_from_dhcp: ssh_Exception: " + str(e))
+                    content = None
+                    self.ssh_conn = None
+                except Exception as e:
+                    self.logger.error("get_ip_from_dhcp: Exception: " + str(e))
+                    content = None
+                    self.ssh_conn = None
+
+            if content:
+                self.mac_status[mac_address]["ip"] = content
+                #modify Database
+                self.db_lock.acquire()
+                r,c = self.db.update_rows("ports", {"ip_address": content}, {"mac": mac_address})
+                self.db_lock.release()
+                if r<0:
+                    self.logger.error("Database update error: " + c)
+                else:
+                    self.mac_status[mac_address]["retries"] = 0
+                    self.mac_status[mac_address]["next_reading"] = (int(now)/3600 +1)* 36000 # 10 hours
+                    if self.mac_status[mac_address]["next_reading"] < next_iteration:
+                        next_iteration = self.mac_status[mac_address]["next_reading"]
+                    self.logger.debug("mac %s >> %s", mac_address, content)
+                    continue
+            #a fail has happen
+            self.mac_status[mac_address]["retries"] +=1
+            #next iteration is every 2sec at the beginning; every 5sec after a minute, every 1min after a 5min
+            if now - self.mac_status[mac_address]["active"] > 120:
+                #modify Database to tell openmano that we can not get dhcp from the machine
+                if not self.mac_status[mac_address].get("ip"):
+                    self.db_lock.acquire()
+                    r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address})
+                    self.db_lock.release()
+                    self.mac_status[mac_address]["ip"] = "0.0.0.0"
+                    self.logger.debug("mac %s >> set to 0.0.0.0 because of timeout", mac_address)
+            
+            if now - self.mac_status[mac_address]["active"] > 300:
+                self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60
+            elif now - self.mac_status[mac_address]["active"] > 60:
+                self.mac_status[mac_address]["next_reading"] = (int(now)/6 +1)* 6
+            else:
+                self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2
+                
+            if self.mac_status[mac_address]["next_reading"] < next_iteration:
+                next_iteration = self.mac_status[mac_address]["next_reading"]
+        return next_iteration    
+    
+    def get_fake_ip(self):
+        fake_ip = "192.168.{}.{}".format(random.randint(1,254), random.randint(1,254) )
+        while True:
+            #check not already provided
+            already_used = False
+            for mac_address in self.mac_status:
+                if self.mac_status[mac_address]["ip"] == fake_ip:
+                    already_used = True
+                    break
+            if not already_used:
+                return fake_ip
+
+
+#EXAMPLE of bash script that must be available at the DHCP server for "isc-dhcp-server" type
+#     $ cat ./get_dhcp_lease.sh
+#     #!/bin/bash
+#     awk '
+#     ($1=="lease" && $3=="{"){ lease=$2; active="no"; found="no" }
+#     ($1=="binding" && $2=="state" && $3=="active;"){ active="yes" }
+#     ($1=="hardware" && $2=="ethernet" && $3==tolower("'$1';")){ found="yes" }
+#     ($1=="client-hostname"){ name=$2 }
+#     ($1=="}"){ if (active=="yes" && found=="yes"){ target_lease=lease; target_name=name}}
+#     END{printf("%s", target_lease)} #print target_name
+#     ' /var/lib/dhcp/dhcpd.leases
+
diff --git a/osm_openvim/floodlight.py b/osm_openvim/floodlight.py
new file mode 100644 (file)
index 0000000..826e300
--- /dev/null
@@ -0,0 +1,473 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+"""
+Implement the plugin for the floodlight openflow controller
+It creates the class OF_conn to create dataplane connections
+with static rules based on packet destination MAC address
+"""
+
+__author__ = "Pablo Montes, Alfonso Tierno"
+__date__ = "$28-oct-2014 12:07:15$"
+
+import json
+import requests
+import logging
+import openflow_conn
+
+
+class OF_conn(openflow_conn.OpenflowConn):
+    """
+    Openflow Connector for Floodlight.
+    No MAC learning is used
+    version 0.9 or 1.X is autodetected
+    version 1.X is in progress, not finished!!!
+    """
+
+    def __init__(self, params):
+        """
+        Constructor
+        :param self:
+        :param params: dictionary with the following keys:
+               of_dpid:     DPID to use for this controller
+               of_ip:       controller IP address
+               of_port:     controller TCP port
+               of_version:  version, can be "0.9" or "1.X". By default it is autodetected
+               of_debug:    debug level for logging. Default to ERROR
+               other keys are ignored
+        :return:  Raise a ValueError exception if some parameter is missing or wrong
+        """
+        # check params
+        if "of_ip" not in params or params["of_ip"] == None or "of_port" not in params or params["of_port"] == None:
+            raise ValueError("IP address and port must be provided")
+
+        openflow_conn.OpenflowConn.__init__(self, params)
+
+        self.name = "Floodlight"
+        self.dpid = str(params["of_dpid"])
+        self.url = "http://%s:%s" % (str(params["of_ip"]), str(params["of_port"]))
+
+        self.pp2ofi = {}  # From Physical Port to OpenFlow Index
+        self.ofi2pp = {}  # From OpenFlow Index to Physical Port
+        self.headers = {'content-type': 'application/json', 'Accept': 'application/json'}
+        self.version = None
+        self.logger = logging.getLogger('vim.OF.FL')
+        self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))
+        self._set_version(params.get("of_version"))
+
+    def _set_version(self, version):
+        """
+        set up a version of the controller.
+         Depending on the version it fills the self.ver_names with the naming used in this version
+        :param version: Openflow controller version
+        :return: Raise a ValueError exception if some parameter is missing or wrong
+        """
+        # static version names
+        if version == None:
+            self.version = None
+        elif version == "0.9":
+            self.version = version
+            self.name = "Floodlightv0.9"
+            self.ver_names = {
+                "dpid": "dpid",
+                "URLmodifier": "staticflowentrypusher",
+                "destmac": "dst-mac",
+                "vlanid": "vlan-id",
+                "inport": "ingress-port",
+                "setvlan": "set-vlan-id",
+                "stripvlan": "strip-vlan",
+            }
+        elif version[0] == "1":  # version 1.X
+            self.version = version
+            self.name = "Floodlightv1.X"
+            self.ver_names = {
+                "dpid": "switchDPID",
+                "URLmodifier": "staticflowpusher",
+                "destmac": "eth_dst",
+                "vlanid": "eth_vlan_vid",
+                "inport": "in_port",
+                "setvlan": "set_vlan_vid",
+                "stripvlan": "strip_vlan",
+            }
+        else:
+            raise ValueError("Invalid version for floodlight controller")
+
+    def get_of_switches(self):
+        """
+        Obtain a list of switches or DPID detected by this controller
+        :return: list where each element a tuple pair (DPID, IP address)
+                      Raise an OpenflowconnConnectionException or OpenflowconnUnexpectedResponse exception if some
+                      parameter is missing or wrong
+        """
+        try:
+            of_response = requests.get(self.url + "/wm/core/controller/switches/json", headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("get_of_switches " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("get_of_switches " + error_text)
+            info = of_response.json()
+            if type(info) != list and type(info) != tuple:
+                self.logger.error("get_of_switches. Unexpected response not a list %s", str(type(info)))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, not a list. Wrong version?")
+            if len(info) == 0:
+                return info
+            # autodiscover version
+            if self.version == None:
+                if 'dpid' in info[0] and 'inetAddress' in info[0]:
+                    self._set_version("0.9")
+                elif 'switchDPID' in info[0] and 'inetAddress' in info[0]:
+                    self._set_version("1.X")
+                else:
+                    self.logger.error(
+                        "get_of_switches. Unexpected response, not found 'dpid' or 'switchDPID' field: %s",
+                        str(info[0]))
+                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, not found 'dpid' or "
+                                                                       "'switchDPID' field. Wrong version?")
+
+            switch_list = []
+            for switch in info:
+                switch_list.append((switch[self.ver_names["dpid"]], switch['inetAddress']))
+            return switch_list
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_switches " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+        except ValueError as e:
+            # ValueError in the case that JSON can not be decoded
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_switches " + error_text)
+            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+    def get_of_rules(self, translate_of_ports=True):
+        """
+        Obtain the rules inserted at openflow controller
+        :param translate_of_ports: if True it translates ports from openflow index to physical switch name
+        :return: dict if ok: with the rule name as key and value is another dictionary with the following content:
+                    priority: rule priority
+                    name:         rule name (present also as the master dict key)
+                    ingress_port: match input port of the rule
+                    dst_mac:      match destination mac address of the rule, can be missing or None if not apply
+                    vlan_id:      match vlan tag of the rule, can be missing or None if not apply
+                    actions:      list of actions, composed by a pair tuples:
+                        (vlan, None/int): for stripping/setting a vlan tag
+                        (out, port):      send to this port
+                    switch:       DPID, all
+                Raise an openflowconnUnexpectedResponse exception if fails with text_error
+        """
+
+        try:
+            # get translation, autodiscover version
+            if len(self.ofi2pp) == 0:
+                self.obtain_port_correspondence()
+
+            of_response = requests.get(self.url + "/wm/%s/list/%s/json" % (self.ver_names["URLmodifier"], self.dpid),
+                                       headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("get_of_rules " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("get_of_rules " + error_text)
+            info = of_response.json()
+            if type(info) != dict:
+                self.logger.error("get_of_rules. Unexpected response not a dict %s", str(type(info)))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
+            rule_dict = {}
+            for switch, switch_info in info.iteritems():
+                if switch_info == None:
+                    continue
+                if str(switch) != self.dpid:
+                    continue
+                for name, details in switch_info.iteritems():
+                    rule = {}
+                    rule["switch"] = str(switch)
+                    # rule["active"] = "true"
+                    rule["priority"] = int(details["priority"])
+                    if self.version[0] == "0":
+                        if translate_of_ports:
+                            rule["ingress_port"] = self.ofi2pp[details["match"]["inputPort"]]
+                        else:
+                            rule["ingress_port"] = str(details["match"]["inputPort"])
+                        dst_mac = details["match"]["dataLayerDestination"]
+                        if dst_mac != "00:00:00:00:00:00":
+                            rule["dst_mac"] = dst_mac
+                        vlan = details["match"]["dataLayerVirtualLan"]
+                        if vlan != -1:
+                            rule["vlan_id"] = vlan
+                        actionlist = []
+                        for action in details["actions"]:
+                            if action["type"] == "OUTPUT":
+                                if translate_of_ports:
+                                    port = self.ofi2pp[action["port"]]
+                                else:
+                                    port = action["port"]
+                                actionlist.append(("out", port))
+                            elif action["type"] == "STRIP_VLAN":
+                                actionlist.append(("vlan", None))
+                            elif action["type"] == "SET_VLAN_ID":
+                                actionlist.append(("vlan", action["virtualLanIdentifier"]))
+                            else:
+                                actionlist.append((action["type"], str(action)))
+                                self.logger.warning("get_of_rules() Unknown action in rule %s: %s", rule["name"],
+                                                    str(action))
+                            rule["actions"] = actionlist
+                    elif self.version[0] == "1":
+                        if translate_of_ports:
+                            rule["ingress_port"] = self.ofi2pp[details["match"]["in_port"]]
+                        else:
+                            rule["ingress_port"] = details["match"]["in_port"]
+                        if "eth_dst" in details["match"]:
+                            dst_mac = details["match"]["eth_dst"]
+                            if dst_mac != "00:00:00:00:00:00":
+                                rule["dst_mac"] = dst_mac
+                        if "eth_vlan_vid" in details["match"]:
+                            vlan = int(details["match"]["eth_vlan_vid"], 16) & 0xFFF
+                            rule["vlan_id"] = str(vlan)
+                        actionlist = []
+                        for action in details["instructions"]["instruction_apply_actions"]:
+                            if action == "output":
+                                if translate_of_ports:
+                                    port = self.ofi2pp[details["instructions"]["instruction_apply_actions"]["output"]]
+                                else:
+                                    port = details["instructions"]["instruction_apply_actions"]["output"]
+                                actionlist.append(("out", port))
+                            elif action == "strip_vlan":
+                                actionlist.append(("vlan", None))
+                            elif action == "set_vlan_vid":
+                                actionlist.append(
+                                    ("vlan", details["instructions"]["instruction_apply_actions"]["set_vlan_vid"]))
+                            else:
+                                self.logger.error("get_of_rules Unknown action in rule %s: %s", rule["name"],
+                                                  str(action))
+                                # actionlist.append( (action, str(details["instructions"]["instruction_apply_actions"]) ))
+                    rule_dict[str(name)] = rule
+            return rule_dict
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_rules " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+        except ValueError as e:
+            # ValueError in the case that JSON can not be decoded
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_rules " + error_text)
+            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+    def obtain_port_correspondence(self):
+        """
+        Obtain the correspondence between physical and openflow port names.
+        Fills both self.pp2ofi (physical -> openflow) and self.ofi2pp (openflow -> physical).
+        :return: dictionary: with physical name as key, openflow name as value (self.pp2ofi)
+                 Raise an openflowconnUnexpectedResponse exception if fails with text_error
+        """
+        try:
+            of_response = requests.get(self.url + "/wm/core/controller/switches/json", headers=self.headers)
+            # print vim_response.status_code
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("obtain_port_correspondence " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("obtain_port_correspondence " + error_text)
+            info = of_response.json()
+
+            if type(info) != list and type(info) != tuple:
+                raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, not a list. "
+                                                                   "Wrong version?")
+
+            index = -1
+            if len(info) > 0:
+                # autodiscover version from the field names of the first switch entry
+                if self.version == None:
+                    if 'dpid' in info[0] and 'ports' in info[0]:
+                        self._set_version("0.9")
+                    elif 'switchDPID' in info[0]:
+                        self._set_version("1.X")
+                    else:
+                        raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, "
+                                                                           "Wrong version?")
+
+            # locate the entry of our DPID among the switches reported by the controller
+            for i in range(0, len(info)):
+                if info[i][self.ver_names["dpid"]] == self.dpid:
+                    index = i
+                    break
+            if index == -1:
+                text = "DPID '" + self.dpid + "' not present in controller " + self.url
+                # print self.name, ": get_of_controller_info ERROR", text
+                raise openflow_conn.OpenflowconnUnexpectedResponse(text)
+            else:
+                if self.version[0] == "0":
+                    # version 0.9 already includes the port list in the switches response
+                    ports = info[index]["ports"]
+                else:  # version 1.X
+                    # version 1.X needs an extra port-desc request per switch
+                    of_response = requests.get(self.url + "/wm/core/switch/%s/port-desc/json" % self.dpid,
+                                               headers=self.headers)
+                    # print vim_response.status_code
+                    error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+                    if of_response.status_code != 200:
+                        self.logger.warning("obtain_port_correspondence " + error_text)
+                        raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+                    self.logger.debug("obtain_port_correspondence " + error_text)
+                    info = of_response.json()
+                    if type(info) != dict:
+                        raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow port-desc response, "
+                                                                           "not a dict. Wrong version?")
+                    if "portDesc" not in info:
+                        raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow port-desc response, "
+                                                                           "'portDesc' not found. Wrong version?")
+                    if type(info["portDesc"]) != list and type(info["portDesc"]) != tuple:
+                        raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow port-desc response at "
+                                                                           "'portDesc', not a list. Wrong version?")
+                    ports = info["portDesc"]
+                # fill both translation tables from the port descriptions
+                for port in ports:
+                    self.pp2ofi[str(port["name"])] = str(port["portNumber"])
+                    self.ofi2pp[port["portNumber"]] = str(port["name"])
+                    # print self.name, ": get_of_controller_info ports:", self.pp2ofi
+            return self.pp2ofi
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("obtain_port_correspondence " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+        except ValueError as e:
+            # ValueError in the case that JSON can not be decoded
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("obtain_port_correspondence " + error_text)
+            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+    def del_flow(self, flow_name):
+        """
+        Delete an existing rule
+        :param flow_name: this is the rule name
+        :return: None if ok
+                 Raise an openflowconnUnexpectedResponse exception if fails with text_error
+        """
+        try:
+
+            # Raise an openflowconnUnexpectedResponse exception if fails with text_error
+            # autodiscover version
+
+            if self.version == None:
+                self.get_of_switches()
+
+            of_response = requests.delete(self.url + "/wm/%s/json" % self.ver_names["URLmodifier"],
+                                          headers=self.headers,
+                                          data='{"switch":"%s","name":"%s"}' % (self.dpid, flow_name)
+                                          )
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("del_flow " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("del_flow OK " + error_text)
+            return None
+
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("del_flow " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+
+    def new_flow(self, data):
+        """
+        Insert a new static rule
+        :param data: dictionary with the following content:
+                        priority:     rule priority
+                        name:         rule name
+                        ingress_port: match input port of the rule
+                        dst_mac:      match destination mac address of the rule, missing or None if not apply
+                        vlan_id:      match vlan tag of the rule, missing or None if not apply
+                        actions:      list of actions, composed by a pair tuples with these posibilities:
+                            ('vlan', None/int): for stripping/setting a vlan tag
+                            ('out', port):      send to this port
+        :return: None if ok
+                 Raise an openflowconnUnexpectedResponse exception if fails with text_error
+        """
+        # get translation, autodiscover version
+        if len(self.pp2ofi) == 0:
+            self.obtain_port_correspondence()
+
+        try:
+            # We have to build the data for the floodlight call from the generic data
+            sdata = {'active': "true", "name": data["name"]}
+            if data.get("priority"):
+                sdata["priority"] = str(data["priority"])
+            if data.get("vlan_id"):
+                sdata[self.ver_names["vlanid"]] = data["vlan_id"]
+            if data.get("dst_mac"):
+                sdata[self.ver_names["destmac"]] = data["dst_mac"]
+            sdata['switch'] = self.dpid
+            if not data['ingress_port'] in self.pp2ofi:
+                error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch'
+                self.logger.warning("new_flow " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+            sdata[self.ver_names["inport"]] = self.pp2ofi[data['ingress_port']]
+            # actions are encoded as a single comma-separated string
+            sdata['actions'] = ""
+
+            for action in data['actions']:
+                if len(sdata['actions']) > 0:
+                    sdata['actions'] += ','
+                if action[0] == "vlan":
+                    if action[1] == None:
+                        sdata['actions'] += self.ver_names["stripvlan"]
+                    else:
+                        sdata['actions'] += self.ver_names["setvlan"] + "=" + str(action[1])
+                elif action[0] == 'out':
+                    # NOTE(review): unlike the ingress port (validated above), an output port
+                    # missing from pp2ofi raises a bare KeyError here — TODO confirm intended
+                    sdata['actions'] += "output=" + self.pp2ofi[action[1]]
+
+            of_response = requests.post(self.url + "/wm/%s/json" % self.ver_names["URLmodifier"],
+                                        headers=self.headers, data=json.dumps(sdata))
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("new_flow " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("new_flow OK" + error_text)
+            return None
+
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("new_flow " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+
+    def clear_all_flows(self):
+        """
+        Delete all existing rules
+        :return: None if ok
+                 Raise an openflowconnUnexpectedResponse exception if fails with text_error
+        """
+
+        try:
+            # autodiscover version
+            if self.version == None:
+                sw_list = self.get_of_switches()
+                if len(sw_list) == 0:  # empty
+                    return None
+
+            url = self.url + "/wm/%s/clear/%s/json" % (self.ver_names["URLmodifier"], self.dpid)
+            of_response = requests.get(url)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code < 200 or of_response.status_code >= 300:
+                self.logger.warning("clear_all_flows " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("clear_all_flows OK " + error_text)
+            return None
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("clear_all_flows " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+
diff --git a/osm_openvim/host_thread.py b/osm_openvim/host_thread.py
new file mode 100644 (file)
index 0000000..d8bca2e
--- /dev/null
@@ -0,0 +1,2274 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+This is a thread that interacts with the host and with libvirt to manage VMs.
+One thread is launched per host.
+'''
+__author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
+__date__ = "$10-jul-2014 12:07:15$"
+
+import json
+import yaml
+import threading
+import time
+import Queue
+import paramiko
+from jsonschema import validate as js_v, exceptions as js_e
+#import libvirt
+import imp
+from vim_schema import localinfo_schema, hostinfo_schema
+import random
+import os
+
+#TODO: insert a logging system
+
+# from logging import Logger
+# import auxiliary_functions as af
+
+# TODO: insert a logging system
+
+
+class host_thread(threading.Thread):
+    lvirt_module = None
+
+    def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
+                 develop_bridge_iface):
+        '''Init a thread.
+        Arguments:
+            'name': name of the thread
+            'host', 'user': host ip or name to manage, and the ssh user to log in with
+            'db', 'db_lock': database class and lock to use it in exclusion
+            'test': when True no real ssh/libvirt interaction is performed
+            'image_path': remote folder where images and metadata files are kept
+            'host_id': uuid of this host at the database
+            'version': openvim version
+            'develop_mode', 'develop_bridge_iface': development options
+        '''
+        threading.Thread.__init__(self)
+        self.name = name
+        self.host = host
+        self.user = user
+        self.db = db
+        self.db_lock = db_lock
+        self.test = test
+
+        # lazy-load the libvirt binding once per process (class attribute), and only
+        # when not in test mode, so test deployments do not need python-libvirt
+        if not test and not host_thread.lvirt_module:
+            try:
+                module_info = imp.find_module("libvirt")
+                host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
+            except (IOError, ImportError) as e:
+                raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))
+
+
+        self.develop_mode = develop_mode
+        self.develop_bridge_iface = develop_bridge_iface
+        self.image_path = image_path
+        self.host_id = host_id
+        self.version = version
+        
+        self.xml_level = 0
+        #self.pending ={}
+        
+        self.server_status = {} #dictionary with pairs server_uuid:server_status 
+        self.pending_terminate_server =[] #list  with pairs (time,server_uuid) time to send a terminate for a server being destroyed
+        self.next_update_server_status = 0 #time when must be check servers status
+        
+        self.hostinfo = None 
+        
+        self.queueLock = threading.Lock()
+        self.taskQueue = Queue.Queue(2000)
+        self.ssh_conn = None
+
+    def ssh_connect(self):
+        """Open (or re-open) the ssh connection to the managed host, storing the
+        client at self.ssh_conn.
+
+        On failure the exception is only printed, not raised, so callers may be
+        left with a non-connected client.
+        """
+        try:
+            #Connect SSH
+            self.ssh_conn = paramiko.SSHClient()
+            self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+            self.ssh_conn.load_system_host_keys()
+            self.ssh_conn.connect(self.host, username=self.user, timeout=10) #, None)
+        except paramiko.ssh_exception.SSHException as e:
+            text = e.args[0]
+            # NOTE(review): error swallowed after printing — self.ssh_conn may be unusable
+            print self.name, ": ssh_connect ssh Exception:", text
+        
+    def load_localinfo(self):
+        """Load the remote '<image_path>/.openvim.yaml' metadata file into self.localinfo.
+
+        Validates it against localinfo_schema. On any failure a default empty
+        structure is installed instead. Does nothing real in test mode.
+        """
+        if not self.test:
+            try:
+                #Connect SSH
+                self.ssh_connect()
+    
+                # make sure the remote image folder exists
+                command = 'mkdir -p ' +  self.image_path
+                #print self.name, ': command:', command
+                (_, stdout, stderr) = self.ssh_conn.exec_command(command)
+                content = stderr.read()
+                if len(content) > 0:
+                    print self.name, ': command:', command, "stderr:", content
+
+                command = 'cat ' +  self.image_path + '/.openvim.yaml'
+                #print self.name, ': command:', command
+                (_, stdout, stderr) = self.ssh_conn.exec_command(command)
+                content = stdout.read()
+                if len(content) == 0:
+                    print self.name, ': command:', command, "stderr:", stderr.read()
+                    raise paramiko.ssh_exception.SSHException("Error empty file ")
+                # NOTE(review): yaml.load without SafeLoader; content comes from our own
+                # managed host, but yaml.safe_load would be preferable — TODO confirm
+                self.localinfo = yaml.load(content)
+                js_v(self.localinfo, localinfo_schema)
+                self.localinfo_dirty=False
+                if 'server_files' not in self.localinfo:
+                    self.localinfo['server_files'] = {}
+                print self.name, ': localinfo load from host'
+                return
+    
+            except paramiko.ssh_exception.SSHException as e:
+                text = e.args[0]
+                print self.name, ": load_localinfo ssh Exception:", text
+            except host_thread.lvirt_module.libvirtError as e:
+                text = e.get_error_message()
+                print self.name, ": load_localinfo libvirt Exception:", text
+            except yaml.YAMLError as exc:
+                text = ""
+                if hasattr(exc, 'problem_mark'):
+                    mark = exc.problem_mark
+                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
+                print self.name, ": load_localinfo yaml format Exception", text
+            except js_e.ValidationError as e:
+                text = ""
+                if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
+                print self.name, ": load_localinfo format Exception:", text, e.message 
+            except Exception as e:
+                text = str(e)
+                print self.name, ": load_localinfo Exception:", text
+        
+        #not loaded, insert a default data and force saving by activating dirty flag
+        self.localinfo = {'files':{}, 'server_files':{} } 
+        #self.localinfo_dirty=True
+        self.localinfo_dirty=False
+
+    def load_hostinfo(self):
+        """Load the remote '<image_path>/hostinfo.yaml' file into self.hostinfo.
+
+        Validates it against hostinfo_schema. On any failure self.hostinfo is
+        left as None. Does nothing in test mode.
+        """
+        if self.test:
+            return;
+        try:
+            #Connect SSH
+            self.ssh_connect()
+
+
+            command = 'cat ' +  self.image_path + '/hostinfo.yaml'
+            #print self.name, ': command:', command
+            (_, stdout, stderr) = self.ssh_conn.exec_command(command)
+            content = stdout.read()
+            if len(content) == 0:
+                print self.name, ': command:', command, "stderr:", stderr.read()
+                raise paramiko.ssh_exception.SSHException("Error empty file ")
+            self.hostinfo = yaml.load(content)
+            js_v(self.hostinfo, hostinfo_schema)
+            print self.name, ': hostlinfo load from host', self.hostinfo
+            return
+
+        except paramiko.ssh_exception.SSHException as e:
+            text = e.args[0]
+            print self.name, ": load_hostinfo ssh Exception:", text
+        except host_thread.lvirt_module.libvirtError as e:
+            text = e.get_error_message()
+            print self.name, ": load_hostinfo libvirt Exception:", text
+        except yaml.YAMLError as exc:
+            text = ""
+            if hasattr(exc, 'problem_mark'):
+                mark = exc.problem_mark
+                text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
+            print self.name, ": load_hostinfo yaml format Exception", text
+        except js_e.ValidationError as e:
+            text = ""
+            if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
+            print self.name, ": load_hostinfo format Exception:", text, e.message 
+        except Exception as e:
+            text = str(e)
+            print self.name, ": load_hostinfo Exception:", text
+        
+        #not loaded, insert a default data 
+        self.hostinfo = None 
+        
+    def save_localinfo(self, tries=3):
+        """Write self.localinfo back to '<image_path>/.openvim.yaml' on the host.
+
+        :param tries: number of attempts; the ssh session is re-opened when it
+                      reports "SSH session not active"
+        Clears self.localinfo_dirty on success. In test mode only clears the flag.
+        """
+        if self.test:
+            self.localinfo_dirty = False
+            return
+        
+        while tries>=0:
+            tries-=1
+            
+            try:
+                # stream the yaml dump straight into a remote 'cat > file' stdin
+                command = 'cat > ' +  self.image_path + '/.openvim.yaml'
+                print self.name, ': command:', command
+                (stdin, _, _) = self.ssh_conn.exec_command(command)
+                yaml.safe_dump(self.localinfo, stdin, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
+                self.localinfo_dirty = False
+                break #while tries
+    
+            except paramiko.ssh_exception.SSHException as e:
+                text = e.args[0]
+                print self.name, ": save_localinfo ssh Exception:", text
+                if "SSH session not active" in text:
+                    self.ssh_connect()
+            except host_thread.lvirt_module.libvirtError as e:
+                text = e.get_error_message()
+                print self.name, ": save_localinfo libvirt Exception:", text
+            except yaml.YAMLError as exc:
+                text = ""
+                if hasattr(exc, 'problem_mark'):
+                    mark = exc.problem_mark
+                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
+                print self.name, ": save_localinfo yaml format Exception", text
+            except Exception as e:
+                text = str(e)
+                print self.name, ": save_localinfo Exception:", text
+
+    def load_servers_from_db(self):
+        """Fill self.server_status with uuid:status pairs of this host's instances.
+
+        Also migrates the deprecated 'inc_files' localinfo section into the
+        current 'server_files' layout, marking localinfo dirty when converted.
+        """
+        self.db_lock.acquire()
+        r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
+        self.db_lock.release()
+
+        self.server_status = {}
+        if r<0:
+            print self.name, ": Error getting data from database:", c
+            return
+        for server in c:
+            self.server_status[ server['uuid'] ] = server['status']
+            
+            #convert from old version to new one
+            if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
+                server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0],  'file format':'raw' }
+                # guess qcow2 format from the file extension
+                if server_files_dict['source file'][-5:] == 'qcow2':
+                    server_files_dict['file format'] = 'qcow2'
+                    
+                self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
+        if 'inc_files' in self.localinfo:
+            del self.localinfo['inc_files']
+            self.localinfo_dirty = True
+    
+    def delete_unused_files(self):
+        '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
+        Deletes unused entries at self.loacalinfo and the corresponding local files.
+        The only reason for this mismatch is the manual deletion of instances (VM) at database
+        ''' 
+        if self.test:
+            return
+        # Python 2 dict.items() returns a list, so deleting entries inside the loop is safe
+        for uuid,images in self.localinfo['server_files'].items():
+            if uuid not in self.server_status:
+                for localfile in images.values():
+                    try:
+                        print self.name, ": deleting file '%s' of unused server '%s'" %(localfile['source file'], uuid)
+                        self.delete_file(localfile['source file'])
+                    except paramiko.ssh_exception.SSHException as e:
+                        print self.name, ": Exception deleting file '%s': %s" %(localfile['source file'], str(e))
+                del self.localinfo['server_files'][uuid]
+                self.localinfo_dirty = True
+   
+    def insert_task(self, task, *aditional):
+        try:
+            self.queueLock.acquire()
+            task = self.taskQueue.put( (task,) + aditional, timeout=5) 
+            self.queueLock.release()
+            return 1, None
+        except Queue.Full:
+            return -1, "timeout inserting a task over host " + self.name
+
    def run(self):
        """Main loop of the host thread.

        Loads local/host/database state, then consumes tasks from taskQueue.
        While the queue is idle it performs housekeeping: flushes dirty local
        info, refreshes server status every 5 seconds and fires pending
        forced power-offs. A 'reload' task restarts the outer loop; an
        'exit' task terminates the thread (returns 0).
        """
        while True:
            # (re)load state before serving tasks; also reached after 'reload'
            self.load_localinfo()
            self.load_hostinfo()
            self.load_servers_from_db()
            self.delete_unused_files()
            while True:
                # hold the lock only while inspecting/popping the queue
                self.queueLock.acquire()
                if not self.taskQueue.empty():
                    task = self.taskQueue.get()
                else:
                    task = None
                self.queueLock.release()

                if task is None:
                    # idle housekeeping: at most one action per pass, then
                    # go back to polling the queue
                    now=time.time()
                    if self.localinfo_dirty:
                        self.save_localinfo()
                    elif self.next_update_server_status < now:
                        self.update_servers_status()
                        self.next_update_server_status = now + 5
                    elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
                        self.server_forceoff()
                    else:
                        time.sleep(1)
                    continue

                # dispatch on task type; task[1:] carries the payload
                if task[0] == 'instance':
                    print self.name, ": processing task instance", task[1]['action']
                    retry=0
                    while retry <2:
                        # second (last) attempt is flagged to action_on_server
                        retry += 1
                        r=self.action_on_server(task[1], retry==2)
                        if r>=0:
                            break
                elif task[0] == 'image':
                    pass  # image tasks are currently a no-op
                elif task[0] == 'exit':
                    print self.name, ": processing task exit"
                    self.terminate()
                    return 0
                elif task[0] == 'reload':
                    print self.name, ": processing task reload terminating and relaunching"
                    self.terminate()
                    break
                elif task[0] == 'edit-iface':
                    print self.name, ": processing task edit-iface port=%s, old_net=%s, new_net=%s" % (task[1], task[2], task[3])
                    self.edit_iface(task[1], task[2], task[3])
                elif task[0] == 'restore-iface':
                    print self.name, ": processing task restore-iface %s mac=%s" % (task[1], task[2])
                    self.restore_iface(task[1], task[2])
                elif task[0] == 'new-ovsbridge':
                    print self.name, ": Creating compute OVS bridge"
                    self.create_ovs_bridge()
                elif task[0] == 'new-vxlan':
                    print self.name, ": Creating vxlan tunnel=" + task[1] + ", remote ip=" + task[2]
                    self.create_ovs_vxlan_tunnel(task[1], task[2])
                elif task[0] == 'del-ovsbridge':
                    print self.name, ": Deleting OVS bridge"
                    self.delete_ovs_bridge()
                elif task[0] == 'del-vxlan':
                    print self.name, ": Deleting vxlan " + task[1] + " tunnel"
                    self.delete_ovs_vxlan_tunnel(task[1])
                elif task[0] == 'create-ovs-bridge-port':
                    print self.name, ": Adding port ovim-" + task[1] + " to OVS bridge"
                    self.create_ovs_bridge_port(task[1])
                elif task[0] == 'del-ovs-port':
                    print self.name, ": Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2])
                    self.delete_bridge_port_attached_to_ovs(task[1], task[2])
                else:
                    print self.name, ": unknown task", task

+    def server_forceoff(self, wait_until_finished=False):
+        while len(self.pending_terminate_server)>0:
+            now = time.time()
+            if self.pending_terminate_server[0][0]>now:
+                if wait_until_finished:
+                    time.sleep(1)
+                    continue
+                else:
+                    return
+            req={'uuid':self.pending_terminate_server[0][1],
+                'action':{'terminate':'force'},
+                'status': None
+            }
+            self.action_on_server(req)
+            self.pending_terminate_server.pop(0)
+    
    def terminate(self):
        """Flush pending work and close connections before the thread exits.

        Completes pending forced power-offs (waiting for their due times),
        saves dirty local info and closes the ssh connection (skipped in
        test mode). Any exception is reported and swallowed so the thread
        always exits cleanly.
        """
        try:
            self.server_forceoff(True)
            if self.localinfo_dirty:
                self.save_localinfo()
            if not self.test:
                self.ssh_conn.close()
        except Exception as e:
            text = str(e)
            print self.name, ": terminate Exception:", text
        print self.name, ": exit from host_thread" 
+
+    def get_local_iface_name(self, generic_name):
+        if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
+            return self.hostinfo["iface_names"][generic_name]
+        return generic_name
+        
+    def create_xml_server(self, server, dev_list, server_metadata={}):
+        """Function that implements the generation of the VM XML definition.
+        Additional devices are in dev_list list
+        The main disk is upon dev_list[0]"""
+        
+    #get if operating system is Windows        
+        windows_os = False
+        os_type = server_metadata.get('os_type', None)
+        if os_type == None and 'metadata' in dev_list[0]:
+            os_type = dev_list[0]['metadata'].get('os_type', None)
+        if os_type != None and os_type.lower() == "windows":
+            windows_os = True
+    #get type of hard disk bus  
+        bus_ide = True if windows_os else False   
+        bus = server_metadata.get('bus', None)
+        if bus == None and 'metadata' in dev_list[0]:
+            bus = dev_list[0]['metadata'].get('bus', None)
+        if bus != None:
+            bus_ide = True if bus=='ide' else False
+            
+        self.xml_level = 0
+
+        text = "<domain type='kvm'>"
+    #get topology
+        topo = server_metadata.get('topology', None)
+        if topo == None and 'metadata' in dev_list[0]:
+            topo = dev_list[0]['metadata'].get('topology', None)
+    #name
+        name = server.get('name','') + "_" + server['uuid']
+        name = name[:58]  #qemu impose a length  limit of 59 chars or not start. Using 58
+        text += self.inc_tab() + "<name>" + name+ "</name>"
+    #uuid
+        text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>" 
+        
+        numa={}
+        if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
+            numa = server['extended']['numas'][0]
+    #memory
+        use_huge = False
+        memory = int(numa.get('memory',0))*1024*1024 #in KiB
+        if memory==0:
+            memory = int(server['ram'])*1024;
+        else:
+            if not self.develop_mode:
+                use_huge = True
+        if memory==0:
+            return -1, 'No memory assigned to instance'
+        memory = str(memory)
+        text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>" 
+        text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
+        if use_huge:
+            text += self.tab()+'<memoryBacking>'+ \
+                self.inc_tab() + '<hugepages/>'+ \
+                self.dec_tab()+ '</memoryBacking>'
+
+    #cpu
+        use_cpu_pinning=False
+        vcpus = int(server.get("vcpus",0))
+        cpu_pinning = []
+        if 'cores-source' in numa:
+            use_cpu_pinning=True
+            for index in range(0, len(numa['cores-source'])):
+                cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
+                vcpus += 1
+        if 'threads-source' in numa:
+            use_cpu_pinning=True
+            for index in range(0, len(numa['threads-source'])):
+                cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
+                vcpus += 1
+        if 'paired-threads-source' in numa:
+            use_cpu_pinning=True
+            for index in range(0, len(numa['paired-threads-source'])):
+                cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
+                cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
+                vcpus += 2
+        
+        if use_cpu_pinning and not self.develop_mode:
+            text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
+                self.tab()+'<cputune>'
+            self.xml_level += 1
+            for i in range(0, len(cpu_pinning)):
+                text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
+            text += self.dec_tab()+'</cputune>'+ \
+                self.tab() + '<numatune>' +\
+                self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
+                self.dec_tab() + '</numatune>'
+        else:
+            if vcpus==0:
+                return -1, "Instance without number of cpus"
+            text += self.tab()+"<vcpu>" + str(vcpus)  + "</vcpu>"
+
+    #boot
+        boot_cdrom = False
+        for dev in dev_list:
+            if dev['type']=='cdrom' :
+                boot_cdrom = True
+                break
+        text += self.tab()+ '<os>' + \
+            self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
+        if boot_cdrom:
+            text +=  self.tab() + "<boot dev='cdrom'/>" 
+        text +=  self.tab() + "<boot dev='hd'/>" + \
+            self.dec_tab()+'</os>'
+    #features
+        text += self.tab()+'<features>'+\
+            self.inc_tab()+'<acpi/>' +\
+            self.tab()+'<apic/>' +\
+            self.tab()+'<pae/>'+ \
+            self.dec_tab() +'</features>'
+        if topo == "oneSocket:hyperthreading":
+            if vcpus % 2 != 0:
+                return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
+            text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % vcpus/2
+        elif windows_os or topo == "oneSocket":
+            text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
+        else:
+            text += self.tab() + "<cpu mode='host-model'></cpu>"
+        text += self.tab() + "<clock offset='utc'/>" +\
+            self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
+            self.tab() + "<on_reboot>restart</on_reboot>" + \
+            self.tab() + "<on_crash>restart</on_crash>"
+        text += self.tab() + "<devices>" + \
+            self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
+            self.tab() + "<serial type='pty'>" +\
+            self.inc_tab() + "<target port='0'/>" + \
+            self.dec_tab() + "</serial>" +\
+            self.tab() + "<console type='pty'>" + \
+            self.inc_tab()+ "<target type='serial' port='0'/>" + \
+            self.dec_tab()+'</console>'
+        if windows_os:
+            text += self.tab() + "<controller type='usb' index='0'/>" + \
+                self.tab() + "<controller type='ide' index='0'/>" + \
+                self.tab() + "<input type='mouse' bus='ps2'/>" + \
+                self.tab() + "<sound model='ich6'/>" + \
+                self.tab() + "<video>" + \
+                self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
+                self.dec_tab() + "</video>" + \
+                self.tab() + "<memballoon model='virtio'/>" + \
+                self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
+
+#>             self.tab()+'<alias name=\'hostdev0\'/>\n' +\
+#>             self.dec_tab()+'</hostdev>\n' +\
+#>             self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
+        if windows_os:
+            text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
+        else:
+            #If image contains 'GRAPH' include graphics
+            #if 'GRAPH' in image:
+            text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
+                self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
+                self.dec_tab() + "</graphics>"
+
+        vd_index = 'a'
+        for dev in dev_list:
+            bus_ide_dev = bus_ide
+            if dev['type']=='cdrom' or dev['type']=='disk':
+                if dev['type']=='cdrom':
+                    bus_ide_dev = True
+                text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
+                if 'file format' in dev:
+                    text += self.inc_tab() + "<driver name='qemu' type='"  +dev['file format']+ "' cache='writethrough'/>"
+                if 'source file' in dev:
+                    text += self.tab() + "<source file='" +dev['source file']+ "'/>"
+                #elif v['type'] == 'block':
+                #    text += self.tab() + "<source dev='" + v['source'] + "'/>"
+                #else:
+                #    return -1, 'Unknown disk type ' + v['type']
+                vpci = dev.get('vpci',None)
+                if vpci == None:
+                    vpci = dev['metadata'].get('vpci',None)
+                text += self.pci2xml(vpci)
+               
+                if bus_ide_dev:
+                    text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>"   #TODO allows several type of disks
+                else:
+                    text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>" 
+                text += self.dec_tab() + '</disk>'
+                vd_index = chr(ord(vd_index)+1)
+            elif dev['type']=='xml':
+                dev_text = dev['xml']
+                if 'vpci' in dev:
+                    dev_text = dev_text.replace('__vpci__', dev['vpci'])
+                if 'source file' in dev:
+                    dev_text = dev_text.replace('__file__', dev['source file'])
+                if 'file format' in dev:
+                    dev_text = dev_text.replace('__format__', dev['source file'])
+                if '__dev__' in dev_text:
+                    dev_text = dev_text.replace('__dev__', vd_index)
+                    vd_index = chr(ord(vd_index)+1)
+                text += dev_text
+            else:
+                return -1, 'Unknown device type ' + dev['type']
+
+        net_nb=0
+        bridge_interfaces = server.get('networks', [])
+        for v in bridge_interfaces:
+            #Get the brifge name
+            self.db_lock.acquire()
+            result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
+            self.db_lock.release()
+            if result <= 0:
+                print "create_xml_server ERROR getting nets",result, content
+                return -1, content
+            #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
+            #I know it is not secure    
+            #for v in sorted(desc['network interfaces'].itervalues()):
+            model = v.get("model", None)
+            if content[0]['provider']=='default':
+                text += self.tab() + "<interface type='network'>" + \
+                    self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
+            elif content[0]['provider'][0:7]=='macvtap':
+                text += self.tab()+"<interface type='direct'>" + \
+                    self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
+                    self.tab() + "<target dev='macvtap0'/>"
+                if windows_os:
+                    text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
+                elif model==None:
+                    model = "virtio"
+            elif content[0]['provider'][0:6]=='bridge':
+                text += self.tab() + "<interface type='bridge'>" +  \
+                    self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
+                if windows_os:
+                    text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
+                        self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
+                elif model==None:
+                    model = "virtio"
+            elif content[0]['provider'][0:3] == "OVS":
+                vlan = content[0]['provider'].replace('OVS:', '')
+                text += self.tab() + "<interface type='bridge'>" + \
+                        self.inc_tab() + "<source bridge='ovim-" + vlan + "'/>"
+            else:
+                return -1, 'Unknown Bridge net provider ' + content[0]['provider']
+            if model!=None:
+                text += self.tab() + "<model type='" +model+ "'/>"
+            if v.get('mac_address', None) != None:
+                text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
+            text += self.pci2xml(v.get('vpci',None))
+            text += self.dec_tab()+'</interface>'
+            
+            net_nb += 1
+
+        interfaces = numa.get('interfaces', [])
+
+        net_nb=0
+        for v in interfaces:
+            if self.develop_mode: #map these interfaces to bridges
+                text += self.tab() + "<interface type='bridge'>" +  \
+                    self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
+                if windows_os:
+                    text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
+                        self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
+                else:
+                    text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
+                if v.get('mac_address', None) != None:
+                    text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
+                text += self.pci2xml(v.get('vpci',None))
+                text += self.dec_tab()+'</interface>'
+                continue
+                
+            if v['dedicated'] == 'yes':  #passthrought
+                text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
+                    self.inc_tab() + "<source>"
+                self.inc_tab()
+                text += self.pci2xml(v['source'])
+                text += self.dec_tab()+'</source>'
+                text += self.pci2xml(v.get('vpci',None))
+                if windows_os:
+                    text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
+                text += self.dec_tab()+'</hostdev>'
+                net_nb += 1
+            else:        #sriov_interfaces
+                #skip not connected interfaces
+                if v.get("net_id") == None:
+                    continue
+                text += self.tab() + "<interface type='hostdev' managed='yes'>"
+                self.inc_tab()
+                if v.get('mac_address', None) != None:
+                    text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
+                text+= self.tab()+'<source>'
+                self.inc_tab()
+                text += self.pci2xml(v['source'])
+                text += self.dec_tab()+'</source>'
+                if v.get('vlan',None) != None:
+                    text += self.tab() + "<vlan>   <tag id='" + str(v['vlan']) + "'/>   </vlan>"
+                text += self.pci2xml(v.get('vpci',None))
+                if windows_os:
+                    text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
+                text += self.dec_tab()+'</interface>'
+
+            
+        text += self.dec_tab()+'</devices>'+\
+        self.dec_tab()+'</domain>'
+        return 0, text
+    
+    def pci2xml(self, pci):
+        '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
+        alows an empty pci text'''
+        if pci is None:
+            return ""
+        first_part = pci.split(':')
+        second_part = first_part[2].split('.')
+        return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
+                    "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
+                    "' function='0x" + second_part[1] + "'/>" 
+    
+    def tab(self):
+        """Return indentation according to xml_level"""
+        return "\n" + ('  '*self.xml_level)
+    
+    def inc_tab(self):
+        """Increment and return indentation according to xml_level"""
+        self.xml_level += 1
+        return self.tab()
+    
+    def dec_tab(self):
+        """Decrement and return indentation according to xml_level"""
+        self.xml_level -= 1
+        return self.tab()
+
+    def create_ovs_bridge(self):
+        """
+        Create a bridge in compute OVS to allocate VMs
+        :return: True if success
+        """
+        if self.test:
+            return
+        command = 'sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true'
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def delete_port_to_ovs_bridge(self, vlan, net_uuid):
+        """
+        Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
+        :param vlan: vlan port id
+        :param net_uuid: network id
+        :return:
+        """
+
+        if self.test:
+            return
+
+        port_name = 'ovim-' + vlan
+        command = 'sudo ovs-vsctl del-port br-int ' + port_name
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
    def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
        """
        Kill the dnsmasq (dhcp server) process living in the net namespace.

        Skipped if some 'instance:ovs' port still uses the net.
        :param vlan: segmentation id
        :param net_uuid: network uuid
        :param dhcp_path: conf file path that lives in namespace side
        :return: True when ports remain (nothing done); otherwise None
        """
        if self.test:
            return
        if not self.is_dhcp_port_free(vlan, net_uuid):
            return True

        net_namespace = 'ovim-' + vlan
        dhcp_path = os.path.join(dhcp_path, net_namespace)
        pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

        # read the pid that dnsmasq stored at launch time
        command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_file
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # kill that pid inside the namespace; command output is ignored
        command = 'sudo ip netns exec ' + net_namespace + ' kill -9 ' + content
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # if len(content) == 0:
        #     return True
        # else:
        #     return False
+
+    def is_dhcp_port_free(self, host_id, net_uuid):
+        """
+        Check if any port attached to the a net in a vxlan mesh across computes nodes
+        :param host_id: host id
+        :param net_uuid: network id
+        :return: True if is not free
+        """
+        self.db_lock.acquire()
+        result, content = self.db.get_table(
+            FROM='ports',
+            WHERE={'p.type': 'instance:ovs', 'p.net_id': net_uuid}
+        )
+        self.db_lock.release()
+
+        if len(content) > 0:
+            return False
+        else:
+            return True
+
+    def is_port_free(self, host_id, net_uuid):
+        """
+        Check if there not ovs ports of a network in a compute host.
+        :param host_id:  host id
+        :param net_uuid: network id
+        :return: True if is not free
+        """
+
+        self.db_lock.acquire()
+        result, content = self.db.get_table(
+            FROM='ports as p join instances as i on p.instance_id=i.uuid',
+            WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
+        )
+        self.db_lock.release()
+
+        if len(content) > 0:
+            return False
+        else:
+            return True
+
+    def add_port_to_ovs_bridge(self, vlan):
+        """
+        Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
+        :param vlan: vlan port id
+        :return: True if success
+        """
+
+        if self.test:
+            return
+
+        port_name = 'ovim-' + vlan
+        command = 'sudo ovs-vsctl add-port br-int ' + port_name + ' tag=' + vlan
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def delete_dhcp_port(self, vlan, net_uuid):
+        """
+        Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
+        :param vlan: segmentation id
+        :param net_uuid: network id
+        :return: True if success
+        """
+
+        if self.test:
+            return
+
+        if not self.is_dhcp_port_free(vlan, net_uuid):
+            return True
+        self.delete_dhcp_interfaces(vlan)
+        return True
+
+    def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
+        """
+        Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
+        :param vlan:
+        :param net_uuid:
+        :return: True if success
+        """
+        if self.test:
+            return
+
+        if not self.is_port_free(vlan, net_uuid):
+            return True
+        self.delete_port_to_ovs_bridge(vlan, net_uuid)
+        self.delete_linux_bridge(vlan)
+        return True
+
+    def delete_linux_bridge(self, vlan):
+        """
+        Delete a linux bridge in a scpecific compute.
+        :param vlan: vlan port id
+        :return: True if success
+        """
+
+        if self.test:
+            return
+
+        port_name = 'ovim-' + vlan
+        command = 'sudo ip link set dev veth0-' + vlan + ' down'
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        #
+        # if len(content) != 0:
+        #     return False
+
+        command = 'sudo ifconfig ' + port_name + ' down &&  sudo brctl delbr ' + port_name
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def create_ovs_bridge_port(self, vlan):
+        """
+        Generate a linux bridge and attache the port to a OVS bridge
+        :param vlan: vlan port id
+        :return:
+        """
+        if self.test:
+            return
+        self.create_linux_bridge(vlan)
+        self.add_port_to_ovs_bridge(vlan)
+
+    def create_linux_bridge(self, vlan):
+        """
+        Create a linux bridge with STP active
+        :param vlan: netowrk vlan id
+        :return:
+        """
+
+        if self.test:
+            return
+
+        port_name = 'ovim-' + vlan
+        command = 'sudo brctl show | grep ' + port_name
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        # if exist nothing to create
+        # if len(content) == 0:
+        #     return False
+
+        command = 'sudo brctl addbr ' + port_name
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        # if len(content) == 0:
+        #     return True
+        # else:
+        #     return False
+
+        command = 'sudo brctl stp ' + port_name + ' on'
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        # if len(content) == 0:
+        #     return True
+        # else:
+        #     return False
+        command = 'sudo ip link set dev ' + port_name + ' up'
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def set_mac_dhcp_server(self, ip, mac, vlan, netmask, dhcp_path):
+        """
+        Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
+        :param ip: IP address asigned to a VM
+        :param mac: VM vnic mac to be macthed with the IP received
+        :param vlan: Segmentation id
+        :param netmask: netmask value
+        :param path: dhcp conf file path that live in namespace side
+        :return: True if success
+        """
+
+        if self.test:
+            return
+
+        net_namespace = 'ovim-' + vlan
+        dhcp_path = os.path.join(dhcp_path, net_namespace)
+        dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)
+
+        if not ip:
+            return False
+
+        ip_data = mac.upper() + ',' + ip
+
+        command = 'sudo  ip netns exec ' + net_namespace + ' touch ' + dhcp_hostsdir
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        command = 'sudo  ip netns exec ' + net_namespace + ' sudo bash -ec "echo ' + ip_data + ' >> ' + dhcp_hostsdir + '"'
+
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
+        """
+        Delete into dhcp conf file the ip  assigned to a specific MAC address
+
+        :param ip: IP address asigned to a VM
+        :param mac:  VM vnic mac to be macthed with the IP received
+        :param vlan:  Segmentation id
+        :param dhcp_path: dhcp conf file path that live in namespace side
+        :return:
+        """
+
+        if self.test:
+            return
+
+        net_namespace = 'ovim-' + vlan
+        dhcp_path = os.path.join(dhcp_path, net_namespace)
+        dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)
+
+        if not ip:
+            return False
+
+        ip_data = mac.upper() + ',' + ip
+
+        command = 'sudo  ip netns exec ' + net_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway):
+        """
+        Generate a linux bridge and attache the port to a OVS bridge
+        :param self:
+        :param vlan: Segmentation id
+        :param ip_range: IP dhcp range
+        :param netmask: network netmask
+        :param dhcp_path: dhcp conf file path that live in namespace side
+        :param gateway: Gateway address for dhcp net
+        :return: True if success
+        """
+
+        if self.test:
+            return
+
+        interface = 'tap-' + vlan
+        net_namespace = 'ovim-' + vlan
+        dhcp_path = os.path.join(dhcp_path, net_namespace)
+        leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
+        pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
+
+        dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask
+
+        command = 'sudo ip netns exec ' + net_namespace + ' mkdir -p ' + dhcp_path
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
+        command = 'sudo  ip netns exec ' + net_namespace + ' cat ' + pid_path
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        # check if pid is runing
+        pid_status_path = content
+        if content:
+            command = "ps aux | awk '{print $2 }' | grep " + pid_status_path
+            print self.name, ': command:', command
+            (_, stdout, _) = self.ssh_conn.exec_command(command)
+            content = stdout.read()
+        if not content:
+            command = 'sudo  ip netns exec ' + net_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
+              '--interface=' + interface + ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
+              ' --dhcp-range ' + dhcp_range + ' --pid-file=' + pid_file + ' --dhcp-leasefile=' + leases_path + \
+              '  --listen-address ' + gateway
+
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.readline()
+
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def delete_dhcp_interfaces(self, vlan):
+        """
+        Create a linux bridge with STP active
+        :param vlan: netowrk vlan id
+        :return:
+        """
+
+        if self.test:
+            return
+
+        net_namespace = 'ovim-' + vlan
+        command = 'sudo ovs-vsctl del-port br-int ovs-tap-' + vlan
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' down'
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        command = 'sudo ip link set dev ovs-tap-' + vlan + ' down'
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+    def create_dhcp_interfaces(self, vlan, ip, netmask):
+        """
+        Create a linux bridge with STP active
+        :param vlan: segmentation id
+        :param ip: Ip included in the dhcp range for the tap interface living in namesapce side
+        :param netmask: dhcp net CIDR
+        :return: True if success
+        """
+
+        if self.test:
+            return
+
+        net_namespace = 'ovim-' + vlan
+        namespace_interface = 'tap-' + vlan
+
+        command = 'sudo ip netns add ' + net_namespace
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        command = 'sudo ip link add tap-' + vlan + ' type veth peer name ovs-tap-' + vlan
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        command = 'sudo ovs-vsctl add-port br-int ovs-tap-' + vlan + ' tag=' + vlan
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        command = 'sudo ip link set tap-' + vlan + ' netns ' + net_namespace
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' up'
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        command = 'sudo ip link set dev ovs-tap-' + vlan + ' up'
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        command = 'sudo  ip netns exec ' + net_namespace + ' ' + ' ifconfig  ' + namespace_interface \
+                  + ' ' + ip + ' netmask ' + netmask
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
+        """
+        Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
+        :param vxlan_interface: vlxan inteface name.
+        :param remote_ip: tunnel endpoint remote compute ip.
+        :return:
+        """
+        if self.test:
+            return
+        command = 'sudo ovs-vsctl add-port br-int ' + vxlan_interface + \
+                  ' -- set Interface ' + vxlan_interface + '  type=vxlan options:remote_ip=' + remote_ip + \
+                  ' -- set Port ' + vxlan_interface + ' other_config:stp-path-cost=10'
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        print content
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def delete_ovs_vxlan_tunnel(self, vxlan_interface):
+        """
+        Delete a vlxan tunnel  port from a OVS brdige.
+        :param vxlan_interface: vlxan name to be delete it.
+        :return: True if success.
+        """
+        if self.test:
+            return
+        command = 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        print content
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def delete_ovs_bridge(self):
+        """
+        Delete a OVS bridge from  a compute.
+        :return: True if success
+        """
+        if self.test:
+            return
+        command = 'sudo ovs-vsctl del-br br-int'
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        if len(content) == 0:
+            return True
+        else:
+            return False
+
+    def get_file_info(self, path):
+        command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
+        print self.name, ': command:', command
+        (_, stdout, _) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        if len(content) == 0:
+            return None # file does not exist
+        else:
+            return content.split(" ") #(permission, 1, owner, group, size, date, file)
+
+    def qemu_get_info(self, path):
+        command = 'qemu-img info ' + path
+        print self.name, ': command:', command
+        (_, stdout, stderr) = self.ssh_conn.exec_command(command)
+        content = stdout.read()
+        if len(content) == 0:
+            error = stderr.read()
+            print self.name, ": get_qemu_info error ", error
+            raise paramiko.ssh_exception.SSHException("Error getting qemu_info: " + error)
+        else:
+            try: 
+                return yaml.load(content)
+            except yaml.YAMLError as exc:
+                text = ""
+                if hasattr(exc, 'problem_mark'):
+                    mark = exc.problem_mark
+                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
+                print self.name, ": get_qemu_info yaml format Exception", text
+                raise paramiko.ssh_exception.SSHException("Error getting qemu_info yaml format" + text)
+
+    def qemu_change_backing(self, inc_file, new_backing_file):
+        command = 'qemu-img rebase -u -b ' + new_backing_file + ' ' + inc_file 
+        print self.name, ': command:', command
+        (_, _, stderr) = self.ssh_conn.exec_command(command)
+        content = stderr.read()
+        if len(content) == 0:
+            return 0
+        else:
+            print self.name, ": qemu_change_backing error: ", content
+            return -1
+    
+    def get_notused_filename(self, proposed_name, suffix=''):
+        '''Look for a non existing file_name in the host
+            proposed_name: proposed file name, includes path
+            suffix: suffix to be added to the name, before the extention
+        '''
+        extension = proposed_name.rfind(".")
+        slash = proposed_name.rfind("/")
+        if extension < 0 or extension < slash: # no extension
+            extension = len(proposed_name)
+        target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
+        info = self.get_file_info(target_name)
+        if info is None:
+            return target_name
+        
+        index=0
+        while info is not None:
+            target_name = proposed_name[:extension] + suffix +  "-" + str(index) + proposed_name[extension:]
+            index+=1
+            info = self.get_file_info(target_name) 
+        return target_name
+    
+    def get_notused_path(self, proposed_path, suffix=''):
+        '''Look for a non existing path at database for images
+            proposed_path: proposed file name, includes path
+            suffix: suffix to be added to the name, before the extention
+        '''
+        extension = proposed_path.rfind(".")
+        if extension < 0:
+            extension = len(proposed_path)
+        if suffix != None:
+            target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
+        index=0
+        while True:
+            r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
+            if r<=0:
+                return target_path
+            target_path = proposed_path[:extension] + suffix +  "-" + str(index) + proposed_path[extension:]
+            index+=1
+
+    
+    def delete_file(self, file_name):
+        command = 'rm -f '+file_name
+        print self.name, ': command:', command
+        (_, _, stderr) = self.ssh_conn.exec_command(command)
+        error_msg = stderr.read()
+        if len(error_msg) > 0:
+            raise paramiko.ssh_exception.SSHException("Error deleting file: " + error_msg)
+
+    def copy_file(self, source, destination, perserve_time=True):
+        if source[0:4]=="http":
+            command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
+                dst=destination, src=source, dst_result=destination + ".result" )
+        else:
+            command = 'cp --no-preserve=mode'
+            if perserve_time:
+                command += ' --preserve=timestamps'
+            command +=  " '{}' '{}'".format(source, destination)
+        print self.name, ': command:', command
+        (_, _, stderr) = self.ssh_conn.exec_command(command)
+        error_msg = stderr.read()
+        if len(error_msg) > 0:
+            raise paramiko.ssh_exception.SSHException("Error copying image to local host: " + error_msg)
+
+    def copy_remote_file(self, remote_file, use_incremental):
+        ''' Copy a file from the repository to local folder and recursively
+            copy the backing files in case the remote file is incremental.
+            Reads and/or modifies self.localinfo['files'], the cache that maps
+            remote paths to unmodified local copies of images.
+            params:
+                remote_file: path (or http url) of remote file
+                use_incremental: None (leave the decision to this function), True, False
+            return:
+                local_file: name of local file
+                qemu_info: dict with qemu information of local file
+                use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
+        '''
+        
+        use_incremental_out = use_incremental
+        new_backing_file = None
+        local_file = None
+        file_from_local = True
+
+        # in case incremental use is not decided, take the decision depending on the image:
+        # avoid the use of incremental if this image is already incremental
+        if remote_file[0:4] == "http":
+            file_from_local = False
+        if file_from_local:
+            qemu_remote_info = self.qemu_get_info(remote_file)
+        if use_incremental_out==None:
+            use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)
+        # copy recursively the backing files first, so the new local backing path is known below
+        if  file_from_local and 'backing file' in qemu_remote_info:
+            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)
+        
+        # check if remote file is already present locally (cache hit)
+        if use_incremental_out and remote_file in self.localinfo['files']:
+            local_file = self.localinfo['files'][remote_file]
+            local_file_info =  self.get_file_info(local_file)
+            if file_from_local:
+                remote_file_info = self.get_file_info(remote_file)
+            if local_file_info == None:
+                local_file = None
+            elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
+                # local copy of file not valid because size (field 4) or date (field 5) differ
+                # TODO DELETE local file if this file is not used by any active virtual machine
+                try:
+                    self.delete_file(local_file)
+                    del self.localinfo['files'][remote_file]
+                except Exception:
+                    pass
+                local_file = None
+            else: # check that the local file has the same backing file, or there are no backing at all
+                qemu_info = self.qemu_get_info(local_file)
+                if new_backing_file != qemu_info.get('backing file'):
+                    local_file = None
+                
+
+        if local_file == None: # cache miss: copy the file to the host image path
+            img_name= remote_file.split('/') [-1]
+            img_local = self.image_path + '/' + img_name
+            local_file = self.get_notused_filename(img_local)
+            self.copy_file(remote_file, local_file, use_incremental_out)
+
+            if use_incremental_out:
+                self.localinfo['files'][remote_file] = local_file
+            if new_backing_file:
+                self.qemu_change_backing(local_file, new_backing_file)
+            qemu_info = self.qemu_get_info(local_file)
+            
+        return local_file, qemu_info, use_incremental_out
+            
+    def launch_server(self, conn, server, rebuild=False, domain=None):
+        """Deploy (or resume/rebuild) a server instance on this host via libvirt.
+        :param conn: open libvirt connection to this host
+        :param server: instance dict; reads 'uuid', 'image_id', optional 'paused', 'metadata'
+        :param rebuild: when True, previous incremental files are deleted and the domain is recreated
+        :param domain: existing libvirt domain; if given and not rebuilding it is simply resumed
+        :return: (0, 'Success') on success, (<0, error_text) on failure
+        """
+        if self.test:
+            time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real
+            return 0, 'Success'
+
+        server_id = server['uuid']
+        paused = server.get('paused','no')
+        try:
+            if domain!=None and rebuild==False:
+                domain.resume()
+                #self.server_status[server_id] = 'ACTIVE'
+                return 0, 'Success'
+
+            self.db_lock.acquire()
+            result, server_data = self.db.get_instance(server_id)
+            self.db_lock.release()
+            if result <= 0:
+                print self.name, ": launch_server ERROR getting server from DB",result, server_data
+                return result, server_data
+    
+        #0: get image metadata
+            server_metadata = server.get('metadata', {})
+            use_incremental = None
+             
+            if "use_incremental" in server_metadata:
+                use_incremental = False if server_metadata["use_incremental"]=="no" else True
+
+            server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
+            if rebuild:
+                #delete previous incremental files
+                for file_ in server_host_files.values():
+                    self.delete_file(file_['source file'] )
+                server_host_files={}
+    
+        #1: obtain aditional devices (disks)
+            #Put as first device the main disk
+            devices = [  {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ] 
+            if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
+                devices += server_data['extended']['devices']
+
+            for dev in devices:
+                # devices without an image (e.g. empty volumes) need no file handling
+                if dev['image_id'] == None:
+                    continue
+                
+                self.db_lock.acquire()
+                result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
+                                                    WHERE={'uuid': dev['image_id']})
+                self.db_lock.release()
+                if result <= 0:
+                    error_text = "ERROR", result, content, "when getting image", dev['image_id']
+                    print self.name, ": launch_server", error_text 
+                    return -1, error_text
+                if content[0]['metadata'] is not None:
+                    dev['metadata'] = json.loads(content[0]['metadata'])
+                else:
+                    dev['metadata'] = {}
+                
+                # reuse a file already copied for this server (e.g. on restart)
+                if dev['image_id'] in server_host_files:
+                    dev['source file'] = server_host_files[ dev['image_id'] ] ['source file'] #local path
+                    dev['file format'] = server_host_files[ dev['image_id'] ] ['file format'] # raw or qcow2
+                    continue
+                
+            #2: copy image to host
+                remote_file = content[0]['path']
+                use_incremental_image = use_incremental
+                if dev['metadata'].get("use_incremental") == "no":
+                    use_incremental_image = False
+                local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)
+                
+                #create incremental image
+                if use_incremental_image:
+                    local_file_inc = self.get_notused_filename(local_file, '.inc')
+                    command = 'qemu-img create -f qcow2 '+local_file_inc+ ' -o backing_file='+ local_file
+                    print 'command:', command
+                    (_, _, stderr) = self.ssh_conn.exec_command(command)
+                    error_msg = stderr.read()
+                    if len(error_msg) > 0:
+                        raise paramiko.ssh_exception.SSHException("Error creating incremental file: " + error_msg)
+                    local_file = local_file_inc
+                    qemu_info = {'file format':'qcow2'}
+                
+                server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}
+
+                dev['source file'] = local_file 
+                dev['file format'] = qemu_info['file format']
+
+            self.localinfo['server_files'][ server['uuid'] ] = server_host_files
+            self.localinfo_dirty = True
+
+        #3 Create XML
+            result, xml = self.create_xml_server(server_data, devices, server_metadata)  #local_file
+            if result <0:
+                print self.name, ": create xml server error:", xml
+                return -2, xml
+            print self.name, ": create xml:", xml
+            atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
+        #4 Start the domain
+            if not rebuild: #ensures that any pending destroying server is done
+                self.server_forceoff(True)
+            #print self.name, ": launching instance" #, xml
+            conn.createXML(xml, atribute)
+            #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
+
+            return 0, 'Success'
+
+        except paramiko.ssh_exception.SSHException as e:
+            text = e.args[0]
+            print self.name, ": launch_server(%s) ssh Exception: %s" %(server_id, text)
+            # a dead SSH session is recoverable: reconnect so the next retry can work
+            if "SSH session not active" in text:
+                self.ssh_connect()
+        except host_thread.lvirt_module.libvirtError as e:
+            text = e.get_error_message()
+            print self.name, ": launch_server(%s) libvirt Exception: %s"  %(server_id, text)
+        except Exception as e:
+            text = str(e)
+            print self.name, ": launch_server(%s) Exception: %s"  %(server_id, text)
+        return -1, text
+    
+    def update_servers_status(self):
+        """Poll libvirt for the state of all domains on this host and propagate
+        changes to self.server_status and the 'instances' DB table.
+        Does nothing in test mode or when no servers are tracked.
+        """
+                            # # virDomainState
+                            # VIR_DOMAIN_NOSTATE = 0
+                            # VIR_DOMAIN_RUNNING = 1
+                            # VIR_DOMAIN_BLOCKED = 2
+                            # VIR_DOMAIN_PAUSED = 3
+                            # VIR_DOMAIN_SHUTDOWN = 4
+                            # VIR_DOMAIN_SHUTOFF = 5
+                            # VIR_DOMAIN_CRASHED = 6
+                            # VIR_DOMAIN_PMSUSPENDED = 7   #TODO suspended
+    
+        if self.test or len(self.server_status)==0:
+            return            
+        
+        try:
+            conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
+            domains=  conn.listAllDomains() 
+            domain_dict={}
+            # map each libvirt domain state to the openvim instance status
+            for domain in domains:
+                uuid = domain.UUIDString() ;
+                libvirt_status = domain.state()
+                #print libvirt_status
+                if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
+                    new_status = "ACTIVE"
+                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
+                    new_status = "PAUSED"
+                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
+                    new_status = "INACTIVE"
+                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
+                    new_status = "ERROR"
+                else:
+                    new_status = None
+                domain_dict[uuid] = new_status
+            conn.close()
+        except host_thread.lvirt_module.libvirtError as e:
+            print self.name, ": get_state() Exception '", e.get_error_message()
+            return
+
+        for server_id, current_status in self.server_status.iteritems():
+            new_status = None
+            if server_id in domain_dict:
+                new_status = domain_dict[server_id]
+            else:
+                # domain no longer defined in libvirt: treat as stopped
+                new_status = "INACTIVE"
+                            
+            if new_status == None or new_status == current_status:
+                continue
+            if new_status == 'INACTIVE' and current_status == 'ERROR':
+                continue #keep ERROR status, because obviously this machine is not running
+            #change status: persist first, then mirror in the local cache
+            print self.name, ": server ", server_id, "status change from ", current_status, "to", new_status
+            STATUS={'progress':100, 'status':new_status}
+            if new_status == 'ERROR':
+                STATUS['last_error'] = 'machine has crashed'
+            self.db_lock.acquire()
+            r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
+            self.db_lock.release()
+            if r>=0:
+                self.server_status[server_id] = new_status
+                        
+    def action_on_server(self, req, last_retry=True):
+        '''Perform an action on a req
+        Attributes:
+            req: dictionary that contain:
+                server properties: 'uuid','name','tenant_id','status'
+                action: 'action'
+                host properties: 'user', 'ip_name'
+        return (error, text)  
+             0: No error. VM is updated to new state,  
+            -1: Invalid action, as trying to pause a PAUSED VM
+            -2: Error accessing host
+            -3: VM nor present
+            -4: Error at DB access
+            -5: Error while trying to perform action. VM is updated to ERROR
+        '''
+        server_id = req['uuid']
+        conn = None
+        new_status = None
+        old_status = req['status']
+        last_error = None
+        
+        if self.test:
+            if 'terminate' in req['action']:
+                new_status = 'deleted'
+            elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
+                if req['status']!='ERROR':
+                    time.sleep(5)
+                    new_status = 'INACTIVE'
+            elif 'start' in req['action']  and req['status']!='ERROR':      new_status = 'ACTIVE'
+            elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE' :     new_status = 'ACTIVE'
+            elif 'pause' in req['action']  and req['status']!='ERROR':      new_status = 'PAUSED'
+            elif 'reboot' in req['action'] and req['status']!='ERROR':     new_status = 'ACTIVE'
+            elif 'rebuild' in req['action']:
+                time.sleep(random.randint(20,150))
+                new_status = 'ACTIVE'
+            elif 'createImage' in req['action']:
+                time.sleep(5)
+                self.create_image(None, req)
+        else:
+            try:
+                conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
+                try:
+                    dom = conn.lookupByUUIDString(server_id)
+                except host_thread.lvirt_module.libvirtError as e:
+                    text = e.get_error_message()
+                    if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
+                        dom = None
+                    else:
+                        print self.name, ": action_on_server(",server_id,") libvirt exception:", text
+                        raise e
+                
+                if 'forceOff' in req['action']:
+                    if dom == None:
+                        print self.name, ": action_on_server(",server_id,") domain not running" 
+                    else:
+                        try:
+                            print self.name, ": sending DESTROY to server", server_id 
+                            dom.destroy()
+                        except Exception as e:
+                            if "domain is not running" not in e.get_error_message():
+                                print self.name, ": action_on_server(",server_id,") Exception while sending force off:", e.get_error_message()
+                                last_error =  'action_on_server Exception while destroy: ' + e.get_error_message()
+                                new_status = 'ERROR'
+                
+                elif 'terminate' in req['action']:
+                    if dom == None:
+                        print self.name, ": action_on_server(",server_id,") domain not running" 
+                        new_status = 'deleted'
+                    else:
+                        try:
+                            if req['action']['terminate'] == 'force':
+                                print self.name, ": sending DESTROY to server", server_id 
+                                dom.destroy()
+                                new_status = 'deleted'
+                            else:
+                                print self.name, ": sending SHUTDOWN to server", server_id 
+                                dom.shutdown()
+                                self.pending_terminate_server.append( (time.time()+10,server_id) )
+                        except Exception as e:
+                            print self.name, ": action_on_server(",server_id,") Exception while destroy:", e.get_error_message() 
+                            last_error =  'action_on_server Exception while destroy: ' + e.get_error_message()
+                            new_status = 'ERROR'
+                            if "domain is not running" in e.get_error_message():
+                                try:
+                                    dom.undefine()
+                                    new_status = 'deleted'
+                                except Exception:
+                                    print self.name, ": action_on_server(",server_id,") Exception while undefine:", e.get_error_message() 
+                                    last_error =  'action_on_server Exception2 while undefine:', e.get_error_message()
+                            #Exception: 'virDomainDetachDevice() failed'
+                    if new_status=='deleted':
+                        if server_id in self.server_status:
+                            del self.server_status[server_id]
+                        if req['uuid'] in self.localinfo['server_files']:
+                            for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
+                                try:
+                                    self.delete_file(file_['source file'])
+                                except Exception:
+                                    pass
+                            del self.localinfo['server_files'][ req['uuid'] ]
+                            self.localinfo_dirty = True
+
+                elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
+                    try:
+                        if dom == None:
+                            print self.name, ": action_on_server(",server_id,") domain not running"
+                        else: 
+                            dom.shutdown()
+#                        new_status = 'INACTIVE'
+                        #TODO: check status for changing at database
+                    except Exception as e:
+                        new_status = 'ERROR'
+                        print self.name, ": action_on_server(",server_id,") Exception while shutdown:", e.get_error_message() 
+                        last_error =  'action_on_server Exception while shutdown: ' + e.get_error_message()
+    
+                elif 'rebuild' in req['action']:
+                    if dom != None:
+                        dom.destroy()
+                    r = self.launch_server(conn, req, True, None)
+                    if r[0] <0:
+                        new_status = 'ERROR'
+                        last_error = r[1]
+                    else:
+                        new_status = 'ACTIVE'
+                elif 'start' in req['action']:
+                    # The instance is only create in DB but not yet at libvirt domain, needs to be create
+                    rebuild = True if req['action']['start'] == 'rebuild'  else False
+                    r = self.launch_server(conn, req, rebuild, dom)
+                    if r[0] <0:
+                        new_status = 'ERROR'
+                        last_error = r[1]
+                    else:
+                        new_status = 'ACTIVE'
+                
+                elif 'resume' in req['action']:
+                    try:
+                        if dom == None:
+                            pass
+                        else:
+                            dom.resume()
+#                            new_status = 'ACTIVE'
+                    except Exception as e:
+                        print self.name, ": action_on_server(",server_id,") Exception while resume:", e.get_error_message() 
+                    
+                elif 'pause' in req['action']:
+                    try: 
+                        if dom == None:
+                            pass
+                        else:
+                            dom.suspend()
+#                            new_status = 'PAUSED'
+                    except Exception as e:
+                        print self.name, ": action_on_server(",server_id,") Exception while pause:", e.get_error_message() 
+    
+                elif 'reboot' in req['action']:
+                    try: 
+                        if dom == None:
+                            pass
+                        else:
+                            dom.reboot()
+                        print self.name, ": action_on_server(",server_id,") reboot:" 
+                        #new_status = 'ACTIVE'
+                    except Exception as e:
+                        print self.name, ": action_on_server(",server_id,") Exception while reboot:", e.get_error_message() 
+                elif 'createImage' in req['action']:
+                    self.create_image(dom, req)
+                        
+        
+                conn.close()    
+            except host_thread.lvirt_module.libvirtError as e:
+                if conn is not None: conn.close()
+                text = e.get_error_message()
+                new_status = "ERROR"
+                last_error = text
+                print self.name, ": action_on_server(",server_id,") Exception '", text
+                if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
+                    print self.name, ": action_on_server(",server_id,") Exception removed from host"
+        #end of if self.test
+        if new_status ==  None:
+            return 1
+
+        print self.name, ": action_on_server(",server_id,") new status", new_status, last_error
+        UPDATE = {'progress':100, 'status':new_status}
+        
+        if new_status=='ERROR':
+            if not last_retry:  #if there will be another retry do not update database 
+                return -1 
+            elif 'terminate' in req['action']:
+                #PUT a log in the database
+                print self.name, ": PANIC deleting server", server_id, last_error
+                self.db_lock.acquire()
+                self.db.new_row('logs', 
+                            {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
+                             'description':'PANIC deleting server from host '+self.name+': '+last_error}
+                        )
+                self.db_lock.release()
+                if server_id in self.server_status:
+                    del self.server_status[server_id]
+                return -1
+            else:
+                UPDATE['last_error'] = last_error
+        if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
+            self.db_lock.acquire()
+            self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
+            self.server_status[server_id] = new_status
+            self.db_lock.release()
+        if new_status == 'ERROR':
+            return -1
+        return 1
+     
+    
    def restore_iface(self, name, mac, lib_conn=None):
        ''' Make an ifdown/ifup cycle (destroy + create in libvirt terms) to
            restore the default parameters of an interface.
            Params:
                name: interface name, used only for logging
                mac: mac address of the interface, used to look it up in libvirt
                lib_conn: connection to the libvirt, if None a new connection is
                    created (and closed before returning)
            Return 0,None if ok, -1,text if fails
        ''' 
        conn=None
        ret = 0
        error_text=None
        if self.test:
            # test mode: no real host behind, just log and report success
            print self.name, ": restore_iface '%s' %s" % (name, mac)
            return 0, None
        try:
            if not lib_conn:
                conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
            else:
                conn = lib_conn
                
            #wait to the pending VM deletion
            #TODO.Revise  self.server_forceoff(True)

            # destroy + create resets the interface to its default configuration
            iface = conn.interfaceLookupByMACString(mac)
            iface.destroy()
            iface.create()
            print self.name, ": restore_iface '%s' %s" % (name, mac)
        except host_thread.lvirt_module.libvirtError as e:
            error_text = e.get_error_message()
            print self.name, ": restore_iface '%s' '%s' libvirt exception: %s" %(name, mac, error_text) 
            ret=-1
        finally:
            # close the connection only if it was created here; a caller-supplied
            # lib_conn remains the caller's responsibility
            if lib_conn is None and conn is not None:
                conn.close()
        return ret, error_text
+
+        
    def create_image(self,dom, req):
        ''' Create a new image from a server disk by copying the qcow2 file.
            Params:
                dom: libvirt domain of the server (not used in test mode)
                req: action request; req['action']['createImage'] holds 'source'
                    (image_id / path), optional destination 'path' and 'name';
                    req['new_image']['uuid'] is the DB row to update
            Side effect: updates the 'images' DB row with final status and path.
        '''
        if self.test:
            if 'path' in req['action']['createImage']:
                file_dst = req['action']['createImage']['path']
            else:
                createImage=req['action']['createImage']
                img_name= createImage['source']['path']
                index=img_name.rfind('/')
                # NOTE(review): test branch calls get_notused_path while the real
                # branch below calls get_notused_filename - confirm both helpers exist
                file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
            image_status='ACTIVE'
        else:
            # one retry is allowed, but only after reconnecting a dead SSH session
            for retry in (0,1):
                try:
                    server_id = req['uuid']
                    createImage=req['action']['createImage']
                    file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
                    if 'path' in req['action']['createImage']:
                        file_dst = req['action']['createImage']['path']
                    else:
                        img_name= createImage['source']['path']
                        index=img_name.rfind('/')
                        file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')
                          
                    self.copy_file(file_orig, file_dst)
                    # keep the qcow2 backing chain consistent: if the source had a
                    # backing file known locally, repoint the copy to it
                    qemu_info = self.qemu_get_info(file_orig)
                    if 'backing file' in qemu_info:
                        for k,v in self.localinfo['files'].items():
                            if v==qemu_info['backing file']:
                                self.qemu_change_backing(file_dst, k)
                                break
                    image_status='ACTIVE'
                    break
                except paramiko.ssh_exception.SSHException as e:
                    image_status='ERROR'
                    error_text = e.args[0]
                    print self.name, "': create_image(",server_id,") ssh Exception:", error_text
                    if "SSH session not active" in error_text and retry==0:
                        self.ssh_connect()
                except Exception as e:
                    image_status='ERROR'
                    error_text = str(e)
                    print self.name, "': create_image(",server_id,") Exception:", error_text
        
                #TODO insert a last_error at database
        # NOTE(review): if every attempt failed before file_dst was assigned, the
        # update below raises NameError - confirm intended failure behaviour
        self.db_lock.acquire()
        self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst}, 
                {'uuid':req['new_image']['uuid']}, log=True)
        self.db_lock.release()
+  
+    def edit_iface(self, port_id, old_net, new_net):
+        #This action imply remove and insert interface to put proper parameters
+        if self.test:
+            time.sleep(1)
+        else:
+        #get iface details
+            self.db_lock.acquire()
+            r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
+                                    WHERE={'port_id': port_id})
+            self.db_lock.release()
+            if r<0:
+                print self.name, ": edit_iface(",port_id,") DDBB error:", c
+                return
+            elif r==0:
+                print self.name, ": edit_iface(",port_id,") por not found"
+                return
+            port=c[0]
+            if port["model"]!="VF":
+                print self.name, ": edit_iface(",port_id,") ERROR model must be VF"
+                return
+            #create xml detach file
+            xml=[]
+            self.xml_level = 2
+            xml.append("<interface type='hostdev' managed='yes'>")
+            xml.append("  <mac address='" +port['mac']+ "'/>")
+            xml.append("  <source>"+ self.pci2xml(port['pci'])+"\n  </source>")
+            xml.append('</interface>')                
+
+            
+            try:
+                conn=None
+                conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
+                dom = conn.lookupByUUIDString(port["instance_id"])
+                if old_net:
+                    text="\n".join(xml)
+                    print self.name, ": edit_iface detaching SRIOV interface", text
+                    dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
+                if new_net:
+                    xml[-1] ="  <vlan>   <tag id='" + str(port['vlan']) + "'/>   </vlan>"
+                    self.xml_level = 1
+                    xml.append(self.pci2xml(port.get('vpci',None)) )
+                    xml.append('</interface>')                
+                    text="\n".join(xml)
+                    print self.name, ": edit_iface attaching SRIOV interface", text
+                    dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
+                    
+            except host_thread.lvirt_module.libvirtError as e:
+                text = e.get_error_message()
+                print self.name, ": edit_iface(",port["instance_id"],") libvirt exception:", text 
+                
+            finally:
+                if conn is not None: conn.close()
+
+
def create_server(server, db, db_lock, only_of_ports):
    """Allocate host resources for a server (VM) and build its resource tree.

    Selects an appropriate host NUMA node from the database according to the
    flavor/extended requirements (hugepage memory, pinned cpus, dataplane
    interfaces), computes the cpu pinning and assigns concrete SR-IOV /
    passthrough ports and bridge interfaces.

    Params:
        server: server descriptor; must contain 'flavor', 'image_id',
            'flavor_id', 'tenant_id'; optionally 'extended', 'networks',
            'host_id', 'name', 'description'
        db: vim_db database instance used to query and select resources
        db_lock: threading lock protecting db accesses
        only_of_ports: restrict dataplane ports to openflow-connected ones
    Return:
        (0, resources) on success, with resources ready to be stored in the DB
        (negative, error_text) on error
    """
    extended = server.get('extended', None)

    # base requirements from the flavor; 'numa' gathers hugepage memory,
    # cpu pinning and dataplane interface needs
    requirements = {}
    requirements['numa'] = {'memory': 0, 'proc_req_type': 'threads', 'proc_req_nb': 0,
                            'port_list': [], 'sriov_list': []}
    requirements['ram'] = server['flavor'].get('ram', 0)
    if requirements['ram'] is None:
        requirements['ram'] = 0
    requirements['vcpus'] = server['flavor'].get('vcpus', 0)
    if requirements['vcpus'] is None:
        requirements['vcpus'] = 0
    # if 'extended' is not given at the server, take it from the flavor
    # (stored there as a quasi-json text with single quotes)
    if extended is None:
        if 'extended' in server['flavor'] and server['flavor']['extended'] is not None:
            json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
            extended = json.loads(json_acceptable_string)
        else:
            extended = None

    # for simplicity only single-NUMA VMs are supported in the initial implementation
    if extended is not None:
        numas = extended.get('numas', [])
        if len(numas) > 1:
            return (-2, "Multi-NUMA VMs are not supported yet")

        # a for loop is used in order to be ready for multi-NUMA VMs
        request = []
        for numa in numas:
            numa_req = {}
            numa_req['memory'] = numa.get('memory', 0)
            if 'cores' in numa:
                numa_req['proc_req_nb'] = numa['cores']     # number of cores or threads to be reserved
                numa_req['proc_req_type'] = 'cores'         # whether cores or threads must be reserved
                numa_req['proc_req_list'] = numa.get('cores-id', None)  # ids to assign to the cores/threads
            elif 'paired-threads' in numa:
                numa_req['proc_req_nb'] = numa['paired-threads']
                numa_req['proc_req_type'] = 'paired-threads'
                numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
            elif 'threads' in numa:
                numa_req['proc_req_nb'] = numa['threads']
                numa_req['proc_req_type'] = 'threads'
                numa_req['proc_req_list'] = numa.get('threads-id', None)
            else:
                numa_req['proc_req_nb'] = 0  # by default
                numa_req['proc_req_type'] = 'threads'

            # generate a list of sriov and another of physical (passthrough) interfaces
            interfaces = numa.get('interfaces', [])
            sriov_list = []
            port_list = []
            for iface in interfaces:
                iface['bandwidth'] = int(iface['bandwidth'])
                if iface['dedicated'][:3] == 'yes':
                    port_list.append(iface)
                else:
                    sriov_list.append(iface)

            # save lists ordered from more restrictive to less bw requirements
            numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
            numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)

            request.append(numa_req)

        # search in db for an appropriate numa for each requested numa;
        # at the moment multi-NUMA VMs are not supported
        if len(request) > 0:
            requirements['numa'].update(request[0])
    if requirements['numa']['memory'] > 0:
        requirements['ram'] = 0  # asking for both huge and non-huge pages memory is incompatible for now
    elif requirements['ram'] == 0:
        return (-1, "Memory information not set neither at extended field nor at ram")
    if requirements['numa']['proc_req_nb'] > 0:
        requirements['vcpus'] = 0  # asking for both isolated and non-isolated cpus is incompatible for now
    elif requirements['vcpus'] == 0:
        return (-1, "Processor information not set neither at extended field nor at vcpus")

    db_lock.acquire()
    result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
    db_lock.release()

    if result == -1:
        return (-1, content)

    numa_id = content['numa_id']
    host_id = content['host_id']

    # obtain threads_id and calculate pinning
    cpu_pinning = []
    reserved_threads = []
    paired = 'N'  # initialized here so the 'cores' loop below is safe even without pinning
    if requirements['numa']['proc_req_nb'] > 0:
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_core',
                                       SELECT=('id', 'core_id', 'thread_id'),
                                       WHERE={'numa_id': numa_id, 'instance_id': None, 'status': 'ok'})
        db_lock.release()
        if result <= 0:
            print(content)
            return -1, content

        # convert rows to a dictionary indexed by core_id
        cores_dict = {}
        for row in content:
            if not row['core_id'] in cores_dict:
                cores_dict[row['core_id']] = []
            cores_dict[row['core_id']].append([row['thread_id'], row['id']])

        # in case full cores are requested
        if requirements['numa']['proc_req_type'] == 'cores':
            # get/create the list of the vcpu_ids
            vcpu_id_list = requirements['numa']['proc_req_list']
            if vcpu_id_list is None:
                vcpu_id_list = list(range(0, int(requirements['numa']['proc_req_nb'])))

            for threads in cores_dict.values():
                # we need full cores
                if len(threads) != 2:
                    continue

                # set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                # reserve the second thread so it is not used by anybody else
                reserved_threads.append(threads[1][1])

                if len(vcpu_id_list) == 0:
                    break

        # in case paired threads are requested
        elif requirements['numa']['proc_req_type'] == 'paired-threads':
            paired = 'Y'
            # get/create the list of the vcpu_ids
            if requirements['numa']['proc_req_list'] is not None:
                vcpu_id_list = []
                for pair in requirements['numa']['proc_req_list']:
                    if len(pair) != 2:
                        return -1, "Field paired-threads-id not properly specified"
                    vcpu_id_list.append(pair[0])
                    vcpu_id_list.append(pair[1])
            else:
                vcpu_id_list = list(range(0, 2 * int(requirements['numa']['proc_req_nb'])))

            for threads in cores_dict.values():
                # we need full cores
                if len(threads) != 2:
                    continue
                # set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                # set pinning for the second thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])

                if len(vcpu_id_list) == 0:
                    break

        # in case normal threads are requested
        elif requirements['numa']['proc_req_type'] == 'threads':
            # get/create the list of the vcpu_ids
            vcpu_id_list = requirements['numa']['proc_req_list']
            if vcpu_id_list is None:
                vcpu_id_list = list(range(0, int(requirements['numa']['proc_req_nb'])))

            # prefer cores with fewer free threads so full cores remain available
            for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
                threads = cores_dict[threads_index]
                # set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                # if exists, set pinning for the second thread
                if len(threads) == 2 and len(vcpu_id_list) != 0:
                    cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])

                if len(vcpu_id_list) == 0:
                    break

        # get the source pci addresses for the selected numa (SR-IOV ports)
        used_sriov_ports = []
        for port in requirements['numa']['sriov_list']:
            db_lock.acquire()
            result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),
                                           WHERE={'numa_id': numa_id, 'root_id': port['port_id'],
                                                  'port_id': None, 'Mbps_used': 0})
            db_lock.release()
            if result <= 0:
                print(content)
                return -1, content
            for row in content:
                if row['id'] in used_sriov_ports or row['id'] == port['port_id']:
                    continue
                port['pci'] = row['pci']
                if 'mac_address' not in port:
                    port['mac_address'] = row['mac']
                del port['mac']
                port['port_id'] = row['id']
                port['Mbps_used'] = port['bandwidth']
                used_sriov_ports.append(row['id'])
                break

        for port in requirements['numa']['port_list']:
            port['Mbps_used'] = None
            if port['dedicated'] != "yes:sriov":
                port['mac_address'] = port['mac']
                del port['mac']
                continue
            db_lock.acquire()
            result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),
                                           WHERE={'numa_id': numa_id, 'root_id': port['port_id'],
                                                  'port_id': None, 'Mbps_used': 0})
            db_lock.release()
            if result <= 0:
                print(content)
                return -1, content
            port['Mbps_used'] = content[0]['Mbps']
            for row in content:
                if row['id'] in used_sriov_ports or row['id'] == port['port_id']:
                    continue
                port['pci'] = row['pci']
                if 'mac_address' not in port:
                    port['mac_address'] = row['mac']  # mac cannot be set to passthrough ports
                del port['mac']
                port['port_id'] = row['id']
                used_sriov_ports.append(row['id'])
                break

    server['host_id'] = host_id

    # generate the dictionary for saving in db the instance resources
    resources = {}
    resources['bridged-ifaces'] = []

    numa_dict = {}
    numa_dict['interfaces'] = []

    numa_dict['interfaces'] += requirements['numa']['port_list']
    numa_dict['interfaces'] += requirements['numa']['sriov_list']

    # check bridge information
    unified_dataplane_iface = []
    unified_dataplane_iface += requirements['numa']['port_list']
    unified_dataplane_iface += requirements['numa']['sriov_list']

    for control_iface in server.get('networks', []):
        control_iface['net_id'] = control_iface.pop('uuid')
        # get the bridge name
        db_lock.acquire()
        result, content = db.get_table(FROM='nets',
                                       SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp',
                                               'dhcp_first_ip', 'dhcp_last_ip', 'cidr'),
                                       WHERE={'uuid': control_iface['net_id']})
        db_lock.release()
        if result < 0:
            pass
        elif result == 0:
            return -1, "Error at field networks: Not found any network with uuid %s" % control_iface['net_id']
        else:
            network = content[0]
            if control_iface.get("type", 'virtual') == 'virtual':
                if network['type'] != 'bridge_data' and network['type'] != 'bridge_man':
                    return -1, "Error at field networks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
                resources['bridged-ifaces'].append(control_iface)
                if network.get("provider") and network["provider"][0:3] == "OVS":
                    control_iface["type"] = "instance:ovs"
                else:
                    control_iface["type"] = "instance:bridge"
                if network.get("vlan"):
                    control_iface["vlan"] = network["vlan"]

                if network.get("enable_dhcp") == 'true':
                    control_iface["enable_dhcp"] = network.get("enable_dhcp")
                    control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
                    control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
                    control_iface["cidr"] = network["cidr"]
            else:
                if network['type'] != 'data' and network['type'] != 'ptp':
                    return -1, "Error at field networks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
                # dataplane interface: look for it in the numa tree and assign this network
                iface_found = False
                for dataplane_iface in numa_dict['interfaces']:
                    if dataplane_iface['name'] == control_iface.get("name"):
                        if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
                                (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
                                (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared"):
                            return -1, "Error at field networks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
                                (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
                        dataplane_iface['uuid'] = control_iface['net_id']
                        if dataplane_iface['dedicated'] == "no":
                            dataplane_iface['vlan'] = network['vlan']
                        if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
                            dataplane_iface['mac_address'] = control_iface.get("mac_address")
                        if control_iface.get("vpci"):
                            dataplane_iface['vpci'] = control_iface.get("vpci")
                        iface_found = True
                        break
                if not iface_found:
                    return -1, "Error at field networks: interface name %s from network not found at flavor" % control_iface.get("name")

    resources['host_id'] = host_id
    resources['image_id'] = server['image_id']
    resources['flavor_id'] = server['flavor_id']
    resources['tenant_id'] = server['tenant_id']
    resources['ram'] = requirements['ram']
    resources['vcpus'] = requirements['vcpus']
    resources['status'] = 'CREATING'

    if 'description' in server:
        resources['description'] = server['description']
    if 'name' in server:
        resources['name'] = server['name']

    resources['extended'] = {}  # optional
    resources['extended']['numas'] = []
    numa_dict['numa_id'] = numa_id
    numa_dict['memory'] = requirements['numa']['memory']
    numa_dict['cores'] = []

    for core in cpu_pinning:
        numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
    for core in reserved_threads:
        numa_dict['cores'].append({'id': core})
    resources['extended']['numas'].append(numa_dict)
    if extended is not None and 'devices' in extended:  # TODO allow extra devices without numa
        resources['extended']['devices'] = extended['devices']

    print('===================================={')
    print(json.dumps(resources, indent=4))
    print('====================================}')

    return 0, resources
+
diff --git a/osm_openvim/httpserver.py b/osm_openvim/httpserver.py
new file mode 100644 (file)
index 0000000..edf1e8d
--- /dev/null
@@ -0,0 +1,2430 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+This is the thread for the http server North API.
+Two threads will be launched, with normal and administrative permissions.
+'''
+
+__author__="Alfonso Tierno, Gerardo Garcia, Leonardo Mirabal"
+__date__ ="$10-jul-2014 12:07:15$"
+
+import bottle
+import urlparse
+import yaml
+import json
+import threading
+import datetime
+import hashlib
+import os
+import imp
+from netaddr import IPNetwork, IPAddress, all_matching_cidrs
+# RADclass is imported only when needed (it is not required in test mode), to allow an easier installation
+from jsonschema import validate as js_v, exceptions as js_e
+import host_thread as ht
+from vim_schema import host_new_schema, host_edit_schema, tenant_new_schema, \
+    tenant_edit_schema, \
+    flavor_new_schema, flavor_update_schema, \
+    image_new_schema, image_update_schema, \
+    server_new_schema, server_action_schema, network_new_schema, network_update_schema, \
+    port_new_schema, port_update_schema, openflow_controller_schema, of_port_map_new_schema
+import ovim
+import logging
+
+global my
+global url_base
+global config_dic
+global RADclass_module
+RADclass=None  #RADclass module is charged only if not in test mode
+
+url_base="/openvim"
+
# HTTP status codes used throughout this module, kept as named constants so the
# route handlers abort/return with readable names instead of bare numbers.
HTTP_Bad_Request =          400
HTTP_Unauthorized =         401 
HTTP_Not_Found =            404 
HTTP_Forbidden =            403
HTTP_Method_Not_Allowed =   405 
HTTP_Not_Acceptable =       406
HTTP_Request_Timeout =      408
HTTP_Conflict =             409
HTTP_Service_Unavailable =  503 
HTTP_Internal_Server_Error= 500 
+
def md5(fname):
    """Return the hexadecimal MD5 digest of the file at *fname*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as stream:
        chunk = stream.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = stream.read(4096)
    return digest.hexdigest()
+
+def md5_string(fname):
+    hash_md5 = hashlib.md5()
+    hash_md5.update(fname)
+    return hash_md5.hexdigest()
+
def check_extended(extended, allow_net_attach=False):
    '''Makes an extra checking of extended input that cannot be done using jsonschema
    Attributes:
        allow_net_attach:  for allowing or not the uuid field at interfaces
        that are allowed for instance, but not for flavors
    Return: (<0, error_text) if error; (0,None) if not error '''
    if "numas" not in extended:
        return 0, None
    id_s = []           # all core/thread identifiers gathered across the numa
    numaid = 0
    for numa in extended["numas"]:
        # only one of cores / threads / paired-threads may be used per numa
        nb_formats = 0
        if "cores" in numa:
            nb_formats += 1
            if "cores-id" in numa:
                if len(numa["cores-id"]) != numa["cores"]:
                    return -HTTP_Bad_Request, "different number of cores-id (%d) than cores (%d) at numa %d" % (len(numa["cores-id"]), numa["cores"], numaid)
                id_s.extend(numa["cores-id"])
        if "threads" in numa:
            nb_formats += 1
            if "threads-id" in numa:
                if len(numa["threads-id"]) != numa["threads"]:
                    return -HTTP_Bad_Request, "different number of threads-id (%d) than threads (%d) at numa %d" % (len(numa["threads-id"]), numa["threads"], numaid)
                id_s.extend(numa["threads-id"])
        if "paired-threads" in numa:
            nb_formats += 1
            if "paired-threads-id" in numa:
                if len(numa["paired-threads-id"]) != numa["paired-threads"]:
                    return -HTTP_Bad_Request, "different number of paired-threads-id (%d) than paired-threads (%d) at numa %d" % (len(numa["paired-threads-id"]), numa["paired-threads"], numaid)
                for pair in numa["paired-threads-id"]:
                    if len(pair) != 2:
                        return -HTTP_Bad_Request, "paired-threads-id must contain a list of two elements list at numa %d" % (numaid)
                    id_s.extend(pair)
        if nb_formats > 1:
            return -HTTP_Service_Unavailable, "only one of cores, threads,  paired-threads are allowed in this version at numa %d" % numaid
        # check interfaces: unique names, unique vpcis, mac only for non-dedicated
        if "interfaces" in numa:
            ifaceid = 0
            names = []
            vpcis = []
            for interface in numa["interfaces"]:
                if "uuid" in interface and not allow_net_attach:
                    return -HTTP_Bad_Request, "uuid field is not allowed at numa %d interface %s position %d" % (numaid, interface.get("name",""), ifaceid )
                # 'dedicated' is read with .get(): a plain [] access raised KeyError
                # when the optional key was missing
                if "mac_address" in interface and interface.get("dedicated") == "yes":
                    return -HTTP_Bad_Request, "mac_address can not be set for dedicated (passthrough) at numa %d, interface %s position %d" % (numaid, interface.get("name",""), ifaceid )
                if "name" in interface:
                    if interface["name"] in names:
                        return -HTTP_Bad_Request, "name repeated at numa %d, interface %s position %d" % (numaid, interface.get("name",""), ifaceid )
                    names.append(interface["name"])
                if "vpci" in interface:
                    if interface["vpci"] in vpcis:
                        return -HTTP_Bad_Request, "vpci %s repeated at numa %d, interface %s position %d" % (interface["vpci"], numaid, interface.get("name",""), ifaceid )
                    vpcis.append(interface["vpci"])
                ifaceid += 1
        numaid += 1
    if numaid > 1:
        return -HTTP_Service_Unavailable, "only one numa can be defined in this version "
    # core/thread identifiers must form the contiguous range 0..len-1
    for a in range(0, len(id_s)):
        if a not in id_s:
            return -HTTP_Bad_Request, "core/thread identifiers must start at 0 and gaps are not allowed. Missing id number %d" % a

    return 0, None
+
#
# dictionaries that change from HTTP API to database naming
#
# Keys are the names used by the north API; values are the column names used by
# the database layer. change_keys_http2db() applies them in either direction.
http2db_id={'id':'uuid'}
http2db_host={'id':'uuid'}
http2db_tenant={'id':'uuid'}
http2db_flavor={'id':'uuid','imageRef':'image_id'}
http2db_image={'id':'uuid', 'created':'created_at', 'updated':'modified_at', 'public': 'public'}
http2db_server={'id':'uuid','hostId':'host_id','flavorRef':'flavor_id','imageRef':'image_id','created':'created_at'}
http2db_network={'id':'uuid','provider:vlan':'vlan', 'provider:physical': 'provider'}
http2db_ofc = {'id': 'uuid'}
http2db_port={'id':'uuid', 'network_id':'net_id', 'mac_address':'mac', 'device_owner':'type','device_id':'instance_id','binding:switch_port':'switch_port','binding:vlan':'vlan', 'bandwidth':'Mbps'}
+
def remove_extra_items(data, schema):
    """Recursively delete from *data* every dictionary key that is not declared
    in the jsonschema *schema*. Modifies *data* in place.

    Return None when nothing was removed, the single removed item when only one
    was removed, or a list of removed items otherwise.
    """
    removed = []
    if type(data) in (tuple, list):
        for element in data:
            dropped = remove_extra_items(element, schema['items'])
            if dropped is not None:
                removed.append(dropped)
    elif type(data) is dict:
        for key in list(data.keys()):
            if 'properties' not in schema or key not in schema['properties'].keys():
                del data[key]
                removed.append(key)
            else:
                dropped = remove_extra_items(data[key], schema['properties'][key])
                if dropped is not None:
                    removed.append({key: dropped})
    if not removed:
        return None
    if len(removed) == 1:
        return removed[0]
    return removed
+                
def delete_nulls(var):
    """Recursively remove None-valued keys from dictionaries inside *var*
    (entering nested dicts, lists and tuples). Containers left empty after the
    pruning are removed from their parent dict as well.

    Return True when *var* itself ended up empty (so the caller can prune it),
    False otherwise.
    """
    if type(var) is dict:
        for key in list(var.keys()):
            value = var[key]
            if value is None:
                del var[key]
            elif type(value) is dict or type(value) is list or type(value) is tuple:
                if delete_nulls(value):
                    del var[key]
        if not var:
            return True
    elif type(var) is list or type(var) is tuple:
        for item in var:
            if type(item) is dict:
                delete_nulls(item)
        if not var:
            return True
    return False
+
+
class httpserver(threading.Thread):
    """North-bound HTTP API server (bottle based), run in its own thread.

    Two instances are normally created, one with regular and one with
    administrative permissions (the 'admin' flag). Each instance registers
    itself in the global config_dic['http_threads'] dictionary so the
    module-level bottle route handlers can locate the thread serving them.
    """
    def __init__(self, ovim, name="http", host='localhost', port=8080, admin=False, config_=None):
        '''
        Creates a new thread to attend the http connections
        Attributes:
            ovim: ovim library instance; its 'db' attribute is the database connection
            name: name of this thread (made unique if already registered)
            host: ip or name where to listen
            port: port where to listen
            admin: if this has privileges of administrator or not 
            config_: must be provided for the first thread only. It is a global
                dictionary where this instance registers itself
        '''
        global url_base
        global config_dic
        
        #initialization
        if config_ is not None:
            config_dic = config_
        if 'http_threads' not in config_dic:
            config_dic['http_threads'] = {}
        threading.Thread.__init__(self)
        self.host = host
        self.port = port  
        self.db = ovim.db  #TODO OVIM remove
        self.ovim = ovim
        self.admin = admin
        # avoid clashing with an already-registered thread by appending a counter
        if name in config_dic:
            print "httpserver Warning!!! Onether thread with the same name", name
            n=0
            while name+str(n) in config_dic:
                n +=1
            name +=str(n)
        self.name = name
        self.url_preffix = 'http://' + self.host + ':' + str(self.port) + url_base
        config_dic['http_threads'][name] = self

        #Ensure that when the main program exits the thread will also exit
        self.daemon = True      
        self.setDaemon(True)
        self.logger = logging.getLogger("openvim.http")
         
    def run(self):
        # bottle serves the single global default app, so every @bottle.route
        # handler declared in this module is attended by this thread
        bottle.run(host=self.host, port=self.port, debug=True) #quiet=True
           
    def gethost(self, host_id):
        """Return the formatted representation of host *host_id*, aborting the
        request with the proper HTTP error when not found or on db failure."""
        result, content = self.db.get_host(host_id)
        if result < 0:
            print "httpserver.gethost error %d %s" % (result, content)
            bottle.abort(-result, content)
        elif result==0:
            print "httpserver.gethost host '%s' not found" % host_id
            bottle.abort(HTTP_Not_Found, content)
        else:
            data={'host' : content}
            convert_boolean(content, ('admin_state_up',) )
            change_keys_http2db(content, http2db_host, reverse=True)
            print data['host']
            return format_out(data)
+
@bottle.route(url_base + '/', method='GET')
def http_get():
    """Root endpoint: simple liveness answer for the north API."""
    print('')
    return 'works'  # TODO: put links or redirection to /openvim???
+
+#
+# Util funcions
+#
+
def change_keys_http2db(data, http_db, reverse=False):
    """Rename the keys of *data* in place following the *http_db* mapping.

    This allows translating the http interface names into database names.
    Attributes:
        data: a dictionary, or a list/tuple of dictionaries, to be renamed
        http_db: mapping {http_name: database_name}
        reverse: by default http names become database names; when True the
            mapping is applied in the opposite direction
    Return: None; *data* is modified in place
    """
    if type(data) is tuple or type(data) is list:
        for element in data:
            change_keys_http2db(element, http_db, reverse)
    elif type(data) is dict or type(data) is bottle.FormsDict:
        for http_name, db_name in http_db.items():
            old_key, new_key = (db_name, http_name) if reverse else (http_name, db_name)
            if old_key in data:
                data[new_key] = data.pop(old_key)
+
+
+
def format_out(data):
    """Serialize *data* according to the request Accept header: YAML when
    'application/yaml' is accepted, JSON (the default) otherwise."""
    accept = bottle.request.headers.get('Accept')
    if 'application/yaml' in accept:
        bottle.response.content_type='application/yaml'
        return yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) #, canonical=True, default_style='"'
    # by default json
    bottle.response.content_type='application/json'
    return json.dumps(data, indent=4) + "\n"
+
def format_in(schema):
    """Parse the request body (json or yaml, chosen from Content-Type), validate
    it against the jsonschema *schema* and return it as a python structure.

    On any parsing or validation error the request is aborted with
    HTTP_Bad_Request / HTTP_Not_Acceptable (bottle.abort raises an HTTPError,
    so the function does not return in those cases).
    """
    try:
        error_text = "Invalid header format "
        format_type = bottle.request.headers.get('Content-Type', 'application/json')
        if 'application/json' in format_type:
            error_text = "Invalid json format "
            #Use the json decoder instead of bottle decoder because it informs about the location of error formats with a ValueError exception
            client_data = json.load(bottle.request.body)
            #client_data = bottle.request.json()
        elif 'application/yaml' in format_type:
            error_text = "Invalid yaml format "
            client_data = yaml.load(bottle.request.body)
        elif format_type == 'application/xml':
            bottle.abort(501, "Content-Type: application/xml not supported yet.")
        else:
            print "HTTP HEADERS: " + str(bottle.request.headers.items())
            bottle.abort(HTTP_Not_Acceptable, 'Content-Type ' + str(format_type) + ' not supported.')
            return
        #if client_data == None:
        #    bottle.abort(HTTP_Bad_Request, "Content error, empty")
        #    return
        #check needed_items

        #print "HTTP input data: ", str(client_data)
        error_text = "Invalid content "
        js_v(client_data, schema)

        return client_data
    except (ValueError, yaml.YAMLError) as exc:
        error_text += str(exc)
        print error_text 
        bottle.abort(HTTP_Bad_Request, error_text)
    except js_e.ValidationError as exc:
        # build a readable path to the offending element, e.g. " at 'numas:0:cores'"
        print "HTTP validate_in error, jsonschema exception ", exc.message, "at", exc.path
        print "  CONTENT: " + str(bottle.request.body.readlines())
        error_pos = ""
        if len(exc.path)>0: error_pos=" at '" +  ":".join(map(str, exc.path)) + "'"
        bottle.abort(HTTP_Bad_Request, error_text + error_pos+": "+exc.message)
    #except:
    #    bottle.abort(HTTP_Bad_Request, "Content error: Failed to parse Content-Type",  error_pos)
    #    raise
+
def filter_query_string(qs, http2db, allowed):
    """Validate and translate the query string *qs*, rejecting any token not in
    *allowed* (this also prevents SQL injection through the query string).

    Attributes:
        qs: bottle.FormsDict to process. None or empty is considered valid
        http2db: mapping from http API naming (keys) to database naming (values)
        allowed: permitted token names, in API http naming
    Return: the tuple (select, where, limit) in database naming, where
        select: fields to retrieve, from 'field=token' entries (defaults to *allowed*)
        where: {key: value} filters taken from 'token=value' entries ("null" -> None)
        limit: value of the 'limit' entry, 100 by default
    Aborts the request (bottle.abort) on any non-permitted token.
    """
    where = {}
    limit = 100
    select = []
    if type(qs) is not bottle.FormsDict:
        print('!!!!!!!!!!!!!!invalid query string not a dictionary')
        # bottle.abort(HTTP_Internal_Server_Error, "call programmer")
    else:
        for token in qs:
            if token == 'field':
                select += qs.getall(token)
                for field_name in select:
                    if field_name not in allowed:
                        bottle.abort(HTTP_Bad_Request, "Invalid query string at 'field=" + field_name + "'")
            elif token == 'limit':
                try:
                    limit = int(qs[token])
                except:
                    bottle.abort(HTTP_Bad_Request, "Invalid query string at 'limit=" + qs[token] + "'")
            else:
                if token not in allowed:
                    bottle.abort(HTTP_Bad_Request, "Invalid query string at '" + token + "=" + qs[token] + "'")
                where[token] = qs[token] if qs[token] != "null" else None
    if not select:
        select += allowed
    # translate the http API naming into the database naming
    select = [http2db.get(item, item) for item in select]
    change_keys_http2db(where, http2db)
    # print "filter_query_string", select,where,limit

    return select, where, limit
+
def convert_bandwidth(data, reverse=False):
    '''Check the field bandwidth recursively and when found, it removes units and convert to number
    It assumes that bandwidth is well formed
    Attributes:
        'data': dictionary bottle.FormsDict variable to be checked. None or empty is considered valid
        'reverse': by default convert form str to int (Mbps), if True it convert from number to units
    Return:
        None
    '''
    if type(data) is dict:
        for k in data.keys():
            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
                convert_bandwidth(data[k], reverse)
        if "bandwidth" in data:
            try:
                value = str(data["bandwidth"])
                if not reverse:
                    # "<number> [G|k]bps" string -> integer amount of Mbps
                    pos = value.find("bps")
                    if pos > 0:
                        if value[pos-1] == "G":
                            data["bandwidth"] = int(data["bandwidth"][:pos-1]) * 1000
                        elif value[pos-1] == "k":
                            # integer division: '/' yields a float on python3
                            data["bandwidth"] = int(data["bandwidth"][:pos-1]) // 1000
                        else:
                            data["bandwidth"] = int(data["bandwidth"][:pos-1])
                else:
                    # integer amount of Mbps -> "<number> Gbps" or "<number> Mbps"
                    value = int(data["bandwidth"])
                    if value % 1000 == 0:
                        # integer division: '/' yields e.g. "2.0 Gbps" on python3
                        data["bandwidth"] = str(value // 1000) + " Gbps"
                    else:
                        data["bandwidth"] = str(value) + " Mbps"
            except (ValueError, TypeError):
                # was a bare 'except:', which also masked programming errors
                print("convert_bandwidth exception for type %s data %s"
                      % (type(data["bandwidth"]), data["bandwidth"]))
                return
    if type(data) is tuple or type(data) is list:
        for k in data:
            if type(k) is dict or type(k) is tuple or type(k) is list:
                convert_bandwidth(k, reverse)
+
def convert_boolean(data, items): #TODO OVIM delete
    """Recursively walk *data* and, for every key listed in *items*, turn the
    string values "true"/"false" into real booleans. *data* is modified in
    place; other values are left untouched.
    """
    if type(data) is dict:
        for key in data.keys():
            if type(data[key]) in (dict, tuple, list):
                convert_boolean(data[key], items)
            if key in items and type(data[key]) is str:
                if data[key] == "false":
                    data[key] = False
                elif data[key] == "true":
                    data[key] = True
    if type(data) in (tuple, list):
        for element in data:
            if type(element) in (dict, tuple, list):
                convert_boolean(element, items)
+
def convert_datetime2str(var):
    """Recursively replace every datetime.datetime value found inside *var*
    (entering dicts, lists and tuples) by its '%Y-%m-%dT%H:%M:%S' string form.
    Modifies *var* in place; returns True when a processed dict is empty.
    """
    if type(var) is dict:
        for key, value in var.items():
            if type(value) is datetime.datetime:
                var[key] = value.strftime('%Y-%m-%dT%H:%M:%S')
            elif type(value) in (dict, list, tuple):
                convert_datetime2str(value)
        if not var:
            return True
    elif type(var) in (list, tuple):
        for value in var:
            convert_datetime2str(value)
+
def check_valid_tenant(my, tenant_id):
    """Check that *tenant_id* exists, or is the wildcard 'any' (admin only).
    Return (0, None) when valid, or (http_error_code, error_text) otherwise."""
    if tenant_id == 'any':
        if not my.admin:
            return HTTP_Unauthorized, "Needed admin privileges"
        return 0, None
    result, _ = my.db.get_table(FROM='tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id})
    if result <= 0:
        return HTTP_Not_Found, "tenant '%s' not found" % tenant_id
    return 0, None
+
def is_url(url):
    '''
    Check if string value is a well-formed url
    :param url: string url
    :return: True if is a valid url, False if is not well-formed
    '''
    # The previous implementation returned the ParseResult object itself,
    # which is always truthy, so every string was accepted as a valid url.
    parsed_url = urlparse.urlparse(url)
    return bool(parsed_url.scheme and parsed_url.netloc)
+
+
@bottle.error(400)
@bottle.error(401) 
@bottle.error(404) 
@bottle.error(403)
@bottle.error(405) 
@bottle.error(406)
@bottle.error(408)
@bottle.error(409)
@bottle.error(503) 
@bottle.error(500)
def error400(error):
    """Common bottle error handler: wrap the error into the standard envelope
    and serialize it with the requested format (json/yaml)."""
    body = {"code": error.status_code, "type": error.status, "description": error.body}
    return format_out({"error": body})
+
@bottle.hook('after_request')
def enable_cors():
    """Attach the permissive CORS header to every response."""
    #TODO: Alf: Is it needed??
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
+
+#
+# HOSTS
+#
+
@bottle.route(url_base + '/hosts', method='GET')
def http_get_hosts():
    """GET /openvim/hosts: list the hosts honouring the query-string filters."""
    data = get_hosts()
    return format_out(data)
+
+
def get_hosts():
    """Return {'hosts': [...]} with the hosts matching the query-string filters
    of the current request, with database keys translated back to the http API
    naming and a 'links' entry added per host."""
    select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_host,
                                                  ('id', 'name', 'description', 'status', 'admin_state_up', 'ip_name'))
    
    myself = config_dic['http_threads'][ threading.current_thread().name ]
    result, content = myself.db.get_table(FROM='hosts', SELECT=select_, WHERE=where_, LIMIT=limit_)
    if result < 0:
        print "http_get_hosts Error", content
        bottle.abort(-result, content)
    else:
        convert_boolean(content, ('admin_state_up',) )
        change_keys_http2db(content, http2db_host, reverse=True)
        # add a bookmark link pointing to the detail URL of each host
        for row in content:
            row['links'] = ( {'href': myself.url_preffix + '/hosts/' + str(row['id']), 'rel': 'bookmark'}, )
        data={'hosts' : content}
        return data
+
@bottle.route(url_base + '/hosts/<host_id>', method='GET')
def http_get_host_id(host_id):
    """GET /openvim/hosts/<host_id>: detail of a single host."""
    server_thread = config_dic['http_threads'][threading.current_thread().name]
    return server_thread.gethost(host_id)
+
+@bottle.route(url_base + '/hosts', method='POST')
+def http_post_hosts():
+    '''insert a host into the database. All resources are got and inserted'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check permissions
+    if not my.admin:
+        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
+    
+    #parse input data
+    http_content = format_in( host_new_schema )
+    r = remove_extra_items(http_content, host_new_schema)
+    if r is not None: print "http_post_host_id: Warning: remove extra items ", r
+    change_keys_http2db(http_content['host'], http2db_host)
+
+    host = http_content['host']
+    warning_text=""
+    if 'host-data' in http_content:
+        host.update(http_content['host-data'])
+        ip_name=http_content['host-data']['ip_name']
+        user=http_content['host-data']['user']
+        password=http_content['host-data'].get('password', None)
+    else:
+        ip_name=host['ip_name']
+        user=host['user']
+        password=host.get('password', None)
+        if not RADclass_module:
+            try:
+                RADclass_module = imp.find_module("RADclass")
+            except (IOError, ImportError) as e:
+                raise ImportError("Cannot import RADclass.py Openvim not properly installed" +str(e))
+
+        #fill rad info
+        rad = RADclass_module.RADclass()
+        (return_status, code) = rad.obtain_RAD(user, password, ip_name)
+        
+        #return 
+        if not return_status:
+            print 'http_post_hosts ERROR obtaining RAD', code
+            bottle.abort(HTTP_Bad_Request, code)
+            return
+        warning_text=code
+        rad_structure = yaml.load(rad.to_text())
+        print 'rad_structure\n---------------------'
+        print json.dumps(rad_structure, indent=4)
+        print '---------------------'
+        #return
+        WHERE_={"family":rad_structure['processor']['family'], 'manufacturer':rad_structure['processor']['manufacturer'], 'version':rad_structure['processor']['version']} 
+        result, content = my.db.get_table(FROM='host_ranking', 
+                    SELECT=('ranking',),
+                    WHERE=WHERE_)
+        if result > 0:
+            host['ranking'] = content[0]['ranking']
+        else:
+            #error_text= "Host " + str(WHERE_)+ " not found in ranking table. Not valid for VIM management"
+            #bottle.abort(HTTP_Bad_Request, error_text)
+            #return
+            warning_text += "Host " + str(WHERE_)+ " not found in ranking table. Assuming lowest value 100\n"
+            host['ranking'] = 100 #TODO: as not used in this version, set the lowest value
+    
+        features = rad_structure['processor'].get('features', ())
+        host['features'] = ",".join(features)
+        host['numas'] = [] 
+        
+        for node in (rad_structure['resource topology']['nodes'] or {}).itervalues():
+            interfaces= []
+            cores = []
+            eligible_cores=[]
+            count = 0
+            for core in node['cpu']['eligible_cores']:
+                eligible_cores.extend(core)
+            for core in node['cpu']['cores']:
+                for thread_id in core:
+                    c={'core_id': count, 'thread_id': thread_id}
+                    if thread_id not in eligible_cores: c['status'] = 'noteligible'
+                    cores.append(c)
+                count = count+1 
+
+            if 'nics' in node:    
+                for port_k, port_v in node['nics']['nic 0']['ports'].iteritems():
+                    if port_v['virtual']:
+                        continue
+                    else:
+                        sriovs = []
+                        for port_k2, port_v2 in node['nics']['nic 0']['ports'].iteritems():
+                            if port_v2['virtual'] and port_v2['PF_pci_id']==port_k:
+                                sriovs.append({'pci':port_k2, 'mac':port_v2['mac'], 'source_name':port_v2['source_name']})
+                        if len(sriovs)>0:
+                            #sort sriov according to pci and rename them to the vf number
+                            new_sriovs = sorted(sriovs, key=lambda k: k['pci'])
+                            index=0 
+                            for sriov in new_sriovs:
+                                sriov['source_name'] = index
+                                index += 1
+                            interfaces.append  ({'pci':str(port_k), 'Mbps': port_v['speed']/1000000, 'sriovs': new_sriovs, 'mac':port_v['mac'], 'source_name':port_v['source_name']})
+            memory=node['memory']['node_size'] / (1024*1024*1024)
+            #memory=get_next_2pow(node['memory']['hugepage_nr'])
+            host['numas'].append( {'numa_socket': node['id'], 'hugepages': node['memory']['hugepage_nr'], 'memory':memory, 'interfaces': interfaces, 'cores': cores } )
+    print json.dumps(host, indent=4)
+    #return
+    #
+    #insert in data base
+    result, content = my.db.new_host(host)
+    if result >= 0:
+        if content['admin_state_up']:
+            #create thread
+            host_test_mode = True if config_dic['mode']=='test' or config_dic['mode']=="OF only" else False
+            host_develop_mode = True if config_dic['mode']=='development' else False
+            host_develop_bridge_iface = config_dic.get('development_bridge', None)
+            thread = ht.host_thread(name=host.get('name',ip_name), user=user, host=ip_name, db=config_dic['db'], db_lock=config_dic['db_lock'], 
+                test=host_test_mode, image_path=config_dic['image_path'],
+                version=config_dic['version'], host_id=content['uuid'],
+                develop_mode=host_develop_mode, develop_bridge_iface=host_develop_bridge_iface   )
+            thread.start()
+            config_dic['host_threads'][ content['uuid'] ] = thread
+
+            if config_dic['network_type'] == 'ovs':
+                # create bridge
+                create_dhcp_ovs_bridge()
+                config_dic['host_threads'][content['uuid']].insert_task("new-ovsbridge")
+                # check if more host exist
+                create_vxlan_mesh(content['uuid'])
+
+        #return host data
+        change_keys_http2db(content, http2db_host, reverse=True)
+        if len(warning_text)>0:
+            content["warning"]= warning_text
+        data={'host' : content}
+        return format_out(data)
+    else:
+        bottle.abort(HTTP_Bad_Request, content)
+        return
+
+
def delete_dhcp_ovs_bridge(vlan, net_uuid):
    """
    Delete bridges and port created during dhcp launching at openvim controller
    :param vlan: net vlan id
    :param net_uuid: network identifier
    :return:
    """
    dhcp_conf_path = config_dic['ovs_controller_file_path']

    server_thread = config_dic['http_threads'][threading.current_thread().name]
    controller = server_thread.ovim.get_dhcp_controller()

    controller.delete_dhcp_port(vlan, net_uuid)
    controller.delete_dhcp_server(vlan, net_uuid, dhcp_conf_path)
+
+
def create_dhcp_ovs_bridge():
    """
    Initialize the OVS bridge that hosts the dhcp server at the openvim controller
    :return:
    """
    server_thread = config_dic['http_threads'][threading.current_thread().name]
    controller = server_thread.ovim.get_dhcp_controller()
    controller.create_ovs_bridge()
+
+
def set_mac_dhcp(vm_ip, vlan, first_ip, last_ip, cidr, mac):
    """
    Register a MAC to IP reservation in the dnsmasq dhcp server attached to the
    net identified by the vlan id, across the openvim computes.
    :param vm_ip: IP address assigned to a VM
    :param vlan: Segmentation id
    :param first_ip: First dhcp range ip
    :param last_ip: Last dhcp range ip
    :param cidr: net cidr
    :param mac: VM vnic mac to be matched with the IP received
    """
    if not vm_ip:
        return
    net_tools = IPNetwork(cidr)
    cidr_prefix_len = net_tools.prefixlen
    dhcp_netmask = str(net_tools.netmask)
    dhcp_conf_path = config_dic['ovs_controller_file_path']

    # the VM ip must fall inside the dhcp range, otherwise no fixed ip is set
    dhcp_range = [first_ip + '/' + str(cidr_prefix_len)]
    if not all_matching_cidrs(vm_ip, dhcp_range):
        vm_ip = None

    server_thread = config_dic['http_threads'][threading.current_thread().name]
    controller = server_thread.ovim.get_dhcp_controller()

    controller.set_mac_dhcp_server(vm_ip, mac, vlan, dhcp_netmask, dhcp_conf_path)
+
+
def delete_mac_dhcp(vm_ip, vlan, mac):
    """
    Delete from the dhcp conf file the ip assigned to a specific MAC address
    :param vm_ip: IP address assigned to a VM
    :param vlan: Segmentation id
    :param mac: VM vnic mac to be matched with the IP received
    :return:
    """
    dhcp_conf_path = config_dic['ovs_controller_file_path']

    server_thread = config_dic['http_threads'][threading.current_thread().name]
    controller = server_thread.ovim.get_dhcp_controller()

    controller.delete_mac_dhcp_server(vm_ip, mac, vlan, dhcp_conf_path)
+
+
def create_vxlan_mesh(host_id):
    """
    Create the vxlan mesh across the openvim controller and all the computes.
    :param host_id: identifier of the host that triggered the mesh update
    :return:
    """
    dhcp_iface_name = get_vxlan_interface("dhcp")
    hosts_data = get_hosts()
    computes = hosts_data['hosts']
    if len(computes) > 0:
        server_thread = config_dic['http_threads'][threading.current_thread().name]
        controller = server_thread.ovim.get_dhcp_controller()

        # vxlan tunnels between the openvim controller and every compute
        for compute in computes:
            compute_iface = get_vxlan_interface(compute['id'][:8])
            config_dic['host_threads'][compute['id']].insert_task("new-vxlan", dhcp_iface_name, controller.host)
            controller.create_ovs_vxlan_tunnel(compute_iface, compute['ip_name'])

        # vxlan tunnels between every pair of distinct computes
        for owner in computes:
            for compute in computes:
                if owner['id'] == compute['id']:
                    continue
                owner_iface = get_vxlan_interface(owner['id'][:8])
                controller.create_ovs_vxlan_tunnel(owner_iface, owner['ip_name'])
                config_dic['host_threads'][compute['id']].insert_task("new-vxlan",
                                                                      owner_iface,
                                                                      owner['ip_name'])
+
+
def delete_vxlan_mesh(host_id):
    """
    Create a task for remove a specific compute of the vlxan mesh
    :param host_id: host id to be deleted.
    """
    existing_hosts = get_hosts()
    computes_available = existing_hosts['hosts']
    #
    vxlan_interface_name = get_vxlan_interface(host_id[:8])

    http_controller = config_dic['http_threads'][threading.current_thread().name]
    dhcp_host = http_controller.ovim.get_dhcp_controller()

    dhcp_host.delete_ovs_vxlan_tunnel(vxlan_interface_name)
    # remove bridge from openvim controller if no more computes exist
    # NOTE(review): existing_hosts is a dict ({'hosts': [...]}), so len() here is always
    # truthy and the bridge is always deleted; the comment above suggests the intent was
    # to test the computes list instead — confirm before changing
    if len(existing_hosts):
        dhcp_host.delete_ovs_bridge()
    # Remove vxlan mesh
    for compute in computes_available:
        if host_id == compute['id']:
            pass
        else:
            # drop the controller-side tunnel and ask each remaining compute to remove
            # its endpoint towards the deleted host
            dhcp_host.delete_ovs_vxlan_tunnel(vxlan_interface_name)
            config_dic['host_threads'][compute['id']].insert_task("del-vxlan", vxlan_interface_name)
+
+
def get_vxlan_interface(local_uuid):
    """
    Generate a vxlan interface name from a host id.
    :param local_uuid: host id
    :return: 'vxlan-' followed by the first 8 characters of the id
    """
    return 'vxlan-%s' % local_uuid[:8]
+
+
+@bottle.route(url_base + '/hosts/<host_id>', method='PUT')
+def http_put_host_id(host_id):
+    '''modify a host into the database. All resources are got and inserted'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check permissions
+    if not my.admin:
+        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
+    
+    #parse input data
+    http_content = format_in( host_edit_schema )
+    r = remove_extra_items(http_content, host_edit_schema)
+    if r is not None: print "http_post_host_id: Warning: remove extra items ", r
+    change_keys_http2db(http_content['host'], http2db_host)
+
+    #insert in data base
+    result, content = my.db.edit_host(host_id, http_content['host'])
+    if result >= 0:
+        convert_boolean(content, ('admin_state_up',) )
+        change_keys_http2db(content, http2db_host, reverse=True)
+        data={'host' : content}
+
+        if config_dic['network_type'] == 'ovs':
+            delete_vxlan_mesh(host_id)
+            config_dic['host_threads'][host_id].insert_task("del-ovsbridge")
+
+        #reload thread
+        config_dic['host_threads'][host_id].name = content.get('name',content['ip_name'])
+        config_dic['host_threads'][host_id].user = content['user']
+        config_dic['host_threads'][host_id].host = content['ip_name']
+        config_dic['host_threads'][host_id].insert_task("reload")
+
+        if config_dic['network_type'] == 'ovs':
+            # create mesh with new host data
+            config_dic['host_threads'][host_id].insert_task("new-ovsbridge")
+            create_vxlan_mesh(host_id)
+
+        #print data
+        return format_out(data)
+    else:
+        bottle.abort(HTTP_Bad_Request, content)
+        return
+
+
+
@bottle.route(url_base + '/hosts/<host_id>', method='DELETE')
def http_delete_host_id(host_id):
    '''delete a host from the database, tearing down its OVS mesh and stopping its thread (admin only)'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check permissions
    if not my.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
    result, content = my.db.delete_row('hosts', host_id)
    if result == 0:
        bottle.abort(HTTP_Not_Found, content)
    elif result > 0:
        # for OVS networking, remove this host from the vxlan mesh first
        if config_dic['network_type'] == 'ovs':
            delete_vxlan_mesh(host_id)
        # terminate thread
        if host_id in config_dic['host_threads']:
            if config_dic['network_type'] == 'ovs':
                config_dic['host_threads'][host_id].insert_task("del-ovsbridge")
            config_dic['host_threads'][host_id].insert_task("exit")
        #return data
        data={'result' : content}
        return format_out(data)
    else:
        # negative result is a DB error code; map it to an HTTP status
        print "http_delete_host_id error",result, content
        bottle.abort(-result, content)
        return
+#
+# TENANTS
+#
+
+
@bottle.route(url_base + '/tenants', method='GET')
def http_get_tenants():
    """
    Retrieve the tenant list from the DB, honouring query-string filters.
    :return:
    """
    my = config_dic['http_threads'][threading.current_thread().name]

    try:
        allowed_fields = ('id', 'name', 'description', 'enabled')
        select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_tenant, allowed_fields)
        tenant_list = my.ovim.get_tenants(select_, where_)
        delete_nulls(tenant_list)
        change_keys_http2db(tenant_list, http2db_tenant, reverse=True)
        return format_out({'tenants': tenant_list})
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
+
+
@bottle.route(url_base + '/tenants/<tenant_id>', method='GET')
def http_get_tenant_id(tenant_id):
    """
    Fetch a single tenant row by its id.
    :param tenant_id: tenant id
    :return:
    """
    my = config_dic['http_threads'][threading.current_thread().name]

    try:
        tenant_data = my.ovim.show_tenant_id(tenant_id)
        delete_nulls(tenant_data)
        change_keys_http2db(tenant_data, http2db_tenant, reverse=True)
        return format_out({'tenant': tenant_data})
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
+
+
@bottle.route(url_base + '/tenants', method='POST')
def http_post_tenants():
    """
    Create a tenant in the database and return the stored row.
    :return:
    """
    my = config_dic['http_threads'][threading.current_thread().name]

    try:
        http_content = format_in(tenant_new_schema)
        extra = remove_extra_items(http_content, tenant_new_schema)
        if extra is not None:
            my.logger.error("http_post_tenants: Warning: remove extra items " + str(extra), exc_info=True)
        # insert in data base, then read the row back so defaults are included
        # (note: 'new_tentant' is the ovim API spelling)
        new_id = my.ovim.new_tentant(http_content['tenant'])
        tenant = my.ovim.show_tenant_id(new_id)
        change_keys_http2db(tenant, http2db_tenant, reverse=True)
        delete_nulls(tenant)
        return format_out({'tenant': tenant})
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
+
+    
@bottle.route(url_base + '/tenants/<tenant_id>', method='PUT')
def http_put_tenant_id(tenant_id):
    """
    Update a tenant into DB.
    :param tenant_id: tenant id
    :return:
    """

    my = config_dic['http_threads'][threading.current_thread().name]
    try:
        # parse input data
        http_content = format_in(tenant_edit_schema)
        r = remove_extra_items(http_content, tenant_edit_schema)
        if r is not None:
            # consistency fix: sibling tenant handlers log warnings via my.logger, not print
            my.logger.error("http_put_tenant_id: Warning: remove extra items " + str(r), exc_info=True)
        change_keys_http2db(http_content['tenant'], http2db_tenant)
        # update in data base and return the refreshed row
        my.ovim.edit_tenant(tenant_id, http_content['tenant'])
        tenant = my.ovim.show_tenant_id(tenant_id)
        change_keys_http2db(tenant, http2db_tenant, reverse=True)
        data = {'tenant': tenant}
        return format_out(data)
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
+
+
@bottle.route(url_base + '/tenants/<tenant_id>', method='DELETE')
def http_delete_tenant_id(tenant_id):
    """
    Remove a tenant from the database.
    :param tenant_id: tenant id
    :return:
    """
    my = config_dic['http_threads'][threading.current_thread().name]

    try:
        # (note: 'delete_tentant' is the ovim API spelling)
        deleted = my.ovim.delete_tentant(tenant_id)
        return format_out({'result': deleted})
    except ovim.ovimException as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        my.logger.error(str(e), exc_info=True)
        bottle.abort(HTTP_Bad_Request, str(e))
+#
+# FLAVORS
+#
+
+
@bottle.route(url_base + '/<tenant_id>/flavors', method='GET')
def http_get_flavors(tenant_id):
    '''list the flavors of a tenant; tenant_id "any" lists flavors across all tenants'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #obtain data
    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_flavor,
            ('id','name','description','public') )
    if tenant_id=='any':
        from_  ='flavors'
    else:
        # restrict to flavors attached to this tenant via the tenants_flavors join table
        from_  ='tenants_flavors inner join flavors on tenants_flavors.flavor_id=flavors.uuid'
        where_['tenant_id'] = tenant_id
    result, content = my.db.get_table(FROM=from_, SELECT=select_, WHERE=where_, LIMIT=limit_)
    if result < 0:
        print "http_get_flavors Error", content
        bottle.abort(-result, content)
    else:
        change_keys_http2db(content, http2db_flavor, reverse=True)
        for row in content:
            # add a REST-style bookmark link per flavor
            row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'flavors', str(row['id']) ) ), 'rel':'bookmark' } ]
        data={'flavors' : content}
        return format_out(data)
+
@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='GET')
def http_get_flavor_id(tenant_id, flavor_id):
    '''show one flavor of a tenant; also reused by POST/PUT handlers to return the stored row'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #obtain data
    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_flavor,
            ('id','name','description','ram', 'vcpus', 'extended', 'disk', 'public') )
    if tenant_id=='any':
        from_  ='flavors'
    else:
        from_  ='tenants_flavors as tf inner join flavors as f on tf.flavor_id=f.uuid'
        where_['tenant_id'] = tenant_id
    where_['uuid'] = flavor_id
    result, content = my.db.get_table(SELECT=select_, FROM=from_, WHERE=where_, LIMIT=limit_)

    if result < 0:
        print "http_get_flavor_id error %d %s" % (result, content)
        bottle.abort(-result, content)
    elif result==0:
        print "http_get_flavors_id flavor '%s' not found" % str(flavor_id)
        bottle.abort(HTTP_Not_Found, 'flavor %s not found' % flavor_id)
    else:
        change_keys_http2db(content, http2db_flavor, reverse=True)
        # 'extended' is stored as a JSON string column; decode it for the response
        if 'extended' in content[0] and content[0]['extended'] is not None:
            extended = json.loads(content[0]['extended'])
            if 'devices' in extended: 
                change_keys_http2db(extended['devices'], http2db_flavor, reverse=True)
            content[0]['extended']=extended
        convert_bandwidth(content[0], reverse=True)
        content[0]['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'flavors', str(content[0]['id']) ) ), 'rel':'bookmark' } ]
        data={'flavor' : content[0]}
        #data['tenants_links'] = dict([('tenant', row['id']) for row in content])
        return format_out(data)
+
+
+@bottle.route(url_base + '/<tenant_id>/flavors', method='POST')
+def http_post_flavors(tenant_id):
+    '''insert a flavor into the database, and attach to tenant.'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check valid tenant_id
+    result,content = check_valid_tenant(my, tenant_id)
+    if result != 0:
+        bottle.abort(result, content)
+    http_content = format_in( flavor_new_schema )
+    r = remove_extra_items(http_content, flavor_new_schema)
+    if r is not None: print "http_post_flavors: Warning: remove extra items ", r
+    change_keys_http2db(http_content['flavor'], http2db_flavor)
+    extended_dict = http_content['flavor'].pop('extended', None)
+    if extended_dict is not None: 
+        result, content = check_extended(extended_dict)
+        if result<0:
+            print "http_post_flavors wrong input extended error %d %s" % (result, content)
+            bottle.abort(-result, content)
+            return
+        convert_bandwidth(extended_dict)
+        if 'devices' in extended_dict: change_keys_http2db(extended_dict['devices'], http2db_flavor)
+        http_content['flavor']['extended'] = json.dumps(extended_dict)
+    #insert in data base
+    result, content = my.db.new_flavor(http_content['flavor'], tenant_id)
+    if result >= 0:
+        return http_get_flavor_id(tenant_id, content)
+    else:
+        print "http_psot_flavors error %d %s" % (result, content)
+        bottle.abort(-result, content)
+        return
+    
+@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='DELETE')
+def http_delete_flavor_id(tenant_id, flavor_id):
+    '''Deletes the flavor_id of a tenant. IT removes from tenants_flavors table.'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check valid tenant_id
+    result,content = check_valid_tenant(my, tenant_id)
+    if result != 0:
+        bottle.abort(result, content)
+        return
+    result, content = my.db.delete_image_flavor('flavor', flavor_id, tenant_id)
+    if result == 0:
+        bottle.abort(HTTP_Not_Found, content)
+    elif result >0:
+        data={'result' : content}
+        return format_out(data)
+    else:
+        print "http_delete_flavor_id error",result, content
+        bottle.abort(-result, content)
+        return
+
@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>/<action>', method='POST')
def http_attach_detach_flavors(tenant_id, flavor_id, action):
    '''attach/detach an existing flavor in this tenant. That is insert/remove at tenants_flavors table.'''
    #TODO alf:  not tested at all!!!
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    if tenant_id=='any':
        bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command")
    #check valid action
    if action!='attach' and action != 'detach':
        bottle.abort(HTTP_Method_Not_Allowed, "actions can be attach or detach")
        return

    #Ensure that flavor exist 
    # right join: the flavor row comes back even when no tenant attachment exists yet
    from_  ='tenants_flavors as tf right join flavors as f on tf.flavor_id=f.uuid'
    where_={'uuid': flavor_id}
    result, content = my.db.get_table(SELECT=('public','tenant_id'), FROM=from_, WHERE=where_)
    if result==0:
        if action=='attach':
            text_error="Flavor '%s' not found" % flavor_id
        else:
            text_error="Flavor '%s' not found for tenant '%s'" % (flavor_id, tenant_id)
        bottle.abort(HTTP_Not_Found, text_error)
        return
    elif result>0:
        flavor=content[0]
        if action=='attach':
            # tenant_id not NULL means the flavor is already attached somewhere
            if flavor['tenant_id']!=None:
                bottle.abort(HTTP_Conflict, "Flavor '%s' already attached to tenant '%s'" % (flavor_id, tenant_id))
            if flavor['public']=='no' and not my.admin:
                #allow only attaching public flavors
                bottle.abort(HTTP_Unauthorized, "Needed admin rights to attach a private flavor")
                return
            #insert in data base
            result, content = my.db.new_row('tenants_flavors', {'flavor_id':flavor_id, 'tenant_id': tenant_id})
            if result >= 0:
                return http_get_flavor_id(tenant_id, flavor_id)
        else: #detach
            if flavor['tenant_id']==None:
                bottle.abort(HTTP_Not_Found, "Flavor '%s' not attached to tenant '%s'" % (flavor_id, tenant_id))
            result, content = my.db.delete_row_by_dict(FROM='tenants_flavors', WHERE={'flavor_id':flavor_id, 'tenant_id':tenant_id})
            if result>=0:
                if flavor['public']=='no':
                    #try to delete the flavor completely to avoid orphan flavors, IGNORE error
                    my.db.delete_row_by_dict(FROM='flavors', WHERE={'uuid':flavor_id})
                data={'result' : "flavor detached"}
                return format_out(data)
    
    #if get here is because an error
    print "http_attach_detach_flavors error %d %s" % (result, content)
    bottle.abort(-result, content)
    return
+
+@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='PUT')
+def http_put_flavor_id(tenant_id, flavor_id):
+    '''update a flavor_id into the database.'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check valid tenant_id
+    result,content = check_valid_tenant(my, tenant_id)
+    if result != 0:
+        bottle.abort(result, content)
+    #parse input data
+    http_content = format_in( flavor_update_schema )
+    r = remove_extra_items(http_content, flavor_update_schema)
+    if r is not None: print "http_put_flavor_id: Warning: remove extra items ", r
+    change_keys_http2db(http_content['flavor'], http2db_flavor)
+    extended_dict = http_content['flavor'].pop('extended', None)
+    if extended_dict is not None: 
+        result, content = check_extended(extended_dict)
+        if result<0:
+            print "http_put_flavor_id wrong input extended error %d %s" % (result, content)
+            bottle.abort(-result, content)
+            return
+        convert_bandwidth(extended_dict)
+        if 'devices' in extended_dict: change_keys_http2db(extended_dict['devices'], http2db_flavor)
+        http_content['flavor']['extended'] = json.dumps(extended_dict)
+    #Ensure that flavor exist 
+    where_={'uuid': flavor_id}
+    if tenant_id=='any':
+        from_  ='flavors'
+    else:
+        from_  ='tenants_flavors as ti inner join flavors as i on ti.flavor_id=i.uuid'
+        where_['tenant_id'] = tenant_id
+    result, content = my.db.get_table(SELECT=('public',), FROM=from_, WHERE=where_)
+    if result==0:
+        text_error="Flavor '%s' not found" % flavor_id
+        if tenant_id!='any':
+            text_error +=" for tenant '%s'" % flavor_id
+        bottle.abort(HTTP_Not_Found, text_error)
+        return
+    elif result>0:
+        if content[0]['public']=='yes' and not my.admin:
+            #allow only modifications over private flavors
+            bottle.abort(HTTP_Unauthorized, "Needed admin rights to edit a public flavor")
+            return
+        #insert in data base
+        result, content = my.db.update_rows('flavors', http_content['flavor'], {'uuid': flavor_id})
+
+    if result < 0:
+        print "http_put_flavor_id error %d %s" % (result, content)
+        bottle.abort(-result, content)
+        return
+    else:
+        return http_get_flavor_id(tenant_id, flavor_id)
+
+
+
+#
+# IMAGES
+#
+
@bottle.route(url_base + '/<tenant_id>/images', method='GET')
def http_get_images(tenant_id):
    '''list images visible to a tenant (its own plus public ones); tenant_id "any" lists all images'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #obtain data
    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_image,
            ('id','name','checksum','description','path','public') )
    if tenant_id=='any':
        from_  ='images'
        where_or_ = None
    else:
        # right join + OR clause: pick images attached to this tenant OR marked public
        from_  ='tenants_images right join images on tenants_images.image_id=images.uuid'
        where_or_ = {'tenant_id': tenant_id, 'public': 'yes'}
    result, content = my.db.get_table(SELECT=select_, DISTINCT=True, FROM=from_, WHERE=where_, WHERE_OR=where_or_, WHERE_AND_OR="AND", LIMIT=limit_)
    if result < 0:
        print "http_get_images Error", content
        bottle.abort(-result, content)
    else:
        change_keys_http2db(content, http2db_image, reverse=True)
        #for row in content: row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'images', str(row['id']) ) ), 'rel':'bookmark' } ]
        data={'images' : content}
        return format_out(data)
+
@bottle.route(url_base + '/<tenant_id>/images/<image_id>', method='GET')
def http_get_image_id(tenant_id, image_id):
    '''show one image of a tenant; also reused by POST/PUT handlers to return the stored row'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #obtain data
    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_image,
            ('id','name','checksum','description','progress', 'status','path', 'created', 'updated','public') )
    if tenant_id=='any':
        from_  ='images'
        where_or_ = None
    else:
        # right join + OR clause: the image is visible if attached to this tenant or public
        from_  ='tenants_images as ti right join images as i on ti.image_id=i.uuid'
        where_or_ = {'tenant_id': tenant_id, 'public': "yes"}
    where_['uuid'] = image_id
    result, content = my.db.get_table(SELECT=select_, DISTINCT=True, FROM=from_, WHERE=where_, WHERE_OR=where_or_, WHERE_AND_OR="AND", LIMIT=limit_)

    if result < 0:
        print "http_get_images error %d %s" % (result, content)
        bottle.abort(-result, content)
    elif result==0:
        print "http_get_images image '%s' not found" % str(image_id)
        bottle.abort(HTTP_Not_Found, 'image %s not found' % image_id)
    else:
        convert_datetime2str(content)
        change_keys_http2db(content, http2db_image, reverse=True)
        # 'metadata' is stored as a JSON string column; decode it for the response
        if 'metadata' in content[0] and content[0]['metadata'] is not None:
            metadata = json.loads(content[0]['metadata'])
            content[0]['metadata']=metadata
        content[0]['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'images', str(content[0]['id']) ) ), 'rel':'bookmark' } ]
        data={'image' : content[0]}
        #data['tenants_links'] = dict([('tenant', row['id']) for row in content])
        return format_out(data)
+
@bottle.route(url_base + '/<tenant_id>/images', method='POST')
def http_post_images(tenant_id):
    '''insert a image into the database, and attach to tenant.'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    http_content = format_in(image_new_schema)
    r = remove_extra_items(http_content, image_new_schema)
    if r is not None: print "http_post_images: Warning: remove extra items ", r
    change_keys_http2db(http_content['image'], http2db_image)
    # 'metadata' is stored as a JSON string column
    metadata_dict = http_content['image'].pop('metadata', None)
    if metadata_dict is not None: 
        http_content['image']['metadata'] = json.dumps(metadata_dict)
    #calculate checksum
    try:
        image_file = http_content['image'].get('path',None)
        parsed_url = urlparse.urlparse(image_file)
        if parsed_url.scheme == "" and parsed_url.netloc == "":
            # The path is a local file
            if os.path.exists(image_file):
                http_content['image']['checksum'] = md5(image_file)
        else:
            # The path is a URL. Code should be added to download the image and calculate the checksum
            #http_content['image']['checksum'] = md5(downloaded_image)
            pass
        # Finally, only if we are in test mode and checksum has not been calculated, we calculate it from the path
        host_test_mode = True if config_dic['mode']=='test' or config_dic['mode']=="OF only" else False
        if host_test_mode:
            # test mode: hash the path string itself so a checksum is always present
            if 'checksum' not in http_content['image']:
                http_content['image']['checksum'] = md5_string(image_file)
        else:
            # At this point, if the path is a local file and no chechsum has been obtained yet, an error is sent back.
            # If it is a URL, no error is sent. Checksum will be an empty string
            if parsed_url.scheme == "" and parsed_url.netloc == "" and 'checksum' not in http_content['image']:
                content = "Image file not found"
                print "http_post_images error: %d %s" % (HTTP_Bad_Request, content)
                bottle.abort(HTTP_Bad_Request, content)
    except Exception as e:
        print "ERROR. Unexpected exception: %s" % (str(e))
        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
    #insert in data base
    result, content = my.db.new_image(http_content['image'], tenant_id)
    if result >= 0:
        return http_get_image_id(tenant_id, content)
    else:
        print "http_post_images error %d %s" % (result, content)
        bottle.abort(-result, content)
        return
+    
+@bottle.route(url_base + '/<tenant_id>/images/<image_id>', method='DELETE')
+def http_delete_image_id(tenant_id, image_id):
+    '''Deletes the image_id of a tenant. IT removes from tenants_images table.'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check valid tenant_id
+    result,content = check_valid_tenant(my, tenant_id)
+    if result != 0:
+        bottle.abort(result, content)
+    result, content = my.db.delete_image_flavor('image', image_id, tenant_id)
+    if result == 0:
+        bottle.abort(HTTP_Not_Found, content)
+    elif result >0:
+        data={'result' : content}
+        return format_out(data)
+    else:
+        print "http_delete_image_id error",result, content
+        bottle.abort(-result, content)
+        return
+
@bottle.route(url_base + '/<tenant_id>/images/<image_id>/<action>', method='POST')
def http_attach_detach_images(tenant_id, image_id, action):
    '''attach/detach an existing image in this tenant. That is insert/remove at tenants_images table.'''
    #TODO alf:  not tested at all!!!
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    if tenant_id=='any':
        bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command")
    #check valid action
    if action!='attach' and action != 'detach':
        bottle.abort(HTTP_Method_Not_Allowed, "actions can be attach or detach")
        return

    #Ensure that image exist 
    # right join: the image row comes back even when no tenant attachment exists yet
    from_  ='tenants_images as ti right join images as i on ti.image_id=i.uuid'
    where_={'uuid': image_id}
    result, content = my.db.get_table(SELECT=('public','tenant_id'), FROM=from_, WHERE=where_)
    if result==0:
        if action=='attach':
            text_error="Image '%s' not found" % image_id
        else:
            text_error="Image '%s' not found for tenant '%s'" % (image_id, tenant_id)
        bottle.abort(HTTP_Not_Found, text_error)
        return
    elif result>0:
        image=content[0]
        if action=='attach':
            # tenant_id not NULL means the image is already attached somewhere
            if image['tenant_id']!=None:
                bottle.abort(HTTP_Conflict, "Image '%s' already attached to tenant '%s'" % (image_id, tenant_id))
            if image['public']=='no' and not my.admin:
                #allow only attaching public images
                bottle.abort(HTTP_Unauthorized, "Needed admin rights to attach a private image")
                return
            #insert in data base
            result, content = my.db.new_row('tenants_images', {'image_id':image_id, 'tenant_id': tenant_id})
            if result >= 0:
                return http_get_image_id(tenant_id, image_id)
        else: #detach
            if image['tenant_id']==None:
                bottle.abort(HTTP_Not_Found, "Image '%s' not attached to tenant '%s'" % (image_id, tenant_id))
            result, content = my.db.delete_row_by_dict(FROM='tenants_images', WHERE={'image_id':image_id, 'tenant_id':tenant_id})
            if result>=0:
                if image['public']=='no':
                    #try to delete the image completely to avoid orphan images, IGNORE error
                    my.db.delete_row_by_dict(FROM='images', WHERE={'uuid':image_id})
                data={'result' : "image detached"}
                return format_out(data)
    
    #if get here is because an error
    print "http_attach_detach_images error %d %s" % (result, content)
    bottle.abort(-result, content)
    return
+
+@bottle.route(url_base + '/<tenant_id>/images/<image_id>', method='PUT')
+def http_put_image_id(tenant_id, image_id):
+    '''update a image_id into the database.'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check valid tenant_id
+    result,content = check_valid_tenant(my, tenant_id)
+    if result != 0:
+        bottle.abort(result, content)
+    #parse input data
+    http_content = format_in( image_update_schema )
+    r = remove_extra_items(http_content, image_update_schema)
+    if r is not None: print "http_put_image_id: Warning: remove extra items ", r
+    change_keys_http2db(http_content['image'], http2db_image)
+    metadata_dict = http_content['image'].pop('metadata', None)
+    if metadata_dict is not None: 
+        http_content['image']['metadata'] = json.dumps(metadata_dict)
+    #Ensure that image exist 
+    where_={'uuid': image_id}
+    if tenant_id=='any':
+        from_  ='images'
+        where_or_ = None
+    else:
+        from_  ='tenants_images as ti right join images as i on ti.image_id=i.uuid'
+        where_or_ = {'tenant_id': tenant_id, 'public': 'yes'}
+    result, content = my.db.get_table(SELECT=('public',), DISTINCT=True, FROM=from_, WHERE=where_, WHERE_OR=where_or_, WHERE_AND_OR="AND")
+    if result==0:
+        text_error="Image '%s' not found" % image_id
+        if tenant_id!='any':
+            text_error +=" for tenant '%s'" % image_id
+        bottle.abort(HTTP_Not_Found, text_error)
+        return
+    elif result>0:
+        if content[0]['public']=='yes' and not my.admin:
+            #allow only modifications over private images
+            bottle.abort(HTTP_Unauthorized, "Needed admin rights to edit a public image")
+            return
+        #insert in data base
+        result, content = my.db.update_rows('images', http_content['image'], {'uuid': image_id})
+
+    if result < 0:
+        print "http_put_image_id error %d %s" % (result, content)
+        bottle.abort(-result, content)
+        return
+    else:
+        return http_get_image_id(tenant_id, image_id)
+
+
+#
+# SERVERS
+#
+
+@bottle.route(url_base + '/<tenant_id>/servers', method='GET')
+def http_get_servers(tenant_id):
+    '''List the servers (instances) of a tenant; tenant_id 'any' lists all.'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    result,content = check_valid_tenant(my, tenant_id)
+    if result != 0:
+        bottle.abort(result, content)
+        return
+    #obtain data, filtered and limited by the request query string
+    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_server,
+            ('id','name','description','hostId','imageRef','flavorRef','status', 'tenant_id') )
+    if tenant_id!='any':
+        where_['tenant_id'] = tenant_id
+    result, content = my.db.get_table(SELECT=select_, FROM='instances', WHERE=where_, LIMIT=limit_)
+    if result < 0:
+        print "http_get_servers Error", content
+        bottle.abort(-result, content)
+    else:
+        change_keys_http2db(content, http2db_server, reverse=True)
+        for row in content:
+            tenant_id = row.pop('tenant_id')
+            # add a per-server bookmark link built from the request prefix
+            row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'servers', str(row['id']) ) ), 'rel':'bookmark' } ]
+        data={'servers' : content}
+        return format_out(data)
+
+@bottle.route(url_base + '/<tenant_id>/servers/<server_id>', method='GET')
+def http_get_server_id(tenant_id, server_id):
+    '''Show the details of one server, expanding flavor/image ids into links.'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check valid tenant_id
+    result,content = check_valid_tenant(my, tenant_id)
+    if result != 0:
+        bottle.abort(result, content)
+        return
+    #obtain data
+    result, content = my.db.get_instance(server_id)
+    if result == 0:
+        bottle.abort(HTTP_Not_Found, content)
+    elif result >0:
+        #change image/flavor-id to id and link
+        convert_bandwidth(content, reverse=True)
+        convert_datetime2str(content)
+        # ram/vcpus of 0 are dropped from the answer (presumably 'not set' -- TODO confirm)
+        if content["ram"]==0 : del content["ram"]
+        if content["vcpus"]==0 : del content["vcpus"]
+        if 'flavor_id' in content:
+            if content['flavor_id'] is not None:
+                content['flavor'] = {'id':content['flavor_id'], 
+                                     'links':[{'href':  "/".join( (my.url_preffix, content['tenant_id'], 'flavors', str(content['flavor_id']) ) ), 'rel':'bookmark'}] 
+                                }
+            del content['flavor_id']
+        if 'image_id' in content:
+            if content['image_id'] is not None:
+                content['image'] = {'id':content['image_id'], 
+                                    'links':[{'href':  "/".join( (my.url_preffix, content['tenant_id'], 'images', str(content['image_id']) ) ), 'rel':'bookmark'}]
+                                }
+            del content['image_id']
+        change_keys_http2db(content, http2db_server, reverse=True)
+        if 'extended' in content:
+            if 'devices' in content['extended']: change_keys_http2db(content['extended']['devices'], http2db_server, reverse=True)
+            
+        data={'server' : content}
+        return format_out(data)
+    else:
+        bottle.abort(-result, content)
+        return
+
+@bottle.route(url_base + '/<tenant_id>/servers', method='POST')
+def http_post_server_id(tenant_id):
+    '''deploys a new server'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check valid tenant_id
+    result,content = check_valid_tenant(my, tenant_id)
+    if result != 0:
+        bottle.abort(result, content)
+        return
+    if tenant_id=='any':
+        bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command")
+    #chek input
+    http_content = format_in( server_new_schema )
+    r = remove_extra_items(http_content, server_new_schema)
+    if r is not None: print "http_post_serves: Warning: remove extra items ", r
+    change_keys_http2db(http_content['server'], http2db_server)
+    extended_dict = http_content['server'].get('extended', None)
+    if extended_dict is not None:
+        result, content = check_extended(extended_dict, True)
+        if result<0:
+            print "http_post_servers wrong input extended error %d %s" % (result, content)
+            bottle.abort(-result, content)
+            return
+        convert_bandwidth(extended_dict)
+        if 'devices' in extended_dict: change_keys_http2db(extended_dict['devices'], http2db_server)
+
+    server = http_content['server']
+    server_start = server.get('start', 'yes')
+    server['tenant_id'] = tenant_id
+    #check flavor valid and take info
+    result, content = my.db.get_table(FROM='tenants_flavors as tf join flavors as f on tf.flavor_id=f.uuid',
+             SELECT=('ram','vcpus','extended'), WHERE={'uuid':server['flavor_id'], 'tenant_id':tenant_id})
+    if result<=0:
+        bottle.abort(HTTP_Not_Found, 'flavor_id %s not found' % server['flavor_id'])
+        return
+    server['flavor']=content[0]
+    #check image valid and take info
+    result, content = my.db.get_table(FROM='tenants_images as ti right join images as i on ti.image_id=i.uuid',
+                                      SELECT=('path', 'metadata', 'image_id'),
+                                      WHERE={'uuid':server['image_id'], "status":"ACTIVE"},
+                                      WHERE_OR={'tenant_id':tenant_id, 'public': 'yes'},
+                                      WHERE_AND_OR="AND",
+                                      DISTINCT=True)
+    if result<=0:
+        bottle.abort(HTTP_Not_Found, 'image_id %s not found or not ACTIVE' % server['image_id'])
+        return
+    for image_dict in content:
+        if image_dict.get("image_id"):
+            break
+    else:
+        # insert in data base tenants_images
+        r2, c2 = my.db.new_row('tenants_images', {'image_id': server['image_id'], 'tenant_id': tenant_id})
+        if r2<=0:
+            bottle.abort(HTTP_Not_Found, 'image_id %s cannot be used. Error %s' % (server['image_id'], c2))
+            return
+    server['image']={"path": content[0]["path"], "metadata": content[0]["metadata"]}
+    if "hosts_id" in server:
+        result, content = my.db.get_table(FROM='hosts', SELECT=('uuid',), WHERE={'uuid': server['host_id']})
+        if result<=0:
+            bottle.abort(HTTP_Not_Found, 'hostId %s not found' % server['host_id'])
+            return
+    #print json.dumps(server, indent=4)
+     
+    result, content = ht.create_server(server, config_dic['db'], config_dic['db_lock'], config_dic['mode']=='normal')
+
+    if result >= 0:
+    #Insert instance to database
+        nets=[]
+        print
+        print "inserting at DB"
+        print
+        if server_start == 'no':
+            content['status'] = 'INACTIVE'
+        dhcp_nets_id = []
+        for net in http_content['server']['networks']:
+            if net['type'] == 'instance:ovs':
+                dhcp_nets_id.append(get_network_id(net['net_id']))
+
+        ports_to_free=[]
+        new_instance_result, new_instance = my.db.new_instance(content, nets, ports_to_free)
+        if new_instance_result < 0:
+            print "Error http_post_servers() :", new_instance_result, new_instance
+            bottle.abort(-new_instance_result, new_instance)
+            return
+        print
+        print "inserted at DB"
+        print
+        for port in ports_to_free:
+            r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'restore-iface',*port )
+            if r < 0:
+                print ' http_post_servers ERROR RESTORE IFACE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' +  c
+        # update nets
+        for net_id in nets:
+            try:
+                my.ovim.net_update_ofc_thread(net_id)
+            except ovim.ovimException as e:
+                my.logger.error("http_post_servers, Error updating network with id '{}', '{}'".format(net_id, str(e)))
+
+        # look for dhcp ip address
+        r2, c2 = my.db.get_table(FROM="ports", SELECT=["mac", "ip_address", "net_id"], WHERE={"instance_id": new_instance})
+        if r2 >0:
+            for iface in c2:
+                if config_dic.get("dhcp_server") and iface["net_id"] in config_dic["dhcp_nets"]:
+                    #print "dhcp insert add task"
+                    r,c = config_dic['dhcp_thread'].insert_task("add", iface["mac"])
+                    if r < 0:
+                        print ':http_post_servers ERROR UPDATING dhcp_server !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' +  c
+
+                #ensure compute contain the bridge for ovs networks:
+                server_net = get_network_id(iface['net_id'])
+                if server_net["network"].get('provider:physical', "")[:3] == 'OVS':
+                    vlan = str(server_net['network']['provider:vlan'])
+                    dhcp_enable = bool(server_net['network']['enable_dhcp'])
+                    if dhcp_enable:
+                        dhcp_firt_ip = str(server_net['network']['dhcp_first_ip'])
+                        dhcp_last_ip = str(server_net['network']['dhcp_last_ip'])
+                        dhcp_cidr = str(server_net['network']['cidr'])
+                        gateway = str(server_net['network']['gateway'])
+                        vm_dhcp_ip = c2[0]["ip_address"]
+                        config_dic['host_threads'][server['host_id']].insert_task("create-ovs-bridge-port", vlan)
+
+                        set_mac_dhcp(vm_dhcp_ip, vlan, dhcp_firt_ip, dhcp_last_ip, dhcp_cidr, c2[0]['mac'])
+                        http_controller = config_dic['http_threads'][threading.current_thread().name]
+                        http_controller.ovim.launch_dhcp_server(vlan, dhcp_firt_ip, dhcp_last_ip, dhcp_cidr, gateway)
+
+        #Start server
+        server['uuid'] = new_instance
+        server_start = server.get('start', 'yes')
+
+        if server_start != 'no':
+            server['paused'] = True if server_start == 'paused' else False
+            server['action'] = {"start":None}
+            server['status'] = "CREATING"
+            #Program task
+            r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'instance',server )
+            if r<0:
+                my.db.update_rows('instances', {'status':"ERROR"}, {'uuid':server['uuid'], 'last_error':c}, log=True)
+        
+        return http_get_server_id(tenant_id, new_instance)
+    else:
+        bottle.abort(HTTP_Bad_Request, content)
+        return
+
+def http_server_action(server_id, tenant_id, action):
+    '''Perform actions over a server as resume, reboot, terminate, ...'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    server={"uuid": server_id, "action":action}
+    where={'uuid': server_id}
+    if tenant_id!='any':
+        where['tenant_id']= tenant_id
+    result, content = my.db.get_table(FROM='instances', WHERE=where)
+    if result == 0:
+        bottle.abort(HTTP_Not_Found, "server %s not found" % server_id)
+        return
+    if result < 0:
+        print "http_post_server_action error getting data %d %s" % (result, content)
+        bottle.abort(HTTP_Internal_Server_Error, content)
+        return
+    server.update(content[0])
+    tenant_id = server["tenant_id"]
+
+    #TODO check a right content
+    new_status = None
+    if 'terminate' in action:
+        new_status='DELETING'
+    elif server['status'] == 'ERROR': #or server['status'] == 'CREATING':
+        if 'terminate' not in action and 'rebuild' not in action:
+            bottle.abort(HTTP_Method_Not_Allowed, "Server is in ERROR status, must be rebuit or deleted ")
+            return
+#     elif server['status'] == 'INACTIVE':
+#         if 'start' not in action and 'createImage' not in action:
+#             bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'INACTIVE' status is 'start'")
+#             return
+#         if 'start' in action:
+#             new_status='CREATING'
+#             server['paused']='no'
+#     elif server['status'] == 'PAUSED':
+#         if 'resume' not in action:
+#             bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'PAUSED' status is 'resume'")
+#             return
+#     elif server['status'] == 'ACTIVE':
+#         if 'pause' not in action and 'reboot'not in action and 'shutoff'not in action:
+#             bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'ACTIVE' status is 'pause','reboot' or 'shutoff'")
+#             return
+
+    if 'start' in action or 'createImage' in action or 'rebuild' in action:
+        #check image valid and take info
+        image_id = server['image_id']
+        if 'createImage' in action:
+            if 'imageRef' in action['createImage']:
+                image_id = action['createImage']['imageRef']
+            elif 'disk' in action['createImage']:
+                result, content = my.db.get_table(FROM='instance_devices',
+                    SELECT=('image_id','dev'), WHERE={'instance_id':server['uuid'],"type":"disk"})
+                if result<=0:
+                    bottle.abort(HTTP_Not_Found, 'disk not found for server')
+                    return
+                elif result>1:
+                    disk_id=None
+                    if action['createImage']['imageRef']['disk'] != None:
+                        for disk in content:
+                            if disk['dev'] == action['createImage']['imageRef']['disk']:
+                                disk_id = disk['image_id']
+                                break
+                        if disk_id == None:
+                            bottle.abort(HTTP_Not_Found, 'disk %s not found for server' % action['createImage']['imageRef']['disk'])
+                            return
+                    else:
+                        bottle.abort(HTTP_Not_Found, 'more than one disk found for server' )
+                        return
+                    image_id = disk_id    
+                else: #result==1
+                    image_id = content[0]['image_id']    
+                
+        result, content = my.db.get_table(FROM='tenants_images as ti right join images as i on ti.image_id=i.uuid',
+            SELECT=('path','metadata'), WHERE={'uuid':image_id, "status":"ACTIVE"},
+            WHERE_OR={'tenant_id':tenant_id, 'public': 'yes'}, WHERE_AND_OR="AND", DISTINCT=True)
+        if result<=0:
+            bottle.abort(HTTP_Not_Found, 'image_id %s not found or not ACTIVE' % image_id)
+            return
+        if content[0]['metadata'] is not None:
+            try:
+                metadata = json.loads(content[0]['metadata'])
+            except:
+                return -HTTP_Internal_Server_Error, "Can not decode image metadata"
+            content[0]['metadata']=metadata
+        else:
+            content[0]['metadata'] = {}
+        server['image']=content[0]
+        if 'createImage' in action:
+            action['createImage']['source'] = {'image_id': image_id, 'path': content[0]['path']}
+    if 'createImage' in action:
+        #Create an entry in Database for the new image
+        new_image={'status':'BUILD', 'progress': 0 }
+        new_image_metadata=content[0]
+        if 'metadata' in server['image'] and server['image']['metadata'] != None:
+            new_image_metadata.update(server['image']['metadata'])
+        new_image_metadata = {"use_incremental":"no"}
+        if 'metadata' in action['createImage']:
+            new_image_metadata.update(action['createImage']['metadata'])
+        new_image['metadata'] = json.dumps(new_image_metadata)
+        new_image['name'] = action['createImage'].get('name', None)
+        new_image['description'] = action['createImage'].get('description', None)
+        new_image['uuid']=my.db.new_uuid()
+        if 'path' in action['createImage']:
+            new_image['path'] = action['createImage']['path']
+        else:
+            new_image['path']="/provisional/path/" + new_image['uuid']
+        result, image_uuid = my.db.new_image(new_image, tenant_id)
+        if result<=0:
+            bottle.abort(HTTP_Bad_Request, 'Error: ' + image_uuid)
+            return
+        server['new_image'] = new_image
+
+                
+    #Program task
+    r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'instance',server )
+    if r<0:
+        print "Task queue full at host ", server['host_id']
+        bottle.abort(HTTP_Request_Timeout, c)
+    if 'createImage' in action and result >= 0:
+        return http_get_image_id(tenant_id, image_uuid)
+    
+    #Update DB only for CREATING or DELETING status
+    data={'result' : 'deleting in process'}
+    warn_text=""
+    if new_status != None and new_status == 'DELETING':
+        nets=[]
+        ports_to_free=[]
+
+        net_ovs_list = []
+        #look for dhcp ip address
+        r2, c2 = my.db.get_table(FROM="ports", SELECT=["mac", "net_id"], WHERE={"instance_id": server_id})
+        r, c = my.db.delete_instance(server_id, tenant_id, nets, ports_to_free, net_ovs_list, "requested by http")
+        for port in ports_to_free:
+            r1,c1 = config_dic['host_threads'][ server['host_id'] ].insert_task( 'restore-iface',*port )
+            if r1 < 0:
+                my.logger.error("http_post_server_action server deletion ERROR at resore-iface!!!! " + c1)
+                warn_text += "; Error iface '{}' cannot be restored '{}'".format(str(port), str(e))
+        for net_id in nets:
+            try:
+                my.ovim.net_update_ofc_thread(net_id)
+            except ovim.ovimException as e:
+                my.logger.error("http_server_action, Error updating network with id '{}', '{}'".format(net_id, str(e)))
+                warn_text += "; Error openflow rules of network '{}' cannot be restore '{}'".format(net_id, str (e))
+
+        # look for dhcp ip address
+        if r2 >0 and config_dic.get("dhcp_server"):
+            for iface in c2:
+                if iface["net_id"] in config_dic["dhcp_nets"]:
+                    r,c = config_dic['dhcp_thread'].insert_task("del", iface["mac"])
+                    #print "dhcp insert del task"
+                    if r < 0:
+                        print ':http_post_servers ERROR UPDATING dhcp_server !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' +  c 
+        # delete ovs-port and linux bridge, contains a list of tuple (net_id,vlan)
+        for net in net_ovs_list:
+            mac = str(net[3])
+            vm_ip = str(net[2])
+            vlan = str(net[1])
+            net_id = net[0]
+            delete_dhcp_ovs_bridge(vlan, net_id)
+            delete_mac_dhcp(vm_ip, vlan, mac)
+            config_dic['host_threads'][server['host_id']].insert_task('del-ovs-port', vlan, net_id)
+    return format_out(data + warn_text)
+
+
+
+@bottle.route(url_base + '/<tenant_id>/servers/<server_id>', method='DELETE')
+def http_delete_server_id(tenant_id, server_id):
+    '''delete a server'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check valid tenant_id
+    result,content = check_valid_tenant(my, tenant_id)
+    if result != 0:
+        bottle.abort(result, content)
+        return
+
+    return http_server_action(server_id, tenant_id, {"terminate":None} )
+
+    
+@bottle.route(url_base + '/<tenant_id>/servers/<server_id>/action', method='POST')
+def http_post_server_action(tenant_id, server_id):
+    '''take an action over a server'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #check valid tenant_id
+    result,content = check_valid_tenant(my, tenant_id)
+    if result != 0:
+        bottle.abort(result, content)
+        return
+    http_content = format_in( server_action_schema )
+    #r = remove_extra_items(http_content, server_action_schema)
+    #if r is not None: print "http_post_server_action: Warning: remove extra items ", r
+    
+    return http_server_action(server_id, tenant_id, http_content)
+
+#
+# NETWORKS
+#
+
+
+@bottle.route(url_base + '/networks', method='GET')
+def http_get_networks():
+    """
+    Get all networks available, filtered by the request query string.
+    :return: formatted dict {'networks': [...]}
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        # obtain data
+        select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_network,
+                                                      ('id', 'name', 'tenant_id', 'type',
+                                                       'shared', 'provider:vlan', 'status', 'last_error',
+                                                       'admin_state_up', 'provider:physical'))
+        # tenant filtering is not applied at this level; drop it from the query
+        if "tenant_id" in where_:
+            del where_["tenant_id"]
+
+        content = my.ovim.get_networks(select_, where_, limit_)
+
+        delete_nulls(content)
+        change_keys_http2db(content, http2db_network, reverse=True)
+        data = {'networks': content}
+        return format_out(data)
+
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/networks/<network_id>', method='GET')
+def http_get_network_id(network_id):
+    """
+    Get a network data by id
+    :param network_id:
+    :return:
+    """
+    data = get_network_id(network_id)
+    return format_out(data)
+
+
+def get_network_id(network_id):
+    """
+    Get network from DB by id; aborts the HTTP request on error.
+    :param network_id: network Id
+    :return: dict {'network': <db content with HTTP key names>}
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        # obtain data; extra query-string parameters act as filters
+        where_ = bottle.request.query
+        content = my.ovim.show_network(network_id, where_)
+
+        change_keys_http2db(content, http2db_network, reverse=True)
+        delete_nulls(content)
+        data = {'network': content}
+        return data
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/networks', method='POST')
+def http_post_networks():
+    """
+    Insert a network into the database and return it as stored.
+    :return: formatted {'network': {...}} of the new network
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        # parse input data
+        http_content = format_in(network_new_schema )
+        r = remove_extra_items(http_content, network_new_schema)
+        if r is not None:
+            print "http_post_networks: Warning: remove extra items ", r
+        change_keys_http2db(http_content['network'], http2db_network)
+        network = http_content['network']
+        # new_network returns the uuid; re-read it to answer with full data
+        content = my.ovim.new_network(network)
+        return format_out(get_network_id(content))
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/networks/<network_id>', method='PUT')
+def http_put_network_id(network_id):
+    """
+    Update a network_id into DB.
+    :param network_id: network id
+    :return: formatted result of ovim.edit_network
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+    
+    try:
+        # parse input data (no remove_extra_items here: schema forbids extras)
+        http_content = format_in(network_update_schema)
+        change_keys_http2db(http_content['network'], http2db_network)
+        network = http_content['network']
+        return format_out(my.ovim.edit_network(network_id, network))
+
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/networks/<network_id>', method='DELETE')
+def http_delete_network_id(network_id):
+    """
+    Delete a network_id from the database.
+    :param network_id: Network id
+    :return: formatted {'result': <deletion text>}
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        # delete from the data base
+        content = my.ovim.delete_network(network_id)
+        data = {'result': content}
+        return format_out(data)
+
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+#
+# OPENFLOW
+#
+
+
+@bottle.route(url_base + '/openflow/controller', method='GET')
+def http_get_openflow_controller():
+    """
+    Retrieve the list of openflow controllers from DB, filtered by the
+    request query string.
+    :return: formatted {'ofcs': [...]}
+    """
+    # TODO check if show a proper list
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_ofc,
+                                                      ('id', 'name', 'dpid', 'ip', 'port', 'type',
+                                                       'version', 'user', 'password'))
+
+        content = my.ovim.get_of_controllers(select_, where_)
+        delete_nulls(content)
+        change_keys_http2db(content, http2db_ofc, reverse=True)
+        data = {'ofcs': content}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/openflow/controller/<uuid>', method='GET')
+def http_get_openflow_controller_id(uuid):
+    """
+    Get one openflow controller from DB, looked up by its uuid.
+    :param uuid: openflow controller identifier
+    :return: formatted {'ofc': {...}}
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+
+        content = my.ovim.show_of_controller(uuid)
+        delete_nulls(content)
+        change_keys_http2db(content, http2db_ofc, reverse=True)
+        data = {'ofc': content}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/openflow/controller/', method='POST')
+def http_post_openflow_controller():
+    """
+    Create a new openflow controller into DB and return it as stored.
+    :return: formatted {'ofc': {...}} of the new controller
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        http_content = format_in(openflow_controller_schema)
+        of_c = http_content['ofc']
+        # insert and then re-read the entry to answer with full data
+        uuid = my.ovim.new_of_controller(of_c)
+        content = my.ovim.show_of_controller(uuid)
+        delete_nulls(content)
+        change_keys_http2db(content, http2db_ofc, reverse=True)
+        data = {'ofc': content}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/openflow/controller/<of_controller_id>', method='PUT')
+def http_put_openflow_controller_by_id(of_controller_id):
+    """
+    Update an existing openflow controller in the DB.
+    :param of_controller_id: openflow controller identifier
+    :return: formatted {'ofc': {...}} with the edited data
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        http_content = format_in(openflow_controller_schema)
+        of_c = http_content['ofc']
+
+        content = my.ovim.edit_of_controller(of_controller_id, of_c)
+        delete_nulls(content)
+        change_keys_http2db(content, http2db_ofc, reverse=True)
+        data = {'ofc': content}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/openflow/controller/<of_controller_id>', method='DELETE')
+def http_delete_openflow_controller(of_controller_id):
+    """
+    Delete an openflow controller from DB.
+    :param of_controller_id: openflow controller identifier
+    :return: formatted {'result': <deletion text>}
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        content = my.ovim.delete_of_controller(of_controller_id)
+        data = {'result': content}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/networks/<network_id>/openflow', method='GET')
+def http_get_openflow_id(network_id):
+    """
+    To obtain the list of openflow rules of a network; 'all' lists every rule.
+    :param network_id: network id or the literal 'all'
+    :return: formatted {'openflow-rules': [...]}
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    # ignore input data; None means 'no network filter'
+    if network_id == 'all':
+        network_id = None
+    try:
+        content = my.ovim.get_openflow_rules(network_id)
+        data = {'openflow-rules': content}
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+    return format_out(data)
+
+
+@bottle.route(url_base + '/networks/<network_id>/openflow', method='PUT')
+def http_put_openflow_id(network_id):
+    """
+    To make actions over the net. The action is to reinstall the openflow rules
+    network_id can be 'all'. Requires admin privileges.
+    :param network_id: network id or the literal 'all'
+    :return: formatted {'result': '<n> nets updates'}
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    if not my.admin:
+        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
+
+    # None means 'reinstall the rules of every network'
+    if network_id == 'all':
+        network_id = None
+
+    try:
+        result = my.ovim.edit_openflow_rules(network_id)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+    data = {'result': str(result) + " nets updates"}
+    return format_out(data)
+
+@bottle.route(url_base + '/networks/clear/openflow/<ofc_id>', method='DELETE')
+@bottle.route(url_base + '/networks/clear/openflow', method='DELETE')
+def http_clear_openflow_rules(ofc_id=None):
+    """
+    To make actions over the net. The action is to delete ALL openflow rules,
+    optionally restricted to one openflow controller. Requires admin privileges.
+    :param ofc_id: openflow controller id, or None for all controllers
+    :return: formatted {'result': <progress text>}
+    """
+    my = config_dic['http_threads'][ threading.current_thread().name]
+
+    if not my.admin:
+        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
+    try:
+        my.ovim.delete_openflow_rules(ofc_id)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+    data = {'result': " Clearing openflow rules in process"}
+    return format_out(data)
+
+@bottle.route(url_base + '/networks/openflow/ports/<ofc_id>', method='GET')
+@bottle.route(url_base + '/networks/openflow/ports', method='GET')
+def http_get_openflow_ports(ofc_id=None):
+    """
+    Obtain switch ports names of openflow controller.
+    :param ofc_id: openflow controller id, or None for the default one
+    :return: formatted {'ports': [...]}
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        ports = my.ovim.get_openflow_ports(ofc_id)
+        data = {'ports': ports}
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+    return format_out(data)
+#
+# PORTS
+#
+
+
+@bottle.route(url_base + '/ports', method='GET')
+def http_get_ports():
+    """Get all ports, filtered/projected/limited by the URL query string."""
+    #obtain data
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_port,
+            ('id','name','tenant_id','network_id','vpci','mac_address','device_owner','device_id',
+             'binding:switch_port','binding:vlan','bandwidth','status','admin_state_up','ip_address') )
+    try:
+        ports = my.ovim.get_ports(columns=select_, filter=where_, limit=limit_)
+        delete_nulls(ports)
+        # translate database column names back to the external (HTTP) names
+        change_keys_http2db(ports, http2db_port, reverse=True)
+        data={'ports' : ports}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+@bottle.route(url_base + '/ports/<port_id>', method='GET')
+def http_get_port_id(port_id):
+    """Get the details of the port with uuid 'port_id'."""
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    try:
+        ports = my.ovim.get_ports(filter={"uuid": port_id})
+        if not ports:
+            bottle.abort(HTTP_Not_Found, 'port %s not found' % port_id)
+            return
+        delete_nulls(ports)
+        # translate database column names back to the external (HTTP) names
+        change_keys_http2db(ports, http2db_port, reverse=True)
+        data = {'port': ports[0]}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+@bottle.route(url_base + '/ports', method='POST')
+def http_post_ports():
+    """
+    Insert an external port into the database.
+    :return: formatted dict {'port': ...} with the inserted port, or aborts with an HTTP error
+    """
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    if not my.admin:
+        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
+    #parse input data
+    http_content = format_in( port_new_schema )
+    r = remove_extra_items(http_content, port_new_schema)
+    if r is not None:
+        # log the dropped fields via the thread logger instead of a bare 'print',
+        # consistent with the other handlers (e.g. http_of_port_mapping)
+        my.logger.warning("http_post_ports: Warning: remove extra items %s", str(r))
+    change_keys_http2db(http_content['port'], http2db_port)
+    port=http_content['port']
+    try:
+        port_id = my.ovim.new_port(port)
+        # read the port back so the response reflects what was actually stored
+        ports = my.ovim.get_ports(filter={"uuid": port_id})
+        if not ports:
+            bottle.abort(HTTP_Internal_Server_Error, "port '{}' inserted but not found at database".format(port_id))
+            return
+        delete_nulls(ports)
+        change_keys_http2db(ports, http2db_port, reverse=True)
+        data = {'port': ports[0]}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+@bottle.route(url_base + '/ports/<port_id>', method='PUT')
+def http_put_port_id(port_id):
+    '''update a port_id into the database.'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    #parse input data
+    http_content = format_in( port_update_schema )
+    change_keys_http2db(http_content['port'], http2db_port)
+    port_dict=http_content['port']
+
+    # these attributes may only be changed with admin privileges
+    for k in ('vlan', 'switch_port', 'mac_address', 'tenant_id'):
+        if k in port_dict and not my.admin:
+            bottle.abort(HTTP_Unauthorized, "Needed admin privileges for changing " + k)
+            return
+    try:
+        port_id = my.ovim.edit_port(port_id, port_dict, my.admin)
+        # read the port back so the response reflects what was actually stored
+        ports = my.ovim.get_ports(filter={"uuid": port_id})
+        if not ports:
+            bottle.abort(HTTP_Internal_Server_Error, "port '{}' edited but not found at database".format(port_id))
+            return
+        delete_nulls(ports)
+        change_keys_http2db(ports, http2db_port, reverse=True)
+        data = {'port': ports[0]}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/ports/<port_id>', method='DELETE')
+def http_delete_port_id(port_id):
+    '''delete a port_id from the database.'''
+    my = config_dic['http_threads'][ threading.current_thread().name ]
+    # only administrators may delete ports
+    if not my.admin:
+        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
+        return
+    try:
+        result = my.ovim.delete_port(port_id)
+        data = {'result': result}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/openflow/mapping', method='POST')
+def http_of_port_mapping():
+    """
+    Create new compute-node to switch-port mapping entries.
+    :return: formatted dict {'of_port_mappings': ...}, or aborts with an HTTP error
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        http_content = format_in(of_port_map_new_schema)
+        r = remove_extra_items(http_content, of_port_map_new_schema)
+        if r is not None:
+            my.logger.error("http_of_port_mapping: Warning: remove extra items " + str(r), exc_info=True)
+
+        # insert in data base
+        # NOTE(review): the key 'of_port_mapings' (single 'p') must match of_port_map_new_schema — confirm
+        port_mapping = my.ovim.set_of_port_mapping(http_content['of_port_mapings'])
+        change_keys_http2db(port_mapping, http2db_id, reverse=True)
+        delete_nulls(port_mapping)
+        data = {'of_port_mappings': port_mapping}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/openflow/mapping', method='GET')
+def get_of_port_mapping():
+    """
+    Get compute-node to switch-port mappings, filtered by the URL query string.
+    :return: formatted dict {'of_port_mappings': ...}, or aborts with an HTTP error
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        select_, where_, limit_ = filter_query_string(bottle.request.query, http2db_id,
+                                                      ('id', 'ofc_id', 'region', 'compute_node', 'pci',
+                                                       'switch_dpid', 'switch_port', 'switch_mac'))
+        # query the database (read-only; no insert happens here)
+        port_mapping = my.ovim.get_of_port_mappings(select_, where_)
+        change_keys_http2db(port_mapping, http2db_id, reverse=True)
+        delete_nulls(port_mapping)
+        data = {'of_port_mappings': port_mapping}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
+
+@bottle.route(url_base + '/openflow/mapping/<region>', method='DELETE')
+def delete_of_port_mapping(region):
+    """
+    Delete all compute port mapping entries of the given region.
+    :param region: region whose port mappings are removed
+    :return: formatted dict {'result': ...}, or aborts with an HTTP error
+    """
+    my = config_dic['http_threads'][threading.current_thread().name]
+
+    try:
+        # delete from the database all mappings matching this region
+        db_filter = {'region': region}
+        result = my.ovim.clear_of_port_mapping(db_filter)
+        data = {'result': result}
+        return format_out(data)
+    except ovim.ovimException as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        my.logger.error(str(e), exc_info=True)
+        bottle.abort(HTTP_Bad_Request, str(e))
+
diff --git a/osm_openvim/onos.py b/osm_openvim/onos.py
new file mode 100644 (file)
index 0000000..338412f
--- /dev/null
@@ -0,0 +1,470 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2016, I2T Research Group (UPV/EHU)
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: alaitz.mendiola@ehu.eus or alaitz.mendiola@gmail.com
+##
+
+'''
+Implements the plugin for the Open Network Operating System (ONOS) openflow
+controller. It creates the class OF_conn to create dataplane connections
+with static rules based on packet destination MAC address
+'''
+
+__author__="Alaitz Mendiola"
+__date__ ="$22-nov-2016$"
+
+
+import json
+import requests
+import base64
+import logging
+import openflow_conn
+
+
+class OF_conn(openflow_conn.OpenflowConn):
+    """
+    ONOS connector. No MAC learning is used
+    """
+    def __init__(self, params):
+        """ Constructor.
+            Params: dictionary with the following keys:
+                of_dpid:     DPID to use for this controller ?? Does a controller have a dpid?
+                of_ip:       controller IP address
+                of_port:     controller TCP port
+                of_user:     user credentials, can be missing or None
+                of_password: password credentials
+                of_debug:    debug level for logging. Default to ERROR
+                other keys are ignored
+            Raise an exception if same parameter is missing or wrong
+        """
+
+        openflow_conn.OpenflowConn.__init__(self, params)
+
+        # check params
+        if "of_ip" not in params or params["of_ip"]==None or "of_port" not in params or params["of_port"]==None:
+            raise ValueError("IP address and port must be provided")
+        #internal variables
+        self.name = "onos"
+        self.headers = {'content-type':'application/json','accept':'application/json',}
+
+        self.auth="None"
+        self.pp2ofi={}  # From Physical Port to OpenFlow Index
+        self.ofi2pp={}  # From OpenFlow Index to Physical Port
+
+        # ONOS device id: 'of:' followed by the dpid with the colons stripped
+        self.dpid = str(params["of_dpid"])
+        self.id = 'of:'+str(self.dpid.replace(':', ''))
+        self.url = "http://%s:%s/onos/v1/" %( str(params["of_ip"]), str(params["of_port"] ) )
+
+        # TODO This may not be straightforward
+        # HTTP Basic authentication: base64("user:password") in the Authorization header
+        if "of_user" in params and params["of_user"]!=None:
+            if not params.get("of_password"):
+                of_password=""
+            else:
+                of_password=str(params["of_password"])
+            self.auth = base64.b64encode(str(params["of_user"])+":"+of_password)
+            self.headers['authorization'] = 'Basic ' + self.auth
+
+        self.logger = logging.getLogger('vim.OF.onos')
+        self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR")) )
+        self.ip_address = None
+
+    def get_of_switches(self):
+        """
+        Obtain a list of switches or DPID detected by this controller
+        :return: list where each element is a tuple pair (DPID, IP address)
+                 Raise an OpenflowconnUnexpectedResponse exception in case of failure
+        """
+        try:
+            self.headers['content-type'] = 'text/plain'
+            of_response = requests.get(self.url + "devices", headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("get_of_switches " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+            self.logger.debug("get_of_switches " + error_text)
+            info = of_response.json()
+
+            if type(info) != dict:
+                self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, not a dict. Wrong version?")
+
+            node_list = info.get('devices')
+
+            if type(node_list) is not list:
+                self.logger.error(
+                    "get_of_switches. Unexpected response, at 'devices', not found or not a list: %s",
+                    str(type(node_list)))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response, at 'devices', not found "
+                                                                   "or not a list. Wrong version?")
+
+            switch_list = []
+            for node in node_list:
+                node_id = node.get('id')
+                if node_id is None:
+                    self.logger.error("get_of_switches. Unexpected response at 'device':'id', not found: %s",
+                                      str(node))
+                    raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'device':'id', "
+                                                                       "not found . Wrong version?")
+
+                node_ip_address = node.get('annotations').get('managementAddress')
+                if node_ip_address is None:
+                    self.logger.error(
+                        "get_of_switches. Unexpected response at 'device':'managementAddress', not found: %s",
+                        str(node))
+                    raise openflow_conn.OpenflowconnUnexpectedResponse(
+                        "Unexpected response at 'device':'managementAddress', not found. Wrong version?")
+
+                # NOTE(review): int() without base=16 parses the id digits as decimal — confirm ONOS id format
+                node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)
+
+                # reformat as colon-separated byte pairs, e.g. '00:00:00:00:00:00:00:01'
+                switch_list.append(
+                    (':'.join(a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])), node_ip_address))
+            # BUG FIX: the original code did 'raise switch_list', which raises a TypeError
+            # (lists are not exceptions) that escapes both except clauses below
+            return switch_list
+
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_switches " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+        except ValueError as e:
+            # ValueError in the case that JSON can not be decoded
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_switches " + error_text)
+            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+    def obtain_port_correspondence(self):
+        """
+        Obtain the correspondence between physical and openflow port names
+        :return: dictionary with physical name as key, openflow name as value
+                 Raise an OpenflowconnUnexpectedResponse exception in case of failure
+        """
+        try:
+            self.headers['content-type'] = 'text/plain'
+            of_response = requests.get(self.url + "devices/" + self.id + "/ports", headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 200:
+                self.logger.warning("obtain_port_correspondence " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+            self.logger.debug("obtain_port_correspondence " + error_text)
+            info = of_response.json()
+
+            node_connector_list = info.get('ports')
+            if type(node_connector_list) is not list:
+                self.logger.error(
+                    "obtain_port_correspondence. Unexpected response at 'ports', not found or not a list: %s",
+                    str(node_connector_list))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'ports', not found  or not "
+                                                                   "a list. Wrong version?")
+
+            # cache the mapping in both directions, skipping the controller-internal 'local' port
+            for node_connector in node_connector_list:
+                if node_connector['port'] != "local":
+                    self.pp2ofi[str(node_connector['annotations']['portName'])] = str(node_connector['port'])
+                    self.ofi2pp[str(node_connector['port'])] = str(node_connector['annotations']['portName'])
+
+            node_ip_address = info['annotations']['managementAddress']
+            if node_ip_address is None:
+                self.logger.error(
+                    "obtain_port_correspondence. Unexpected response at 'managementAddress', not found: %s",
+                    str(self.id))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'managementAddress', "
+                                                                   "not found. Wrong version?")
+            self.ip_address = node_ip_address
+
+            # print self.name, ": obtain_port_correspondence ports:", self.pp2ofi
+            return self.pp2ofi
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("obtain_port_correspondence " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+        except ValueError as e:
+            # ValueError in the case that JSON can not be decoded
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("obtain_port_correspondence " + error_text)
+            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+    def get_of_rules(self, translate_of_ports=True):
+        """
+        Obtain the rules inserted at openflow controller
+        :param translate_of_ports: if True it translates ports from openflow index to physical switch name
+        :return: dict if ok: with the rule name as key and value is another dictionary with the following content:
+                    priority: rule priority
+                    name:         rule name (present also as the master dict key)
+                    ingress_port: match input port of the rule
+                    dst_mac:      match destination mac address of the rule, can be missing or None if not apply
+                    vlan_id:      match vlan tag of the rule, can be missing or None if not apply
+                    actions:      list of actions, composed by a pair tuples:
+                        (vlan, None/int): for stripping/setting a vlan tag
+                        (out, port):      send to this port
+                    switch:       DPID, all
+                 Raise an OpenflowconnUnexpectedResponse exception in case of failure
+        """
+
+        try:
+
+            # make sure the openflow-index -> physical-port cache is populated
+            if len(self.ofi2pp) == 0:
+                self.obtain_port_correspondence()
+
+            # get rules
+            self.headers['content-type'] = 'text/plain'
+            of_response = requests.get(self.url + "flows/" + self.id, headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+
+            # The configured page does not exist if there are no rules installed. In that case we return an empty dict
+            if of_response.status_code == 404:
+                return {}
+
+            elif of_response.status_code != 200:
+                self.logger.warning("get_of_rules " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+            self.logger.debug("get_of_rules " + error_text)
+
+            info = of_response.json()
+
+            if type(info) != dict:
+                self.logger.error("get_of_rules. Unexpected response, not a dict: %s", str(info))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected openflow response, not a dict. "
+                                                                   "Wrong version?")
+
+            flow_list = info.get('flows')
+
+            if flow_list is None:
+                return {}
+
+            if type(flow_list) is not list:
+                self.logger.error(
+                    "get_of_rules. Unexpected response at 'flows', not a list: %s",
+                    str(type(flow_list)))
+                raise openflow_conn.OpenflowconnUnexpectedResponse("Unexpected response at 'flows', not a list. "
+                                                                   "Wrong version?")
+
+            rules = dict() # Response dictionary
+
+            for flow in flow_list:
+                # every flow must carry id, selector.criteria and treatment.instructions
+                if not ('id' in flow and 'selector' in flow and 'treatment' in flow and \
+                                    'instructions' in flow['treatment'] and 'criteria' in \
+                                    flow['selector']):
+                    raise openflow_conn.OpenflowconnUnexpectedResponse("unexpected openflow response, one or more "
+                                                                       "elements are missing. Wrong version?")
+
+                rule = dict()
+                rule['switch'] = self.dpid
+                rule['priority'] = flow.get('priority')
+                rule['name'] = flow['id']
+
+                # translate the ONOS match criteria into the common rule fields
+                for criteria in flow['selector']['criteria']:
+                    if criteria['type'] == 'IN_PORT':
+                        in_port = str(criteria['port'])
+                        if in_port != "CONTROLLER":
+                            if not in_port in self.ofi2pp:
+                                raise openflow_conn.OpenflowconnUnexpectedResponse("Error: Ingress port {} is not "
+                                                                                   "in switch port list".format(in_port))
+                            if translate_of_ports:
+                                in_port = self.ofi2pp[in_port]
+                        rule['ingress_port'] = in_port
+
+                    elif criteria['type'] == 'VLAN_VID':
+                        rule['vlan_id'] = criteria['vlanId']
+
+                    elif criteria['type'] == 'ETH_DST':
+                        rule['dst_mac'] = str(criteria['mac']).lower()
+
+                actions = []
+                for instruction in flow['treatment']['instructions']:
+                    if instruction['type'] == "OUTPUT":
+                        out_port = str(instruction['port'])
+                        if out_port != "CONTROLLER":
+                            if not out_port in self.ofi2pp:
+                                raise openflow_conn.OpenflowconnUnexpectedResponse("Error: Output port {} is not in "
+                                                                                   "switch port list".format(out_port))
+
+                            if translate_of_ports:
+                                out_port = self.ofi2pp[out_port]
+
+                        actions.append( ('out', out_port) )
+
+                    # NOTE(review): this appends the string 'None', not the None object — confirm consumers expect that
+                    if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_POP":
+                        actions.append( ('vlan', 'None') )
+                    if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_ID":
+                        actions.append( ('vlan', instruction['vlanId']) )
+
+                rule['actions'] = actions
+                rules[flow['id']] = dict(rule)
+            return rules
+
+        except requests.exceptions.RequestException as e:
+            # connectivity problem reaching the controller
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_rules " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+        except ValueError as e:
+            # ValueError in the case that JSON can not be decoded
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("get_of_rules " + error_text)
+            raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+    def del_flow(self, flow_name):
+        """
+        Delete an existing rule
+        :param flow_name: name (ONOS flow id) of the rule to delete
+        :return: None. Raise an OpenflowconnUnexpectedResponse exception in case of failure
+        """
+
+        try:
+            self.headers['content-type'] = None
+            of_response = requests.delete(self.url + "flows/" + self.id + "/" + flow_name, headers=self.headers)
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+
+            # ONOS answers 204 No Content on a successful delete
+            if of_response.status_code != 204:
+                self.logger.warning("del_flow " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+            self.logger.debug("del_flow OK " + error_text)
+            return None
+
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("del_flow " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+
+    def new_flow(self, data):
+        """
+        Insert a new static rule
+        :param data: dictionary with the following content:
+                priority:     rule priority
+                name:         rule name
+                ingress_port: match input port of the rule
+                dst_mac:      match destination mac address of the rule, missing or None if not apply
+                vlan_id:      match vlan tag of the rule, missing or None if not apply
+                actions:      list of actions, composed by a pair tuples with these possibilities:
+                    ('vlan', None/int): for stripping/setting a vlan tag
+                    ('out', port):      send to this port
+        :return: None. Raise an OpenflowconnUnexpectedResponse exception in case of failure
+        """
+        try:
+
+            # make sure the physical-port -> openflow-index cache is populated
+            if len(self.pp2ofi) == 0:
+                self.obtain_port_correspondence()
+
+            # Build the dictionary with the flow rule information for ONOS
+            flow = dict()
+            #flow['id'] = data['name']
+            flow['tableId'] = 0
+            flow['priority'] = data.get('priority')
+            flow['timeout'] = 0
+            flow['isPermanent'] = "true"
+            flow['appId'] = 10 # FIXME We should create an appId for OSM
+            flow['selector'] = dict()
+            flow['selector']['criteria'] = list()
+
+            # Flow rule matching criteria
+            if not data['ingress_port'] in self.pp2ofi:
+                error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch'
+                self.logger.warning("new_flow " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+            ingress_port_criteria = dict()
+            ingress_port_criteria['type'] = "IN_PORT"
+            ingress_port_criteria['port'] = self.pp2ofi[data['ingress_port']]
+            flow['selector']['criteria'].append(ingress_port_criteria)
+
+            if 'dst_mac' in data:
+                dst_mac_criteria = dict()
+                dst_mac_criteria["type"] = "ETH_DST"
+                dst_mac_criteria["mac"] = data['dst_mac']
+                flow['selector']['criteria'].append(dst_mac_criteria)
+
+            if data.get('vlan_id'):
+                vlan_criteria = dict()
+                vlan_criteria["type"] = "VLAN_VID"
+                vlan_criteria["vlanId"] = int(data['vlan_id'])
+                flow['selector']['criteria'].append(vlan_criteria)
+
+            # Flow rule treatment
+            flow['treatment'] = dict()
+            flow['treatment']['instructions'] = list()
+            flow['treatment']['deferred'] = list()
+
+            # translate the common action tuples into ONOS treatment instructions
+            for action in data['actions']:
+                new_action = dict()
+                if  action[0] == "vlan":
+                    new_action['type'] = "L2MODIFICATION"
+                    if action[1] == None:
+                        new_action['subtype'] = "VLAN_POP"
+                    else:
+                        new_action['subtype'] = "VLAN_ID"
+                        new_action['vlanId'] = int(action[1])
+                elif action[0] == 'out':
+                    new_action['type'] = "OUTPUT"
+                    if not action[1] in self.pp2ofi:
+                        error_msj = 'Port '+ action[1] + ' is not present in the switch'
+                        raise openflow_conn.OpenflowconnUnexpectedResponse(error_msj)
+                    new_action['port'] = self.pp2ofi[action[1]]
+                else:
+                    error_msj = "Unknown item '%s' in action list" % action[0]
+                    self.logger.error("new_flow " + error_msj)
+                    raise openflow_conn.OpenflowconnUnexpectedResponse(error_msj)
+
+                flow['treatment']['instructions'].append(new_action)
+
+            self.headers['content-type'] = 'application/json'
+            path = self.url + "flows/" + self.id
+            of_response = requests.post(path, headers=self.headers, data=json.dumps(flow) )
+
+            error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text)
+            if of_response.status_code != 201:
+                self.logger.warning("new_flow " + error_text)
+                raise openflow_conn.OpenflowconnUnexpectedResponse(error_text)
+
+            # ONOS returns the new flow id in the Location header; store it back as the rule name
+            flowId = of_response.headers['location'][path.__len__() + 1:]
+
+            data['name'] = flowId
+
+            self.logger.debug("new_flow OK " + error_text)
+            return None
+
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("new_flow " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+
+    def clear_all_flows(self):
+        """
+        Delete all existing rules
+        :return: None. Raise an OpenflowconnUnexpectedResponse exception in case of failure
+        """
+        try:
+            # fetch every installed rule (the dict keys are the ONOS flow ids) and delete one by one
+            rules = self.get_of_rules(True)
+
+            for rule in rules:
+                self.del_flow(rule)
+
+            self.logger.debug("clear_all_flows OK ")
+            return None
+
+        except requests.exceptions.RequestException as e:
+            error_text = type(e).__name__ + ": " + str(e)
+            self.logger.error("clear_all_flows " + error_text)
+            raise openflow_conn.OpenflowconnConnectionException(error_text)
+
+
+
+
+
diff --git a/osm_openvim/openflow_conn.py b/osm_openvim/openflow_conn.py
new file mode 100644 (file)
index 0000000..f42f4dc
--- /dev/null
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+import logging
+import base64
+
"""
openflow_conn implements an abstract class for the openflow controller connectors,
with the definition of the methods to be implemented.
"""
+__author__ = "Alfonso Tierno, Leonardo Mirabal"
+__date__ = "$16-oct-2015 11:09:29$"
+
+
+
# HTTP status codes used as default 'http_code' by the exceptions below
HTTP_Bad_Request = 400
HTTP_Unauthorized = 401
HTTP_Not_Found = 404
HTTP_Method_Not_Allowed = 405
HTTP_Request_Timeout = 408
HTTP_Conflict = 409
HTTP_Not_Implemented = 501
HTTP_Service_Unavailable = 503
HTTP_Internal_Server_Error = 500


class OpenflowconnException(Exception):
    """Base class for every openflow connector exception."""
    def __init__(self, message, http_code=HTTP_Bad_Request):
        super(OpenflowconnException, self).__init__(message)
        self.http_code = http_code


class OpenflowconnConnectionException(OpenflowconnException):
    """Connectivity error with the VIM"""
    def __init__(self, message, http_code=HTTP_Service_Unavailable):
        super(OpenflowconnConnectionException, self).__init__(message, http_code)


class OpenflowconnUnexpectedResponse(OpenflowconnException):
    """Get an wrong response from VIM"""
    def __init__(self, message, http_code=HTTP_Internal_Server_Error):
        super(OpenflowconnUnexpectedResponse, self).__init__(message, http_code)


class OpenflowconnAuthException(OpenflowconnException):
    """Invalid credentials or authorization to perform this action over the VIM"""
    def __init__(self, message, http_code=HTTP_Unauthorized):
        super(OpenflowconnAuthException, self).__init__(message, http_code)


class OpenflowconnNotFoundException(OpenflowconnException):
    """The item is not found at VIM"""
    def __init__(self, message, http_code=HTTP_Not_Found):
        super(OpenflowconnNotFoundException, self).__init__(message, http_code)


class OpenflowconnConflictException(OpenflowconnException):
    """There is a conflict, e.g. more item found than one"""
    def __init__(self, message, http_code=HTTP_Conflict):
        super(OpenflowconnConflictException, self).__init__(message, http_code)


class OpenflowconnNotSupportedException(OpenflowconnException):
    """The request is not supported by connector"""
    def __init__(self, message, http_code=HTTP_Service_Unavailable):
        super(OpenflowconnNotSupportedException, self).__init__(message, http_code)


class OpenflowconnNotImplemented(OpenflowconnException):
    """The method is not implemented by the connected"""
    def __init__(self, message, http_code=HTTP_Not_Implemented):
        super(OpenflowconnNotImplemented, self).__init__(message, http_code)
+
+
class OpenflowConn:
    """
    Openflow controller connector abstract implementation.

    Base class for the concrete controller plugins (ODL, ONOS, floodlight...).
    Subclasses must override the methods below; on failure they raise the
    Openflowconn* exceptions defined in this module.
    """
    def __init__(self, params):
        # 'params' is the connector configuration dictionary; only "of_debug"
        # (a logging level name) is read here — subclasses read the rest
        self.name = "openflow_conector"
        self.headers = {'content-type': 'application/json', 'Accept': 'application/json'}
        self.auth = None
        self.pp2ofi = {}  # From Physical Port to OpenFlow Index
        self.ofi2pp = {}  # From OpenFlow Index to Physical Port
        # placeholder connection values; subclasses overwrite them with real ones
        self.dpid = '00:01:02:03:04:05:06:07'
        self.id = 'openflow:00:01:02:03:04:05:06:07'
        self.rules = {}
        self.url = "http://%s:%s" % ('localhost', str(8081))
        self.auth = base64.b64encode('of_user:of_password')
        self.headers['Authorization'] = 'Basic ' + self.auth
        self.logger = logging.getLogger('openflow_conn')
        self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))
        self.ip_address = None

    def get_of_switches(self):
        """
        Obtain the list of switches (DPID) detected by this controller.
        :return: a list where each element is a tuple pair (DPID, IP address)
        :raises OpenflowconnNotImplemented: always; subclasses must override
        """
        raise OpenflowconnNotImplemented("Should have implemented this")

    def obtain_port_correspondence(self):
        """
        Obtain the correspondence between physical and openflow port names.
        :return: dictionary with physical name as key, openflow name as value
        :raises OpenflowconnNotImplemented: always; subclasses must override
        """
        raise OpenflowconnNotImplemented("Should have implemented this")

    def get_of_rules(self, translate_of_ports=True):
        """
        Obtain the rules inserted at openflow controller.
        :param translate_of_ports: if True it translates ports from openflow index to physical switch name
        :return: dict with the rule name as key and value is another dictionary with the following content:
                    priority: rule priority
                    name:         rule name (present also as the master dict key)
                    ingress_port: match input port of the rule
                    dst_mac:      match destination mac address of the rule, can be missing or None if not apply
                    vlan_id:      match vlan tag of the rule, can be missing or None if not apply
                    actions:      list of actions, composed by a pair tuples:
                        (vlan, None/int): for stripping/setting a vlan tag
                        (out, port):      send to this port
                    switch:       DPID, all
        :raises OpenflowconnNotImplemented: always; subclasses must override
        """
        raise OpenflowconnNotImplemented("Should have implemented this")

    def del_flow(self, flow_name):
        """
        Delete an existing rule.
        :param flow_name: flow_name, this is the rule name
        :return: None if ok
        :raises OpenflowconnNotImplemented: always; subclasses must override
        """
        raise OpenflowconnNotImplemented("Should have implemented this")

    def new_flow(self, data):
        """
        Insert a new static rule.
        :param data: dictionary with the following content:
                priority:     rule priority
                name:         rule name
                ingress_port: match input port of the rule
                dst_mac:      match destination mac address of the rule, missing or None if not apply
                vlan_id:      match vlan tag of the rule, missing or None if not apply
                actions:      list of actions, composed by a pair tuples with these posibilities:
                    ('vlan', None/int): for stripping/setting a vlan tag
                    ('out', port):      send to this port
        :return: None if ok
        :raises OpenflowconnNotImplemented: always; subclasses must override
        """
        raise OpenflowconnNotImplemented("Should have implemented this")

    def clear_all_flows(self):
        """
        Delete all existing rules.
        :return: None if ok
        :raises OpenflowconnNotImplemented: always; subclasses must override
        """
        raise OpenflowconnNotImplemented("Should have implemented this")
+
+
class OfTestConnector(OpenflowConn):
    """
    Fake openflow connector used for testing.

    Rules are kept only in memory and no network operation is performed, so
    openvim can run without a real openflow controller.
    """

    def __init__(self, params):
        OpenflowConn.__init__(self, params)
        self.name = params.get("name", "test-ofc")
        self.dpid = params.get("dpid")
        self.rules = {}
        self.logger = logging.getLogger('vim.OF.TEST')
        self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR")))
        self.pp2ofi = {}

    def get_of_switches(self):
        """The fake controller detects no switches."""
        return ()

    def obtain_port_correspondence(self):
        """The fake controller has no port correspondence."""
        return ()

    def del_flow(self, flow_name):
        """Remove the in-memory rule 'flow_name'; raise if it does not exist."""
        if flow_name not in self.rules:
            self.logger.warning("del_flow not found")
            raise OpenflowconnUnexpectedResponse("flow {} not found".format(flow_name))
        self.logger.debug("del_flow OK")
        del self.rules[flow_name]
        return None

    def new_flow(self, data):
        """Store 'data' as an in-memory rule, indexed by its name."""
        self.rules[data["name"]] = data
        self.logger.debug("new_flow OK")
        return None

    def get_of_rules(self, translate_of_ports=True):
        """Return the dictionary of stored rules."""
        return self.rules

    def clear_all_flows(self):
        """Drop every stored rule."""
        self.logger.debug("clear_all_flows OK")
        self.rules = {}
        return None
diff --git a/osm_openvim/openflow_thread.py b/osm_openvim/openflow_thread.py
new file mode 100644 (file)
index 0000000..cd873e7
--- /dev/null
@@ -0,0 +1,598 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
'''
This thread interacts with an openflow controller to create dataplane connections
'''
+
+__author__="Pablo Montes, Alfonso Tierno"
+__date__ ="17-jul-2015"
+
+
+#import json
+import threading
+import time
+import Queue
+import requests
+import logging
+import openflow_conn
+
# Openflow controller (OFC) status values as stored at the database
OFC_STATUS_ACTIVE = 'ACTIVE'
OFC_STATUS_INACTIVE = 'INACTIVE'
OFC_STATUS_ERROR = 'ERROR'
+
class FlowBadFormat(Exception):
    """Raised when a flow with a bad format is found."""
+
def change_of2db(flow):
    """Change 'flow' dictionary from openflow format to database format, in place.

    Converts flow['actions'] from a list of pair tuples to a string:
    from [(A, B), (C, D), ...] to "A=B,C=D"
    :param flow: flow dictionary, modified in place
    :raises FlowBadFormat: if 'flow' is not a dict with an 'actions' list of pairs
    """
    if not isinstance(flow, dict) or "actions" not in flow:
        raise FlowBadFormat("Bad input parameters, expect dictionary with 'actions' as key")
    try:
        # narrow exception scope: a bare except here would also hide programming errors
        flow['actions'] = ",".join(action[0] + "=" + str(action[1]) for action in flow['actions'])
    except (TypeError, IndexError):
        raise FlowBadFormat("Unexpected format at 'actions'")
+
def change_db2of(flow):
    """Change 'flow' dictionary from database format to openflow format, in place.

    Converts flow['actions'] from a string to a list of pair tuples:
    from "A=B,C=D,..." to [(A, B), (C, D), ...]
    :param flow: flow dictionary, modified in place
    :raises FlowBadFormat: if the input is not a dict with a string 'actions',
        or the string content is malformed
    """
    if not isinstance(flow, dict) or not isinstance(flow.get("actions"), str):
        raise FlowBadFormat("Bad input parameters, expect dictionary with 'actions' as key")
    actions = []
    for action_item in flow['actions'].split(","):
        action_tuple = action_item.split("=")
        if len(action_tuple) != 2:
            raise FlowBadFormat("Expected key=value format at 'actions'")
        key = action_tuple[0].strip().lower()
        if key == "vlan":
            # "none"/"strip" means remove the vlan tag; otherwise an integer tag
            if action_tuple[1].strip().lower() in ("none", "strip"):
                actions.append(("vlan", None))
            else:
                try:
                    actions.append(("vlan", int(action_tuple[1])))
                except ValueError:
                    raise FlowBadFormat("Expected integer after vlan= at 'actions'")
        elif key == "out":
            actions.append(("out", str(action_tuple[1])))
        else:
            raise FlowBadFormat("Unexpected '%s' at 'actions'" % action_tuple[0])
    flow['actions'] = actions
+
+
+class openflow_thread(threading.Thread):
+    """
    This thread interacts with an openflow controller to create dataplane connections
+    """
    def __init__(self, of_uuid, of_connector, db, db_lock, of_test, pmp_with_same_vlan=False, debug='ERROR'):
        """
        :param of_uuid: uuid of the openflow controller at the database
        :param of_connector: instance of an OpenflowConn subclass (ODL, ONOS, test...)
        :param db: database instance used to read/write nets, ports and flows
        :param db_lock: lock protecting concurrent access to 'db'
        :param of_test: when True run in test mode, without touching the controller
        :param pmp_with_same_vlan: when True enforce vlan consistency checks on
            point-to-multipoint data networks
        :param debug: logging level name for this thread's logger
        """
        threading.Thread.__init__(self)
        self.of_uuid = of_uuid
        self.db = db
        self.pmp_with_same_vlan = pmp_with_same_vlan
        self.name = "openflow"
        self.test = of_test
        self.db_lock = db_lock
        self.OF_connector = of_connector
        self.logger = logging.getLogger('vim.OF-' + of_uuid)
        self.logger.setLevel(getattr(logging, debug))
        self.logger.name = of_connector.name + " " + self.OF_connector.dpid
        self.queueLock = threading.Lock()
        # task queue bounded to 2000 pending tasks; insert_task times out when full
        self.taskQueue = Queue.Queue(2000)
+        
+    def insert_task(self, task, *aditional):
+        try:
+            self.queueLock.acquire()
+            task = self.taskQueue.put( (task,) + aditional, timeout=5) 
+            self.queueLock.release()
+            return 1, None
+        except Queue.Full:
+            return -1, "timeout inserting a task over openflow thread " + self.name
+
    def run(self):
        """Main thread loop: pop tasks from the queue and process them.

        Supported tasks: 'update-net' (recompute the flows of a net and update
        its status at the database), 'clear-all' (remove every flow) and
        'exit' (terminate the thread). The controller status is refreshed at
        the database after every task.
        """
        self.logger.debug("Start openflow thread")
        self.set_openflow_controller_status(OFC_STATUS_ACTIVE)

        while True:
            try:
                # poll the queue while holding the lock, but sleep outside it
                self.queueLock.acquire()
                if not self.taskQueue.empty():
                    task = self.taskQueue.get()
                else:
                    task = None
                self.queueLock.release()

                if task is None:
                    time.sleep(1)
                    continue

                if task[0] == 'update-net':
                    r,c = self.update_of_flows(task[1])
                    # update database status
                    if r<0:
                        UPDATE={'status':'ERROR', 'last_error': str(c)}
                        self.logger.error("processing task 'update-net' %s: %s", str(task[1]), c)
                        self.set_openflow_controller_status(OFC_STATUS_ERROR, "Error updating net {}".format(task[1]))
                    else:
                        UPDATE={'status':'ACTIVE', 'last_error': None}
                        self.logger.debug("processing task 'update-net' %s: OK", str(task[1]))
                        self.set_openflow_controller_status(OFC_STATUS_ACTIVE)
                    self.db_lock.acquire()
                    self.db.update_rows('nets', UPDATE, WHERE={'uuid': task[1]})
                    self.db_lock.release()

                elif task[0] == 'clear-all':
                    r,c = self.clear_all_flows()
                    if r<0:
                        self.logger.error("processing task 'clear-all': %s", c)
                        self.set_openflow_controller_status(OFC_STATUS_ERROR, "Error deleting all flows")
                    else:
                        self.set_openflow_controller_status(OFC_STATUS_ACTIVE)
                        self.logger.debug("processing task 'clear-all': OK")
                elif task[0] == 'exit':
                    self.logger.debug("exit from openflow_thread")
                    self.terminate()
                    self.set_openflow_controller_status(OFC_STATUS_INACTIVE, "Ofc with thread killed")
                    return 0
                else:
                    self.logger.error("unknown task %s", str(task))
            except openflow_conn.OpenflowconnException as e:
                # controller errors are recorded at the database; the loop keeps running
                self.set_openflow_controller_status(OFC_STATUS_ERROR, str(e))
+
+    def terminate(self):
+        pass
+        # print self.name, ": exit from openflow_thread"
+
    def update_of_flows(self, net_id):
        """Recompute and reconcile the openflow rules of a network.

        Loads the net and every net bound to it from the database, computes
        the needed flows, inserts missing flows at the openflow controller
        and at the database, and removes from both the flows no longer needed.
        :param net_id: uuid of the network to update
        :return: (0, 'Success') if ok; (-1, error_text) on failure
        """
        ports=()
        self.db_lock.acquire()
        select_= ('type','admin_state_up', 'vlan', 'provider', 'bind_net','bind_type','uuid')
        result, nets = self.db.get_table(FROM='nets', SELECT=select_, WHERE={'uuid':net_id} )
        #get all the networks binding to this
        if result > 0:
            if nets[0]['bind_net']:
                bind_id = nets[0]['bind_net']
            else:
                bind_id = net_id
            #get our net and all bind_nets
            result, nets = self.db.get_table(FROM='nets', SELECT=select_,
                                                WHERE_OR={'bind_net':bind_id, 'uuid':bind_id} )

        self.db_lock.release()
        if result < 0:
            return -1, "DB error getting net: " + nets
        #elif result==0:
            #net has been deleted
        ifaces_nb = 0
        database_flows = []
        # collect the active ports of every net and the flows stored at the DB
        for net in nets:
            net_id = net["uuid"]
            if net['admin_state_up'] == 'false':
                net['ports'] = ()
            else:
                self.db_lock.acquire()
                nb_ports, net_ports = self.db.get_table(
                        FROM='ports',
                        SELECT=('switch_port','vlan','uuid','mac','type','model'),
                        WHERE={'net_id':net_id, 'admin_state_up':'true', 'status':'ACTIVE'} )
                self.db_lock.release()
                if nb_ports < 0:

                    #print self.name, ": update_of_flows() ERROR getting ports", ports
                    return -1, "DB error getting ports from net '%s': %s" % (net_id, net_ports)

                #add the binding as an external port
                if net['provider'] and net['provider'][:9]=="openflow:":
                    external_port={"type":"external","mac":None}
                    external_port['uuid'] = net_id + ".1" #fake uuid
                    if net['provider'][-5:]==":vlan":
                        external_port["vlan"] = net["vlan"]
                        external_port["switch_port"] = net['provider'][9:-5]
                    else:
                        external_port["vlan"] = None
                        external_port["switch_port"] = net['provider'][9:]
                    net_ports = net_ports + (external_port,)
                    nb_ports += 1
                net['ports'] = net_ports
                ifaces_nb += nb_ports

            # Get the name of flows that will be affected by this NET
            self.db_lock.acquire()
            result, database_net_flows = self.db.get_table(FROM='of_flows', WHERE={'net_id':net_id})
            self.db_lock.release()
            if result < 0:
                error_msg = "DB error getting flows from net '{}': {}".format(net_id, database_net_flows)
                # print self.name, ": update_of_flows() ERROR getting flows from database", database_flows
                return -1, error_msg
            database_flows += database_net_flows
        # Get the name of flows where net_id==NULL that means net deleted (At DB foreign key: On delete set null)
        self.db_lock.acquire()
        result, database_net_flows = self.db.get_table(FROM='of_flows', WHERE={'net_id':None})
        self.db_lock.release()
        if result < 0:
            error_msg = "DB error getting flows from net 'null': {}".format(database_net_flows)
            # print self.name, ": update_of_flows() ERROR getting flows from database", database_flows
            return -1, error_msg
        database_flows += database_net_flows

        # Get the existing flows at openflow controller
        try:
            of_flows = self.OF_connector.get_of_rules()
            # print self.name, ": update_of_flows() ERROR getting flows from controller", of_flows
        except openflow_conn.OpenflowconnException as e:
            # self.set_openflow_controller_status(OFC_STATUS_ERROR, "OF error {} getting flows".format(str(e)))
            return -1, "OF error {} getting flows".format(str(e))

        # sanity checks on the net type and vlan tagging consistency
        if ifaces_nb < 2:
            pass
        elif net['type'] == 'ptp':
            if ifaces_nb > 2:
                #print self.name, 'Error, network '+str(net_id)+' has been defined as ptp but it has '+\
                #                 str(ifaces_nb)+' interfaces.'
                return -1, "'ptp' type network cannot connect %d interfaces, only 2" % ifaces_nb
        elif net['type'] == 'data':
            if ifaces_nb > 2 and self.pmp_with_same_vlan:
                # check all ports are VLAN (tagged) or none
                vlan_tag = None
                # NOTE(review): 'ports' is the empty tuple initialized at the top of this
                # method, so this loop iterates nothing and the check is a no-op; it looks
                # like it should iterate the collected net['ports'] — confirm before changing
                for port in ports:
                    if port["type"]=="external":
                        if port["vlan"] != None:
                            if port["vlan"]!=net["vlan"]:
                                text="External port vlan-tag and net vlan-tag must be the same when flag 'of_controller_nets_with_same_vlan' is True"
                                #print self.name, "Error", text
                                return -1, text
                            if vlan_tag == None:
                                vlan_tag=True
                            elif vlan_tag==False:
                                text="Passthrough and external port vlan-tagged cannot be connected when flag 'of_controller_nets_with_same_vlan' is True"
                                #print self.name, "Error", text
                                return -1, text
                        else:
                            if vlan_tag == None:
                                vlan_tag=False
                            elif vlan_tag == True:
                                text="SR-IOV and external port not vlan-tagged cannot be connected when flag 'of_controller_nets_with_same_vlan' is True"
                                #print self.name, "Error", text
                                return -1, text
                    elif port["model"]=="PF" or port["model"]=="VFnotShared":
                        if vlan_tag == None:
                            vlan_tag=False
                        elif vlan_tag==True:
                            text="Passthrough and SR-IOV ports cannot be connected when flag 'of_controller_nets_with_same_vlan' is True"
                            #print self.name, "Error", text
                            return -1, text
                    elif port["model"] == "VF":
                        if vlan_tag == None:
                            vlan_tag=True
                        elif vlan_tag==False:
                            text="Passthrough and SR-IOV ports cannot be connected when flag 'of_controller_nets_with_same_vlan' is True"
                            #print self.name, "Error", text
                            return -1, text
        else:
            return -1, 'Only ptp and data networks are supported for openflow'

        # calculate new flows to be inserted
        result, new_flows = self._compute_net_flows(nets)
        if result < 0:
            return result, new_flows

        #modify database flows format and get the used names
        used_names=[]
        for flow in database_flows:
            try:
                change_db2of(flow)
            except FlowBadFormat as e:
                self.logger.error("Exception FlowBadFormat: '%s', flow: '%s'",str(e), str(flow))
                continue
            used_names.append(flow['name'])
        name_index=0
        # insert at database the new flows, change actions to human text
        for flow in new_flows:
            # 1 check if an equal flow is already present
            index = self._check_flow_already_present(flow, database_flows)
            if index>=0:
                database_flows[index]["not delete"]=True
                self.logger.debug("Skipping already present flow %s", str(flow))
                continue
            # 2 look for a non used name
            flow_name=flow["net_id"]+"."+str(name_index)
            while flow_name in used_names or flow_name in of_flows:
                name_index += 1
                flow_name=flow["net_id"]+"."+str(name_index)
            used_names.append(flow_name)
            flow['name'] = flow_name
            # 3 insert at openflow

            try:
                self.OF_connector.new_flow(flow)
            except openflow_conn.OpenflowconnException as e:
                return -1, "Error creating new flow {}".format(str(e))

            # 4 insert at database
            try:
                change_of2db(flow)
            except FlowBadFormat as e:
                # print self.name, ": Error Exception FlowBadFormat '%s'" % str(e), flow
                return -1, str(e)
            self.db_lock.acquire()
            result, content = self.db.new_row('of_flows', flow)
            self.db_lock.release()
            if result < 0:
                # print self.name, ": Error '%s' at database insertion" % content, flow
                return -1, content

        #delete not needed old flows from openflow and from DDBB,
        #check that the needed flows at DDBB are present in controller or insert them otherwise
        for flow in database_flows:
            if "not delete" in flow:
                if flow["name"] not in of_flows:
                    # not in controller, insert it
                    try:
                        self.OF_connector.new_flow(flow)
                    except openflow_conn.OpenflowconnException as e:
                        return -1, "Error creating new flow {}".format(str(e))

                continue
            # Delete flow
            if flow["name"] in of_flows:
                try:
                    self.OF_connector.del_flow(flow['name'])
                except openflow_conn.OpenflowconnException as e:
                    self.logger.error("cannot delete flow '%s' from OF: %s", flow['name'], str(e))
                    # skip deletion from database
                    continue

            # delete from database
            self.db_lock.acquire()
            result, content = self.db.delete_row_by_key('of_flows', 'id', flow['id'])
            self.db_lock.release()
            if result<0:
                self.logger.error("cannot delete flow '%s' from DB: %s", flow['name'], content )

        return 0, 'Success'
+
+    def clear_all_flows(self):
+        try:
+            if not self.test:
+                self.OF_connector.clear_all_flows()
+
+            # remove from database
+            self.db_lock.acquire()
+            self.db.delete_row_by_key('of_flows', None, None) #this will delete all lines
+            self.db_lock.release()
+            return 0, None
+        except openflow_conn.OpenflowconnException as e:
+            return -1, self.logger.error("Error deleting all flows {}", str(e))
+
+    flow_fields = ('priority', 'vlan', 'ingress_port', 'actions', 'dst_mac', 'src_mac', 'net_id')
+
+    def _check_flow_already_present(self, new_flow, flow_list):
+        '''check if the same flow is already present in the flow list
+        The flow is repeated if all the fields, apart from name, are equal
+        Return the index of matching flow, -1 if not match'''
+        index=0
+        for flow in flow_list:
+            equal=True
+            for f in self.flow_fields:
+                if flow.get(f) != new_flow.get(f):
+                    equal=False
+                    break
+            if equal:
+                return index
+            index += 1
+        return -1
+        
+    def _compute_net_flows(self, nets):
+        new_flows=[]
+        new_broadcast_flows={}
+        nb_ports = 0
+
+        # Check switch_port information is right
+        self.logger.debug("_compute_net_flows nets: %s", str(nets))
+        for net in nets:
+            for port in net['ports']:
+                nb_ports += 1
+                if not self.test and str(port['switch_port']) not in self.OF_connector.pp2ofi:
+                    error_text= "switch port name '%s' is not valid for the openflow controller" % str(port['switch_port'])
+                    # print self.name, ": ERROR " + error_text
+                    return -1, error_text
+
+        for net_src in nets:
+            net_id = net_src["uuid"]
+            for net_dst in nets:
+                vlan_net_in  = None
+                vlan_net_out = None
+                if net_src == net_dst:
+                    #intra net rules    
+                    priority = 1000
+                elif net_src['bind_net'] == net_dst['uuid']:
+                    if net_src.get('bind_type') and net_src['bind_type'][0:5] == "vlan:":
+                        vlan_net_out = int(net_src['bind_type'][5:])
+                    priority = 1100
+                elif net_dst['bind_net'] == net_src['uuid']:
+                    if net_dst.get('bind_type') and net_dst['bind_type'][0:5] == "vlan:":
+                        vlan_net_in = int(net_dst['bind_type'][5:])
+                    priority = 1100
+                else:
+                    #nets not binding
+                    continue
+                for src_port in net_src['ports']:
+                    vlan_in  = vlan_net_in
+                    if vlan_in == None  and src_port['vlan'] != None:
+                        vlan_in  = src_port['vlan']
+                    elif vlan_in != None  and src_port['vlan'] != None:
+                        #TODO this is something that we cannot do. It requires a double VLAN check
+                        #outer VLAN should be src_port['vlan'] and inner VLAN should be vlan_in
+                        continue
+
+                    # BROADCAST:
+                    broadcast_key = src_port['uuid'] + "." + str(vlan_in)
+                    if broadcast_key in new_broadcast_flows:
+                        flow_broadcast = new_broadcast_flows[broadcast_key]
+                    else:
+                        flow_broadcast = {'priority': priority,
+                            'net_id':  net_id,
+                            'dst_mac': 'ff:ff:ff:ff:ff:ff',
+                            "ingress_port": str(src_port['switch_port']),
+                            'actions': [] 
+                        }
+                        new_broadcast_flows[broadcast_key] = flow_broadcast
+                        if vlan_in is not None:
+                            flow_broadcast['vlan_id'] = str(vlan_in)
+
+                    for dst_port in net_dst['ports']:
+                        vlan_out = vlan_net_out 
+                        if vlan_out == None and dst_port['vlan'] != None:
+                            vlan_out = dst_port['vlan']
+                        elif vlan_out != None and dst_port['vlan'] != None:
+                            #TODO this is something that we cannot do. It requires a double VLAN set
+                            #outer VLAN should be dst_port['vlan'] and inner VLAN should be vlan_out
+                            continue
+                        #if src_port == dst_port:
+                        #    continue
+                        if src_port['switch_port'] == dst_port['switch_port'] and vlan_in == vlan_out:
+                            continue
+                        flow = {
+                            "priority": priority,
+                            'net_id':  net_id,
+                            "ingress_port": str(src_port['switch_port']),
+                            'actions': []
+                        }
+                        if vlan_in is not None:
+                            flow['vlan_id'] = str(vlan_in)
+                        # allow that one port have no mac
+                        if dst_port['mac'] is None or nb_ports==2:  # point to point or nets with 2 elements
+                            flow['priority'] = priority-5  # less priority
+                        else:
+                            flow['dst_mac'] = str(dst_port['mac'])
+            
+                        if vlan_out == None:
+                            if vlan_in != None:
+                                flow['actions'].append( ('vlan',None) )
+                        else:
+                            flow['actions'].append( ('vlan', vlan_out ) )
+                        flow['actions'].append( ('out', str(dst_port['switch_port'])) )
+            
+                        if self._check_flow_already_present(flow, new_flows) >= 0:
+                            self.logger.debug("Skipping repeated flow '%s'", str(flow))
+                            continue
+                        
+                        new_flows.append(flow)
+                    
+                        # BROADCAST:
+                        if nb_ports <= 2:  # point to multipoint or nets with more than 2 elements
+                            continue
+                        out = (vlan_out, str(dst_port['switch_port']))
+                        if out not in flow_broadcast['actions']:
+                            flow_broadcast['actions'].append( out )
+
+        #BROADCAST
+        for flow_broadcast in new_broadcast_flows.values():      
+            if len(flow_broadcast['actions'])==0:
+                continue #nothing to do, skip
+            flow_broadcast['actions'].sort()
+            if 'vlan_id' in flow_broadcast:
+                previous_vlan = 0  # indicates that a packet contains a vlan, and the vlan
+            else:
+                previous_vlan = None
+            final_actions=[]
+            action_number = 0
+            for action in flow_broadcast['actions']:
+                if action[0] != previous_vlan:
+                    final_actions.append( ('vlan', action[0]) )
+                    previous_vlan = action[0]
+                    if self.pmp_with_same_vlan and action_number:
+                        return -1, "Cannot interconnect different vlan tags in a network when flag 'of_controller_nets_with_same_vlan' is True."
+                    action_number += 1
+                final_actions.append( ('out', action[1]) )
+            flow_broadcast['actions'] = final_actions
+
+            if self._check_flow_already_present(flow_broadcast, new_flows) >= 0:
+                self.logger.debug("Skipping repeated flow '%s'", str(flow_broadcast))
+                continue
+            
+            new_flows.append(flow_broadcast)        
+        
+        #UNIFY openflow rules with the same input port and vlan and the same output actions
+        #These flows differ at the dst_mac; and they are unified by not filtering by dst_mac
+        #this can happen if there is only two ports. It is converted to a point to point connection
+        flow_dict={} # use as key vlan_id+ingress_port and as value the list of flows matching these values
+        for flow in new_flows:
+            key = str(flow.get("vlan_id"))+":"+flow["ingress_port"]
+            if key in flow_dict:
+                flow_dict[key].append(flow)
+            else:
+                flow_dict[key]=[ flow ]
+        new_flows2=[]
+        for flow_list in flow_dict.values():
+            convert2ptp=False
+            if len (flow_list)>=2:
+                convert2ptp=True
+                for f in flow_list:
+                    if f['actions'] != flow_list[0]['actions']:
+                        convert2ptp=False
+                        break
+            if convert2ptp: # add only one unified rule without dst_mac
+                self.logger.debug("Convert flow rules to NON mac dst_address " + str(flow_list) )
+                flow_list[0].pop('dst_mac')
+                flow_list[0]["priority"] -= 5
+                new_flows2.append(flow_list[0])
+            else:  # add all the rules
+                new_flows2 += flow_list
+        return 0, new_flows2
+
+    def set_openflow_controller_status(self, status, error_text=None):
+        """
+        Set openflow controller last operation status in DB
+        :param status: ofc status ('ACTIVE','INACTIVE','ERROR')
+        :param error_text: error text
+        :return:
+        """
+        if self.of_uuid == "Default":
+            return True
+
+        ofc = {}
+        ofc['status'] = status
+        ofc['last_error'] = error_text
+        self.db_lock.acquire()
+        result, content = self.db.update_rows('ofcs', ofc, WHERE={'uuid': self.of_uuid}, log=False)
+        self.db_lock.release()
+        if result >= 0:
+            return True
+        else:
+            return False
+
+
+
+
+
+
+
diff --git a/osm_openvim/openvimd.cfg b/osm_openvim/openvimd.cfg
new file mode 100644 (file)
index 0000000..9641938
--- /dev/null
@@ -0,0 +1,140 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+
+
+#Miscellaneous
+#Option to test openvim without the needed infrastructure, possible values are
+#    "normal"      by default, Openflow controller (OFC), switch and real host are needed
+#    "test"        Used for testing http API and database without connecting to host or to OFC
+#    "host only"   Used when neither OFC nor OF switch are provided. 
+#                  Dataplane network connection must be done manually.
+#    "OF only"     Used for testing of new openflow controllers support. No real VM deployments will be done but
+#                  OFC will be used as in real mode
+#    "development" Forces a cloud-type deployment: normal memory instead of hugepages is used,
+#                  without cpu pinning, and a bridge network is used instead of a real OFC dataplane network.
+#                  The same 'development_bridge' (see below) is used for all dataplane networks
+mode: test
+
+#Openflow controller information
+of_controller:      floodlight                   # Type of controller to be used.
+                                                 # Valid controllers are 'opendaylight', 'floodlight' or <custom>
+#of_controller_module: module                    # Only needed for <custom>.  Python module that implement
+                                                 # this controller. By default a file with the name  <custom>.py is used 
+#of_<other>:           value                     # Other parameters required by <custom> controller. Consumed by __init__
+of_user:            user credentials             # User credentials for the controller if needed
+of_password:        passwd credentials           # Password credentials for the controller if needed
+of_controller_ip:   127.0.0.1                    # IP address where the Openflow controller is listening
+of_controller_port: 7070                         # TCP port where the Openflow controller is listening (REST API server)
+of_controller_dpid: '00:01:02:03:04:05:06:07'    # Openflow Switch identifier (put here the right number)
+
+#This option is used for those openflow switch that cannot deliver one packet to several output with different vlan tags
+#When set to true, it fails when trying to attach different vlan tagged ports to the same net
+of_controller_nets_with_same_vlan: false         # (by default, true)
+
+#Server parameters
+http_host:       0.0.0.0             # IP address where openvim is listening (by default, localhost)
+http_port:       9080                # General port where openvim is listening (by default, 9080)
+http_admin_port: 9085                # Admin port where openvim is listening (when missing, no administration server is launched)
+
+#database parameters
+db_host:   localhost                   # by default localhost
+db_user:   vim                       # DB user
+db_passwd: vimpw                     # DB password
+db_name:   vim_db                    # Name of the VIM DB
+
+#host parameters
+image_path: "/opt/VNF/images"        # Folder, same for every host, where the VNF images will be copied
+
+#testing parameters (used by ./test/test_openvim.py)
+tenant_id: fc7b43b6-6bfa-11e4-84d2-5254006d6777   # Default tenant identifier for testing
+
+#VLAN ranges used for the dataplane networks (ptp, data)
+#When a network is created an unused value in this range is used
+network_vlan_range_start: 3000
+network_vlan_range_end:   4000
+
+# Overlay network implementation. Options are:
+# - ovs :   (by default) Use a vxlan mesh between computes to handle the network overlay.
+# - bridge: Use pre-populated linux bridges with L2 connectivity at compute nodes.
+network_type : ovs
+ovs_controller_ip   :   localhost                   # dhcp controller IP address; must be changed in order to use a remote controller
+ovs_controller_user :   "osm_dhcp"                  # User for the dhcp controller for OVS networks
+ovs_controller_file_path  :   "/var/lib/openvim"    # Path for dhcp daemon configuration, by default '/var/lib/openvim'
+
+
+#host bridge interfaces for networks
+# Apply only for 'network_type: bridge'
+# Indicates the bridges at compute nodes to be used for the overlay networks
+# Bridge networks need to be pre-provisioned on each host and Openvim uses those pre-provisioned bridge networks.
+# Openvim assumes that the following bridge interfaces have been created on each host, appropriately associated to a physical port.
+# The following information needs to be provided:
+#    - Name of the bridge (identical in all hosts)
+#    - VLAN tag associated to each bridge interface
+#    - The speed of the physical port in Gbps, where that bridge interface was created
+# For instance, next example assumes that 10 bridges have been created on each host
+# using vlans 2001 to 2010, associated to a 1Gbps physical port 
+#bridge_ifaces:
+#   #name:      [vlan, speed in Gbps]
+#   virbrMan1:  [2001, 1]
+#   virbrMan2:  [2002, 1]
+#   virbrMan3:  [2003, 1]
+#   virbrMan4:  [2004, 1]
+#   virbrMan5:  [2005, 1]
+#   virbrMan6:  [2006, 1]
+#   virbrMan7:  [2007, 1]
+#   virbrMan8:  [2008, 1]
+#   virbrMan9:  [2009, 1]
+#   virbrMan10: [2010, 1]
+
+#Used only when 'mode' is at development'. Indicates which 'bridge_ifaces' is used for dataplane networks
+#development_bridge: virbrMan10
+
+#DHCP SERVER PARAMETERS. 
+#In case some of the previous 'bridge_ifaces' are connected to an EXTERNAL dhcp server, provide 
+#   the server parameters to allow openvim getting the allocated IP addresses of virtual machines
+#   connected to the indicated 'bridge_ifaces' and or 'nets'. Openvim will connect to the dhcp server by ssh.
+#DHCP server must contain a shell script "get_dhcp_lease.sh" included in the path, that accepts a mac address as 
+#   parameter and return empty or the allocated IP address. See an example at the end of the file 
+#   ./openvim/dhcp_thread.py 
+#COMMENT all lines in case you do not have a DHCP server in 'normal', 'development'  or 'host only' modes.
+#   For 'test' or 'OF only' modes you can leave them uncommented, because in these modes fake IP
+#   address are generated instead of connecting with a real DHCP server.
+dhcp_server:
+   host:     host-ip-or-name  
+   #port:     22               #ssh port, by default 22
+   provider: isc-dhcp-server  #dhcp-server type
+   user:     user
+   #provide password, or key if needed
+   password: passwd           
+   #key:     ssh-access-key
+   #list of the previous bridge interfaces attached to this dhcp server
+   bridge_ifaces:   [ virbrMan1, virbrMan2 ] 
+   #list of the networks attached to this dhcp server
+   nets: [default]
+
+
+#logging parameters       # DEBUG, INFO, WARNING, ERROR, CRITICAL
+log_level:       ERROR
+log_level_db:    DEBUG
+log_level_of:    DEBUG
+
+
diff --git a/osm_openvim/ovim.py b/osm_openvim/ovim.py
new file mode 100755 (executable)
index 0000000..39ab578
--- /dev/null
@@ -0,0 +1,1384 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+This is the thread for the http server North API.
+Two threads will be launched, with normal and administrative permissions.
+'''
+
+__author__ = "Alfonso Tierno, Leonardo Mirabal"
+__date__ = "$06-Feb-2017 12:07:15$"
+__version__ = "0.5.10-r526"
+version_date = "Apr 2017"
+database_version = "0.17"      #expected database schema version
+
+import threading
+import osm_openvim.vim_db as vim_db
+import logging
+import imp
+import argparse
+from netaddr import IPNetwork
+from jsonschema import validate as js_v, exceptions as js_e
+import osm_openvim.host_thread as ht
+import osm_openvim.dhcp_thread as dt
+import osm_openvim.openflow_thread as oft
+import osm_openvim.openflow_conn as openflow_conn
+
+
# HTTP status codes, used as the 'http_code' of ovimException and by the REST layer
HTTP_Bad_Request =          400
HTTP_Unauthorized =         401
HTTP_Not_Found =            404
HTTP_Forbidden =            403
HTTP_Method_Not_Allowed =   405
HTTP_Not_Acceptable =       406
HTTP_Request_Timeout =      408
HTTP_Conflict =             409
HTTP_Service_Unavailable =  503
HTTP_Internal_Server_Error= 500
+
+
def convert_boolean(data, items):
    """Recursively convert "true"/"false" string values into booleans, in place.

    Walks 'data' (dicts/lists/tuples nested arbitrarily); every value whose key
    is listed in 'items' and equals the string "true" or "false" is replaced by
    the corresponding bool. Any other value is left untouched. None or an empty
    container is considered valid input.

    :param data: structure to fix up in place (e.g. a parsed request body)
    :param items: tuple of key names whose values must be converted
    :return: None (conversion is done in place)
    """
    if isinstance(data, dict):
        for key, value in data.items():
            # descend first so nested containers under any key are handled
            if isinstance(value, (dict, list, tuple)):
                convert_boolean(value, items)
            if key in items and isinstance(value, str):
                if value == "false":
                    data[key] = False
                elif value == "true":
                    data[key] = True
    elif isinstance(data, (list, tuple)):
        for element in data:
            if isinstance(element, (dict, list, tuple)):
                convert_boolean(element, items)
+
+
+
class ovimException(Exception):
    """Domain error that carries the HTTP status code to report to clients."""

    def __init__(self, message, http_code=HTTP_Bad_Request):
        # keep the code for the REST layer; Exception stores the message text
        self.http_code = http_code
        super(ovimException, self).__init__(message)
+
+
+class ovim():
+    running_info = {} #TODO OVIM move the info of running threads from config_dic to this static variable
+    of_module = {}
+
    def __init__(self, configuration):
        """
        Store the configuration and open the main database connection.
        :param configuration: dict with the openvimd configuration (db access
               credentials, mode, vlan ranges, logging options, ...)
        """
        self.config = configuration
        # logger name may be overridden by the configuration; default "openvim"
        self.logger_name = configuration.get("logger_name", "openvim")
        self.logger = logging.getLogger(self.logger_name)
        self.db = None
        self.db = self._create_database_connection()
        # the following are initialized later, by start_service()
        self.db_lock = None
        self.db_of = None
        self.of_test_mode = False
+
+    def _create_database_connection(self):
+        db = vim_db.vim_db((self.config["network_vlan_range_start"], self.config["network_vlan_range_end"]),
+                           self.logger_name + ".db", self.config.get('log_level_db'))
+        if db.connect(self.config['db_host'], self.config['db_user'], self.config['db_passwd'],
+                      self.config['db_name']) == -1:
+            # self.logger.error("Cannot connect to database %s at %s@%s", self.config['db_name'], self.config['db_user'],
+            #              self.config['db_host'])
+            raise ovimException("Cannot connect to database {} at {}@{}".format(self.config['db_name'],
+                                                                                self.config['db_user'],
+                                                                                self.config['db_host']) )
+        return db
+
    @staticmethod
    def get_version():
        """Return the openvim server version string (module __version__)."""
        return __version__
+
    @staticmethod
    def get_version_date():
        """Return the human-readable release date of this version."""
        return version_date
+
    @staticmethod
    def get_database_version():
        """Return the database schema version this code expects."""
        return database_version
+
+    @staticmethod
+    def _check_dhcp_data_integrity(network):
+        """
+        Check if all dhcp parameter for anet are valid, if not will be calculated from cidr value
+        :param network: list with user nets paramters
+        :return:
+        """
+        if "cidr" in network:
+            cidr = network["cidr"]
+            ip_tools = IPNetwork(cidr)
+            cidr_len = ip_tools.prefixlen
+            if cidr_len > 29:
+                return False
+
+            ips = IPNetwork(cidr)
+            if "dhcp_first_ip" not in network:
+                network["dhcp_first_ip"] = str(ips[2])
+            if "dhcp_last_ip" not in network:
+                network["dhcp_last_ip"] = str(ips[-2])
+            if "gateway_ip" not in network:
+                network["gateway_ip"] = str(ips[1])
+
+            return True
+        else:
+            return False
+
+    @staticmethod
+    def _check_valid_uuid(uuid):
+        id_schema = {"type": "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+        try:
+            js_v(uuid, id_schema)
+            return True
+        except js_e.ValidationError:
+            return False
+
    def start_service(self):
        """
        Start all ovim services: verify the DB schema version, open the
        database connection used by the worker threads, and launch the
        openflow-controller, dhcp and per-host threads.
        :return: None
        :raises ovimException: when the database version does not match, the
                host list cannot be read, or the nets table cannot be read
        """
        global database_version
        # if self.running_info:
        #    return  #TODO service can be checked and rebuild broken threads
        r = self.db.get_db_version()
        if r[0] < 0:
            raise ovimException("DATABASE is not a VIM one or it is a '0.0' version. Try to upgrade to version '{}' with "\
                                "'./database_utils/migrate_vim_db.sh'".format(database_version) )
        elif r[1] != database_version:
            raise ovimException("DATABASE wrong version '{}'. Try to upgrade/downgrade to version '{}' with "\
                                "'./database_utils/migrate_vim_db.sh'".format(r[1], database_version) )
        self.logger.critical("Starting ovim server version: '{} {}' database version '{}'".format(
            self.get_version(), self.get_version_date(), self.get_database_version()))
        # create database connection for openflow threads
        self.db_of = self._create_database_connection()
        self.config["db"] = self.db_of
        self.db_lock = threading.Lock()
        self.config["db_lock"] = self.db_lock

        # only the 'normal' and 'OF only' modes talk to a real openflow controller
        self.of_test_mode = False if self.config['mode'] == 'normal' or self.config['mode'] == "OF only" else True
        # precreate interfaces; [bridge:<host_bridge_name>, VLAN used at Host, uuid of network camping in this bridge,
        # speed in Gbit/s

        self.config['dhcp_nets'] = []
        self.config['bridge_nets'] = []
        for bridge, vlan_speed in self.config["bridge_ifaces"].items():
            # skip 'development_bridge'
            if self.config['mode'] == 'development' and self.config['development_bridge'] == bridge:
                continue
            self.config['bridge_nets'].append([bridge, vlan_speed[0], vlan_speed[1], None])

        # check if this bridge is already used (present at database) for a network)
        used_bridge_nets = []
        for brnet in self.config['bridge_nets']:
            r, nets = self.db.get_table(SELECT=('uuid',), FROM='nets', WHERE={'provider': "bridge:" + brnet[0]})
            if r > 0:
                # remember the uuid of the net camping on this bridge
                brnet[3] = nets[0]['uuid']
                used_bridge_nets.append(brnet[0])
                if self.config.get("dhcp_server"):
                    if brnet[0] in self.config["dhcp_server"]["bridge_ifaces"]:
                        self.config['dhcp_nets'].append(nets[0]['uuid'])
        if len(used_bridge_nets) > 0:
            self.logger.info("found used bridge nets: " + ",".join(used_bridge_nets))
        # get nets used by dhcp
        if self.config.get("dhcp_server"):
            for net in self.config["dhcp_server"].get("nets", ()):
                r, nets = self.db.get_table(SELECT=('uuid',), FROM='nets', WHERE={'name': net})
                if r > 0:
                    self.config['dhcp_nets'].append(nets[0]['uuid'])

        # OFC default (the one described in the configuration file)
        self._start_ofc_default_task()

        # OFC per tenant in DB
        self._start_of_db_tasks()

        # create dhcp_server thread
        host_test_mode = True if self.config['mode'] == 'test' or self.config['mode'] == "OF only" else False
        dhcp_params = self.config.get("dhcp_server")
        if dhcp_params:
            thread = dt.dhcp_thread(dhcp_params=dhcp_params, test=host_test_mode, dhcp_nets=self.config["dhcp_nets"],
                                    db=self.db_of, db_lock=self.db_lock, logger_name=self.logger_name + ".dhcp",
                                    debug=self.config.get('log_level_of'))
            thread.start()
            self.config['dhcp_thread'] = thread

        # Create one thread for each host
        host_test_mode = True if self.config['mode'] == 'test' or self.config['mode'] == "OF only" else False
        host_develop_mode = True if self.config['mode'] == 'development' else False
        host_develop_bridge_iface = self.config.get('development_bridge', None)

        # get host list from data base before starting threads
        r, hosts = self.db.get_table(SELECT=('name', 'ip_name', 'user', 'uuid'), FROM='hosts', WHERE={'status': 'ok'})
        if r < 0:
            raise ovimException("Cannot get hosts from database {}".format(hosts))

        self.config['host_threads'] = {}
        for host in hosts:
            host['image_path'] = '/opt/VNF/images/openvim'
            thread = ht.host_thread(name=host['name'], user=host['user'], host=host['ip_name'], db=self.db_of,
                                    db_lock=self.db_lock, test=host_test_mode, image_path=self.config['image_path'],
                                    version=self.config['version'], host_id=host['uuid'], develop_mode=host_develop_mode,
                                    develop_bridge_iface=host_develop_bridge_iface)
            thread.start()
            self.config['host_threads'][host['uuid']] = thread

        # create ovs dhcp thread
        result, content = self.db.get_table(FROM='nets')
        if result < 0:
            self.logger.error("http_get_ports Error %d %s", result, content)
            raise ovimException(str(content), -result)

        # relaunch a dhcp server for every OVS-provided net with dhcp enabled
        for net in content:
            net_type = net['type']
            if (net_type == 'bridge_data' or net_type == 'bridge_man') \
                    and net["provider"][:4] == 'OVS:' and net["enable_dhcp"] == "true":
                    self.launch_dhcp_server(net['vlan'],
                                            net['dhcp_first_ip'],
                                            net['dhcp_last_ip'],
                                            net['cidr'],
                                            net['gateway_ip'])
+
    def _start_of_db_tasks(self):
        """
        Start one ofc task (connector + thread) for every openflow controller
        already registered in the database.
        :return: None
        """
        ofcs = self.get_of_controllers()

        for ofc in ofcs:
            of_conn = self._load_of_module(ofc)
            # create ofc thread per of controller
            self._create_ofc_task(ofc['uuid'], ofc['dpid'], of_conn)
+
+    def _create_ofc_task(self, ofc_uuid, dpid, of_conn):
+        """
+        Create an ofc thread for handle each sdn controllers
+        :param ofc_uuid: sdn controller uuid
+        :param dpid:  sdn controller dpid
+        :param of_conn: OF_conn module
+        :return:
+        """
+        if 'ofcs_thread' not in self.config and 'ofcs_thread_dpid' not in self.config:
+            ofcs_threads = {}
+            ofcs_thread_dpid = []
+        else:
+            ofcs_threads = self.config['ofcs_thread']
+            ofcs_thread_dpid = self.config['ofcs_thread_dpid']
+
+        if ofc_uuid not in ofcs_threads:
+            ofc_thread = self._create_ofc_thread(of_conn, ofc_uuid)
+            if ofc_uuid == "Default":
+                self.config['of_thread'] = ofc_thread
+
+            ofcs_threads[ofc_uuid] = ofc_thread
+            self.config['ofcs_thread'] = ofcs_threads
+
+            ofcs_thread_dpid.append({dpid: ofc_thread})
+            self.config['ofcs_thread_dpid'] = ofcs_thread_dpid
+
+    def _start_ofc_default_task(self):
+        """
+        Create default ofc thread
+        """
+        if 'of_controller' not in self.config \
+                and 'of_controller_ip' not in self.config \
+                and 'of_controller_port' not in self.config \
+                and 'of_controller_dpid' not in self.config:
+            return
+
+        # OF THREAD
+        db_config = {}
+        db_config['ip'] = self.config.get('of_controller_ip')
+        db_config['port'] = self.config.get('of_controller_port')
+        db_config['dpid'] = self.config.get('of_controller_dpid')
+        db_config['type'] = self.config.get('of_controller')
+        db_config['user'] = self.config.get('of_user')
+        db_config['password'] = self.config.get('of_password')
+
+        # create connector to the openflow controller
+        # load other parameters starting by of_ from config dict in a temporal dict
+
+        of_conn = self._load_of_module(db_config)
+        # create openflow thread
+        self._create_ofc_task("Default", db_config['dpid'], of_conn)
+
+    def _load_of_module(self, db_config):
+        """
+        import python module for each SDN controller supported
+        :param db_config: SDN dn information
+        :return: Module
+        """
+        if not db_config:
+            raise ovimException("No module found it", HTTP_Internal_Server_Error)
+
+        module_info = None
+
+        try:
+            if self.of_test_mode:
+                return openflow_conn.OfTestConnector({"name": db_config['type'],
+                                                      "dpid": db_config['dpid'],
+                                                      "of_debug": self.config['log_level_of']})
+            temp_dict = {}
+
+            if db_config:
+                temp_dict['of_ip'] = db_config['ip']
+                temp_dict['of_port'] = db_config['port']
+                temp_dict['of_dpid'] = db_config['dpid']
+                temp_dict['of_controller'] = db_config['type']
+                temp_dict['of_user'] = db_config.get('user')
+                temp_dict['of_password'] = db_config.get('password')
+
+            temp_dict['of_debug'] = self.config['log_level_of']
+
+            if temp_dict['of_controller'] == 'opendaylight':
+                module = "ODL"
+            else:
+                module = temp_dict['of_controller']
+
+            if module not in ovim.of_module:
+                module_info = imp.find_module(module)
+                of_conn_module = imp.load_module("OF_conn", *module_info)
+                ovim.of_module[module] = of_conn_module
+            else:
+                of_conn_module = ovim.of_module[module]
+
+            try:
+                return of_conn_module.OF_conn(temp_dict)
+            except Exception as e:
+                self.logger.error("Cannot open the Openflow controller '%s': %s", type(e).__name__, str(e))
+                if module_info and module_info[0]:
+                    file.close(module_info[0])
+                raise ovimException("Cannot open the Openflow controller '{}': '{}'".format(type(e).__name__, str(e)),
+                                    HTTP_Internal_Server_Error)
+        except (IOError, ImportError) as e:
+            if module_info and module_info[0]:
+                file.close(module_info[0])
+            self.logger.error("Cannot open openflow controller module '%s'; %s: %s; revise 'of_controller' "
+                              "field of configuration file.", module, type(e).__name__, str(e))
+            raise ovimException("Cannot open openflow controller module '{}'; {}: {}; revise 'of_controller' "
+                                "field of configuration file.".format(module, type(e).__name__, str(e)),
+                                HTTP_Internal_Server_Error)
+
+    def _create_ofc_thread(self, of_conn, ofc_uuid="Default"):
+        """
+        Create and launch a of thread
+        :return: thread obj
+        """
+        # create openflow thread
+
+        #if 'of_controller_nets_with_same_vlan' in self.config:
+        #    ofc_net_same_vlan = self.config['of_controller_nets_with_same_vlan']
+        #else:
+        #    ofc_net_same_vlan = False
+        ofc_net_same_vlan = False
+
+        thread = oft.openflow_thread(ofc_uuid, of_conn, of_test=self.of_test_mode, db=self.db_of, db_lock=self.db_lock,
+                                     pmp_with_same_vlan=ofc_net_same_vlan, debug=self.config['log_level_of'])
+        #r, c = thread.OF_connector.obtain_port_correspondence()
+        #if r < 0:
+        #    raise ovimException("Cannot get openflow information %s", c)
+        thread.start()
+        return thread
+
+    def stop_service(self):
+        threads = self.config.get('host_threads', {})
+        if 'of_thread' in self.config:
+            threads['of'] = (self.config['of_thread'])
+        if 'ofcs_thread' in self.config:
+            ofcs_thread = self.config['ofcs_thread']
+            for ofc in ofcs_thread:
+                threads[ofc] = ofcs_thread[ofc]
+
+        if 'dhcp_thread' in self.config:
+            threads['dhcp'] = (self.config['dhcp_thread'])
+
+        for thread in threads.values():
+            thread.insert_task("exit")
+        for thread in threads.values():
+            thread.join()
+
+    def get_networks(self, columns=None, db_filter={}, limit=None):
+        """
+        Retreive networks available
+        :param columns: List with select query parameters
+        :param db_filter: List with where query parameters
+        :param limit: Query limit result
+        :return:
+        """
+        result, content = self.db.get_table(SELECT=columns, FROM='nets', WHERE=db_filter, LIMIT=limit)
+
+        if result < 0:
+            raise ovimException(str(content), -result)
+
+        convert_boolean(content, ('shared', 'admin_state_up', 'enable_dhcp'))
+
+        return content
+
def show_network(self, network_id, db_filter=None):
    """
    Get one network from the database by its uuid.
    :param network_id: network uuid
    :param db_filter: optional dict with additional WHERE query filters
    :return: the network row as a dict, with its attached ports if any
    :raises ovimException: if the id is missing, not found, or on db error
    """
    if not network_id:
        raise ovimException("network id was not provided")
    # copy the filter: the original mutated the (mutable, shared) default
    # argument, so the uuid of one call leaked into the next one
    where = dict(db_filter) if db_filter else {}
    where['uuid'] = network_id

    result, content = self.db.get_table(FROM='nets', WHERE=where, LIMIT=100)
    if result < 0:
        raise ovimException(str(content), -result)
    elif result == 0:
        raise ovimException("show_network network '%s' not found" % network_id, -result)

    convert_boolean(content, ('shared', 'admin_state_up', 'enable_dhcp'))
    # attach the ports connected to this network (the original converted
    # the booleans twice; once is enough)
    result, ports = self.db.get_table(FROM='ports', SELECT=('uuid as port_id',),
                                      WHERE={'net_id': network_id}, LIMIT=100)
    if len(ports) > 0:
        content[0]['ports'] = ports
    return content[0]
+
def new_network(self, network):
    """
    Create a network in the database.
    :param network: dict describing the network (name, type, provider, vlan,
                    bind_net, bind_type, tenant_id, enable_dhcp, ...)
    :return: the uuid of the created network
    :raises ovimException: on invalid parameters or database errors
    """
    tenant_id = network.get('tenant_id')
    if tenant_id:
        result, _ = self.db.get_table(FROM='tenants', SELECT=('uuid',),
                                      WHERE={'uuid': tenant_id, "enabled": True})
        if result <= 0:
            raise ovimException("set_network error, no tenant founded", -result)

    bridge_net = None
    # check valid params
    net_provider = network.get('provider')
    net_type = network.get('type')
    net_vlan = network.get("vlan")
    net_bind_net = network.get("bind_net")
    net_bind_type = network.get("bind_type")
    name = network["name"]

    # if the name ends with ":<vlan_tag>" and no explicit binding was given,
    # derive an automatic binding to the base network with that vlan tag
    vlan_index = name.rfind(":")
    if not net_bind_net and not net_bind_type and vlan_index > 1:
        try:
            vlan_tag = int(name[vlan_index + 1:])
            # fixed: the original tested 'not vlan_tag and vlan_tag < 4096',
            # which only matched the invalid tag 0
            if 0 < vlan_tag < 4096:
                net_bind_net = name[:vlan_index]
                net_bind_type = "vlan:" + name[vlan_index + 1:]
        except ValueError:  # name tail is not a number; no automatic binding
            pass

    if net_bind_net:
        # resolve the bind net, given either by uuid or by name
        if self._check_valid_uuid(net_bind_net):
            net_bind_key = "uuid"
        else:
            net_bind_key = "name"
        result, content = self.db.get_table(FROM='nets', WHERE={net_bind_key: net_bind_net})
        if result < 0:
            raise ovimException(' getting nets from db ' + content, HTTP_Internal_Server_Error)
        elif result == 0:
            raise ovimException(" bind_net %s '%s'not found" % (net_bind_key, net_bind_net), HTTP_Bad_Request)
        elif result > 1:
            raise ovimException(" more than one bind_net %s '%s' found, use uuid" % (net_bind_key, net_bind_net), HTTP_Bad_Request)
        network["bind_net"] = content[0]["uuid"]

    if net_bind_type:
        if net_bind_type[0:5] != "vlan:":
            raise ovimException("bad format for 'bind_type', must be 'vlan:<tag>'", HTTP_Bad_Request)
        if int(net_bind_type[5:]) > 4095 or int(net_bind_type[5:]) <= 0:
            raise ovimException("bad format for 'bind_type', must be 'vlan:<tag>' with a tag between 1 and 4095",
                                HTTP_Bad_Request)
        network["bind_type"] = net_bind_type

    if net_provider:
        if net_provider[:9] == "openflow:":
            # openflow providers only accept data-plane net types
            if net_type:
                if net_type != "ptp" and net_type != "data":
                    raise ovimException(" only 'ptp' or 'data' net types can be bound to 'openflow'",
                                        HTTP_Bad_Request)
            else:
                net_type = 'data'
        else:
            if net_type:
                if net_type != "bridge_man" and net_type != "bridge_data":
                    raise ovimException("Only 'bridge_man' or 'bridge_data' net types can be bound "
                                        "to 'bridge', 'macvtap' or 'default", HTTP_Bad_Request)
            else:
                net_type = 'bridge_man'

    if not net_type:
        net_type = 'bridge_man'

    if net_provider:
        if net_provider[:7] == 'bridge:':
            # check it is one of the pre-provisioned bridges
            bridge_net_name = net_provider[7:]
            for brnet in self.config['bridge_nets']:
                if brnet[0] == bridge_net_name:
                    # brnet[3] holds the uuid of the net using the bridge;
                    # fixed: the original raised "already used" when it was FREE
                    if brnet[3]:
                        raise ovimException("invalid 'provider:physical', "
                                            "bridge '%s' is already used" % bridge_net_name, HTTP_Conflict)
                    bridge_net = brnet
                    net_vlan = brnet[1]
                    break

    elif self.config['network_type'] == 'bridge' and (net_type == 'bridge_data' or net_type == 'bridge_man'):
        # look for a free pre-created bridge net
        for brnet in self.config['bridge_nets']:
            if not brnet[3]:  # free
                # fixed: the original had these branches swapped, comparing
                # speeds against bridge_net=None and crashing
                if bridge_net:
                    if net_type == 'bridge_man':  # look for the smaller speed
                        if brnet[2] < bridge_net[2]:
                            bridge_net = brnet
                    else:  # look for the larger speed
                        if brnet[2] > bridge_net[2]:
                            bridge_net = brnet
                else:
                    bridge_net = brnet
        if not bridge_net:
            raise ovimException("Max limits of bridge networks reached. Future versions of VIM "
                                "will overcome this limit", HTTP_Bad_Request)
        else:
            # fixed: bridge_net is a list; log its name instead of concatenating
            self.logger.debug("using net %s", bridge_net[0])
            net_provider = "bridge:" + bridge_net[0]
            net_vlan = bridge_net[1]
    # fixed: parenthesized — 'and' bound tighter than 'or' in the original, so
    # 'bridge_data' selected OVS regardless of the configured network_type
    elif (net_type == 'bridge_data' or net_type == 'bridge_man') and self.config['network_type'] == 'ovs':
        net_provider = 'OVS'
    if not net_vlan and (net_type == "data" or net_type == "ptp" or net_provider == "OVS"):
        net_vlan = self.db.get_free_net_vlan()
        if net_vlan < 0:
            raise ovimException("Error getting an available vlan", HTTP_Internal_Server_Error)
    if net_provider == 'OVS':
        net_provider = 'OVS' + ":" + str(net_vlan)

    network['provider'] = net_provider
    network['type'] = net_type
    network['vlan'] = net_vlan
    dhcp_integrity = True
    if 'enable_dhcp' in network and network['enable_dhcp']:
        dhcp_integrity = self._check_dhcp_data_integrity(network)

    result, content = self.db.new_row('nets', network, True, True)

    if result >= 0 and dhcp_integrity:
        if bridge_net:
            bridge_net[3] = content  # mark the pre-provisioned bridge as used
        if self.config.get("dhcp_server") and self.config['network_type'] == 'bridge':
            if network["name"] in self.config["dhcp_server"].get("nets", ()):
                self.config["dhcp_nets"].append(content)
                self.logger.debug("dhcp_server: add new net %s", content)
            # fixed: the original tested 'not bridge_net' and then indexed
            # bridge_net[0], which could only crash
            elif bridge_net and bridge_net[0] in self.config["dhcp_server"].get("bridge_ifaces", ()):
                self.config["dhcp_nets"].append(content)
                self.logger.debug("dhcp_server: add new net %s", content)
        return content
    else:
        raise ovimException("Error posting network", HTTP_Internal_Server_Error)
+# TODO kei change update->edit
+
def edit_network(self, network_id, network):
    """
    Update network data by id.
    :param network_id: uuid of the network to modify
    :param network: dict with the fields to change
    :return: the network_id on success
    :raises ovimException: on invalid changes or database errors
    """
    # Look for the previous data
    where_ = {'uuid': network_id}
    result, network_old = self.db.get_table(FROM='nets', WHERE=where_)
    if result < 0:
        raise ovimException("Error updating network %s" % network_old, HTTP_Internal_Server_Error)
    elif result == 0:
        raise ovimException('network %s not found' % network_id, HTTP_Not_Found)
    # get ports
    nbports, content = self.db.get_table(FROM='ports', SELECT=('uuid as port_id',),
                                         WHERE={'net_id': network_id}, LIMIT=100)
    # fixed: the original re-tested the stale 'result' from the nets query here
    if nbports < 0:
        raise ovimException("http_put_network_id error %d %s" % (nbports, network_old), HTTP_Internal_Server_Error)
    if nbports > 0:
        # type/vlan changes are forbidden while instances are attached
        if 'type' in network and network['type'] != network_old[0]['type']:
            raise ovimException("Can not change type of network while having ports attached",
                                HTTP_Method_Not_Allowed)
        if 'vlan' in network and network['vlan'] != network_old[0]['vlan']:
            raise ovimException("Can not change vlan of network while having ports attached",
                                HTTP_Method_Not_Allowed)

    # check valid params
    net_provider = network.get('provider', network_old[0]['provider'])
    net_type = network.get('type', network_old[0]['type'])
    net_bind_net = network.get("bind_net")
    net_bind_type = network.get("bind_type")
    if net_bind_net:
        # resolve the bind net, given either by uuid or by name
        if self._check_valid_uuid(net_bind_net):
            net_bind_key = "uuid"
        else:
            net_bind_key = "name"
        result, content = self.db.get_table(FROM='nets', WHERE={net_bind_key: net_bind_net})
        if result < 0:
            raise ovimException('Getting nets from db ' + content, HTTP_Internal_Server_Error)
        elif result == 0:
            raise ovimException("bind_net %s '%s'not found" % (net_bind_key, net_bind_net), HTTP_Bad_Request)
        elif result > 1:
            raise ovimException("More than one bind_net %s '%s' found, use uuid" % (net_bind_key, net_bind_net),
                                HTTP_Bad_Request)
        network["bind_net"] = content[0]["uuid"]
    if net_bind_type:
        if net_bind_type[0:5] != "vlan:":
            raise ovimException("Bad format for 'bind_type', must be 'vlan:<tag>'", HTTP_Bad_Request)
        if int(net_bind_type[5:]) > 4095 or int(net_bind_type[5:]) <= 0:
            raise ovimException("bad format for 'bind_type', must be 'vlan:<tag>' with a tag between 1 and 4095",
                                HTTP_Bad_Request)
    if net_provider:
        if net_provider[:9] == "openflow:":
            if net_type != "ptp" and net_type != "data":
                raise ovimException("Only 'ptp' or 'data' net types can be bound to 'openflow'", HTTP_Bad_Request)
        else:
            if net_type != "bridge_man" and net_type != "bridge_data":
                raise ovimException("Only 'bridge_man' or 'bridge_data' net types can be bound to "
                                    "'bridge', 'macvtap' or 'default", HTTP_Bad_Request)

    # insert in data base
    result, content = self.db.update_rows('nets', network, WHERE={'uuid': network_id}, log=True)
    if result >= 0:
        if result > 0:
            # reinstall the openflow rules if the net has ports attached
            # (the original had two identical except clauses; merged)
            try:
                if nbports:
                    self.net_update_ofc_thread(network_id)
            except Exception as e:
                raise ovimException("Error while launching openflow rules in network '{}' {}"
                                    .format(network_id, str(e)), HTTP_Internal_Server_Error)

            # keep the dhcp-served nets list in sync with the new data
            if self.config.get("dhcp_server"):
                if network_id in self.config["dhcp_nets"]:
                    self.config["dhcp_nets"].remove(network_id)
                if network.get("name", network_old[0]["name"]) in self.config["dhcp_server"].get("nets", ()):
                    self.config["dhcp_nets"].append(network_id)
                else:
                    # fixed: dropped the duplicated 'net_bind and net_bind'
                    net_bind = network.get("bind_type", network_old[0]["bind_type"])
                    if net_bind and net_bind[:7] == "bridge:" and \
                            net_bind[7:] in self.config["dhcp_server"].get("bridge_ifaces", ()):
                        self.config["dhcp_nets"].append(network_id)
        return network_id
    else:
        raise ovimException(content, -result)
+
def delete_network(self, network_id):
    """
    Delete a network by its uuid.
    :param network_id: network uuid
    :return: the database deletion result content
    :raises ovimException: if the network is not found or on database error
    """
    # remove the row from the database
    result, content = self.db.delete_row('nets', network_id)
    if result == 0:
        raise ovimException("Network %s not found " % network_id, HTTP_Not_Found)
    if result < 0:
        raise ovimException("Error deleting  network %s" % network_id, HTTP_Internal_Server_Error)

    # free the pre-provisioned bridge bound to this network, if any
    for bridge in self.config['bridge_nets']:
        if bridge[3] == network_id:
            bridge[3] = None
            break
    # also drop it from the dhcp-served nets list
    if self.config.get("dhcp_server") and network_id in self.config["dhcp_nets"]:
        self.config["dhcp_nets"].remove(network_id)
    return content
+
def get_openflow_rules(self, network_id=None):
    """
    Retrieve the openflow rules stored in the database.
    :param network_id: network uuid; if None the rules of every network are returned
    :return: list of openflow rule rows
    :raises ovimException: on database errors
    """
    where_ = {"net_id": network_id} if network_id else {}
    columns = ("name", "net_id", "ofc_id", "priority", "vlan_id", "ingress_port",
               "src_mac", "dst_mac", "actions")
    result, content = self.db.get_table(SELECT=columns, WHERE=where_, FROM='of_flows')
    if result < 0:
        raise ovimException(str(content), -result)
    return content
+
def edit_openflow_rules(self, network_id=None):
    """
    Reinstall the openflow rules of one network, or of every network when
    network_id is None.
    :param network_id: network uuid or None for all networks
    :return: number of networks updated
    :raises ovimException: on database or openflow-thread errors
    """
    where_ = {"uuid": network_id} if network_id else {}
    result, content = self.db.get_table(SELECT=("uuid", "type"), WHERE=where_, FROM='nets')
    if result < 0:
        raise ovimException(str(content), -result)

    for net in content:
        # only data-plane networks carry openflow rules
        if net["type"] not in ("ptp", "data"):
            result -= 1
            continue
        try:
            self.net_update_ofc_thread(net['uuid'])
        except ovimException as e:
            raise ovimException("Error updating network'{}' {}".format(net['uuid'], str(e)),
                                HTTP_Internal_Server_Error)
        except Exception as e:
            raise ovimException("Error updating network '{}' {}".format(net['uuid'], str(e)),
                                HTTP_Internal_Server_Error)

    return result
+
def delete_openflow_rules(self, ofc_id=None):
    """
    Delete ALL openflow rules handled by one openflow controller thread.
    :param ofc_id: openflow controller uuid; None uses the 'Default' controller
    :return: the result of the inserted 'clear-all' task
    :raises ovimException: if the controller thread is not running or the task fails
    """
    if not ofc_id:
        if 'Default' in self.config['ofcs_thread']:
            r, c = self.config['ofcs_thread']['Default'].insert_task("clear-all")
        else:
            raise ovimException("Default Openflow controller not running", HTTP_Not_Found)
    elif ofc_id in self.config['ofcs_thread']:
        r, c = self.config['ofcs_thread'][ofc_id].insert_task("clear-all")
    else:
        raise ovimException("Openflow controller not found with ofc_id={}".format(ofc_id), HTTP_Not_Found)

    # fixed: the failure check was nested inside the explicit-ofc branch only,
    # so errors of the Default controller were silently ignored
    if r < 0:
        raise ovimException(str(c), -r)
    return r
+
def get_openflow_ports(self, ofc_id=None):
    """
    Obtain the switch port mapping (pp2ofi) of an openflow controller connector.
    :param ofc_id: openflow controller uuid; None uses the 'Default' controller
    :return: the pp2ofi dict of the controller connector
    :raises ovimException: if the requested controller thread is not running
    """
    if not ofc_id:
        if 'Default' in self.config['ofcs_thread']:
            conn = self.config['ofcs_thread']['Default'].OF_connector
        else:
            raise ovimException("Default Openflow controller not running", HTTP_Not_Found)
    # fixed: this was a plain 'if', so when ofc_id was None the Default
    # connector found above was discarded and a not-found error raised
    elif ofc_id in self.config['ofcs_thread']:
        conn = self.config['ofcs_thread'][ofc_id].OF_connector
    else:
        raise ovimException("Openflow controller not found with ofc_id={}".format(ofc_id), HTTP_Not_Found)
    return conn.pp2ofi
+
def get_ports(self, columns=None, filter=None, limit=None):
    """
    Retrieve ports from the database.
    :param columns: list of columns for the SELECT clause (None selects all)
    :param filter: dict with WHERE query filters (parameter name kept for
                   caller compatibility although it shadows the builtin)
    :param limit: maximum number of rows to return
    :return: list of port rows, with 'admin_state_up' converted to bool
    :raises ovimException: on database errors
    """
    # filter default changed from {} to None: a mutable default argument
    # is shared between calls
    result, content = self.db.get_table(SELECT=columns, WHERE=filter or {}, FROM='ports', LIMIT=limit)
    if result < 0:
        self.logger.error("http_get_ports Error %d %s", result, content)
        raise ovimException(str(content), -result)
    else:
        convert_boolean(content, ('admin_state_up',))
        return content
+
def new_port(self, port_data):
    """
    Create an external port in the database and refresh the openflow rules
    of the network it is attached to, if any.
    :param port_data: dict with the port description
    :return: uuid of the created port
    :raises ovimException: on database or openflow-thread errors
    """
    port_data['type'] = 'external'
    if port_data.get('net_id'):
        # the target net must accept external ports
        result, new_net = self.db.check_target_net(port_data['net_id'], None, 'external')
        if result < 0:
            raise ovimException(str(new_net), -result)

    # insert in data base
    result, uuid = self.db.new_row('ports', port_data, True, True)
    if result <= 0:
        raise ovimException(str(uuid), -result)

    if 'net_id' in port_data:
        try:
            self.net_update_ofc_thread(port_data['net_id'])
        except ovimException as e:
            raise ovimException("Cannot insert a task for updating network '{}' {}"
                                .format(port_data['net_id'], str(e)), HTTP_Internal_Server_Error)
        except Exception as e:
            raise ovimException("Cannot insert a task for updating network '{}' {}"
                                .format(port_data['net_id'], str(e)), HTTP_Internal_Server_Error)
    return uuid
+
def new_external_port(self, port_data):
    """
    Create a new external port, resolving its switch connection from the
    stored openflow port mappings.
    :param port_data: port_data = {
        'region': 'datacenter region',
        'compute_node': 'compute node id',
        'pci': 'pci port address',
        'vlan': 'net vlan',
        'net_id': 'net id',
        'tenant_id': 'tenant id',
        'mac': 'switch mac',
        'name': 'port name'
        'ip_address': 'ip address - optional'}
    :return: uuid of the created port
    :raises ovimException: when the mapping is missing/ambiguous or on db errors
    """
    port_data['type'] = 'external'

    if port_data.get('net_id'):
        # the target net must accept external ports
        result, new_net = self.db.check_target_net(port_data['net_id'], None, 'external')
        if result < 0:
            raise ovimException(str(new_net), -result)

    # locate the switch port this physical port is wired to
    db_filter = {key: port_data[key] for key in ('region', 'pci', 'compute_node')
                 if port_data.get(key)}
    columns = ['ofc_id', 'switch_dpid', 'switch_port', 'switch_mac', 'pci']
    port_mapping_data = self.get_of_port_mappings(columns, db_filter)

    if not port_mapping_data:
        raise ovimException("No port mapping founded for '{}'".format(str(db_filter)),
                            HTTP_Not_Found)
    if len(port_mapping_data) > 1:
        raise ovimException("Wrong port data was given, please check pci, region & compute id data",
                            HTTP_Conflict)

    mapping = port_mapping_data[0]
    for field in ('ofc_id', 'switch_dpid', 'switch_port', 'switch_mac'):
        port_data[field] = mapping[field]

    # drop the mapping-lookup keys, which do not belong to the 'ports' table
    for key in ('region', 'pci', 'compute_node'):
        port_data.pop(key, None)

    result, uuid = self.db.new_row('ports', port_data, True, True)
    if result <= 0:
        raise ovimException(str(uuid), -result)
    try:
        self.net_update_ofc_thread(port_data['net_id'], port_data['ofc_id'])
    except ovimException as e:
        raise ovimException("Cannot insert a task for updating network '{}' {}".
                            format(port_data['net_id'], str(e)), HTTP_Internal_Server_Error)
    except Exception as e:
        raise ovimException("Cannot insert a task for updating network '{}' {}"
                            .format(port_data['net_id'], e), HTTP_Internal_Server_Error)
    return uuid
+
def net_update_ofc_thread(self, net_id, ofc_id=None, switch_dpid=None):
    """
    Queue an 'update-net' task on the openflow controller thread(s) in charge
    of the given network.
    :param net_id: network uuid
    :param ofc_id: openflow controller uuid, when already known
    :param switch_dpid: switch dpid, used to locate the thread when no ofc_id
    :raises ovimException: if no net_id is given or the task cannot be queued
    """
    if not net_id:
        raise ovimException("No net_id received", HTTP_Internal_Server_Error)

    r = -1
    c = 'No valid ofc_id or switch_dpid received'

    if not ofc_id:
        # derive the controller from any port already attached to the net
        for port in self.get_ports(filter={"net_id": net_id}):
            if port.get('ofc_id', None):
                ofc_id = port['ofc_id']
                switch_dpid = port['switch_dpid']
                break
    # TODO if not ofc_id: look at database table ofcs

    # when neither controller nor dpid was found, fall back to the default one
    if not ofc_id and not switch_dpid:
        ofc_id = "Default"

    if ofc_id and ofc_id in self.config['ofcs_thread']:
        r, c = self.config['ofcs_thread'][ofc_id].insert_task("update-net", net_id)
    elif switch_dpid:
        # locate the thread(s) handling this dpid
        for dpid_map in self.config['ofcs_thread_dpid']:
            if switch_dpid in dpid_map:
                r, c = dpid_map[switch_dpid].insert_task("update-net", net_id)

    if r < 0:
        message = "Cannot insert a task for updating network '{}', {}".format(net_id, c)
        self.logger.error(message)
        raise ovimException(message, HTTP_Internal_Server_Error)
+
def delete_port(self, port_id):
    """
    Delete an external port and refresh the openflow rules of its network.
    :param port_id: port uuid
    :return: the database deletion result content
    :raises ovimException: if not found or on database/openflow errors
    """
    # Look for the previous port data
    result, ports = self.db.get_table(WHERE={'uuid': port_id, "type": "external"}, FROM='ports')
    if result < 0:
        raise ovimException("Cannot get port info from database: {}".format(ports), http_code=-result)

    # delete from the data base
    result, content = self.db.delete_row('ports', port_id)
    if result == 0:
        raise ovimException("External port '{}' not found".format(port_id), http_code=HTTP_Not_Found)
    if result < 0:
        raise ovimException("Cannot delete port from database: {}".format(content), http_code=-result)

    # if the port was attached to a network, its openflow rules must be rebuilt
    network = ports[0].get('net_id', None)
    if network:
        try:
            self.net_update_ofc_thread(network, ofc_id=ports[0]["ofc_id"],
                                       switch_dpid=ports[0]["switch_dpid"])
        except ovimException as e:
            raise ovimException("Cannot insert a task for delete network '{}' {}".format(network, str(e)),
                                HTTP_Internal_Server_Error)
        except Exception as e:
            raise ovimException("Cannot insert a task for delete network '{}' {}".format(network, str(e)),
                                HTTP_Internal_Server_Error)

    return content
+
def edit_port(self, port_id, port_data, admin=True):
    """
    Modify a port. When the attached network changes, refresh the openflow
    rules on both the new and the old network and, for SR-IOV ports, update
    the vlan on the hosting compute node.
    :param port_id: port uuid
    :param port_data: dict with the fields to change
    :param admin: True when the caller has admin privileges
    :return: the port_id on success
    :raises ovimException: on forbidden changes or database errors
    """
    # Look for the previous port data
    result, content = self.db.get_table(FROM="ports", WHERE={'uuid': port_id})
    if result < 0:
        raise ovimException("Cannot get port info from database: {}".format(content), http_code=-result)
    elif result == 0:
        raise ovimException("Port '{}' not found".format(port_id), http_code=HTTP_Not_Found)
    port = content[0]
    nets = []
    host_id = None
    result = 1
    if 'net_id' in port_data:
        # change of net.
        old_net = port.get('net_id', None)
        new_net = port_data['net_id']
        if old_net != new_net:
            if new_net:
                # put first the new net, so that new openflow rules are
                # created before removing the old ones
                nets.append(new_net)
            if old_net:
                nets.append(old_net)
            if port['type'] == 'instance:bridge' or port['type'] == 'instance:ovs':
                raise ovimException("bridge interfaces cannot be attached to a different net",
                                    http_code=HTTP_Forbidden)
            elif port['type'] == 'external' and not admin:
                raise ovimException("Needed admin privileges", http_code=HTTP_Unauthorized)
            if new_net:
                # check that new net has the correct type
                result, new_net_dict = self.db.check_target_net(new_net, None, port['type'])
                if result < 0:
                    raise ovimException("Error {}".format(new_net_dict), http_code=HTTP_Conflict)
            # change VLAN for SR-IOV ports  # TODO consider also VFnotShared
            if result >= 0 and port["type"] == "instance:data" and port["model"] == "VF":
                # fixed: the branches were swapped; detaching read the
                # undefined new_net_dict (NameError) and attaching cleared
                # the vlan instead of taking it from the new net
                if new_net:
                    port_data["vlan"] = new_net_dict["vlan"]
                else:
                    port_data["vlan"] = None
                # get host where this VM is allocated
                result, content = self.db.get_table(FROM="instances", WHERE={"uuid": port["instance_id"]})
                if result > 0:
                    host_id = content[0]["host_id"]

    # insert in data base
    if result >= 0:
        result, content = self.db.update_rows('ports', port_data, WHERE={'uuid': port_id}, log=False)
        port.update(port_data)

    # Insert task to complete actions
    if result > 0:
        for net_id in nets:
            try:
                self.net_update_ofc_thread(net_id, port["ofc_id"], switch_dpid=port["switch_dpid"])
            except ovimException as e:
                raise ovimException("Error updating network'{}' {}".format(net_id, str(e)),
                                    HTTP_Internal_Server_Error)
            except Exception as e:
                raise ovimException("Error updating network '{}' {}".format(net_id, str(e)),
                                    HTTP_Internal_Server_Error)

        if host_id:
            r, v = self.config['host_threads'][host_id].insert_task("edit-iface", port_id, old_net, new_net)
            if r < 0:
                self.logger.error("Error updating network '{}' {}".format(r, v))
                # TODO Do something if fails
    if result >= 0:
        return port_id
    else:
        raise ovimException("Error {}".format(content), http_code=-result)
+
def new_of_controller(self, ofc_data):
    """
    Store a new openflow controller in the database and start its worker thread.
    :param ofc_data: dict with the openflow controller description
    :return: uuid of the created openflow controller
    :raises ovimException: on database errors
    """
    result, ofc_uuid = self.db.new_row('ofcs', ofc_data, True, True)
    if result < 0:
        raise ovimException("New ofc Error %s" % ofc_uuid, HTTP_Internal_Server_Error)

    # instantiate the connector module and launch its thread
    ofc_data['uuid'] = ofc_uuid
    connector = self._load_of_module(ofc_data)
    self._create_ofc_task(ofc_uuid, ofc_data['dpid'], connector)
    return ofc_uuid
+
def edit_of_controller(self, of_id, ofc_data):
    """
    Edit an openflow controller entry in the database.
    :param of_id: uuid of the openflow controller to modify
    :param ofc_data: dict with the fields to change
    :return: the ofc_data applied
    :raises ovimException: if no data is given or the update fails
    """
    if not ofc_data:
        # fixed message: was "No data received during uptade OF contorller"
        raise ovimException("No data received during update of OF controller",
                            http_code=HTTP_Internal_Server_Error)

    # show_of_controller raises if of_id does not exist
    old_of_controller = self.show_of_controller(of_id)

    if old_of_controller:
        result, content = self.db.update_rows('ofcs', ofc_data, WHERE={'uuid': of_id}, log=False)
        if result >= 0:
            return ofc_data
        else:
            raise ovimException("Error updating OF controller with uuid {}".format(of_id),
                                http_code=-result)
    else:
        raise ovimException("Error updating OF controller with uuid {}".format(of_id),
                            http_code=HTTP_Internal_Server_Error)
+
def delete_of_controller(self, of_id):
    """
    Delete an openflow controller from the database and stop its thread.
    :param of_id: uuid of the openflow controller
    :return: the database deletion result content
    :raises ovimException: if not found or on database errors
    """
    ofc = self.show_of_controller(of_id)

    result, content = self.db.delete_row("ofcs", of_id)
    if result < 0:
        raise ovimException("Cannot delete ofc from database: {}".format(content), http_code=-result)
    elif result == 0:
        raise ovimException("ofc {} not found ".format(content), http_code=HTTP_Not_Found)

    ofc_thread = self.config['ofcs_thread'][of_id]
    del self.config['ofcs_thread'][of_id]
    # fixed: iterate over a copy — removing from the list while iterating it
    # skipped the element after each removed one
    for ofc_th in list(self.config['ofcs_thread_dpid']):
        if ofc['dpid'] in ofc_th:
            self.config['ofcs_thread_dpid'].remove(ofc_th)

    ofc_thread.insert_task("exit")
    # ofc_thread.join()

    return content
+
def show_of_controller(self, uuid):
    """
    Retrieve one openflow controller from the database by uuid.
    :param uuid: openflow controller uuid
    :return: the controller row as a dict
    :raises ovimException: if not found or on database error
    """
    result, content = self.db.get_table(FROM='ofcs', WHERE={"uuid": uuid}, LIMIT=100)
    if result == 0:
        raise ovimException("Openflow controller with uuid '{}' not found".format(uuid),
                            http_code=HTTP_Not_Found)
    if result < 0:
        raise ovimException("Openflow controller with uuid '{}' error".format(uuid),
                            http_code=HTTP_Internal_Server_Error)
    return content[0]
+
+    def get_of_controllers(self, columns=None, db_filter={}, limit=None):
+        """
+        Show an openflow controllers from DB.
+        :param columns:  List with SELECT query parameters
+        :param db_filter: List with where query parameters
+        :param limit: result Limit
+        :return:
+        """
+        result, content = self.db.get_table(SELECT=columns, FROM='ofcs', WHERE=db_filter, LIMIT=limit)
+
+        if result < 0:
+            raise ovimException(str(content), -result)
+
+        return content
+
+    def get_tenants(self, columns=None, db_filter={}, limit=None):
+        """
+        Retrieve tenant list from DB
+        :param columns:  List with SELECT query parameters
+        :param db_filter: List with where query parameters
+        :param limit: result limit
+        :return:
+        """
+        result, content = self.db.get_table(FROM='tenants', SELECT=columns, WHERE=db_filter, LIMIT=limit)
+        if result < 0:
+            raise ovimException('get_tenatns Error {}'.format(str(content)), -result)
+        else:
+            convert_boolean(content, ('enabled',))
+            return content
+
+    def show_tenant_id(self, tenant_id):
+        """
+        Get tenant from DB by id
+        :param tenant_id: tenant id
+        :return:
+        """
+        result, content = self.db.get_table(FROM='tenants', SELECT=('uuid', 'name', 'description', 'enabled'),
+                                            WHERE={"uuid": tenant_id})
+        if result < 0:
+            raise ovimException(str(content), -result)
+        elif result == 0:
+            raise ovimException("tenant with uuid='{}' not found".format(tenant_id), HTTP_Not_Found)
+        else:
+            convert_boolean(content, ('enabled',))
+            return content[0]
+
+    def new_tentant(self, tenant):
+        """
+        Create a tenant and store in DB
+        :param tenant: Dictionary with tenant data
+        :return: the uuid of created tenant. Raise exception upon error
+        """
+
+        # insert in data base
+        result, tenant_uuid = self.db.new_tenant(tenant)
+
+        if result >= 0:
+            return tenant_uuid
+        else:
+            raise ovimException(str(tenant_uuid), -result)
+
+    def delete_tentant(self, tenant_id):
+        """
+        Delete a tenant from the database, together with the flavors and images
+        attached to it.
+        (NOTE: the method name keeps its historical misspelling; callers rely on it.)
+        :param tenant_id: tenant uuid
+        :return: the deleted tenant id text from the database layer
+        :raise ovimException: if the tenant does not exist or cannot be deleted
+        """
+
+        # collect the tenant's flavors/images BEFORE deleting the tenant row,
+        # because the association rows disappear together with the tenant
+        r, tenants_flavors = self.db.get_table(FROM='tenants_flavors', SELECT=('flavor_id', 'tenant_id'),
+                                               WHERE={'tenant_id': tenant_id})
+        if r <= 0:
+            tenants_flavors = ()
+        r, tenants_images = self.db.get_table(FROM='tenants_images', SELECT=('image_id', 'tenant_id'),
+                                              WHERE={'tenant_id': tenant_id})
+        if r <= 0:
+            tenants_images = ()
+
+        result, content = self.db.delete_row('tenants', tenant_id)
+        if result == 0:
+            raise ovimException("tenant '%s' not found" % tenant_id, HTTP_Not_Found)
+        elif result > 0:
+            # best-effort cleanup of the resources that were attached to the tenant
+            for flavor in tenants_flavors:
+                self.db.delete_row_by_key("flavors", "uuid", flavor['flavor_id'])
+            for image in tenants_images:
+                self.db.delete_row_by_key("images", "uuid", image['image_id'])
+            return content
+        else:
+            raise ovimException("Error deleting tenant '%s' " % tenant_id, HTTP_Internal_Server_Error)
+
+    def edit_tenant(self, tenant_id, tenant_data):
+        """
+        Update a tenant data identified by tenant id
+        :param tenant_id: tenant id
+        :param tenant_data: Dictionary with tenant data
+        :return:
+        """
+
+        # Look for the previous data
+        result, tenant_data_old = self.db.get_table(FROM='tenants', WHERE={'uuid': tenant_id})
+        if result < 0:
+            raise ovimException("Error updating tenant with uuid='{}': {}".format(tenant_id, tenant_data_old),
+                                HTTP_Internal_Server_Error)
+        elif result == 0:
+            raise ovimException("tenant with uuid='{}' not found".format(tenant_id), HTTP_Not_Found)
+
+        # insert in data base
+        result, content = self.db.update_rows('tenants', tenant_data, WHERE={'uuid': tenant_id}, log=True)
+        if result >= 0:
+            return content
+        else:
+            raise ovimException(str(content), -result)
+
+    def set_of_port_mapping(self, of_maps, ofc_id=None, switch_dpid=None, region=None):
+        """
+        Create new port mapping entry
+        :param of_maps: List with port mapping information
+        # maps =[{"ofc_id": <ofc_id>,"region": datacenter region,"compute_node": compute uuid,"pci": pci adress,
+                "switch_dpid": swith dpid,"switch_port": port name,"switch_mac": mac}]
+        :param ofc_id: ofc id
+        :param switch_dpid: switch  dpid
+        :param region: datacenter region id
+        :return:
+        """
+
+        for map in of_maps:
+            if ofc_id:
+                map['ofc_id'] = ofc_id
+            if switch_dpid:
+                map['switch_dpid'] = switch_dpid
+            if region:
+                map['region'] = region
+
+        for of_map in of_maps:
+            result, uuid = self.db.new_row('of_port_mappings', of_map, True)
+            if result > 0:
+                of_map["uuid"] = uuid
+            else:
+                raise ovimException(str(uuid), -result)
+        return of_maps
+
+    def clear_of_port_mapping(self, db_filter={}):
+        """
+        Clear port mapping filtering using db_filter dict
+        :param db_filter: Parameter to filter during remove process
+        :return:
+        """
+        result, content = self.db.delete_row_by_dict(FROM='of_port_mappings', WHERE=db_filter)
+        # delete_row_by_key
+        if result >= 0:
+            return content
+        else:
+            raise ovimException("Error deleting of_port_mappings with filter='{}'".format(str(db_filter)),
+                                HTTP_Internal_Server_Error)
+
+    def get_of_port_mappings(self, column=None, db_filter=None, db_limit=None):
+        """
+        Retrive port mapping from DB
+        :param column:
+        :param db_filter:
+        :return:
+        """
+        result, content = self.db.get_table(SELECT=column, WHERE=db_filter, FROM='of_port_mappings', LIMIT=db_limit)
+
+        if result < 0:
+            self.logger.error("get_of_port_mappings Error %d %s", result, content)
+            raise ovimException(str(content), -result)
+        else:
+            return content
+
+    def get_dhcp_controller(self):
+        """
+        Create an host_thread object for manage openvim controller and not create a thread for itself
+        :return: dhcp_host openvim controller object
+        """
+
+        if 'openvim_controller' in self.config['host_threads']:
+            return self.config['host_threads']['openvim_controller']
+
+        bridge_ifaces = []
+        controller_ip = self.config['ovs_controller_ip']
+        ovs_controller_user = self.config['ovs_controller_user']
+
+        host_test_mode = True if self.config['mode'] == 'test' or self.config['mode'] == "OF only" else False
+        host_develop_mode = True if self.config['mode'] == 'development' else False
+
+        dhcp_host = ht.host_thread(name='openvim_controller', user=ovs_controller_user, host=controller_ip,
+                                   db=self.db_of,
+                                   db_lock=self.db_lock, test=host_test_mode,
+                                   image_path=self.config['image_path'], version=self.config['version'],
+                                   host_id='openvim_controller', develop_mode=host_develop_mode,
+                                   develop_bridge_iface=bridge_ifaces)
+
+        self.config['host_threads']['openvim_controller'] = dhcp_host
+        if not host_test_mode:
+            dhcp_host.ssh_connect()
+        return dhcp_host
+
+    def launch_dhcp_server(self, vlan, first_ip, last_ip, cidr, gateway):
+        """
+        Launch a dhcpserver base on dnsmasq attached to the net base on vlan id across the the openvim computes
+        :param vlan: vlan identifier
+        :param first_ip: First dhcp range ip
+        :param last_ip: Last dhcp range ip
+        :param cidr: net cidr
+        :param gateway: net gateway
+        :return:
+        """
+        ip_tools = IPNetwork(cidr)
+        dhcp_netmask = str(ip_tools.netmask)
+        ip_range = [first_ip, last_ip]
+
+        dhcp_path = self.config['ovs_controller_file_path']
+
+        controller_host = self.get_dhcp_controller()
+        controller_host.create_linux_bridge(vlan)
+        controller_host.create_dhcp_interfaces(vlan, first_ip, dhcp_netmask)
+        controller_host.launch_dhcp_server(vlan, ip_range, dhcp_netmask, dhcp_path, gateway)
+
+if __name__ == "__main__":
+
+    # Simple CLI: report library/database version when the module is run directly.
+    # NOTE(review): 'argparse' and 'ovim' must be provided by imports at the top of
+    # this file (not visible here) -- confirm both are in scope.
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-v","--version", help="show ovim library version", action="store_true")
+    parser.add_argument("--database-version", help="show required database version", action="store_true")
+    args = parser.parse_args()
+    if args.version:
+        print ('openvimd version {} {}'.format(ovim.get_version(), ovim.get_version_date()))
+        print ('(c) Copyright Telefonica')
+    elif args.database_version:
+        print ('required database version: {}'.format(ovim.get_database_version()))
+
diff --git a/osm_openvim/vim_db.py b/osm_openvim/vim_db.py
new file mode 100644 (file)
index 0000000..c34160d
--- /dev/null
@@ -0,0 +1,1734 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+This module interacts with the openvim database.
+It implements general table management and complex
+'transactional' writes that guarantee atomicity:
+either everything is changed or nothing is.
+'''
+
+__author__="Alfonso Tierno"
+__date__ ="$10-jul-2014 12:07:15$"
+
+import MySQLdb as mdb
+import uuid as myUuid
+import auxiliary_functions as af
+import json
+import logging
+from netaddr import IPNetwork, IPSet, IPRange, all_matching_cidrs
+
+HTTP_Bad_Request = 400
+HTTP_Unauthorized = 401 
+HTTP_Not_Found = 404 
+HTTP_Method_Not_Allowed = 405 
+HTTP_Request_Timeout = 408
+HTTP_Conflict = 409
+HTTP_Service_Unavailable = 503 
+HTTP_Internal_Server_Error = 500 
+
+
+class vim_db():
+    def __init__(self, vlan_range, logger_name= None, debug=None):
+        '''vlan_range must be a tuple (vlan_ini, vlan_end) with available vlan values for networks
+        every dataplane network contain a unique value, regardless of it is used or not 
+        ''' 
+        #initialization
+        self.net_vlan_range = vlan_range
+        self.net_vlan_usedlist = None
+        self.net_vlan_lastused = self.net_vlan_range[0] -1
+        self.debug=debug
+        if logger_name:
+            self.logger_name = logger_name
+        else:
+            self.logger_name = 'openvim.db'
+        self.logger = logging.getLogger(self.logger_name)
+        if debug:
+            self.logger.setLevel( getattr(logging, debug) )
+
+
+    def connect(self, host=None, user=None, passwd=None, database=None):
+        '''Connect to the concrete data base. 
+        The first time a valid host, user, passwd and database must be provided,
+        Following calls can skip this parameters
+        '''
+        try:
+            if host     is not None: self.host = host
+            if user     is not None: self.user = user
+            if passwd   is not None: self.passwd = passwd
+            if database is not None: self.database = database
+
+            self.con = mdb.connect(self.host, self.user, self.passwd, self.database)
+            self.logger.debug("connected to DB %s at %s@%s", self.database,self.user, self.host)
+            return 0
+        except mdb.Error as e:
+            self.logger.error("Cannot connect to DB %s at %s@%s Error %d: %s", self.database, self.user, self.host, e.args[0], e.args[1])
+            return -1
+
+    def get_db_version(self):
+        ''' Obtain the database schema version.
+        Return: (negative, text) if error or version 0.0 where schema_version table is missing
+                (version_int, version_text) if ok
+        '''
+        cmd = "SELECT version_int,version,openvim_ver FROM schema_version"
+        for retry_ in range(0,2):
+            try:
+                with self.con:
+                    self.cur = self.con.cursor()
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    rows = self.cur.fetchall()
+                    highest_version_int=0
+                    highest_version=""
+                    #print rows
+                    for row in rows: #look for the latest version
+                        if row[0]>highest_version_int:
+                            highest_version_int, highest_version = row[0:2]
+                    return highest_version_int, highest_version
+            except (mdb.Error, AttributeError) as e:
+                self.logger.error("get_db_version DB Exception %d: %s. Command %s",e.args[0], e.args[1], cmd)
+                r,c = self.format_error(e)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c    
+                
+    def disconnect(self):
+        '''Close the database connection.
+        Returns None on success (falls off the end), -1 on mdb error.
+        NOTE(review): the AttributeError branch returns a (code, text) tuple while
+        the mdb.Error branch returns a bare -1 -- callers must cope with both.
+        '''
+        try:
+            self.con.close()
+            del self.con
+        except mdb.Error as e:
+            self.logger.error("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
+            return -1
+        except AttributeError as e: #self.con not defined
+            # e[0] indexing on an exception works on python2 only
+            if e[0][-5:] == "'con'": return -1, "Database internal error, no connection."
+            else: raise
+    
+    def format_error(self, e, func, cmd, command=None, extra=None):
+        '''Creates a text error base on the produced exception
+            Params:
+                e: mdb exception
+                func: name of the function that makes the call, for logging purposes
+                cmd: database command that produce the exception
+                command: if the intention is update or delete
+                extra: extra information to add to some commands
+            Return
+                HTTP error in negative, formatted error text
+        ''' 
+                
+        self.logger.error("%s DB Exception %s. Command %s",func, str(e), cmd)
+        if type(e[0]) is str:
+            if e[0][-5:] == "'con'": return -HTTP_Internal_Server_Error, "DB Exception, no connection."
+            else: raise
+        if e.args[0]==2006 or e.args[0]==2013 : #MySQL server has gone away (((or)))    Exception 2013: Lost connection to MySQL server during query
+            #reconnect
+            self.connect()
+            return -HTTP_Request_Timeout,"Database reconnection. Try Again"
+        fk=e.args[1].find("foreign key constraint fails")
+        if fk>=0:
+            if command=="update": return -HTTP_Bad_Request, "tenant_id %s not found." % extra
+            elif command=="delete":  return -HTTP_Bad_Request, "Resource is not free. There are %s that prevent its deletion." % extra
+        de = e.args[1].find("Duplicate entry")
+        fk = e.args[1].find("for key")
+        uk = e.args[1].find("Unknown column")
+        wc = e.args[1].find("in 'where clause'")
+        fl = e.args[1].find("in 'field list'")
+        #print de, fk, uk, wc,fl
+        if de>=0:
+            if fk>=0: #error 1062
+                return -HTTP_Conflict, "Value %s already in use for %s" % (e.args[1][de+15:fk], e.args[1][fk+7:])
+        if uk>=0:
+            if wc>=0:
+                return -HTTP_Bad_Request, "Field %s cannot be used for filtering" % e.args[1][uk+14:wc]
+            if fl>=0:
+                return -HTTP_Bad_Request, "Field %s does not exist" % e.args[1][uk+14:wc]
+        return -HTTP_Internal_Server_Error, "Database internal Error %d: %s" % (e.args[0], e.args[1])
+
+    def __data2db_format(self, data):
+        '''convert data to database format. If data is None it return the 'Null' text,
+        otherwise it return the text surrounded by quotes ensuring internal quotes are escaped'''
+        if data==None:
+            return 'Null'
+        out=str(data)
+        if "'" not in out:
+            return "'" + out + "'"
+        elif '"' not in out:
+            return '"' + out + '"'
+        else:
+            return json.dumps(out)
+    
+    def __get_used_net_vlan(self):
+        #get used from database if needed
+        try:
+            cmd = "SELECT vlan FROM nets WHERE vlan>='%s' ORDER BY vlan LIMIT 25" % self.net_vlan_lastused
+            with self.con:
+                self.cur = self.con.cursor()
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                vlan_tuple = self.cur.fetchall()
+                #convert a tuple of tuples in a list of numbers
+                self.net_vlan_usedlist = []
+                for k in vlan_tuple:
+                    self.net_vlan_usedlist.append(k[0])
+            return 0
+        except (mdb.Error, AttributeError) as e:
+            return self.format_error(e, "get_free_net_vlan", cmd)
+    
+    def get_free_net_vlan(self):
+        '''obtain a vlan not used in any net
+        Scans forward from the last assigned vlan, wrapping back to the start of the
+        configured range; the used-vlan cache is refreshed from the DB in batches of 25.
+        NOTE(review): on DB failure __get_used_net_vlan returns a (code, text) tuple,
+        and 'r<0' on a tuple is False under python2, so the error is silently ignored
+        here -- confirm intended behavior.
+        '''
+        
+        while True:
+            self.logger.debug("net_vlan_lastused:%d  net_vlan_range:%d-%d  net_vlan_usedlist:%s", 
+                            self.net_vlan_lastused, self.net_vlan_range[0], self.net_vlan_range[1], str(self.net_vlan_usedlist))
+            self.net_vlan_lastused += 1
+            if self.net_vlan_lastused ==  self.net_vlan_range[1]:
+                # wrapped past the end of the range: start again and drop the cache
+                self.net_vlan_lastused =  self.net_vlan_range[0]
+                self.net_vlan_usedlist = None
+            # refresh the cache when missing, or when we consumed a full 25-entry batch
+            if self.net_vlan_usedlist is None \
+            or (len(self.net_vlan_usedlist)>0 and self.net_vlan_lastused >= self.net_vlan_usedlist[-1] and len(self.net_vlan_usedlist)==25):
+                r = self.__get_used_net_vlan()
+                if r<0: return r
+                self.logger.debug("new net_vlan_usedlist %s", str(self.net_vlan_usedlist))
+            if self.net_vlan_lastused in self.net_vlan_usedlist:
+                continue
+            else:
+                return self.net_vlan_lastused
+                
+    def get_table(self, **sql_dict):
+        ''' Obtain rows from a table.
+        Attribute sql_dict: dictionary with the following key: value
+            'SELECT': [list of fields to retrieve] (by default all)
+            'FROM': string of table name (Mandatory)
+            'WHERE': dict of key:values, translated to key=value AND ... (Optional)
+            'WHERE_NOT': dict of key:values, translated to key!=value AND ... (Optional)
+            'WHERE_OR': dict of key:values, translated to key=value OR ... (Optional)
+            'WHERE_AND_OR: str 'AND' or 'OR'(by default) mark the priority to 'WHERE AND (WHERE_OR)' or (WHERE) OR WHERE_OR' (Optional)
+            'LIMIT': limit of number of rows (Optional)
+            'DISTINCT': make a select distinct to remove repeated elements
+        Return: (number_of_rows, list of row dicts) on success,
+                (negative_http_code, error_text) on failure
+        NOTE(review): the SQL is assembled by string concatenation; values are only
+        quoted, not escaped -- callers must not pass untrusted input.
+        '''
+        #print sql_dict
+        select_ = "SELECT "
+        if sql_dict.get("DISTINCT"):
+            select_ += "DISTINCT "
+        select_ += ("*" if not sql_dict.get('SELECT') else ",".join(map(str,sql_dict['SELECT'])) )
+        #print 'select_', select_
+        from_  = "FROM " + str(sql_dict['FROM'])
+        #print 'from_', from_
+        
+        # build the WHERE clause from the three filter dictionaries
+        where_and = None
+        where_or = None
+        w = sql_dict.get('WHERE')
+        if w:
+            where_and = " AND ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"),  w.keys()) )
+        w = sql_dict.get('WHERE_NOT')
+        if w:
+            where_and_not = " AND ".join(map( lambda x: str(x) + (" is not Null" if w[x] is None else "!='"+str(w[x])+"'"),  w.keys()) )
+            if where_and:
+                where_and += " AND " + where_and_not
+            else:
+                where_and = where_and_not
+        w = sql_dict.get('WHERE_OR')
+        if w:
+            where_or =  " OR ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"),  w.keys()) )
+             
+        # WHERE_AND_OR decides how the AND-part and OR-part are parenthesized
+        if where_and!=None and where_or!=None:
+            if sql_dict.get("WHERE_AND_OR") == "AND":
+                where_ = "WHERE " + where_and + " AND (" + where_or + ")"
+            else:
+                where_ = "WHERE (" + where_and + ") OR " + where_or
+        elif where_and!=None and where_or==None:
+            where_ = "WHERE " + where_and
+        elif where_and==None and where_or!=None:
+            where_ = "WHERE " + where_or
+        else:
+            where_ = ""
+        #print 'where_', where_
+        limit_ = "LIMIT " + str(sql_dict['LIMIT']) if sql_dict.get("LIMIT") else ""
+        #print 'limit_', limit_
+        cmd =  " ".join( (select_, from_, where_, limit_) )
+        # retry once when the error is a recoverable connection timeout
+        for retry_ in range(0,2):
+            try:
+                with self.con:
+                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    rows = self.cur.fetchall()
+                    return self.cur.rowcount, rows
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "get_table", cmd)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+        
+    def new_tenant(self, tenant_dict):
+        ''' Add one tenant row into the tenants table.
+        Attribute
+            tenant_dict: dictionary with the key: value to insert
+        It checks presence of uuid and adds one automatically otherwise
+        Return: (1, uuid) if ok, (0, uuid) if nothing was inserted,
+                (negative_http_code, error_text) on failure.
+        The insert runs in two transactions: first the uuid and tenant row, then
+        the attachment of public flavors/images; a failure in the second phase
+        still reports success for the already-committed tenant.
+        '''
+        for retry_ in range(0,2):
+            cmd=""
+            inserted=-1
+            try:
+                #create uuid if not provided
+                if 'uuid' not in tenant_dict:
+                    uuid = tenant_dict['uuid'] = str(myUuid.uuid1()) # create_uuid
+                else: 
+                    uuid = str(tenant_dict['uuid'])
+                #obtain tenant_id for logs
+                tenant_id = uuid
+                with self.con:
+                    self.cur = self.con.cursor()
+                    #inserting new uuid
+                    cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','tenants')" % uuid
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    #insert tenant
+                    cmd= "INSERT INTO tenants (" + \
+                        ",".join(map(str, tenant_dict.keys() ))   + ") VALUES(" + \
+                        ",".join(map(lambda x: "Null" if x is None else "'"+str(x)+"'",tenant_dict.values() )) + ")"
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    inserted = self.cur.rowcount
+                    ##inserting new log
+                    #del tenant_dict['uuid'] # not interested for the log
+                    #cmd = "INSERT INTO logs (related,level,tenant_id,uuid,description) VALUES ('tenants','debug','%s','%s',\"new tenant %s\")" % (uuid, tenant_id, str(tenant_dict))
+                    #self.logger.debug(cmd)
+                    #self.cur.execute(cmd)  
+                    #commit transaction
+                    self.cur.close()
+                if inserted == 0: return 0, uuid
+                # second transaction: attach every public flavor/image to the new tenant
+                with self.con:
+                    self.cur = self.con.cursor()
+                    #adding public flavors
+                    cmd = "INSERT INTO tenants_flavors(flavor_id,tenant_id) SELECT uuid as flavor_id,'"+ tenant_id + "' FROM flavors WHERE public = 'yes'"
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd) 
+                    self.logger.debug("attached public flavors: %s", str(self.cur.rowcount))
+                    #rows = self.cur.fetchall()
+                    #for row in rows:
+                    #    cmd = "INSERT INTO tenants_flavors(flavor_id,tenant_id) VALUES('%s','%s')" % (row[0], tenant_id)
+                    #    self.cur.execute(cmd )
+                    #adding public images
+                    cmd = "INSERT INTO tenants_images(image_id,tenant_id) SELECT uuid as image_id,'"+ tenant_id + "' FROM images WHERE public = 'yes'"
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd) 
+                    self.logger.debug("attached public images: %s", str(self.cur.rowcount))
+                    return 1, uuid
+            except (mdb.Error, AttributeError) as e:
+                # the tenant itself was committed; report success despite the late failure
+                if inserted==1:
+                    self.logger.warning("new_tenant DB Exception %d: %s. Command %s",e.args[0], e.args[1], cmd)
+                    return 1, uuid
+                else: 
+                    r,c = self.format_error(e, "new_tenant", cmd)
+                    if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+
+    def new_row(self, table, INSERT, add_uuid=False, log=False):
+        ''' Add one row into a table.
+        Attributes
+            INSERT: dictionary with the key: value to insert
+            table: table where to insert
+            add_uuid: if True, a uuid is created and registered in the 'uuids'
+                table when INSERT does not already provide one
+            log: currently unused (log insertion is commented out below)
+        Return: (number_of_inserted_rows, uuid_or_None) if ok,
+                (negative_http_code, error_text) on failure.
+        '''
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                if add_uuid:
+                    #create uuid if not provided
+                    if 'uuid' not in INSERT:
+                        uuid = INSERT['uuid'] = str(myUuid.uuid1()) # create_uuid
+                    else: 
+                        uuid = str(INSERT['uuid'])
+                else:
+                    uuid=None
+                with self.con:
+                    self.cur = self.con.cursor()
+                    if add_uuid:
+                        #inserting new uuid
+                        cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','%s')" % (uuid, table)
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                    #insertion; values are quoted but not escaped (trusted input only)
+                    cmd= "INSERT INTO " + table +" (" + \
+                        ",".join(map(str, INSERT.keys() ))   + ") VALUES(" + \
+                        ",".join(map(lambda x: 'Null' if x is None else "'"+str(x)+"'", INSERT.values() )) + ")"
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    nb_rows = self.cur.rowcount
+                    #inserting new log
+                    #if nb_rows > 0 and log:                
+                    #    if add_uuid: del INSERT['uuid']
+                    #    #obtain tenant_id for logs
+                    #    if 'tenant_id' in INSERT: 
+                    #        tenant_id = INSERT['tenant_id']
+                    #        del INSERT['tenant_id']
+                    #    elif table == 'tenants':    
+                    #        tenant_id = uuid
+                    #    else:                       
+                    #        tenant_id = None
+                    #    if uuid is None: uuid_k = uuid_v = ""
+                    #    else: uuid_k=",uuid"; uuid_v=",'" + str(uuid) + "'"
+                    #    if tenant_id is None: tenant_k = tenant_v = ""
+                    #    else: tenant_k=",tenant_id"; tenant_v=",'" + str(tenant_id) + "'"
+                    #    cmd = "INSERT INTO logs (related,level%s%s,description) VALUES ('%s','debug'%s%s,\"new %s %s\")" \
+                    #        % (uuid_k, tenant_k, table, uuid_v, tenant_v, table[:-1], str(INSERT))
+                    #    self.logger.debug(cmd)
+                    #    self.cur.execute(cmd)                    
+                    return nb_rows, uuid
+
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "new_row", cmd)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+    
+    def __remove_quotes(self, data):
+        '''remove single quotes ' of any string content of data dictionary'''
+        for k,v in data.items():
+            if type(v) == str:
+                if "'" in v: 
+                    data[k] = data[k].replace("'","_")
+    
+    def _update_rows_internal(self, table, UPDATE, WHERE={}):
+        cmd= "UPDATE " + table +" SET " + \
+            ",".join(map(lambda x: str(x)+'='+ self.__data2db_format(UPDATE[x]),   UPDATE.keys() ));
+        if WHERE:
+            cmd += " WHERE " + " and ".join(map(lambda x: str(x)+ (' is Null' if WHERE[x] is None else"='"+str(WHERE[x])+"'" ),  WHERE.keys() ))
+        self.logger.debug(cmd)
+        self.cur.execute(cmd) 
+        nb_rows = self.cur.rowcount
+        return nb_rows, None
+
+    def update_rows(self, table, UPDATE, WHERE={}, log=False):
+        ''' Update one or several rows of a table.
+        Attributes
+            UPDATE: dictionary with the key-new_value pairs to change
+            table: table to be modified
+            WHERE: dictionary to filter target rows, key-value
+            log:   currently unused (log insertion is commented out below)
+        Return: (number_of_updated_rows, uuid_from_WHERE) if ok,
+                (negative_http_code, error_text) on failure.
+        NOTE(review): the statement building duplicates _update_rows_internal;
+        kept separate because format_error needs the cmd text on failure.
+        '''
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                #gettting uuid 
+                uuid = WHERE.get('uuid')
+
+                with self.con:
+                    self.cur = self.con.cursor()
+                    cmd= "UPDATE " + table +" SET " + \
+                        ",".join(map(lambda x: str(x)+'='+ self.__data2db_format(UPDATE[x]),   UPDATE.keys() ));
+                    if WHERE:
+                        cmd += " WHERE " + " and ".join(map(lambda x: str(x)+ (' is Null' if WHERE[x] is None else"='"+str(WHERE[x])+"'" ),  WHERE.keys() ))
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd) 
+                    nb_rows = self.cur.rowcount
+                    #if nb_rows > 0 and log:                
+                    #    #inserting new log
+                    #    if uuid is None: uuid_k = uuid_v = ""
+                    #    else: uuid_k=",uuid"; uuid_v=",'" + str(uuid) + "'"
+                    #    cmd = "INSERT INTO logs (related,level%s,description) VALUES ('%s','debug'%s,\"updating %d entry %s\")" \
+                    #        % (uuid_k, table, uuid_v, nb_rows, (str(UPDATE)).replace('"','-')  )
+                    #    self.logger.debug(cmd)
+                    #    self.cur.execute(cmd)                    
+                    return nb_rows, uuid
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "update_rows", cmd)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+            
+    def get_host(self, host_id):
+        '''Obtain the full description of one host.
+        Attributes:
+            host_id: uuid or name of the host
+        Return: (1, host_dict) on success, where host_dict contains the hosts
+            row plus a 'numas' list with cores, hugepages_consumed and
+            interfaces (PFs with their SRIOV VFs nested under 'sriovs');
+            (0, error text) if not found or ambiguous;
+            (negative, error text) on database errors.
+        '''
+        # host_id may be either the uuid or the name of the host
+        if af.check_valid_uuid(host_id):
+            where_filter="uuid='" + host_id + "'"
+        else:
+            where_filter="name='" + host_id + "'"
+        # one retry in case of database timeout
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                with self.con:
+                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
+                    #get HOST row
+                    cmd = "SELECT uuid, user, name, ip_name, description, ranking, admin_state_up, DATE_FORMAT(created_at,'%Y-%m-%dT%H:%i:%s') as created_at \
+                        FROM hosts WHERE " + where_filter
+                    self.logger.debug(cmd) 
+                    self.cur.execute(cmd)
+                    if self.cur.rowcount == 0 : 
+                        return 0, "host '" + str(host_id) +"'not found."
+                    elif self.cur.rowcount > 1 : 
+                        return 0, "host '" + str(host_id) +"' matches more than one result."
+                    host = self.cur.fetchone()
+                    host_id = host['uuid']
+                    #get the numa nodes of the host
+                    cmd = "SELECT id, numa_socket, hugepages, memory, admin_state_up FROM numas WHERE host_id = '" + str(host_id) + "'"
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    host['numas'] = self.cur.fetchall()
+                    for numa in host['numas']:
+                        #get the cores of this numa
+                        cmd = "SELECT core_id, instance_id, status, thread_id, v_thread_id FROM resources_core  WHERE numa_id = '" + str(numa['id']) + "'"
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        numa['cores'] = self.cur.fetchall()
+                        # drop fields that carry no information for free/ok cores
+                        for core in numa['cores']: 
+                            if core['instance_id'] == None: del core['instance_id'], core['v_thread_id']
+                            if core['status'] == 'ok': del core['status']
+                        #get consumed hugepage memory of this numa
+                        cmd = "SELECT sum(consumed) as hugepages_consumed FROM resources_mem  WHERE numa_id = '" + str(numa['id']) + "' GROUP BY numa_id"
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        used = self.cur.fetchone()
+                        used_= int(used['hugepages_consumed']) if used != None else 0
+                        numa['hugepages_consumed'] = used_
+                        #get ports; former stored-procedure approach kept for reference:
+                        #cmd = "CALL GetPortsFromNuma(%s)'" % str(numa['id'])
+                        #self.cur.callproc('GetPortsFromNuma', (numa['id'],) )
+                        #every time a Procedure is launched you need to close and open the cursor 
+                        #under Error 2014: Commands out of sync; you can't run this command now
+                        #self.cur.close()   
+                        #self.cur = self.con.cursor(mdb.cursors.DictCursor)
+                        cmd="SELECT Mbps, pci, status, Mbps_used, instance_id, if(id=root_id,'PF','VF') as type_,\
+                             switch_port, switch_dpid, mac, source_name\
+                             FROM resources_port WHERE numa_id=%d ORDER BY root_id, type_ DESC" %  (numa['id'])
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        ifaces = self.cur.fetchall()
+                        #The SQL ORDER BY ensures the SRIOV VFs of a port come before their PF
+                        sriovs=[]
+                        Mpbs_consumed = 0
+                        numa['interfaces'] = []
+                        for iface in ifaces:
+                            if not iface["instance_id"]:
+                                del iface["instance_id"]
+                            if iface['status'] == 'ok':
+                                del iface['status']
+                            Mpbs_consumed += int(iface["Mbps_used"])
+                            del iface["Mbps_used"]
+                            if iface["type_"]=='PF':
+                                # physical function: attach the VFs and bandwidth gathered so far
+                                if not iface["switch_dpid"]:
+                                    del iface["switch_dpid"]
+                                if not iface["switch_port"]:
+                                    del iface["switch_port"]
+                                if sriovs:
+                                    iface["sriovs"] = sriovs
+                                if Mpbs_consumed:
+                                    iface["Mpbs_consumed"] = Mpbs_consumed
+                                del iface["type_"]
+                                numa['interfaces'].append(iface)
+                                sriovs=[]
+                                Mpbs_consumed = 0
+                            else: #VF, SRIOV: accumulate until its PF arrives
+                                del iface["switch_port"]
+                                del iface["switch_dpid"]
+                                del iface["type_"]
+                                del iface["Mbps"]
+                                sriovs.append(iface)
+
+                        #delete internal field
+                        del numa['id']
+                    return 1, host
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "get_host", cmd)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+    def new_uuid(self):
+        max_retries=10
+        while max_retries>0:
+            uuid =  str( myUuid.uuid1() )
+            if self.check_uuid(uuid)[0] == 0:
+                return uuid
+            max_retries-=1
+        return uuid
+
+    def check_uuid(self, uuid):
+        '''check in the database if this uuid is already present'''
+        try:
+            cmd = "SELECT * FROM uuids where uuid='" + str(uuid) + "'"
+            with self.con:
+                self.cur = self.con.cursor(mdb.cursors.DictCursor)
+                self.logger.debug(cmd)
+                self.cur.execute(cmd)
+                rows = self.cur.fetchall()
+                return self.cur.rowcount, rows
+        except (mdb.Error, AttributeError) as e:
+            return self.format_error(e, "check_uuid", cmd)
+            
+    def __get_next_ids(self):
+        '''get next auto increment index of all table in the database'''
+        self.cur.execute("SELECT table_name,AUTO_INCREMENT FROM information_schema.tables WHERE AUTO_INCREMENT IS NOT NULL AND table_schema = DATABASE()") 
+        rows = self.cur.fetchall()
+        return self.cur.rowcount, dict(rows)
+    
+    def edit_host(self, host_id, host_dict):
+        '''Update an existing host.
+        Attributes:
+            host_id: uuid of the host to modify
+            host_dict: fields to change at the 'hosts' table; the optional
+                'numas' key holds a list of numa updates, each of which may
+                in turn carry an 'interfaces' list keyed by 'source_name'
+        Return: the result of get_host() with the refreshed host on success,
+            or (negative, error text) on errors.
+        '''
+        # one retry in case of database timeout
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                with self.con:
+                    self.cur = self.con.cursor()
+
+                    #update table hosts with the plain (non-numa) fields
+                    numa_list = host_dict.pop('numas', () )                    
+                    if host_dict:
+                        self._update_rows_internal("hosts", host_dict, {"uuid": host_id})
+                        
+                    where = {"host_id": host_id} 
+                    for numa_dict in numa_list:
+                        # a numa is addressed by (host_id, numa_socket)
+                        where["numa_socket"] = str(numa_dict.pop('numa_socket'))
+                        interface_list = numa_dict.pop('interfaces', () )
+                        if numa_dict:
+                            self._update_rows_internal("numas", numa_dict, where)
+                        for interface in interface_list:
+                            source_name = str(interface.pop("source_name") )
+                            if interface:
+                                #get interface id from resources_port
+                                cmd= "SELECT rp.id as id FROM resources_port as rp join numas as n on n.id=rp.numa_id join hosts as h on h.uuid=n.host_id " +\
+                                    "WHERE host_id='%s' and rp.source_name='%s'" %(host_id, source_name)
+                                self.logger.debug(cmd)
+                                self.cur.execute(cmd)
+                                row = self.cur.fetchone()
+                                if self.cur.rowcount<=0:
+                                    return -HTTP_Bad_Request, "Interface source_name='%s' from numa_socket='%s' not found" % (source_name, str(where["numa_socket"]))
+                                interface_id = row[0]
+                                # update the PF and all its VFs (they share root_id)
+                                self._update_rows_internal("resources_port", interface, {"root_id": interface_id})
+                return self.get_host(host_id)
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "edit_host", cmd)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+
+    def new_host(self, host_dict):
+        '''Insert a new host into the database together with its numas,
+        cores and interfaces.
+        Attributes:
+            host_dict: host description; valid 'hosts' columns plus an
+                optional 'numas' list, each numa carrying optional 'cores'
+                and 'interfaces' lists (interfaces may nest 'sriovs')
+        Return: the result of get_host() with the freshly inserted host on
+            success, or (negative, error text) on errors.
+        '''
+        # one retry in case of database timeout
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                with self.con:
+                    self.cur = self.con.cursor()
+
+                    # numa/port rows are inserted with explicit ids taken from
+                    # the tables' next AUTO_INCREMENT values
+                    result, next_ids = self.__get_next_ids()
+                    if result <= 0: return result, "Internal DataBase error getting next id of tables"
+
+                    #create uuid if not provided
+                    if 'uuid' not in host_dict:
+                        uuid = host_dict['uuid'] = str(myUuid.uuid1()) # create_uuid
+                    else: #check uuid is valid
+                        uuid = str(host_dict['uuid'])
+                    #    result, data = self.check_uuid(uuid)
+                    #    if (result == 1):
+                    #        return -1, "UUID '%s' already in use" % uuid
+
+                    #inserting new uuid
+                    cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','hosts')" % uuid
+                    self.logger.debug(cmd)
+                    result = self.cur.execute(cmd)
+
+                    #insert in table hosts
+                    numa_list = host_dict.pop('numas', [])
+                    # aggregate non-hugepage memory and non-eligible cpus at host level
+                    host_dict['RAM']=0
+                    host_dict['cpus']=0
+                    for numa in numa_list:
+                        mem_numa = numa.get('memory', 0) - numa.get('hugepages',0)
+                        if mem_numa>0:
+                            host_dict['RAM'] += mem_numa 
+                        for core in numa.get("cores", []):
+                            if "status" in core and core["status"]=="noteligible":
+                                host_dict['cpus']+=1
+                    host_dict['RAM']*=1024 # from GB to MB
+                                            
+                    keys    = ",".join(host_dict.keys())
+                    values  = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", host_dict.values() ) )
+                    cmd = "INSERT INTO hosts (" + keys + ") VALUES (" + values + ")"
+                    self.logger.debug(cmd)
+                    result = self.cur.execute(cmd)
+
+                    #insert numas
+                    nb_numas = nb_cores = nb_ifaces = 0
+                    for numa_dict in numa_list:
+                        nb_numas += 1
+                        interface_list = numa_dict.pop('interfaces', [])
+                        core_list = numa_dict.pop('cores', [])
+                        numa_dict['id'] = next_ids['numas'];   next_ids['numas'] += 1
+                        numa_dict['host_id'] = uuid
+                        keys    = ",".join(numa_dict.keys())
+                        values  = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", numa_dict.values() ) )
+                        cmd = "INSERT INTO numas (" + keys + ") VALUES (" + values + ")"
+                        self.logger.debug(cmd)
+                        result = self.cur.execute(cmd)
+
+                        #insert cores
+                        for core_dict in core_list:
+                            nb_cores += 1
+                            core_dict['numa_id'] = numa_dict['id']
+                            keys    = ",".join(core_dict.keys())
+                            values  = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", core_dict.values() ) )
+                            cmd = "INSERT INTO resources_core (" + keys + ") VALUES (" + values + ")"
+                            self.logger.debug(cmd)
+                            result = self.cur.execute(cmd)
+
+                        #insert ports (physical functions)
+                        for port_dict in interface_list:
+                            nb_ifaces += 1
+                            sriov_list = port_dict.pop('sriovs', [])
+                            port_dict['numa_id'] = numa_dict['id']
+                            # a PF is its own root (root_id == id)
+                            port_dict['id'] = port_dict['root_id'] = next_ids['resources_port']
+                            next_ids['resources_port'] += 1
+                            switch_port = port_dict.get('switch_port', None)
+                            switch_dpid = port_dict.get('switch_dpid', None)
+                            keys    = ",".join(port_dict.keys())
+                            values  = ",".join( map(lambda x:  "Null" if x is None else "'"+str(x)+"'", port_dict.values() ) )
+                            cmd = "INSERT INTO resources_port (" + keys + ") VALUES (" + values + ")"
+                            self.logger.debug(cmd)
+                            result = self.cur.execute(cmd)
+
+                            #insert sriovs into port table, inheriting switch data from their PF
+                            for sriov_dict in sriov_list:
+                                sriov_dict['switch_port'] = switch_port
+                                sriov_dict['switch_dpid'] = switch_dpid
+                                sriov_dict['numa_id'] = port_dict['numa_id']
+                                sriov_dict['Mbps'] = port_dict['Mbps']
+                                sriov_dict['root_id'] = port_dict['id']
+                                sriov_dict['id'] = next_ids['resources_port']
+                                # 'vlan' is dropped before insert — presumably not a
+                                # resources_port column; TODO confirm against the schema
+                                if "vlan" in sriov_dict:
+                                    del sriov_dict["vlan"]
+                                next_ids['resources_port'] += 1
+                                keys    = ",".join(sriov_dict.keys())
+                                values  = ",".join( map(lambda x:  "Null" if x is None else "'"+str(x)+"'", sriov_dict.values() ) )
+                                cmd = "INSERT INTO resources_port (" + keys + ") VALUES (" + values + ")"
+                                self.logger.debug(cmd)
+                                result = self.cur.execute(cmd)
+
+                    #inserted ok
+                # refresh switch-port data via stored procedure in a second transaction
+                with self.con:
+                    self.cur = self.con.cursor()
+                    self.logger.debug("callproc('UpdateSwitchPort', () )")
+                    self.cur.callproc('UpdateSwitchPort', () )
+
+                self.logger.debug("getting host '%s'",str(host_dict['uuid']))
+                return self.get_host(host_dict['uuid'])
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "new_host", cmd)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+
+    def new_flavor(self, flavor_dict, tenant_id ):
+        '''Add new flavor into the database. Create uuid if not provided
+        Atributes
+            flavor_dict: flavor dictionary with the key: value to insert. Must be valid flavors columns
+            tenant_id: if not 'any', it matches this flavor/tenant inserting at tenants_flavors table
+        Return: (result, data) where result can be
+            negative: error at inserting. data contain text
+            1, inserted, data contain inserted uuid flavor
+        '''
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                with self.con:
+                    self.cur = self.con.cursor()
+
+                    #create uuid if not provided
+                    if 'uuid' not in flavor_dict:
+                        uuid = flavor_dict['uuid'] = str(myUuid.uuid1()) # create_uuid
+                    else: #check uuid is valid
+                        uuid = str(flavor_dict['uuid'])
+                    #    result, data = self.check_uuid(uuid)
+                    #    if (result == 1):
+                    #        return -1, "UUID '%s' already in use" % uuid
+
+                    #inserting new uuid
+                    cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','flavors')" % uuid
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+
+                    #insert in table flavor
+                    keys    = ",".join(flavor_dict.keys())
+                    values  = ",".join( map(lambda x:  "Null" if x is None else "'"+str(x)+"'", flavor_dict.values() ) )
+                    cmd = "INSERT INTO flavors (" + keys + ") VALUES (" + values + ")"
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    #if result != 1: return -1, "Database Error while inserting at flavors table"
+
+                    #insert tenants_flavors
+                    if tenant_id != 'any':
+                        cmd = "INSERT INTO tenants_flavors (tenant_id,flavor_id) VALUES ('%s','%s')" % (tenant_id, uuid)
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+
+                    #inserting new log
+                    #del flavor_dict['uuid']
+                    #if 'extended' in flavor_dict: del flavor_dict['extended'] #remove two many information
+                    #cmd = "INSERT INTO logs (related,level,uuid, tenant_id, description) VALUES ('flavors','debug','%s','%s',\"new flavor: %s\")" \
+                    #    % (uuid, tenant_id, str(flavor_dict))
+                    #self.logger.debug(cmd)
+                    #self.cur.execute(cmd)                    
+
+                    #inseted ok
+                return 1, uuid
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "new_flavor", cmd, "update", tenant_id)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+        
+    def new_image(self, image_dict, tenant_id):
+        '''Add new image into the database. Create uuid if not provided
+        Atributes
+            image_dict: image dictionary with the key: value to insert. Must be valid images columns
+            tenant_id: if not 'any', it matches this image/tenant inserting at tenants_images table
+        Return: (result, data) where result can be
+            negative: error at inserting. data contain text
+            1, inserted, data contain inserted uuid image
+        '''
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                with self.con:
+                    self.cur = self.con.cursor()
+
+                    #create uuid if not provided
+                    if 'uuid' not in image_dict:
+                        uuid = image_dict['uuid'] = str(myUuid.uuid1()) # create_uuid
+                    else: #check uuid is valid
+                        uuid = str(image_dict['uuid'])
+                    #    result, data = self.check_uuid(uuid)
+                    #    if (result == 1):
+                    #        return -1, "UUID '%s' already in use" % uuid
+
+                    #inserting new uuid
+                    cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','images')" % uuid
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+
+                    #insert in table image
+                    keys    = ",".join(image_dict.keys())
+                    values  = ",".join( map(lambda x:  "Null" if x is None else "'"+str(x)+"'", image_dict.values() ) )
+                    cmd = "INSERT INTO images (" + keys + ") VALUES (" + values + ")"
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    #if result != 1: return -1, "Database Error while inserting at images table"
+
+                    #insert tenants_images
+                    if tenant_id != 'any':
+                        cmd = "INSERT INTO tenants_images (tenant_id,image_id) VALUES ('%s','%s')" % (tenant_id, uuid)
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+
+                    ##inserting new log
+                    #cmd = "INSERT INTO logs (related,level,uuid, tenant_id, description) VALUES ('images','debug','%s','%s',\"new image: %s path: %s\")" % (uuid, tenant_id, image_dict['name'], image_dict['path'])
+                    #self.logger.debug(cmd)
+                    #self.cur.execute(cmd)                    
+
+                    #inseted ok
+                return 1, uuid
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "new_image", cmd, "update", tenant_id)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+        
+    def delete_image_flavor(self, item_type, item_id, tenant_id):
+        '''Delete an image or flavor from the database.
+        Attributes:
+            item_type: must be 'image' or 'flavor'
+            item_id: the uuid of the item
+            tenant_id: the associated tenant; 'any' means all tenants
+        If tenant_id is not 'any', it deletes from tenants_images/flavors,
+        which means this image/flavor is used by this tenant, and if successful
+        it then tries to delete from images/flavors in case this is not public;
+        that only succeeds if the image is private and not used by other tenants.
+        If tenant_id is 'any', it tries to delete from both tables in the SAME
+        transaction so that the image/flavor is completely deleted from all
+        tenants or nothing is.
+        Return: (>=1, text) deleted, (0, text) not found, (negative, text) on error.
+        '''
+        for retry_ in range(0,2):
+            # deleted: rows removed from tenants_*s; deleted_item: rows removed from *s
+            deleted = -1
+            deleted_item = -1
+            result = (-HTTP_Internal_Server_Error, "internal error")
+            cmd=""
+            try:
+                with self.con:
+                    self.cur = self.con.cursor()
+                    cmd = "DELETE FROM tenants_%ss WHERE %s_id = '%s'" % (item_type, item_type, item_id)
+                    if tenant_id != 'any':
+                        cmd += " AND tenant_id = '%s'" % tenant_id
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    deleted = self.cur.rowcount
+                    if tenant_id == 'any': #delete from images/flavors in the SAME transaction
+                        cmd = "DELETE FROM %ss WHERE uuid = '%s'" % (item_type, item_id)
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        deleted = self.cur.rowcount
+                        if deleted>=1:
+                            #delete the uuid registration as well
+                            cmd = "DELETE FROM uuids WHERE uuid = '%s'" % item_id
+                            self.logger.debug(cmd)
+                            self.cur.execute(cmd)
+                            return deleted, "%s '%s' completely deleted" % (item_type, item_id)
+                        return 0, "%s '%s' not found" % (item_type, item_id)
+                    
+                    if deleted == 1:
+                        #commit this transaction before trying the optional global delete
+                        self.cur.close()
+                #if tenant!=any, delete from images/flavors in ANOTHER transaction;
+                #a failure here means dependencies exist, so no error is returned
+                if deleted==1:
+                    with self.con:
+                        self.cur = self.con.cursor()
+
+                        #delete image/flavor only if not public
+                        cmd = "DELETE FROM %ss WHERE uuid = '%s' AND public = 'no'" % (item_type, item_id)
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        deleted_item = self.cur.rowcount
+                        if deleted_item == 1:
+                            #delete the uuid registration as well
+                            cmd = "DELETE FROM uuids WHERE uuid = '%s'" % item_id
+                            self.logger.debug(cmd)
+                            self.cur.execute(cmd)
+            except (mdb.Error, AttributeError) as e:
+                # only report the error when the first (tenant) delete failed
+                if deleted <0: 
+                    result = self.format_error(e, "delete_"+item_type, cmd, "delete", "servers")
+            finally:
+                if deleted==1:
+                    return 1, "%s '%s' from tenant '%s' %sdeleted" % \
+                    (item_type, item_id, tenant_id, "completely " if deleted_item==1 else "")
+                elif deleted==0:
+                    return 0, "%s '%s' from tenant '%s' not found" % (item_type, item_id, tenant_id)
+                else: 
+                    if result[0]!=-HTTP_Request_Timeout or retry_==1: return result  
+            
+    def delete_row(self, table, uuid):
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                with self.con:
+                    #delete host
+                    self.cur = self.con.cursor()
+                    cmd = "DELETE FROM %s WHERE uuid = '%s'" % (table, uuid)
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    deleted = self.cur.rowcount
+                    if deleted == 1:
+                        #delete uuid
+                        if table == 'tenants': tenant_str=uuid
+                        else: tenant_str='Null'
+                        self.cur = self.con.cursor()
+                        cmd = "DELETE FROM uuids WHERE uuid = '%s'" % uuid
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        ##inserting new log
+                        #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) VALUES ('%s','debug','%s','%s','delete %s')" % (table, uuid, tenant_str, table[:-1])
+                        #self.logger.debug(cmd)
+                        #self.cur.execute(cmd)                    
+                return deleted, table[:-1] + " '%s' %s" %(uuid, "deleted" if deleted==1 else "not found")
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "delete_row", cmd, "delete", 'instances' if table=='hosts' or table=='tenants' else 'dependencies')
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+
+    def delete_row_by_key(self, table, key, value):
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                with self.con:
+                    #delete host
+                    self.cur = self.con.cursor()
+                    cmd = "DELETE FROM %s" % (table)
+                    if key!=None:
+                        if value!=None:
+                            cmd += " WHERE %s = '%s'" % (key, value)
+                        else:
+                            cmd += " WHERE %s is null" % (key)
+                    else: #delete all
+                        pass
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    deleted = self.cur.rowcount
+                    if deleted < 1:
+                        return -1, 'Not found'
+                        #delete uuid
+                    return 0, deleted
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "delete_row_by_key", cmd, "delete", 'instances' if table=='hosts' or table=='tenants' else 'dependencies')
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+                
+    def delete_row_by_dict(self, **sql_dict):
+        ''' Deletes rows from a table.
+        Attribute sql_dict: dictionary with the following key: value
+            'FROM': string of table name (Mandatory)
+            'WHERE': dict of key:values, translated to key=value AND ... (Optional)
+            'WHERE_NOT': dict of key:values, translated to key<>value AND ... (Optional)
+            'WHERE_NOTNULL': list or tuple of items that must not be null in a where ... (Optional)
+            'LIMIT': limit of number of rows (Optional)
+        Return: the (number of items deleted, descriptive text) if ok; (negative, descriptive text) if error
+        '''
+        # build the DELETE statement piece by piece
+        from_  = "FROM " + str(sql_dict['FROM'])
+        if 'WHERE' in sql_dict and len(sql_dict['WHERE']) > 0:
+            w=sql_dict['WHERE']
+            where_ = "WHERE " + " AND ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"),  w.keys()) ) 
+        else: where_ = ""
+        # WHERE_NOT conditions are ANDed onto any existing WHERE clause
+        if 'WHERE_NOT' in sql_dict and len(sql_dict['WHERE_NOT']) > 0: 
+            w=sql_dict['WHERE_NOT']
+            where_2 = " AND ".join(map( lambda x: str(x) + (" is not Null" if w[x] is None else "<>'"+str(w[x])+"'"),  w.keys()) )
+            if len(where_)==0:   where_ = "WHERE " + where_2
+            else:                where_ = where_ + " AND " + where_2
+        # WHERE_NOTNULL is a plain list of columns, not a dict
+        if 'WHERE_NOTNULL' in sql_dict and len(sql_dict['WHERE_NOTNULL']) > 0: 
+            w=sql_dict['WHERE_NOTNULL']
+            where_2 = " AND ".join(map( lambda x: str(x) + " is not Null",  w) )
+            if len(where_)==0:   where_ = "WHERE " + where_2
+            else:                where_ = where_ + " AND " + where_2
+        limit_ = "LIMIT " + str(sql_dict['LIMIT']) if 'LIMIT' in sql_dict else ""
+        cmd =  " ".join( ("DELETE", from_, where_, limit_) )
+        self.logger.debug(cmd)
+        # one retry in case of database timeout
+        for retry_ in range(0,2):
+            try:
+                with self.con:
+                    self.cur = self.con.cursor()
+                    self.cur.execute(cmd)
+                    deleted = self.cur.rowcount
+                # note: FROM table name is assumed plural; [:-1] strips the final 's'
+                return deleted, "%d deleted from %s" % (deleted, sql_dict['FROM'][:-1] )
+            except (mdb.Error, AttributeError) as e:
+                r,c =  self.format_error(e, "delete_row_by_dict", cmd, "delete", 'dependencies')
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+
+    
    def get_instance(self, instance_id):
        """Retrieve the full description of an instance (VM) from the database.
        :param instance_id: instance uuid
        :return: (1, instance_dict) if found; (0, error_text) if not found;
            (negative, error_text) on database error (retried once on timeout).
            instance_dict may include 'networks' (bridge/ovs interfaces) and
            'extended' with 'devices' and per-numa allocated resources ('numas').
        """
        for retry_ in range(0,2):  # retry once on recoverable DB errors (timeout)
            cmd=""
            try:
                with self.con:
                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
                    #get INSTANCE
                    cmd = "SELECT uuid, name, description, progress, host_id, flavor_id, image_id, status, last_error, "\
                        "tenant_id, ram, vcpus, created_at FROM instances WHERE uuid='{}'".format(instance_id)
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)
                    if self.cur.rowcount == 0 : return 0, "instance '" + str(instance_id) +"'not found."
                    instance = self.cur.fetchone()
                    #get networks
                    cmd = "SELECT uuid as iface_id, net_id, mac as mac_address, ip_address, name, Mbps as bandwidth, "\
                        "vpci, model FROM ports WHERE (type='instance:bridge' or type='instance:ovs') AND "\
                        "instance_id= '{}'".format(instance_id)
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)
                    if self.cur.rowcount > 0 :
                        instance['networks'] = self.cur.fetchall()

                    #get extended
                    extended = {}
                    #get devices
                    cmd = "SELECT type, vpci, image_id, xml,dev FROM instance_devices WHERE instance_id = '%s' " %  str(instance_id)
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)
                    if self.cur.rowcount > 0 :
                        extended['devices'] = self.cur.fetchall()
                    #get numas
                    numas = []
                    cmd = "SELECT id, numa_socket as source FROM numas WHERE host_id = '" + str(instance['host_id']) + "'"
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)
                    host_numas = self.cur.fetchall()
                    #print 'host_numas', host_numas
                    for k in host_numas:
                        numa_id = str(k['id'])
                        numa_dict ={}
                        #get memory
                        cmd = "SELECT consumed FROM resources_mem WHERE instance_id = '%s' AND numa_id = '%s'" % ( instance_id, numa_id)
                        self.logger.debug(cmd)
                        self.cur.execute(cmd)
                        if self.cur.rowcount > 0:
                            mem_dict = self.cur.fetchone()
                            numa_dict['memory'] = mem_dict['consumed']
                        #get full cores
                        # plain (non-dict) cursor: each fetched core row is the tuple
                        # (core_id, paired, v1, v2, nb, t1, t2) indexed positionally below
                        cursor2 = self.con.cursor()
                        cmd = "SELECT core_id, paired, MIN(v_thread_id) as v1, MAX(v_thread_id) as v2, COUNT(instance_id) as nb, MIN(thread_id) as t1, MAX(thread_id) as t2 FROM resources_core WHERE instance_id = '%s' AND numa_id = '%s' GROUP BY core_id,paired" % ( str(instance_id), numa_id)
                        self.logger.debug(cmd)
                        cursor2.execute(cmd)
                        core_list = [];     core_source = []
                        paired_list = [];   paired_source = []
                        thread_list = [];   thread_source = []
                        if cursor2.rowcount > 0:
                            cores = cursor2.fetchall()
                            for core in cores:
                                if core[4] == 2: #number of used threads from core
                                    if core[3] == core[2]:  #only one thread asigned to VM, so completely core
                                        core_list.append(core[2])
                                        core_source.append(core[5])
                                    elif core[1] == 'Y':
                                        # paired='Y': both hyperthreads given to the VM as a pair
                                        paired_list.append(core[2:4])
                                        paired_source.append(core[5:7])
                                    else:
                                        thread_list.extend(core[2:4])
                                        thread_source.extend(core[5:7])

                                else:
                                    thread_list.append(core[2])
                                    thread_source.append(core[5])
                            if len(core_list) > 0:
                                numa_dict['cores'] = len(core_list)
                                numa_dict['cores-id'] = core_list
                                numa_dict['cores-source'] = core_source
                            if len(paired_list) > 0:
                                numa_dict['paired-threads'] = len(paired_list)
                                numa_dict['paired-threads-id'] = paired_list
                                numa_dict['paired-threads-source'] = paired_source
                            if len(thread_list) > 0:
                                numa_dict['threads'] = len(thread_list)
                                numa_dict['threads-id'] = thread_list
                                numa_dict['threads-source'] = thread_source

                        #get dedicated ports and SRIOV
                        cmd = "SELECT port_id as iface_id, p.vlan as vlan, p.mac as mac_address, net_id, if(model='PF',\
                            'yes',if(model='VF','no','yes:sriov')) as dedicated, rp.Mbps as bandwidth, name, vpci, \
                            pci as source \
                            FROM resources_port as rp join ports as p on port_id=uuid  WHERE p.instance_id = '%s' AND numa_id = '%s' and p.type='instance:data'" % (instance_id, numa_id)
                        self.logger.debug(cmd)
                        self.cur.execute(cmd)
                        if self.cur.rowcount > 0:
                            numa_dict['interfaces'] = self.cur.fetchall()
                            #print 'interfaces', numa_dict

                        # only report numas where this instance actually has resources
                        if len(numa_dict) > 0 :
                            numa_dict['source'] = k['source'] #numa socket
                            numas.append(numa_dict)

                    if len(numas) > 0 :  extended['numas'] = numas
                    if len(extended) > 0 :  instance['extended'] = extended
                    af.DeleteNone(instance)  # strip None values before returning
                    return 1, instance
            except (mdb.Error, AttributeError) as e:
                r,c = self.format_error(e, "get_instance", cmd)
                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+        
    def get_numas(self, requirements, prefered_host_id=None, only_of_ports=True):
        '''Obtain a valid NUMA/HOST for deployment a VM
        requirements: contain requirement regarding:
            requirements['ram']: Non huge page memory in MB; 0 to skip 
            requirements['vcpus']: Non isolated cpus; 0 to skip 
            requirements['numa']: Requiremets to be fixed in ONE Numa node
                requirements['numa']['memory']: Huge page memory in GB at ; 0 for any 
                requirements['numa']['proc_req_type']: Type of processor, cores or threads 
                requirements['numa']['proc_req_nb']: Number of isolated cpus  
                requirements['numa']['port_list']: Physical NIC ports list ; [] for any 
                requirements['numa']['sriov_list']: Virtual function NIC ports list ; [] for any
        prefered_host_id: if not None return this host if it match 
        only_of_ports: if True only those ports conected to the openflow (of) are valid,
            that is, with switch_port information filled; if False, all NIC ports are valid. 
        Return: (0, {'numa_id':.., 'host_id':..}) if a valid numa/host is found;
            (-1, error_text) if there is no room; (negative, error_text) on DB error.
        Side effect: the matched entries of port_list/sriov_list are filled in place
            with 'port_id', 'vlan', 'mac' and 'switch_port' of the reserved port.
        '''
         
        for retry_ in range(0,2):  # retry once on recoverable DB errors (timeout)
            cmd=""
            try:
                with self.con:              
#                     #Find numas of prefered host
#                     prefered_numas = ()
#                     if prefered_host_id != None:
#                         self.cur = self.con.cursor()
#                         self.cur.execute("SELECT id FROM numas WHERE host_id='%s'" + prefered_host_id)
#                         prefered_numas = self.cur.fetchall()
#                         self.cur.close()
                        
                    #Find valid host for the ram and vcpus
                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
                    # cmd is only built for logging; the query runs through callproc
                    cmd = "CALL GetHostByMemCpu(%s, %s)" % (str(requirements['ram']), str(requirements['vcpus']))
                    self.logger.debug(cmd)   
                    self.cur.callproc('GetHostByMemCpu', (str(requirements['ram']), str(requirements['vcpus'])) )
                    valid_hosts = self.cur.fetchall()
                    self.cur.close()   
                    self.cur = self.con.cursor()
                    match_found = False
                    if len(valid_hosts)<=0:
                        error_text = 'No room at data center. Cannot find a host with %s MB memory and %s cpus available' % (str(requirements['ram']), str(requirements['vcpus'])) 
                        #self.logger.debug(error_text)
                        return -1, error_text
                    
                    #elif req_numa != None:
                    #Find valid numa nodes for memory requirements
                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
                    cmd = "CALL GetNumaByMemory(%s)" % str(requirements['numa']['memory'])
                    self.logger.debug(cmd)   
                    self.cur.callproc('GetNumaByMemory', (requirements['numa']['memory'],) )
                    valid_for_memory = self.cur.fetchall()
                    self.cur.close()   
                    self.cur = self.con.cursor()
                    if len(valid_for_memory)<=0:
                        error_text = 'No room at data center. Cannot find a host with %s GB Hugepages memory available' % str(requirements['numa']['memory']) 
                        #self.logger.debug(error_text)
                        return -1, error_text

                    #Find valid numa nodes for processor requirements
                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
                    if requirements['numa']['proc_req_type'] == 'threads':
                        cpu_requirement_text='cpu-threads'
                        cmd = "CALL GetNumaByThread(%s)" % str(requirements['numa']['proc_req_nb'])
                        self.logger.debug(cmd) 
                        self.cur.callproc('GetNumaByThread', (requirements['numa']['proc_req_nb'],) )
                    else:
                        cpu_requirement_text='cpu-cores'
                        cmd = "CALL GetNumaByCore(%s)" % str(requirements['numa']['proc_req_nb'])
                        self.logger.debug(cmd) 
                        self.cur.callproc('GetNumaByCore', (requirements['numa']['proc_req_nb'],) )
                    valid_for_processor = self.cur.fetchall()
                    self.cur.close()   
                    self.cur = self.con.cursor()
                    if len(valid_for_processor)<=0:
                        error_text = 'No room at data center. Cannot find a host with %s %s available' % (str(requirements['numa']['proc_req_nb']),cpu_requirement_text)  
                        #self.logger.debug(error_text)
                        return -1, error_text

                    #Find the numa nodes that comply for memory and processor requirements
                    #sorting from less to more memory capacity
                    valid_numas = []
                    for m_numa in valid_for_memory:
                        numa_valid_for_processor = False
                        for p_numa in valid_for_processor:
                            if m_numa['numa_id'] == p_numa['numa_id']:
                                numa_valid_for_processor = True
                                break
                        numa_valid_for_host = False
                        prefered_numa = False
                        for p_host in valid_hosts:
                            if m_numa['host_id'] == p_host['uuid']:
                                numa_valid_for_host = True
                                if p_host['uuid'] == prefered_host_id:
                                    prefered_numa = True
                                break
                        if numa_valid_for_host and numa_valid_for_processor:
                            # numas of the preferred host are tried first
                            if prefered_numa:
                                valid_numas.insert(0, m_numa['numa_id'])
                            else:
                                valid_numas.append(m_numa['numa_id'])
                    if len(valid_numas)<=0:
                        error_text = 'No room at data center. Cannot find a host with %s MB hugepages memory and %s %s available in the same numa' %\
                            (requirements['numa']['memory'], str(requirements['numa']['proc_req_nb']),cpu_requirement_text)  
                        #self.logger.debug(error_text)
                        return -1, error_text
                    
    #                 print 'Valid numas list: '+str(valid_numas)

                    #Find valid numa nodes for interfaces requirements
                    #For each valid numa we will obtain the number of available ports and check if these are valid          
                    match_found = False    
                    for numa_id in valid_numas:
    #                     print 'Checking '+str(numa_id)
                        match_found = False
                        self.cur = self.con.cursor(mdb.cursors.DictCursor)
                        if only_of_ports:
                            cmd="CALL GetAvailablePorts(%s)" % str(numa_id) 
                            self.logger.debug(cmd)
                            self.cur.callproc('GetAvailablePorts', (numa_id,) )
                        else:
                            cmd="CALL GetAllAvailablePorts(%s)" % str(numa_id) 
                            self.logger.debug(cmd)
                            self.cur.callproc('GetAllAvailablePorts', (numa_id,) )
                        available_ports = self.cur.fetchall()
                        self.cur.close()   
                        self.cur = self.con.cursor()

                        #Set/reset reservations
                        # reservations are in-memory only; nothing is written to DB here
                        for port in available_ports:
                            port['Mbps_reserved'] = 0
                            port['SRIOV_reserved'] = 0

                        #Try to allocate physical ports
                        physical_ports_found = True
                        for iface in requirements['numa']['port_list']:
    #                         print '\t\tchecking iface: '+str(iface)
                            portFound = False
                            for port in available_ports:
    #                             print '\t\t\tfor port: '+str(port)
                                #If the port is not empty continue
                                if port['Mbps_free'] != port['Mbps'] or port['Mbps_reserved'] != 0:
    #                                 print '\t\t\t\t Not empty port'
                                    continue;
                                #If the port speed is not enough continue
                                if port['Mbps'] < iface['bandwidth']:
    #                                 print '\t\t\t\t Not enough speed'
                                    continue;

                                #Otherwise this is a valid port  
                                port['Mbps_reserved'] = port['Mbps']
                                port['SRIOV_reserved'] = 0
                                iface['port_id'] = port['port_id']
                                iface['vlan'] = None
                                iface['mac'] = port['mac']
                                iface['switch_port'] = port['switch_port']
    #                             print '\t\t\t\t Dedicated port found '+str(port['port_id'])
                                portFound = True
                                break;

                            #if all ports have been checked and no match has been found
                            #this is not a valid numa
                            if not portFound:
    #                             print '\t\t\t\t\tAll ports have been checked and no match has been found for numa '+str(numa_id)+'\n\n'
                                physical_ports_found = False
                                break

                        #if there is no match continue checking the following numa
                        if not physical_ports_found:
                            continue

                        #Try to allocate SR-IOVs
                        sriov_ports_found = True
                        for iface in requirements['numa']['sriov_list']:
    #                         print '\t\tchecking iface: '+str(iface)
                            portFound = False
                            for port in available_ports:
    #                             print '\t\t\tfor port: '+str(port)
                                #If there are not available SR-IOVs continue
                                if port['availableSRIOV'] - port['SRIOV_reserved'] <= 0:
    #                                 print '\t\t\t\t Not enough SR-IOV'
                                    continue;
                                #If the port free speed is not enough continue
                                if port['Mbps_free'] - port['Mbps_reserved'] < iface['bandwidth']:
    #                                 print '\t\t\t\t Not enough speed'
                                    continue;

                                #Otherwise this is a valid port  
                                port['Mbps_reserved'] += iface['bandwidth']
                                port['SRIOV_reserved'] += 1
    #                             print '\t\t\t\t SR-IOV found '+str(port['port_id'])
                                iface['port_id'] = port['port_id']
                                iface['vlan'] = None
                                iface['mac'] = port['mac']
                                iface['switch_port'] = port['switch_port']
                                portFound = True
                                break;

                            #if all ports have been checked and no match has been found
                            #this is not a valid numa
                            if not portFound:
    #                             print '\t\t\t\t\tAll ports have been checked and no match has been found for numa '+str(numa_id)+'\n\n'
                                sriov_ports_found = False
                                break

                        #if there is no match continue checking the following numa
                        if not sriov_ports_found:
                            continue


                        if sriov_ports_found and physical_ports_found:
                            match_found = True
                            break

                    if not match_found:
                        error_text = 'No room at data center. Cannot find a host with the required hugepages, vcpus and interfaces'  
                        #self.logger.debug(error_text)
                        return -1, error_text

                    #self.logger.debug('Full match found in numa %s', str(numa_id))

                # executed after leaving the 'with' block (transaction already closed).
                # match_found is guaranteed True here (otherwise we returned above),
                # so numa_id is bound to the matched numa of the loop
                for numa in valid_for_processor:
                    if numa_id==numa['numa_id']:
                        host_id=numa['host_id']
                        break
                return 0, {'numa_id':numa_id, 'host_id': host_id, }
            except (mdb.Error, AttributeError) as e:
                r,c = self.format_error(e, "get_numas", cmd)
                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+
    def new_instance(self, instance_dict, nets, ports_to_free):
        """Insert a new instance (VM) and allocate its resources in the database.
        :param instance_dict: instance description; keys 'uuid' (optional),
            'extended' (numas/devices) and 'bridged-ifaces' are consumed here,
            remaining keys are inserted as columns of the 'instances' table
        :param nets: list, extended in place with the net ids of the attached
            dataplane interfaces, so the caller can update those nets
        :param ports_to_free: list, extended in place with external ports that
            are no longer used by any instance, so the caller can restore them
        :return: (1, uuid) on success; (negative, error_text) on DB error
        """
        for retry_ in range(0,2):  # retry once on recoverable DB errors (timeout)
            cmd=""
            try:
                with self.con:
                    self.cur = self.con.cursor()

                    #create uuid if not provided
                    if 'uuid' not in instance_dict:
                        uuid = instance_dict['uuid'] = str(myUuid.uuid1()) # create_uuid
                    else: #check uuid is valid
                        uuid = str(instance_dict['uuid'])


                    #inserting new uuid
                    cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'instances')" % (uuid, uuid)
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)

                    #insert in table instance
                    # pop the keys that are handled separately before building the INSERT
                    extended = instance_dict.pop('extended', None);
                    bridgedifaces = instance_dict.pop('bridged-ifaces', () );

                    keys    = ",".join(instance_dict.keys())
                    values  = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", instance_dict.values() ) )
                    cmd = "INSERT INTO instances (" + keys + ") VALUES (" + values + ")"
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)
                    #if result != 1: return -1, "Database Error while inserting at instances table"

                    #insert resources
                    nb_bridge_ifaces = nb_cores = nb_ifaces = nb_numas = 0
                    #insert bridged_ifaces

                    for iface in bridgedifaces:
                        #generate and insert a iface uuid
                        if 'enable_dhcp' in iface and iface['enable_dhcp']:
                            # pick a free ip from the dhcp range, avoiding already used ones
                            dhcp_first_ip = iface["dhcp_first_ip"]
                            del iface["dhcp_first_ip"]
                            dhcp_last_ip = iface["dhcp_last_ip"]
                            del iface["dhcp_last_ip"]
                            dhcp_cidr = iface["cidr"]
                            del iface["cidr"]
                            del iface["enable_dhcp"]
                            used_dhcp_ips = self._get_dhcp_ip_used_list(iface["net_id"])
                            iface["ip_address"] = self.get_free_ip_from_range(dhcp_first_ip, dhcp_last_ip,
                                                                              dhcp_cidr, used_dhcp_ips)

                        iface['uuid'] = str(myUuid.uuid1()) # create_uuid
                        cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'ports')" % (iface['uuid'], uuid)
                        self.logger.debug(cmd)
                        self.cur.execute(cmd)
                        #insert iface
                        iface['instance_id'] = uuid
                        # iface['type'] = 'instance:bridge'
                        if 'name' not in iface: iface['name']="br"+str(nb_bridge_ifaces)
                        iface['Mbps']=iface.pop('bandwidth', None)
                        if 'mac_address' not in iface:
                            iface['mac'] = af.gen_random_mac()
                        else:
                            iface['mac'] = iface['mac_address']
                            del iface['mac_address']
                        #iface['mac']=iface.pop('mac_address', None)  #for leaving mac generation to libvirt
                        keys    = ",".join(iface.keys())
                        values  = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", iface.values() ) )
                        cmd = "INSERT INTO ports (" + keys + ") VALUES (" + values + ")"
                        self.logger.debug(cmd)
                        self.cur.execute(cmd)
                        nb_bridge_ifaces += 1

                    if extended is not None:
                        if 'numas' not in extended or extended['numas'] is None: extended['numas'] = ()
                        for numa in extended['numas']:
                            nb_numas += 1
                            #cores
                            if 'cores' not in numa or numa['cores'] is None: numa['cores'] = ()
                            for core in numa['cores']:
                                nb_cores += 1
                                # claim the physical core/thread row for this instance
                                cmd = "UPDATE resources_core SET instance_id='%s'%s%s WHERE id='%s'" \
                                    % (uuid, \
                                    (",v_thread_id='" + str(core['vthread']) + "'") if 'vthread' in core else '', \
                                    (",paired='"      + core['paired']  + "'") if 'paired' in core else '', \
                                    core['id'] )
                                self.logger.debug(cmd)
                                self.cur.execute(cmd)
                            #interfaces
                            if 'interfaces' not in numa or numa['interfaces'] is None: numa['interfaces'] = ()
                            for iface in numa['interfaces']:
                                #generate and insert an uuid; iface[id]=iface_uuid; iface[uuid]= net_id
                                iface['id'] = str(myUuid.uuid1()) # create_uuid
                                cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'ports')" % (iface['id'], uuid)
                                self.logger.debug(cmd)
                                self.cur.execute(cmd)
                                nb_ifaces += 1
                                # if Mbps_used not supplied, reserve the port's full 'Mbps'
                                mbps_=("'"+str(iface['Mbps_used'])+"'") if 'Mbps_used' in iface and iface['Mbps_used'] is not None else "Mbps"
                                if iface["dedicated"]=="yes": 
                                    iface_model="PF"
                                elif iface["dedicated"]=="yes:sriov": 
                                    iface_model="VFnotShared"
                                elif iface["dedicated"]=="no": 
                                    iface_model="VF"
                                #else error
                                # NOTE(review): if 'dedicated' has any other value, iface_model is
                                # left unbound and the INSERT below raises NameError (caught as a
                                # generic AttributeError/mdb.Error path) — confirm upstream validation
                                INSERT=(iface['mac_address'], iface['switch_port'], iface.get('vlan',None), 'instance:data', iface['Mbps_used'], iface['id'],
                                        uuid, instance_dict['tenant_id'], iface.get('name',None), iface.get('vpci',None), iface.get('uuid',None), iface_model )
                                cmd = "INSERT INTO ports (mac,switch_port,vlan,type,Mbps,uuid,instance_id,tenant_id,name,vpci,net_id, model) " + \
                                       " VALUES (" + ",".join(map(lambda x: 'Null' if x is None else "'"+str(x)+"'", INSERT )) + ")"
                                self.logger.debug(cmd)
                                self.cur.execute(cmd)
                                if 'uuid' in iface:
                                    nets.append(iface['uuid'])
                                    
                                #discover if this port is not used by anyone
                                cmd = "SELECT source_name, mac FROM ( SELECT root_id, count(instance_id) as used FROM resources_port" \
                                      " WHERE root_id=(SELECT root_id from resources_port WHERE id='%s')"\
                                      " GROUP BY root_id ) AS A JOIN resources_port as B ON A.root_id=B.id AND A.used=0" % iface['port_id'] 
                                self.logger.debug(cmd)
                                self.cur.execute(cmd)
                                ports_to_free += self.cur.fetchall()

                                cmd = "UPDATE resources_port SET instance_id='%s', port_id='%s',Mbps_used=%s WHERE id='%s'" \
                                    % (uuid, iface['id'], mbps_, iface['port_id'])
                                #if Mbps_used not suply, set the same value of 'Mpbs', that is the total
                                self.logger.debug(cmd)
                                self.cur.execute(cmd)
                            #memory
                            if 'memory' in numa and numa['memory'] is not None and numa['memory']>0:
                                cmd = "INSERT INTO resources_mem (numa_id, instance_id, consumed) VALUES ('%s','%s','%s')" % (numa['numa_id'], uuid, numa['memory'])
                                self.logger.debug(cmd)
                                self.cur.execute(cmd)
                        if 'devices' not in extended or extended['devices'] is None: extended['devices'] = ()
                        for device in extended['devices']:
                            if 'vpci' in device:    vpci = "'" + device['vpci'] + "'"
                            else:                   vpci = 'Null'
                            if 'image_id' in device: image_id = "'" + device['image_id'] + "'"
                            else:                    image_id = 'Null'
                            if 'xml' in device: xml = "'" + device['xml'] + "'"
                            else:                    xml = 'Null'
                            if 'dev' in device: dev = "'" + device['dev'] + "'"
                            else:                    dev = 'Null'
                            cmd = "INSERT INTO instance_devices (type, instance_id, image_id, vpci, xml, dev) VALUES ('%s','%s', %s, %s, %s, %s)" % \
                                (device['type'], uuid, image_id, vpci, xml, dev)
                            self.logger.debug(cmd)
                            self.cur.execute(cmd)
                    ##inserting new log
                    #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('instances','debug','%s','new instance: %d numas, %d theads, %d ifaces %d bridge_ifaces')" % (uuid, nb_numas, nb_cores, nb_ifaces, nb_bridge_ifaces)
                    #self.logger.debug(cmd)
                    #self.cur.execute(cmd)                    

                    #inseted ok
                return 1, uuid 
            except (mdb.Error, AttributeError) as e:
                r,c = self.format_error(e, "new_instance", cmd)
                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+
+    def get_free_ip_from_range(self, first_ip, last_ip, cidr, ip_used_list):
+        """
+        Calculate a free IP from a range given
+        :param first_ip: First dhcp ip range
+        :param last_ip: Last dhcp ip range
+        :param cidr: net cidr
+        :param ip_used_list: contain all used ips to avoid ip collisions
+        :return:
+        """
+
+        ip_tools = IPNetwork(cidr)
+        cidr_len = ip_tools.prefixlen
+        ips = IPNetwork(first_ip + '/' + str(cidr_len))
+        ip_used_list.append(str(ips[0])) # first ip
+        ip_used_list.append(str(ips[1])) # gw ip
+        ip_used_list.append(str(ips[-1])) # broadcast ip
+        for vm_ip in ips:
+            if str(vm_ip) not in ip_used_list:
+                return vm_ip
+
+        return None
+
+    def _get_dhcp_ip_used_list(self, net_id):
+        """
+        REtreive from DB all ips already used by the dhcp server for a given net
+        :param net_id:
+        :return:
+        """
+        WHERE={'type': 'instance:ovs', 'net_id': net_id}
+        for retry_ in range(0, 2):
+            cmd = ""
+            self.cur = self.con.cursor(mdb.cursors.DictCursor)
+            select_ = "SELECT uuid, ip_address FROM ports "
+
+            if WHERE is None or len(WHERE) == 0:
+                where_ = ""
+            else:
+                where_ = "WHERE " + " AND ".join(
+                    map(lambda x: str(x) + (" is Null" if WHERE[x] is None else "='" + str(WHERE[x]) + "'"),
+                        WHERE.keys()))
+            limit_ = "LIMIT 100"
+            cmd = " ".join((select_, where_, limit_))
+            self.logger.debug(cmd)
+            self.cur.execute(cmd)
+            ports = self.cur.fetchall()
+            ip_address_list = []
+            for port in ports:
+                ip_address_list.append(port['ip_address'])
+
+            return ip_address_list
+
+
+    def delete_instance(self, instance_id, tenant_id, net_dataplane_list, ports_to_free, net_ovs_list, logcause="requested by http"):
+        """Delete an instance owned by a tenant and release its resources.
+
+        Output parameters, filled in place for the caller to act upon:
+        - net_dataplane_list: appended with the dataplane net ids the VM was attached to
+        - ports_to_free: extended with (source_name, mac) rows of dataplane interfaces released
+        - net_ovs_list: extended with (net_id, vlan, ip_address, mac) rows of ovs-managed ports
+        Return: (1, message) on success, (0, message) if instance not found,
+            (negative, message) on database error
+        """
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                with self.con:
+                    self.cur = self.con.cursor()
+                    #get INSTANCE, checking it belongs to the tenant
+                    cmd = "SELECT uuid FROM instances WHERE uuid='%s' AND tenant_id='%s'" % (instance_id, tenant_id)
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    if self.cur.rowcount == 0 : return 0, "instance %s not found in tenant %s" % (instance_id, tenant_id)
+
+                    #delete bridged ifaces, instance_devices, resources_mem; done automatically by the database: FOREIGN KEY DELETE CASCADE
+                    
+                    #get nets affected (dataplane only), reported back via net_dataplane_list
+                    cmd = "SELECT DISTINCT net_id from ports WHERE instance_id = '%s' AND net_id is not Null AND type='instance:data'" % instance_id
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    net_list__ = self.cur.fetchall()
+                    for net in net_list__:
+                        net_dataplane_list.append(net[0])
+
+                    # get ovs management nets, reported back via net_ovs_list
+                    cmd = "SELECT DISTINCT net_id, vlan, ip_address, mac FROM ports WHERE instance_id='{}' AND net_id is not Null AND "\
+                            "type='instance:ovs'".format(instance_id)
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    net_ovs_list += self.cur.fetchall()
+
+                    #get dataplane interfaces released by this VM; both PF and VF with no other VF
+                    # (A: usage count per root port by this VM; B: total usage count; equal counts
+                    #  mean the whole root port becomes free)
+                    cmd="SELECT source_name, mac FROM (SELECT root_id, count(instance_id) as used FROM resources_port WHERE instance_id='%s' GROUP BY root_id ) AS A" % instance_id \
+                        +  " JOIN (SELECT root_id, count(instance_id) as used FROM resources_port GROUP BY root_id) AS B ON A.root_id=B.root_id AND A.used=B.used"\
+                        +  " JOIN resources_port as C ON A.root_id=C.id" 
+#                    cmd = "SELECT DISTINCT root_id FROM resources_port WHERE instance_id = '%s'" % instance_id
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    ports_to_free += self.cur.fetchall()
+
+                    #update resources port: detach them from the deleted instance
+                    cmd = "UPDATE resources_port SET instance_id=Null, port_id=Null, Mbps_used='0' WHERE instance_id = '%s'" % instance_id
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    
+#                     #filter dataplane ports used by this VM that now are free
+#                     for port in ports_list__:
+#                         cmd = "SELECT mac, count(instance_id) FROM resources_port WHERE root_id = '%s'" % port[0]
+#                         self.logger.debug(cmd)
+#                         self.cur.execute(cmd)
+#                         mac_list__ = self.cur.fetchone()
+#                         if mac_list__ and mac_list__[1]==0:
+#                             ports_to_free.append(mac_list__[0])
+                        
+
+                    #update resources core: release the cpu threads of the instance
+                    cmd = "UPDATE resources_core SET instance_id=Null, v_thread_id=Null, paired='N' WHERE instance_id = '%s'" % instance_id
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+
+                    #delete all related uuids
+                    cmd = "DELETE FROM uuids WHERE root_uuid='%s'" % instance_id
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+
+                    ##insert log
+                    #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('instances','debug','%s','delete instance %s')" % (instance_id, logcause)
+                    #self.logger.debug(cmd)
+                    #self.cur.execute(cmd)                    
+
+                    #delete instance row itself (cascades to its dependent rows)
+                    cmd = "DELETE FROM instances WHERE uuid='%s' AND tenant_id='%s'" % (instance_id, tenant_id)
+                    self.cur.execute(cmd)
+                    return 1, "instance %s from tenant %s DELETED" % (instance_id, tenant_id)
+
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "delete_instance", cmd)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+
+    def get_ports(self, WHERE):
+        ''' Obtain ports using the WHERE filtering.
+        Attributes:
+            WHERE: dict of key:values, translated to "key=value AND ..." SQL clause;
+                a None value is translated to "key is Null". May be None or empty (no filter)
+        Return: (rowcount, list-of-row-dicts) on success, at most 100 rows, with
+            None values stripped by af.DeleteNone; (negative, error-string) on DB error
+        '''
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                with self.con:
+
+                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
+                    select_ = "SELECT uuid,'ACTIVE' as status,admin_state_up,name,net_id,\
+                        tenant_id,type,mac,vlan,switch_port,instance_id,Mbps FROM ports "
+
+                    if WHERE is None or len(WHERE) == 0:  where_ = ""
+                    else:
+                        where_ = "WHERE " + " AND ".join(map( lambda x: str(x) + (" is Null" if WHERE[x] is None else "='"+str(WHERE[x])+"'"),  WHERE.keys()) ) 
+                    limit_ = "LIMIT 100"
+                    cmd =  " ".join( (select_, where_, limit_) )
+    #                print "SELECT multiple de instance_ifaces, iface_uuid, external_ports" #print cmd
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    ports = self.cur.fetchall()
+                    if self.cur.rowcount>0:  af.DeleteNone(ports)
+                    return self.cur.rowcount, ports
+    #                return self.get_table(FROM=from_, SELECT=select_,WHERE=where_,LIMIT=100)
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "get_ports", cmd)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+        
+    def check_target_net(self, net_id, tenant_id, port_type):
+        '''check if valid attachement of a port into a target net
+        Attributes:
+            net_id: target net uuid
+            tenant_id: client where tenant belongs. Not used in this version
+            port_type: string with the option 'instance:bridge', 'instance:data', 'external'
+        Return: 
+            (0,net_dict) if ok,   where net_dict contain 'uuid','type','vlan', ...
+            (negative,string-error) if error
+        '''
+        for retry_ in range(0,2):
+            cmd=""
+            try:
+                with self.con:
+                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
+                    cmd = "SELECT * FROM nets WHERE uuid='%s'" % net_id
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    if self.cur.rowcount == 0 : return -1, "network_id %s does not match any net" % net_id
+                    net = self.cur.fetchone()
+                    break
+
+            except (mdb.Error, AttributeError) as e:
+                r,c = self.format_error(e, "check_target_net", cmd)
+                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
+        #check permissions
+        if tenant_id is not None and tenant_id is not "admin":
+            if net['tenant_id']==tenant_id and net['shared']=='false':
+                return -1, "needed admin privileges to attach to the net %s" % net_id
+        #check types
+        if (net['type'] in ('ptp','data') and port_type not in ('instance:data','external')) or \
+            (net['type'] in ('bridge_data','bridge_man') and port_type not in ('instance:bridge', 'instance:ovs')):
+            return -1, "Cannot attach a port of type %s into a net of type %s" % (port_type, net['type'])
+        if net['type'] == 'ptp':
+            #look how many 
+            nb_ports, data = self.get_ports( {'net_id':net_id} )
+            if nb_ports<0:
+                return -1, data
+            else:
+                if net['provider']:
+                    nb_ports +=1
+                if nb_ports >=2:
+                    return -1, "net of type p2p already contain two ports attached. No room for another"
+            
+        return 0, net
+
+if __name__ == "__main__":
+    print "Hello World"
diff --git a/osm_openvim/vim_schema.py b/osm_openvim/vim_schema.py
new file mode 100644 (file)
index 0000000..c2dc1e2
--- /dev/null
@@ -0,0 +1,767 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+''' Definition of dictionaries schemas used by validating input
+    These dictionaries are validated using jsonschema library
+'''
+__author__="Alfonso Tierno"
+__date__ ="$10-jul-2014 12:07:15$"
+
+#
+# SCHEMAS to validate input data
+#
+
+path_schema={"type":"string", "pattern":"^(\.){0,2}(/[^/\"':{}\(\)]+)+$"}
+http_schema={"type":"string", "pattern":"^https?://[^'\"=]+$"}
+port_schema={"type":"integer","minimum":1,"maximun":65534}
+ip_schema={"type":"string","pattern":"^([0-9]{1,3}.){3}[0-9]{1,3}$"}
+cidr_schema={"type":"string","pattern":"^([0-9]{1,3}.){3}[0-9]{1,3}/[0-9]{1,2}$"}
+name_schema={"type" : "string", "minLength":1, "maxLength":255, "pattern" : "^[^,;()'\"]+$"}
+nameshort_schema={"type" : "string", "minLength":1, "maxLength":64, "pattern" : "^[^,;()'\"]+$"}
+nametiny_schema={"type" : "string", "minLength":1, "maxLength":12, "pattern" : "^[^,;()'\"]+$"}
+xml_text_schema={"type" : "string", "minLength":1, "maxLength":1000, "pattern" : "^[^']+$"}
+description_schema={"type" : ["string","null"], "maxLength":255, "pattern" : "^[^'\"]+$"}
+id_schema_fake = {"type" : "string", "minLength":2, "maxLength":36 }  #"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
+id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+pci_schema={"type":"string", "pattern":"^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\.[0-9a-fA-F]$"}
+bandwidth_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]bps)?$"}
+integer0_schema={"type":"integer","minimum":0}
+integer1_schema={"type":"integer","minimum":1}
+vlan_schema={"type":"integer","minimum":1,"maximun":4095}
+vlan1000_schema={"type":"integer","minimum":1000,"maximun":4095}
+mac_schema={"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}  #must be unicast LSB bit of MSB byte ==0 
+net_bind_schema={"oneOf":[{"type":"null"},{"type":"string", "pattern":"^(default|((bridge|macvtap):[0-9a-zA-Z\.\-]{1,50})|openflow:[/0-9a-zA-Z\.\-]{1,50}(:vlan)?)$"}]}
+yes_no_schema={"type":"string", "enum":["yes", "no"]}
+log_level_schema={"type":"string", "enum":["DEBUG", "INFO", "WARNING","ERROR","CRITICAL"]}
+
+# schema of the openvimd.cfg configuration file content
+config_schema = {
+    "title":"main configuration information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "http_port": port_schema,
+        "http_admin_port": port_schema,
+        "http_host": nameshort_schema,
+        "http_url_prefix": path_schema, # it does not work yet; it's supposed to be the base path to be used by bottle, but it must be explicitly declared
+        "db_host": nameshort_schema,
+        "db_user": nameshort_schema,
+        "db_passwd": {"type":"string"},
+        "db_name": nameshort_schema,
+        "of_controller_ip": ip_schema,
+        "of_controller_port": port_schema,
+        "of_controller_dpid": nameshort_schema,
+        "of_controller_nets_with_same_vlan": {"type" : "boolean"},
+        "of_controller": nameshort_schema, #{"type":"string", "enum":["floodlight", "opendaylight"]},
+        "of_controller_module": {"type":"string"},
+        "of_user": nameshort_schema,
+        "of_password": nameshort_schema,
+        "test_mode": {"type": "boolean"}, #leave for backward compatibility
+        "mode": {"type":"string", "enum":["normal", "host only", "OF only", "development", "test"] },
+        "development_bridge": {"type":"string"},
+        "tenant_id": {"type" : "string"},
+        "image_path": path_schema,
+        "network_vlan_range_start": vlan_schema,
+        "network_vlan_range_end": vlan_schema,
+        # map of bridge-interface name -> [vlan, speed-in-Gbit]
+        "bridge_ifaces": {
+            "type": "object",
+            "patternProperties": {
+                "." : {
+                    "type": "array", 
+                    "items": integer0_schema,
+                    "minItems":2,
+                    "maxItems":2,
+                },
+            },
+            "minProperties": 2
+        },
+        "dhcp_server": {
+            "type": "object",
+            "properties": {
+                "host" : name_schema,
+                "port" : port_schema,
+                "provider" : {"type": "string", "enum": ["isc-dhcp-server"]},
+                "user" : nameshort_schema,
+                "password" : {"type": "string"},
+                "key" : {"type": "string"},
+                "bridge_ifaces" :{
+                    "type": "array", 
+                    "items": nameshort_schema,
+                },
+                "nets" :{
+                    "type": "array", 
+                    "items": name_schema,
+                },
+            },
+            "required": ['host', 'provider', 'user']
+        },
+        "log_level": log_level_schema,
+        "log_level_db": log_level_schema,
+        "log_level_of": log_level_schema,
+        "network_type": {"type": "string", "enum": ["ovs", "bridge"]},
+        "ovs_controller_file_path": path_schema,
+        "ovs_controller_user": nameshort_schema,
+
+        "ovs_controller_ip": nameshort_schema
+    },
+    # NOTE(review): "of_*" is a regex -- unanchored, with '*' applied to '_', so it
+    # matches any property name containing "of"; "^of_" was probably intended.
+    # Kept as-is to preserve current behavior -- confirm before tightening.
+    "patternProperties": {
+        "of_*" : {"type": ["string", "integer", "boolean"]}
+    },
+    "required": ['db_host', 'db_user', 'db_passwd', 'db_name'],
+    "additionalProperties": False
+}
+
+
+
+metadata_schema={
+    "type":"object",
+    "properties":{
+        "architecture": {"type":"string"},
+        "use_incremental": yes_no_schema,
+        "vpci": pci_schema,
+        "os_distro": {"type":"string"},
+        "os_type": {"type":"string"},
+        "os_version": {"type":"string"},
+        "bus": {"type":"string"},
+        "topology": {"type":"string", "enum": ["oneSocket"]}
+    }
+}
+
+tenant_new_schema = {
+    "title":"tenant creation information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "tenant":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+                "name": nameshort_schema,
+                "description":description_schema,
+                "enabled":{"type" : "boolean"}
+            },
+            "required": ["name"]
+        }
+    },
+    "required": ["tenant"],
+    "additionalProperties": False
+}
+
+# body accepted by tenant edition requests: any non-empty subset of editable fields
+tenant_edit_schema = {
+    "title":"tenant edition information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "tenant":{
+            "type":"object",
+            "minProperties":1,
+            "properties":{
+                "name":nameshort_schema,
+                "description":description_schema,
+                "enabled":{"type" : "boolean"}
+            },
+            "additionalProperties": False,
+        }
+    },
+    "required": ["tenant"],
+    "additionalProperties": False
+}
+# dataplane interfaces of a numa description; reused inside extended_schema
+interfaces_schema={
+    "type":"array",
+    "minItems":0,
+    "items":{
+        "type":"object",
+        "properties":{
+            "name":name_schema,
+            "dedicated":{"type":"string","enum":["yes","no","yes:sriov"]},
+            "bandwidth":bandwidth_schema,
+            "vpci":pci_schema,
+            "uuid":id_schema,
+            "mac_address":mac_schema
+        },
+        "additionalProperties": False,
+        "required": ["dedicated", "bandwidth"]
+    }
+}
+
+# EPA extensions of a flavor: extra devices and numa topology requirements
+extended_schema={
+    "type":"object", 
+    "properties":{                  
+        "processor_ranking":integer0_schema,
+        "devices":{
+            "type": "array", 
+            "items":{
+                "type": "object",
+                "properties":{
+                    "type":{"type":"string", "enum":["usb","disk","cdrom","xml"]},
+                    "vpci":pci_schema,
+                    "imageRef":id_schema,
+                    "xml":xml_text_schema,
+                    "dev":nameshort_schema
+                },
+                "additionalProperties": False,
+                "required": ["type"]
+            }
+        },
+        "numas":{
+            "type": "array",
+            "items":{
+                "type": "object",
+                "properties":{
+                    "memory":integer1_schema,
+                    "cores":integer1_schema,
+                    "paired-threads":integer1_schema,
+                    "threads":integer1_schema,
+                    "cores-id":{"type":"array","items":integer0_schema},
+                    "paired-threads-id":{"type":"array","items":{"type":"array","minItems":2,"maxItems":2,"items":integer0_schema}},
+                    "threads-id":{"type":"array","items":integer0_schema},
+                    "interfaces":interfaces_schema
+                },
+                "additionalProperties": False,
+                "minProperties": 1,
+                #"required": ["memory"]
+            }
+        }
+    },
+    #"additionalProperties": False,
+    #"required": ["processor_ranking"]
+}
+
+# full host description (cores, numas, dataplane interfaces) for manual insertion
+host_data_schema={
+    "title":"hosts manual insertion information schema",
+    "type":"object", 
+    "properties":{                  
+        "ip_name":nameshort_schema,
+        "name": name_schema,
+        "description":description_schema,
+        "user":nameshort_schema,
+        "password":nameshort_schema,
+        "features":description_schema,
+        "ranking":integer0_schema,
+        "devices":{
+            "type": "array", 
+            "items":{
+                "type": "object",
+                "properties":{
+                    "type":{"type":"string", "enum":["usb","disk"]},
+                    "vpci":pci_schema
+                },
+                "additionalProperties": False,
+                "required": ["type"]
+            }
+        },
+        "numas":{
+            "type": "array",
+            "minItems":1,
+            "items":{
+                "type": "object",
+                "properties":{
+                    "admin_state_up":{"type":"boolean"},
+                    "hugepages":integer0_schema,
+                    "cores":{
+                        "type": "array",
+                        "minItems":2,
+                        "items":{
+                            "type": "object",
+                            "properties":{
+                                "core_id":integer0_schema,
+                                "thread_id":integer0_schema,
+                                "status": {"type":"string", "enum":["noteligible"]}
+                            },
+                            "additionalProperties": False,
+                            "required": ["core_id","thread_id"]
+                        }
+                    },
+                    "interfaces":{
+                        "type": "array",
+                        "minItems":1,
+                        "items":{
+                            "type": "object",
+                            "properties":{
+                                "source_name":nameshort_schema,
+                                "mac":mac_schema,
+                                "Mbps":integer0_schema,
+                                "pci":pci_schema,
+                                # virtual functions hanging from this physical interface
+                                "sriovs":{
+                                    "type": "array",
+                                    "minItems":1,
+                                    "items":{
+                                        "type": "object",
+                                        "properties":{
+                                            "source_name":{"oneOf":[integer0_schema, nameshort_schema]},
+                                            "mac":mac_schema,
+                                            "vlan":integer0_schema, 
+                                            "pci":pci_schema,
+                                        },
+                                        "additionalProperties": False,
+                                        "required": ["source_name","mac","pci"]
+                                    }
+                                },
+                                "switch_port": nameshort_schema,
+                                "switch_dpid": nameshort_schema,
+                            },
+                            "additionalProperties": False,
+                            "required": ["source_name","mac","Mbps","pci"]
+                        }
+                    },
+                    "numa_socket":integer0_schema,
+                    "memory":integer1_schema
+                },
+                "additionalProperties": False,
+                "required": ["cores","numa_socket"]
+            }
+        }
+    },
+    "additionalProperties": False,
+    "required": ["ranking", "numas","ip_name","user"]
+}
+
+# body accepted by host edition requests (switch-port mapping, admin state, ...)
+host_edit_schema={
+    "title":"hosts creation information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "host":{
+            "type":"object",
+            "properties":{
+                "ip_name":nameshort_schema,
+                "name": name_schema,
+                "description":description_schema,
+                "user":nameshort_schema,
+                "password":nameshort_schema,
+                "admin_state_up":{"type":"boolean"},
+                "numas":{
+                    "type":"array", 
+                    "items":{
+                        "type": "object",
+                        "properties":{
+                            "numa_socket": integer0_schema,
+                            "admin_state_up":{"type":"boolean"},
+                            "interfaces":{
+                                "type":"array", 
+                                "items":{
+                                    "type": "object",
+                                    "properties":{
+                                        "source_name": nameshort_schema,
+                                        "switch_dpid": nameshort_schema,
+                                        "switch_port": nameshort_schema,
+                                    },
+                                    "required": ["source_name"],
+                                }
+                            }
+                        }, 
+                        "required": ["numa_socket"],
+                        "additionalProperties": False,
+                    }
+                }
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        },
+    },
+    "required": ["host"],
+    "minProperties": 1,
+    "additionalProperties": False
+}
+
+# body accepted by host creation requests; "host-data" may carry the full
+# host_data_schema description
+host_new_schema = {
+    "title":"hosts creation information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "host":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+                "ip_name":nameshort_schema,
+                "name": name_schema,
+                "description":description_schema,
+                "user":nameshort_schema,
+                "password":nameshort_schema,
+                "admin_state_up":{"type":"boolean"},
+            },
+            "required": ["name","ip_name","user"]
+        },
+        "host-data":host_data_schema
+    },
+    "required": ["host"],
+    "minProperties": 1,
+    "maxProperties": 2,
+    "additionalProperties": False
+}
+
+
+# body accepted by flavor creation requests
+flavor_new_schema = {
+    "title":"flavor creation information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "flavor":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+                "name":name_schema,
+                "description":description_schema,
+                "ram":integer0_schema,
+                "vcpus":integer0_schema,
+                "extended": extended_schema,
+                "public": yes_no_schema
+            },
+            "required": ["name"]
+        }
+    },
+    "required": ["flavor"],
+    "additionalProperties": False
+}
+# body accepted by flavor edition requests: any non-empty subset of the fields
+flavor_update_schema = {
+    "title":"flavor update information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "flavor":{
+            "type":"object",
+            "properties":{
+                "name":name_schema,
+                "description":description_schema,
+                "ram":integer0_schema,
+                "vcpus":integer0_schema,
+                "extended": extended_schema,
+                "public": yes_no_schema
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        }
+    },
+    "required": ["flavor"],
+    "additionalProperties": False
+}
+
+# body accepted by image creation requests; "path" may be local or http(s)
+image_new_schema = {
+    "title":"image creation information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "image":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+                "path": {"oneOf": [path_schema, http_schema]},
+                "description":description_schema,
+                "name":name_schema,
+                "metadata":metadata_schema,
+                "public": yes_no_schema
+            },
+            "required": ["name","path"]
+        }
+    },
+    "required": ["image"],
+    "additionalProperties": False
+}
+
+# body accepted by image edition requests: any non-empty subset of the fields
+image_update_schema = {
+    "title":"image update information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "image":{
+            "type":"object",
+            "properties":{
+                "path":{"oneOf": [path_schema, http_schema]},
+                "description":description_schema,
+                "name":name_schema,
+                "metadata":metadata_schema,
+                "public": yes_no_schema
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        }
+    },
+    "required": ["image"],
+    "additionalProperties": False
+}
+
+# network attachments of a new server; reused inside server_new_schema
+networks_schema={
+    "type":"array",
+    "items":{
+        "type":"object",
+        "properties":{
+            "name":name_schema,
+            "bandwidth":bandwidth_schema,
+            "vpci":pci_schema,
+            "uuid":id_schema,
+            "mac_address": mac_schema,
+            "model": {"type":"string", "enum":["virtio","e1000","ne2k_pci","pcnet","rtl8139"]},
+            "type": {"type":"string", "enum":["virtual","PF","VF","VFnotShared"]}
+        },
+        "additionalProperties": False,
+        "required": ["uuid"]
+    }
+}
+
+# body accepted by server (VM instance) creation requests
+server_new_schema = {
+    "title":"server creation information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "server":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+                "name":name_schema,
+                "description":description_schema,
+                "start":{"type":"string", "enum":["yes","no","paused"]},
+                "hostId":id_schema,
+                "flavorRef":id_schema,
+                "imageRef":id_schema,
+                "extended": extended_schema,
+                "networks":networks_schema
+            },
+            "required": ["name","flavorRef","imageRef"]
+        }
+    },
+    "required": ["server"],
+    "additionalProperties": False
+}
+
+# body accepted by server action requests; exactly one action per request
+server_action_schema = {
+    "title":"server action information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "start":{"oneOf":[{"type": "null"}, {"type":"string", "enum":["rebuild","null"] }]},
+        "pause":{"type": "null"},
+        "resume":{"type": "null"},
+        "shutoff":{"type": "null"},
+        "shutdown":{"type": "null"},
+        "forceOff":{"type": "null"},
+        "terminate":{"type": "null"},
+        "createImage":{
+            "type":"object",
+            "properties":{ 
+                "path":path_schema,
+                "description":description_schema,
+                "name":name_schema,
+                "metadata":metadata_schema,
+                "imageRef": id_schema,
+                "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
+            },
+            "required": ["name"]
+        },
+        "rebuild":{"type": ["object","null"]},
+        "reboot":{
+            "type": ["object","null"],
+#            "properties": {
+#                "type":{"type":"string", "enum":["SOFT"] }
+#            }, 
+#            "minProperties": 1,
+#            "maxProperties": 1,
+#            "additionalProperties": False
+        }
+    },
+    "minProperties": 1,
+    "maxProperties": 1,
+    "additionalProperties": False
+}
+
# Body of POST /networks: a single "network" object; only "name" is mandatory.
network_new_schema = {
    "title": "network creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "network": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": name_schema,
                "type": {"type": "string", "enum": ["bridge_man", "bridge_data", "data", "ptp"]},
                "shared": {"type": "boolean"},
                "tenant_id": id_schema,
                "admin_state_up": {"type": "boolean"},
                "provider:vlan": vlan_schema,
                "provider:physical": net_bind_schema,
                "cidr": cidr_schema,
                "enable_dhcp": {"type": "boolean"},
                "dhcp_first_ip": ip_schema,
                "dhcp_last_ip": ip_schema,
                "bind_net": name_schema,  # can be a network name or a uuid
                "bind_type": {"oneOf": [{"type": "null"},
                                        {"type": "string", "pattern": "^vlan:[0-9]{1,4}$"}]}
            },
            "required": ["name"]
        }
    },
    "required": ["network"],
    "additionalProperties": False
}
+
# Body of PUT /networks/<id>: same fields as creation minus id/dhcp range;
# at least one property must be supplied.
network_update_schema = {
    "title": "network update information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "network": {
            "type": "object",
            "properties": {
                "name": name_schema,
                "type": {"type": "string", "enum": ["bridge_man", "bridge_data", "data", "ptp"]},
                "shared": {"type": "boolean"},
                "tenant_id": id_schema,
                "admin_state_up": {"type": "boolean"},
                "provider:vlan": vlan_schema,
                "provider:physical": net_bind_schema,
                "cidr": cidr_schema,
                "enable_dhcp": {"type": "boolean"},
                # "dhcp_first_ip": ip_schema,
                # "dhcp_last_ip": ip_schema,
                "bind_net": name_schema,  # can be a network name or a uuid
                "bind_type": {"oneOf": [{"type": "null"},
                                        {"type": "string", "pattern": "^vlan:[0-9]{1,4}$"}]}
            },
            "minProperties": 1,
            "additionalProperties": False
        }
    },
    "required": ["network"],
    "additionalProperties": False
}
+
+
# Body of POST /ports: only "name" is mandatory; network/mac/vlan may be null.
port_new_schema = {
    "title": "port creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "port": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": nameshort_schema,
                "network_id": {"oneOf": [{"type": "null"}, id_schema]},
                "tenant_id": id_schema,
                "mac_address": {"oneOf": [{"type": "null"}, mac_schema]},
                "admin_state_up": {"type": "boolean"},
                "bandwidth": bandwidth_schema,
                "binding:switch_port": nameshort_schema,
                "binding:vlan": {"oneOf": [{"type": "null"}, vlan_schema]}
            },
            "required": ["name"]
        }
    },
    "required": ["port"],
    "additionalProperties": False
}
+
# Body of PUT /ports/<id>: only the name and the network attachment
# can be changed; a null network_id detaches the port.
port_update_schema = {
    "title": "port update information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "port": {
            "type": "object",
            "properties": {
                "name": nameshort_schema,
                "network_id": {"anyOf": [{"type": "null"}, id_schema]}
            },
            "minProperties": 1,
            "additionalProperties": False
        }
    },
    "required": ["port"],
    "additionalProperties": False
}
+
# Layout of the per-host local-info structure; only "files" is mandatory.
localinfo_schema = {
    "title": "localinfo information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "files": {"type": "object"},
        "inc_files": {"type": "object"},
        "server_files": {"type": "object"}
    },
    "required": ["files"]
}
+
# Host interface report: a non-empty mapping of interface names to strings.
hostinfo_schema = {
    "title": "host information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "iface_names": {
            "type": "object",
            # any key is accepted, but every value must be a string
            "patternProperties": {".": {"type": "string"}},
            "minProperties": 1
        }
    },
    "required": ["iface_names"]
}
+
# Body of POST /openflow/controller: description of one SDN controller.
openflow_controller_schema = {
    "title": "network creation information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "ofc": {
            "type": "object",
            "properties": {
                "name": name_schema,
                "dpid": nameshort_schema,
                "ip": nameshort_schema,
                "port": port_schema,
                "type": nameshort_schema,  # controller flavour, e.g. floodlight/opendaylight/onos
                "version": nametiny_schema,
                "user": nameshort_schema,
                "password": nameshort_schema
            },
            # credentials and version are optional; the connection data is not
            "required": ["dpid", "type", "ip", "port", "name"]
        }
    },
    "required": ["ofc"],
    "additionalProperties": False
}
+
# One entry of the openflow port mapping: links a compute-node PCI address
# to a port of the openflow switch.
of_port_new_schema = {
    "title": "OF port mapping",
    "type": "object",
    "properties": {
        "ofc_id": id_schema,
        "region": nameshort_schema,
        "compute_node": nameshort_schema,
        "pci": pci_schema,
        "switch_dpid": nameshort_schema,
        "switch_port": nameshort_schema,
        "switch_mac": mac_schema
    },
    "required": ["region", "compute_node", "pci", "switch_dpid"]
}
+
# Body of the port-mapping upload: a non-empty list of of_port_new_schema items.
of_port_map_new_schema = {
    "title": "OF port mapping",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        # NOTE: the key keeps the historical misspelling "of_port_mapings"
        # because it is part of the public API payload.
        # "minItems" replaces the previous "minLenght", which is not a valid
        # JSON-Schema keyword (arrays are constrained with minItems/maxItems).
        "of_port_mapings": {"type": "array", "items": of_port_new_schema, "minItems": 1},
    },
    "required": ["of_port_mapings"],
    "additionalProperties": False
}
\ No newline at end of file
diff --git a/ovim.py b/ovim.py
deleted file mode 100755 (executable)
index b369594..0000000
--- a/ovim.py
+++ /dev/null
@@ -1,1383 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-This is the thread for the http server North API. 
-Two thread will be launched, with normal and administrative permissions.
-'''
-
-__author__ = "Alfonso Tierno, Leonardo Mirabal"
-__date__ = "$06-Feb-2017 12:07:15$"
-__version__ = "0.5.10-r526"
-version_date = "Apr 2017"
-database_version = "0.17"      #expected database schema version
-
-import threading
-import vim_db
-import logging
-import imp
-import host_thread as ht
-import dhcp_thread as dt
-import openflow_thread as oft
-from netaddr import IPNetwork
-from jsonschema import validate as js_v, exceptions as js_e
-import openflow_conn
-import argparse
-
# HTTP status codes used by the ovim API layer when raising ovimException.
HTTP_Bad_Request = 400
HTTP_Unauthorized = 401
HTTP_Not_Found = 404
HTTP_Forbidden = 403
HTTP_Method_Not_Allowed = 405
HTTP_Not_Acceptable = 406
HTTP_Request_Timeout = 408
HTTP_Conflict = 409
HTTP_Service_Unavailable = 503
HTTP_Internal_Server_Error = 500
-
-
def convert_boolean(data, items):
    """Recursively convert "true"/"false" string values to booleans in place.

    Walks *data* (dicts, lists and tuples, nested to any depth) and, for every
    dict key listed in *items*, replaces the exact string "true" with True and
    "false" with False.  Any other value (including other strings) is left
    untouched.  It assumes that bandwidth is well formed.

    :param data: dict (e.g. a bottle.FormsDict) / list / tuple to fix up in
                 place; None or empty is considered valid
    :param items: iterable of dict keys whose values must be converted
    :return: None (the structure is modified in place)
    """
    if isinstance(data, dict):
        for key in data.keys():
            value = data[key]
            if isinstance(value, (dict, list, tuple)):
                convert_boolean(value, items)
            # isinstance (instead of `type(...) is str`) also accepts str
            # subclasses; a container value can never match, hence elif
            elif key in items and isinstance(value, str):
                if value == "false":
                    data[key] = False
                elif value == "true":
                    data[key] = True
    elif isinstance(data, (list, tuple)):
        for element in data:
            if isinstance(element, (dict, list, tuple)):
                convert_boolean(element, items)
-
-
-
class ovimException(Exception):
    """Error raised by ovim operations; carries the HTTP status to reply with."""

    def __init__(self, message, http_code=HTTP_Bad_Request):
        # keep the code so the API layer can map the error to a response
        self.http_code = http_code
        super(ovimException, self).__init__(message)
-
-
-class ovim():
-    running_info = {} #TODO OVIM move the info of running threads from config_dic to this static variable
-    of_module = {}
-
    def __init__(self, configuration):
        """Store the configuration and open the main database connection.

        :param configuration: dict with DB credentials, logger name, mode and
            the rest of the openvimd options
        :raises ovimException: if the database connection cannot be established
        """
        self.config = configuration
        self.logger_name = configuration.get("logger_name", "openvim")
        self.logger = logging.getLogger(self.logger_name)
        self.db = None
        self.db = self._create_database_connection()
        # the following are initialized for real in start_service()
        self.db_lock = None
        self.db_of = None
        self.of_test_mode = False
-
    def _create_database_connection(self):
        """Build a vim_db instance and connect it using self.config credentials.

        :return: a connected vim_db.vim_db object
        :raises ovimException: if the connection to MySQL fails
        """
        db = vim_db.vim_db((self.config["network_vlan_range_start"], self.config["network_vlan_range_end"]),
                           self.logger_name + ".db", self.config.get('log_level_db'))
        # vim_db.connect() returns -1 on failure instead of raising
        if db.connect(self.config['db_host'], self.config['db_user'], self.config['db_passwd'],
                      self.config['db_name']) == -1:
            # self.logger.error("Cannot connect to database %s at %s@%s", self.config['db_name'], self.config['db_user'],
            #              self.config['db_host'])
            raise ovimException("Cannot connect to database {} at {}@{}".format(self.config['db_name'],
                                                                                self.config['db_user'],
                                                                                self.config['db_host']) )
        return db
-
    @staticmethod
    def get_version():
        """Return the openvim software version string."""
        return __version__

    @staticmethod
    def get_version_date():
        """Return the human-readable release date of this version."""
        return version_date

    @staticmethod
    def get_database_version():
        """Return the database schema version this code expects."""
        return database_version
-
-    @staticmethod
-    def _check_dhcp_data_integrity(network):
-        """
-        Check if all dhcp parameter for anet are valid, if not will be calculated from cidr value
-        :param network: list with user nets paramters
-        :return:
-        """
-        if "cidr" in network:
-            cidr = network["cidr"]
-            ip_tools = IPNetwork(cidr)
-            cidr_len = ip_tools.prefixlen
-            if cidr_len > 29:
-                return False
-
-            ips = IPNetwork(cidr)
-            if "dhcp_first_ip" not in network:
-                network["dhcp_first_ip"] = str(ips[2])
-            if "dhcp_last_ip" not in network:
-                network["dhcp_last_ip"] = str(ips[-2])
-            if "gateway_ip" not in network:
-                network["gateway_ip"] = str(ips[1])
-
-            return True
-        else:
-            return False
-
-    @staticmethod
-    def _check_valid_uuid(uuid):
-        id_schema = {"type": "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
-        try:
-            js_v(uuid, id_schema)
-            return True
-        except js_e.ValidationError:
-            return False
-
    def start_service(self):
        """
        Start all ovim services: check the DB schema version, create the
        openflow controller threads (default + those stored in the DB), the
        dhcp thread, one management thread per compute host, and the OVS dhcp
        servers of existing networks.

        :return: None
        :raises ovimException: on database version mismatch or DB errors
        """
        global database_version
        # if self.running_info:
        #    return  #TODO service can be checked and rebuild broken threads
        r = self.db.get_db_version()
        if r[0] < 0:
            raise ovimException("DATABASE is not a VIM one or it is a '0.0' version. Try to upgrade to version '{}' with "\
                                "'./database_utils/migrate_vim_db.sh'".format(database_version) )
        elif r[1] != database_version:
            raise ovimException("DATABASE wrong version '{}'. Try to upgrade/downgrade to version '{}' with "\
                                "'./database_utils/migrate_vim_db.sh'".format(r[1], database_version) )
        self.logger.critical("Starting ovim server version: '{} {}' database version '{}'".format(
            self.get_version(), self.get_version_date(), self.get_database_version()))
        # create database connection for openflow threads
        self.db_of = self._create_database_connection()
        self.config["db"] = self.db_of
        self.db_lock = threading.Lock()
        self.config["db_lock"] = self.db_lock

        # openflow talks to a real controller only in 'normal' and 'OF only' modes
        self.of_test_mode = False if self.config['mode'] == 'normal' or self.config['mode'] == "OF only" else True
        # precreate interfaces; [bridge:<host_bridge_name>, VLAN used at Host, uuid of network camping in this bridge,
        # speed in Gbit/s

        self.config['dhcp_nets'] = []
        self.config['bridge_nets'] = []
        for bridge, vlan_speed in self.config["bridge_ifaces"].items():
            # skip 'development_bridge'
            if self.config['mode'] == 'development' and self.config['development_bridge'] == bridge:
                continue
            self.config['bridge_nets'].append([bridge, vlan_speed[0], vlan_speed[1], None])

        # check if this bridge is already used (present at database) for a network)
        used_bridge_nets = []
        for brnet in self.config['bridge_nets']:
            r, nets = self.db.get_table(SELECT=('uuid',), FROM='nets', WHERE={'provider': "bridge:" + brnet[0]})
            if r > 0:
                # mark the bridge as taken by storing the network uuid in slot 3
                brnet[3] = nets[0]['uuid']
                used_bridge_nets.append(brnet[0])
                if self.config.get("dhcp_server"):
                    if brnet[0] in self.config["dhcp_server"]["bridge_ifaces"]:
                        self.config['dhcp_nets'].append(nets[0]['uuid'])
        if len(used_bridge_nets) > 0:
            self.logger.info("found used bridge nets: " + ",".join(used_bridge_nets))
        # get nets used by dhcp
        if self.config.get("dhcp_server"):
            for net in self.config["dhcp_server"].get("nets", ()):
                r, nets = self.db.get_table(SELECT=('uuid',), FROM='nets', WHERE={'name': net})
                if r > 0:
                    self.config['dhcp_nets'].append(nets[0]['uuid'])

        # OFC default
        self._start_ofc_default_task()

        # OFC per tenant in DB
        self._start_of_db_tasks()

        # create dhcp_server thread
        host_test_mode = True if self.config['mode'] == 'test' or self.config['mode'] == "OF only" else False
        dhcp_params = self.config.get("dhcp_server")
        if dhcp_params:
            thread = dt.dhcp_thread(dhcp_params=dhcp_params, test=host_test_mode, dhcp_nets=self.config["dhcp_nets"],
                                    db=self.db_of, db_lock=self.db_lock, logger_name=self.logger_name + ".dhcp",
                                    debug=self.config.get('log_level_of'))
            thread.start()
            self.config['dhcp_thread'] = thread

        # Create one thread for each host
        host_test_mode = True if self.config['mode'] == 'test' or self.config['mode'] == "OF only" else False
        host_develop_mode = True if self.config['mode'] == 'development' else False
        host_develop_bridge_iface = self.config.get('development_bridge', None)

        # get host list from data base before starting threads
        r, hosts = self.db.get_table(SELECT=('name', 'ip_name', 'user', 'uuid'), FROM='hosts', WHERE={'status': 'ok'})
        if r < 0:
            raise ovimException("Cannot get hosts from database {}".format(hosts))

        self.config['host_threads'] = {}
        for host in hosts:
            host['image_path'] = '/opt/VNF/images/openvim'
            thread = ht.host_thread(name=host['name'], user=host['user'], host=host['ip_name'], db=self.db_of,
                                    db_lock=self.db_lock, test=host_test_mode, image_path=self.config['image_path'],
                                    version=self.config['version'], host_id=host['uuid'], develop_mode=host_develop_mode,
                                    develop_bridge_iface=host_develop_bridge_iface)
            thread.start()
            self.config['host_threads'][host['uuid']] = thread

        # create ovs dhcp thread
        result, content = self.db.get_table(FROM='nets')
        if result < 0:
            self.logger.error("http_get_ports Error %d %s", result, content)
            raise ovimException(str(content), -result)

        # relaunch a dhcp server for every OVS-provided bridge net with dhcp enabled
        for net in content:
            net_type = net['type']
            if (net_type == 'bridge_data' or net_type == 'bridge_man') \
                    and net["provider"][:4] == 'OVS:' and net["enable_dhcp"] == "true":
                    self.launch_dhcp_server(net['vlan'],
                                            net['dhcp_first_ip'],
                                            net['dhcp_last_ip'],
                                            net['cidr'],
                                            net['gateway_ip'])
-
    def _start_of_db_tasks(self):
        """
        Start one openflow thread for every ofc already stored in the
        database (one per tenant-registered sdn controller).

        :return: None
        """
        ofcs = self.get_of_controllers()

        for ofc in ofcs:
            of_conn = self._load_of_module(ofc)
            # create ofc thread per of controller
            self._create_ofc_task(ofc['uuid'], ofc['dpid'], of_conn)
-
-    def _create_ofc_task(self, ofc_uuid, dpid, of_conn):
-        """
-        Create an ofc thread for handle each sdn controllers
-        :param ofc_uuid: sdn controller uuid
-        :param dpid:  sdn controller dpid
-        :param of_conn: OF_conn module
-        :return:
-        """
-        if 'ofcs_thread' not in self.config and 'ofcs_thread_dpid' not in self.config:
-            ofcs_threads = {}
-            ofcs_thread_dpid = []
-        else:
-            ofcs_threads = self.config['ofcs_thread']
-            ofcs_thread_dpid = self.config['ofcs_thread_dpid']
-
-        if ofc_uuid not in ofcs_threads:
-            ofc_thread = self._create_ofc_thread(of_conn, ofc_uuid)
-            if ofc_uuid == "Default":
-                self.config['of_thread'] = ofc_thread
-
-            ofcs_threads[ofc_uuid] = ofc_thread
-            self.config['ofcs_thread'] = ofcs_threads
-
-            ofcs_thread_dpid.append({dpid: ofc_thread})
-            self.config['ofcs_thread_dpid'] = ofcs_thread_dpid
-
    def _start_ofc_default_task(self):
        """
        Create the default openflow thread from the of_controller* entries of
        the configuration file, if any of them is present.

        :return: None
        """
        # NOTE(review): the and-joined condition skips only when *none* of the
        # keys is present; a partially configured controller proceeds with
        # None for the missing values -- confirm this is intended
        if 'of_controller' not in self.config \
                and 'of_controller_ip' not in self.config \
                and 'of_controller_port' not in self.config \
                and 'of_controller_dpid' not in self.config:
            return

        # OF THREAD
        db_config = {}
        db_config['ip'] = self.config.get('of_controller_ip')
        db_config['port'] = self.config.get('of_controller_port')
        db_config['dpid'] = self.config.get('of_controller_dpid')
        db_config['type'] = self.config.get('of_controller')
        db_config['user'] = self.config.get('of_user')
        db_config['password'] = self.config.get('of_password')

        # create connector to the openflow controller
        # load other parameters starting by of_ from config dict in a temporal dict

        of_conn = self._load_of_module(db_config)
        # create openflow thread
        self._create_ofc_task("Default", db_config['dpid'], of_conn)
-
-    def _load_of_module(self, db_config):
-        """
-        import python module for each SDN controller supported
-        :param db_config: SDN dn information
-        :return: Module
-        """
-        if not db_config:
-            raise ovimException("No module found it", HTTP_Internal_Server_Error)
-
-        module_info = None
-
-        try:
-            if self.of_test_mode:
-                return openflow_conn.OfTestConnector({"name": db_config['type'],
-                                                      "dpid": db_config['dpid'],
-                                                      "of_debug": self.config['log_level_of']})
-            temp_dict = {}
-
-            if db_config:
-                temp_dict['of_ip'] = db_config['ip']
-                temp_dict['of_port'] = db_config['port']
-                temp_dict['of_dpid'] = db_config['dpid']
-                temp_dict['of_controller'] = db_config['type']
-                temp_dict['of_user'] = db_config.get('user')
-                temp_dict['of_password'] = db_config.get('password')
-
-            temp_dict['of_debug'] = self.config['log_level_of']
-
-            if temp_dict['of_controller'] == 'opendaylight':
-                module = "ODL"
-            else:
-                module = temp_dict['of_controller']
-
-            if module not in ovim.of_module:
-                module_info = imp.find_module(module)
-                of_conn_module = imp.load_module("OF_conn", *module_info)
-                ovim.of_module[module] = of_conn_module
-            else:
-                of_conn_module = ovim.of_module[module]
-
-            try:
-                return of_conn_module.OF_conn(temp_dict)
-            except Exception as e:
-                self.logger.error("Cannot open the Openflow controller '%s': %s", type(e).__name__, str(e))
-                if module_info and module_info[0]:
-                    file.close(module_info[0])
-                raise ovimException("Cannot open the Openflow controller '{}': '{}'".format(type(e).__name__, str(e)),
-                                    HTTP_Internal_Server_Error)
-        except (IOError, ImportError) as e:
-            if module_info and module_info[0]:
-                file.close(module_info[0])
-            self.logger.error("Cannot open openflow controller module '%s'; %s: %s; revise 'of_controller' "
-                              "field of configuration file.", module, type(e).__name__, str(e))
-            raise ovimException("Cannot open openflow controller module '{}'; {}: {}; revise 'of_controller' "
-                                "field of configuration file.".format(module, type(e).__name__, str(e)),
-                                HTTP_Internal_Server_Error)
-
    def _create_ofc_thread(self, of_conn, ofc_uuid="Default"):
        """
        Create and start an openflow_thread for one controller connector.

        :param of_conn: openflow connector instance (from _load_of_module)
        :param ofc_uuid: controller uuid used to identify the thread
        :return: the started thread object
        """
        # create openflow thread

        #if 'of_controller_nets_with_same_vlan' in self.config:
        #    ofc_net_same_vlan = self.config['of_controller_nets_with_same_vlan']
        #else:
        #    ofc_net_same_vlan = False
        ofc_net_same_vlan = False

        thread = oft.openflow_thread(ofc_uuid, of_conn, of_test=self.of_test_mode, db=self.db_of, db_lock=self.db_lock,
                                     pmp_with_same_vlan=ofc_net_same_vlan, debug=self.config['log_level_of'])
        #r, c = thread.OF_connector.obtain_port_correspondence()
        #if r < 0:
        #    raise ovimException("Cannot get openflow information %s", c)
        thread.start()
        return thread
-
-    def stop_service(self):
-        threads = self.config.get('host_threads', {})
-        if 'of_thread' in self.config:
-            threads['of'] = (self.config['of_thread'])
-        if 'ofcs_thread' in self.config:
-            ofcs_thread = self.config['ofcs_thread']
-            for ofc in ofcs_thread:
-                threads[ofc] = ofcs_thread[ofc]
-
-        if 'dhcp_thread' in self.config:
-            threads['dhcp'] = (self.config['dhcp_thread'])
-
-        for thread in threads.values():
-            thread.insert_task("exit")
-        for thread in threads.values():
-            thread.join()
-
-    def get_networks(self, columns=None, db_filter={}, limit=None):
-        """
-        Retreive networks available
-        :param columns: List with select query parameters
-        :param db_filter: List with where query parameters
-        :param limit: Query limit result
-        :return:
-        """
-        result, content = self.db.get_table(SELECT=columns, FROM='nets', WHERE=db_filter, LIMIT=limit)
-
-        if result < 0:
-            raise ovimException(str(content), -result)
-
-        convert_boolean(content, ('shared', 'admin_state_up', 'enable_dhcp'))
-
-        return content
-
-    def show_network(self, network_id, db_filter={}):
-        """
-        Get network from DB by id
-        :param network_id: net Id
-        :param db_filter: List with where query parameters
-        :return:
-        """
-        # obtain data
-        if not network_id:
-            raise ovimException("Not network id was not found")
-        db_filter['uuid'] = network_id
-
-        result, content = self.db.get_table(FROM='nets', WHERE=db_filter, LIMIT=100)
-
-        if result < 0:
-            raise ovimException(str(content), -result)
-        elif result == 0:
-            raise ovimException("show_network network '%s' not found" % network_id, -result)
-        else:
-            convert_boolean(content, ('shared', 'admin_state_up', 'enable_dhcp'))
-            # get ports from DB
-            result, ports = self.db.get_table(FROM='ports', SELECT=('uuid as port_id',),
-                                              WHERE={'net_id': network_id}, LIMIT=100)
-            if len(ports) > 0:
-                content[0]['ports'] = ports
-
-            convert_boolean(content, ('shared', 'admin_state_up', 'enable_dhcp'))
-            return content[0]
-
    def new_network(self, network):
        """
        Create a network in the database.

        :param network: dict with the network description (name, type,
            provider, vlan, bind_net/bind_type, dhcp fields...); it is
            completed/modified in place
        :return: the uuid of the created network
        :raises ovimException: on invalid parameters or database errors
        """
        tenant_id = network.get('tenant_id')

        if tenant_id:
            result, _ = self.db.get_table(FROM='tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id, "enabled": True})
            if result <= 0:
                raise ovimException("set_network error, no tenant founded", -result)

        bridge_net = None
        # check valid params
        net_provider = network.get('provider')
        net_type = network.get('type')
        net_vlan = network.get("vlan")
        net_bind_net = network.get("bind_net")
        net_bind_type = network.get("bind_type")
        name = network["name"]

        # check if network name ends with :<vlan_tag> and network exist in order to make and automated bindning
        vlan_index = name.rfind(":")
        if not net_bind_net and not net_bind_type and vlan_index > 1:
            try:
                vlan_tag = int(name[vlan_index + 1:])
                # NOTE(review): 'not vlan_tag and vlan_tag < 4096' only matches
                # vlan_tag == 0; it looks like 'vlan_tag and ...' was intended
                if not vlan_tag and vlan_tag < 4096:
                    net_bind_net = name[:vlan_index]
                    net_bind_type = "vlan:" + name[vlan_index + 1:]
            except:
                pass

        if net_bind_net:
            # look for a valid net
            if self._check_valid_uuid(net_bind_net):
                net_bind_key = "uuid"
            else:
                net_bind_key = "name"
            result, content = self.db.get_table(FROM='nets', WHERE={net_bind_key: net_bind_net})
            if result < 0:
                raise ovimException(' getting nets from db ' + content, HTTP_Internal_Server_Error)
            elif result == 0:
                raise ovimException(" bind_net %s '%s'not found" % (net_bind_key, net_bind_net), HTTP_Bad_Request)
            elif result > 1:
                raise ovimException(" more than one bind_net %s '%s' found, use uuid" % (net_bind_key, net_bind_net), HTTP_Bad_Request)
            network["bind_net"] = content[0]["uuid"]

        if net_bind_type:
            if net_bind_type[0:5] != "vlan:":
                raise ovimException("bad format for 'bind_type', must be 'vlan:<tag>'", HTTP_Bad_Request)
            if int(net_bind_type[5:]) > 4095 or int(net_bind_type[5:]) <= 0:
                raise ovimException("bad format for 'bind_type', must be 'vlan:<tag>' with a tag between 1 and 4095",
                                    HTTP_Bad_Request)
            network["bind_type"] = net_bind_type

        # deduce/validate the net type from the provider prefix
        if net_provider:
            if net_provider[:9] == "openflow:":
                if net_type:
                    if net_type != "ptp" and net_type != "data":
                        raise ovimException(" only 'ptp' or 'data' net types can be bound to 'openflow'",
                                            HTTP_Bad_Request)
                else:
                    net_type = 'data'
            else:
                if net_type:
                    if net_type != "bridge_man" and net_type != "bridge_data":
                        raise ovimException("Only 'bridge_man' or 'bridge_data' net types can be bound "
                                            "to 'bridge', 'macvtap' or 'default", HTTP_Bad_Request)
                else:
                    net_type = 'bridge_man'

        if not net_type:
            net_type = 'bridge_man'

        if net_provider:
            if net_provider[:7] == 'bridge:':
                # check it is one of the pre-provisioned bridges
                bridge_net_name = net_provider[7:]
                for brnet in self.config['bridge_nets']:
                    if brnet[0] == bridge_net_name:  # free
                        if not brnet[3]:
                            raise ovimException("invalid 'provider:physical', "
                                                "bridge '%s' is already used" % bridge_net_name, HTTP_Conflict)
                        bridge_net = brnet
                        net_vlan = brnet[1]
                        break
                        # if bridge_net==None:
                        #    bottle.abort(HTTP_Bad_Request, "invalid 'provider:physical', bridge '%s' is not one of the
                        #                    provisioned 'bridge_ifaces' in the configuration file" % bridge_net_name)
                        #    return

        elif self.config['network_type'] == 'bridge' and (net_type == 'bridge_data' or net_type == 'bridge_man'):
            # look for a free precreated nets
            for brnet in self.config['bridge_nets']:
                if not brnet[3]:  # free
                    if not bridge_net:
                        # NOTE(review): this branch dereferences bridge_net
                        # while it is still None (TypeError); the if/else
                        # bodies look swapped vs. the intended first-assignment
                        if net_type == 'bridge_man':  # look for the smaller speed
                            if brnet[2] < bridge_net[2]:
                                bridge_net = brnet
                        else:  # look for the larger speed
                            if brnet[2] > bridge_net[2]:
                                bridge_net = brnet
                    else:
                        bridge_net = brnet
                        net_vlan = brnet[1]
            if not bridge_net:
                raise ovimException("Max limits of bridge networks reached. Future versions of VIM "
                                    "will overcome this limit", HTTP_Bad_Request)
            else:
                # NOTE(review): concatenating a list to a str raises TypeError;
                # probably str(bridge_net) or bridge_net[0] was intended
                self.logger.debug("using net " + bridge_net)
                net_provider = "bridge:" + bridge_net[0]
                net_vlan = bridge_net[1]
        elif net_type == 'bridge_data' or net_type == 'bridge_man' and self.config['network_type'] == 'ovs':
            # NOTE(review): due to and/or precedence this also matches plain
            # 'bridge_data' regardless of network_type; parenthesize to fix
            net_provider = 'OVS'
        if not net_vlan and (net_type == "data" or net_type == "ptp" or net_provider == "OVS"):
            net_vlan = self.db.get_free_net_vlan()
            if net_vlan < 0:
                raise ovimException("Error getting an available vlan", HTTP_Internal_Server_Error)
        if net_provider == 'OVS':
            net_provider = 'OVS' + ":" + str(net_vlan)

        network['provider'] = net_provider
        network['type'] = net_type
        network['vlan'] = net_vlan
        dhcp_integrity = True
        if 'enable_dhcp' in network and network['enable_dhcp']:
            dhcp_integrity = self._check_dhcp_data_integrity(network)

        result, content = self.db.new_row('nets', network, True, True)

        if result >= 0 and dhcp_integrity:
            if bridge_net:
                # store the new uuid so the bridge is marked as used
                bridge_net[3] = content
            if self.config.get("dhcp_server") and self.config['network_type'] == 'bridge':
                if network["name"] in self.config["dhcp_server"].get("nets", ()):
                    self.config["dhcp_nets"].append(content)
                    # NOTE(review): logger.debug is given extra args without
                    # '%s' placeholders in these two calls
                    self.logger.debug("dhcp_server: add new net", content)
                elif not bridge_net and bridge_net[0] in self.config["dhcp_server"].get("bridge_ifaces", ()):
                    # NOTE(review): 'not bridge_net and bridge_net[0]' indexes
                    # None when it is taken; condition looks inverted
                    self.config["dhcp_nets"].append(content)
                    self.logger.debug("dhcp_server: add new net", content, content)
            return content
        else:
            raise ovimException("Error posting network", HTTP_Internal_Server_Error)
-# TODO kei change update->edit
-
-    def edit_network(self, network_id, network):
-        """
-        Update entwork data byt id
-        :return:
-        """
-        # Look for the previous data
-        where_ = {'uuid': network_id}
-        result, network_old = self.db.get_table(FROM='nets', WHERE=where_)
-        if result < 0:
-            raise ovimException("Error updating network %s" % network_old, HTTP_Internal_Server_Error)
-        elif result == 0:
-            raise ovimException('network %s not found' % network_id, HTTP_Not_Found)
-        # get ports
-        nbports, content = self.db.get_table(FROM='ports', SELECT=('uuid as port_id',),
-                                             WHERE={'net_id': network_id}, LIMIT=100)
-        if result < 0:
-            raise ovimException("http_put_network_id error %d %s" % (result, network_old), HTTP_Internal_Server_Error)
-        if nbports > 0:
-            if 'type' in network and network['type'] != network_old[0]['type']:
-                raise ovimException("Can not change type of network while having ports attached",
-                                    HTTP_Method_Not_Allowed)
-            if 'vlan' in network and network['vlan'] != network_old[0]['vlan']:
-                raise ovimException("Can not change vlan of network while having ports attached",
-                                    HTTP_Method_Not_Allowed)
-
-        # check valid params
-        net_provider = network.get('provider', network_old[0]['provider'])
-        net_type = network.get('type', network_old[0]['type'])
-        net_bind_net = network.get("bind_net")
-        net_bind_type = network.get("bind_type")
-        if net_bind_net:
-            # look for a valid net
-            if self._check_valid_uuid(net_bind_net):
-                net_bind_key = "uuid"
-            else:
-                net_bind_key = "name"
-            result, content = self.db.get_table(FROM='nets', WHERE={net_bind_key: net_bind_net})
-            if result < 0:
-                raise ovimException('Getting nets from db ' + content, HTTP_Internal_Server_Error)
-            elif result == 0:
-                raise ovimException("bind_net %s '%s'not found" % (net_bind_key, net_bind_net), HTTP_Bad_Request)
-            elif result > 1:
-                raise ovimException("More than one bind_net %s '%s' found, use uuid" % (net_bind_key, net_bind_net),
-                                    HTTP_Bad_Request)
-            network["bind_net"] = content[0]["uuid"]
-        if net_bind_type:
-            if net_bind_type[0:5] != "vlan:":
-                raise ovimException("Bad format for 'bind_type', must be 'vlan:<tag>'", HTTP_Bad_Request)
-            if int(net_bind_type[5:]) > 4095 or int(net_bind_type[5:]) <= 0:
-                raise ovimException("bad format for 'bind_type', must be 'vlan:<tag>' with a tag between 1 and 4095",
-                                    HTTP_Bad_Request)
-        if net_provider:
-            if net_provider[:9] == "openflow:":
-                if net_type != "ptp" and net_type != "data":
-                    raise ovimException("Only 'ptp' or 'data' net types can be bound to 'openflow'", HTTP_Bad_Request)
-            else:
-                if net_type != "bridge_man" and net_type != "bridge_data":
-                    raise ovimException("Only 'bridge_man' or 'bridge_data' net types can be bound to "
-                                        "'bridge', 'macvtap' or 'default", HTTP_Bad_Request)
-
-        # insert in data base
-        result, content = self.db.update_rows('nets', network, WHERE={'uuid': network_id}, log=True)
-        if result >= 0:
-            # if result > 0 and nbports>0 and 'admin_state_up' in network
-            #     and network['admin_state_up'] != network_old[0]['admin_state_up']:
-            if result > 0:
-
-                try:
-                    if nbports:
-                        self.net_update_ofc_thread(network_id)
-                except ovimException as e:
-                    raise ovimException("Error while launching openflow rules in network '{}' {}"
-                                        .format(network_id, str(e)), HTTP_Internal_Server_Error)
-                except Exception as e:
-                    raise ovimException("Error while launching openflow rules in network '{}' {}"
-                                        .format(network_id, str(e)), HTTP_Internal_Server_Error)
-
-                if self.config.get("dhcp_server"):
-                    if network_id in self.config["dhcp_nets"]:
-                        self.config["dhcp_nets"].remove(network_id)
-                    if network.get("name", network_old[0]["name"]) in self.config["dhcp_server"].get("nets", ()):
-                        self.config["dhcp_nets"].append(network_id)
-                    else:
-                        net_bind = network.get("bind_type", network_old[0]["bind_type"])
-                        if net_bind and net_bind and net_bind[:7] == "bridge:" and net_bind[7:] in self.config["dhcp_server"].get(
-                                "bridge_ifaces", ()):
-                            self.config["dhcp_nets"].append(network_id)
-            return network_id
-        else:
-            raise ovimException(content, -result)
-
-    def delete_network(self, network_id):
-        """
-        Delete network by network id
-        :param network_id:  network id
-        :return:
-        """
-
-        # delete from the data base
-        result, content = self.db.delete_row('nets', network_id)
-
-        if result == 0:
-            raise ovimException("Network %s not found " % network_id, HTTP_Not_Found)
-        elif result > 0:
-            for brnet in self.config['bridge_nets']:
-                if brnet[3] == network_id:
-                    brnet[3] = None
-                    break
-            if self.config.get("dhcp_server") and network_id in self.config["dhcp_nets"]:
-                self.config["dhcp_nets"].remove(network_id)
-            return content
-        else:
-            raise ovimException("Error deleting  network %s" % network_id, HTTP_Internal_Server_Error)
-
-    def get_openflow_rules(self, network_id=None):
-        """
-        Get openflow id from DB
-        :param network_id: Network id, if none all networks will be retrieved
-        :return: Return a list with Openflow rules per net
-        """
-        # ignore input data
-        if not network_id:
-            where_ = {}
-        else:
-            where_ = {"net_id": network_id}
-        result, content = self.db.get_table(
-            SELECT=("name", "net_id", "ofc_id", "priority", "vlan_id", "ingress_port", "src_mac", "dst_mac", "actions"),
-            WHERE=where_, FROM='of_flows')
-
-        if result < 0:
-            raise ovimException(str(content), -result)
-        return content
-
-    def edit_openflow_rules(self, network_id=None):
-
-        """
-        To make actions over the net. The action is to reinstall the openflow rules
-        network_id can be 'all'
-        :param network_id: Network id, if none all networks will be retrieved
-        :return : Number of nets updated
-        """
-
-        # ignore input data
-        if not network_id:
-            where_ = {}
-        else:
-            where_ = {"uuid": network_id}
-        result, content = self.db.get_table(SELECT=("uuid", "type"), WHERE=where_, FROM='nets')
-
-        if result < 0:
-            raise ovimException(str(content), -result)
-
-        for net in content:
-            if net["type"] != "ptp" and net["type"] != "data":
-                result -= 1
-                continue
-
-            try:
-                self.net_update_ofc_thread(net['uuid'])
-            except ovimException as e:
-                raise ovimException("Error updating network'{}' {}".format(net['uuid'], str(e)),
-                                    HTTP_Internal_Server_Error)
-            except Exception as e:
-                raise ovimException("Error updating network '{}' {}".format(net['uuid'], str(e)),
-                                    HTTP_Internal_Server_Error)
-
-        return result
-
-    def delete_openflow_rules(self, ofc_id=None):
-        """
-        To make actions over the net. The action is to delete ALL openflow rules
-        :return: return operation result
-        """
-
-        if not ofc_id:
-            if 'Default' in self.config['ofcs_thread']:
-                r, c = self.config['ofcs_thread']['Default'].insert_task("clear-all")
-            else:
-                raise ovimException("Default Openflow controller not not running", HTTP_Not_Found)
-
-        elif ofc_id in self.config['ofcs_thread']:
-            r, c = self.config['ofcs_thread'][ofc_id].insert_task("clear-all")
-
-            # ignore input data
-            if r < 0:
-                raise ovimException(str(c), -r)
-        else:
-            raise ovimException("Openflow controller not found with ofc_id={}".format(ofc_id), HTTP_Not_Found)
-        return r
-
-    def get_openflow_ports(self, ofc_id=None):
-        """
-        Obtain switch ports names of openflow controller
-        :return: Return flow ports in DB
-        """
-        if not ofc_id:
-            if 'Default' in self.config['ofcs_thread']:
-                conn = self.config['ofcs_thread']['Default'].OF_connector
-            else:
-                raise ovimException("Default Openflow controller not not running", HTTP_Not_Found)
-
-        if ofc_id in self.config['ofcs_thread']:
-            conn = self.config['ofcs_thread'][ofc_id].OF_connector
-        else:
-            raise ovimException("Openflow controller not found with ofc_id={}".format(ofc_id), HTTP_Not_Found)
-        return conn.pp2ofi
-
-    def get_ports(self, columns=None, filter={}, limit=None):
-        # result, content = my.db.get_ports(where_)
-        result, content = self.db.get_table(SELECT=columns, WHERE=filter, FROM='ports', LIMIT=limit)
-        if result < 0:
-            self.logger.error("http_get_ports Error %d %s", result, content)
-            raise ovimException(str(content), -result)
-        else:
-            convert_boolean(content, ('admin_state_up',))
-            return content
-
-    def new_port(self, port_data):
-        port_data['type'] = 'external'
-        if port_data.get('net_id'):
-            # check that new net has the correct type
-            result, new_net = self.db.check_target_net(port_data['net_id'], None, 'external')
-            if result < 0:
-                raise ovimException(str(new_net), -result)
-        # insert in data base
-        result, uuid = self.db.new_row('ports', port_data, True, True)
-        if result > 0:
-            if 'net_id' in port_data:
-                try:
-                    self.net_update_ofc_thread(port_data['net_id'])
-                except ovimException as e:
-                    raise ovimException("Cannot insert a task for updating network '{}' {}"
-                                        .format(port_data['net_id'], str(e)), HTTP_Internal_Server_Error)
-                except Exception as e:
-                    raise ovimException("Cannot insert a task for updating network '{}' {}"
-                                        .format(port_data['net_id'], str(e)), HTTP_Internal_Server_Error)
-
-            return uuid
-        else:
-            raise ovimException(str(uuid), -result)
-
    def new_external_port(self, port_data):
        """
        Create new external port and check port mapping correspondence
        :param port_data: port_data = {
            'region': 'datacenter region',
            'compute_node': 'compute node id',
            'pci': 'pci port address',
            'vlan': 'net vlan',
            'net_id': 'net id',
            'tenant_id': 'tenant id',
            'mac': 'switch mac',
            'name': 'port name'
            'ip_address': 'ip address - optional'}
        :return: uuid of the created port. Raises ovimException on error.
        """

        port_data['type'] = 'external'

        if port_data.get('net_id'):
            # check that new net has the correct type
            result, new_net = self.db.check_target_net(port_data['net_id'], None, 'external')
            if result < 0:
                raise ovimException(str(new_net), -result)
        # insert in data base
        # build the filter used to locate the switch-port mapping of this physical port
        db_filter = {}

        if port_data.get('region'):
            db_filter['region'] = port_data['region']
        if port_data.get('pci'):
            db_filter['pci'] = port_data['pci']
        if port_data.get('compute_node'):
            db_filter['compute_node'] = port_data['compute_node']

        columns = ['ofc_id', 'switch_dpid', 'switch_port', 'switch_mac', 'pci']
        port_mapping_data = self.get_of_port_mappings(columns, db_filter)

        # exactly one mapping must match, otherwise the request is ambiguous or wrong
        if not len(port_mapping_data):
            raise ovimException("No port mapping founded for '{}'".format(str(db_filter)),
                                HTTP_Not_Found)
        elif len(port_mapping_data) > 1:
            raise ovimException("Wrong port data was given, please check pci, region & compute id data",
                                HTTP_Conflict)

        # copy the switch location of the mapping into the port row
        port_data['ofc_id'] = port_mapping_data[0]['ofc_id']
        port_data['switch_dpid'] = port_mapping_data[0]['switch_dpid']
        port_data['switch_port'] = port_mapping_data[0]['switch_port']
        port_data['switch_mac'] = port_mapping_data[0]['switch_mac']

        # remove from compute_node, region and pci of_port_data to adapt to 'ports' structure
        if 'region' in port_data:
            del port_data['region']
        if 'pci' in port_data:
            del port_data['pci']
        if 'compute_node' in port_data:
            del port_data['compute_node']

        result, uuid = self.db.new_row('ports', port_data, True, True)
        if result > 0:
            try:
                # schedule the openflow rules update of the attached net
                self.net_update_ofc_thread(port_data['net_id'], port_data['ofc_id'])
            except ovimException as e:
                raise ovimException("Cannot insert a task for updating network '{}' {}".
                                    format(port_data['net_id'], str(e)), HTTP_Internal_Server_Error)
            except Exception as e:
                raise ovimException("Cannot insert a task for updating network '{}' {}"
                                    .format(port_data['net_id'], e), HTTP_Internal_Server_Error)
            return uuid
        else:
            raise ovimException(str(uuid), -result)
-
    def net_update_ofc_thread(self, net_id, ofc_id=None, switch_dpid=None):
        """
        Insert a update net task by net id or ofc_id for each ofc thread
        :param net_id: network id
        :param ofc_id: openflow controller id; when None it is deduced from the net ports
        :param switch_dpid: switch dpid, used to locate the thread when no ofc_id matches
        :return: None. Raises ovimException if the task cannot be inserted.
        """
        if not net_id:
            raise ovimException("No net_id received", HTTP_Internal_Server_Error)

        # defaults reported when no controller thread can be resolved below
        r = -1
        c = 'No valid ofc_id or switch_dpid received'

        if not ofc_id:
            # deduce the controller from the first port of the net that declares one
            ports = self.get_ports(filter={"net_id": net_id})
            for port in ports:
                port_ofc_id = port.get('ofc_id', None)
                if port_ofc_id:
                    ofc_id = port['ofc_id']
                    switch_dpid = port['switch_dpid']
                    break
        #TODO if not ofc_id: look at database table ofcs


        # If no ofc_id found it, default ofc_id is used.
        if not ofc_id and not switch_dpid:
            ofc_id = "Default"

        if ofc_id and ofc_id in self.config['ofcs_thread']:
            r, c = self.config['ofcs_thread'][ofc_id].insert_task("update-net", net_id)
        elif switch_dpid:
            # fall back to locating the thread by the dpid of the switch
            ofcs_dpid_list = self.config['ofcs_thread_dpid']
            for ofc_t in ofcs_dpid_list:
                if switch_dpid in ofc_t:
                    r, c = ofc_t[switch_dpid].insert_task("update-net", net_id)

        if r < 0:
            message = "Cannot insert a task for updating network '{}', {}".format(net_id, c)
            self.logger.error(message)
            raise ovimException(message, HTTP_Internal_Server_Error)
-
    def delete_port(self, port_id):
        """
        Delete an external port and schedule the openflow rules update of its net.
        :param port_id: uuid of the external port to delete
        :return: database deletion result content. Raises ovimException on error.
        """
        # Look for the previous port data
        result, ports = self.db.get_table(WHERE={'uuid': port_id, "type": "external"}, FROM='ports')
        if result < 0:
            raise ovimException("Cannot get port info from database: {}".format(ports), http_code=-result)
        # delete from the data base
        result, content = self.db.delete_row('ports', port_id)
        if result == 0:
            raise ovimException("External port '{}' not found".format(port_id), http_code=HTTP_Not_Found)
        elif result < 0:
            raise ovimException("Cannot delete port from database: {}".format(content), http_code=-result)
        # update network
        network = ports[0].get('net_id', None)
        if network:
            # change of net.

            try:
                self.net_update_ofc_thread(network, ofc_id=ports[0]["ofc_id"], switch_dpid=ports[0]["switch_dpid"])
            except ovimException as e:
                raise ovimException("Cannot insert a task for delete network '{}' {}".format(network, str(e)),
                                    HTTP_Internal_Server_Error)
            except Exception as e:
                raise ovimException("Cannot insert a task for delete network '{}' {}".format(network, str(e)),
                                    HTTP_Internal_Server_Error)

        return content
-
-    def edit_port(self, port_id, port_data, admin=True):
-        # Look for the previous port data
-        result, content = self.db.get_table(FROM="ports", WHERE={'uuid': port_id})
-        if result < 0:
-            raise ovimException("Cannot get port info from database: {}".format(content), http_code=-result)
-        elif result == 0:
-            raise ovimException("Port '{}' not found".format(port_id), http_code=HTTP_Not_Found)
-        port = content[0]
-        nets = []
-        host_id = None
-        result = 1
-        if 'net_id' in port_data:
-            # change of net.
-            old_net = port.get('net_id', None)
-            new_net = port_data['net_id']
-            if old_net != new_net:
-
-                if new_net:
-                    nets.append(new_net)  # put first the new net, so that new openflow rules are created before removing the old ones
-                if old_net:
-                    nets.append(old_net)
-                if port['type'] == 'instance:bridge' or port['type'] == 'instance:ovs':
-                    raise ovimException("bridge interfaces cannot be attached to a different net", http_code=HTTP_Forbidden)
-                elif port['type'] == 'external' and not admin:
-                    raise ovimException("Needed admin privileges",http_code=HTTP_Unauthorized)
-                if new_net:
-                    # check that new net has the correct type
-                    result, new_net_dict = self.db.check_target_net(new_net, None, port['type'])
-                    if result < 0:
-                        raise ovimException("Error {}".format(new_net_dict), http_code=HTTP_Conflict)
-                # change VLAN for SR-IOV ports
-                if result >= 0 and port["type"] == "instance:data" and port["model"] == "VF":  # TODO consider also VFnotShared
-                    if new_net:
-                        port_data["vlan"] = None
-                    else:
-                        port_data["vlan"] = new_net_dict["vlan"]
-                    # get host where this VM is allocated
-                    result, content = self.db.get_table(FROM="instances", WHERE={"uuid": port["instance_id"]})
-                    if result > 0:
-                        host_id = content[0]["host_id"]
-
-        # insert in data base
-        if result >= 0:
-            result, content = self.db.update_rows('ports', port_data, WHERE={'uuid': port_id}, log=False)
-            port.update(port_data)
-
-        # Insert task to complete actions
-        if result > 0:
-            for net_id in nets:
-                try:
-                    self.net_update_ofc_thread(net_id, port["ofc_id"], switch_dpid=port["switch_dpid"])
-                except ovimException as e:
-                    raise ovimException("Error updating network'{}' {}".format(net_id, str(e)),
-                                        HTTP_Internal_Server_Error)
-                except Exception as e:
-                    raise ovimException("Error updating network '{}' {}".format(net_id, str(e)),
-                                        HTTP_Internal_Server_Error)
-
-            if host_id:
-                r, v = self.config['host_threads'][host_id].insert_task("edit-iface", port_id, old_net, new_net)
-                if r < 0:
-                    self.logger.error("Error updating network '{}' {}".format(r,v))
-                    # TODO Do something if fails
-        if result >= 0:
-            return port_id
-        else:
-            raise ovimException("Error {}".format(content), http_code=-result)
-
-    def new_of_controller(self, ofc_data):
-        """
-        Create a new openflow controller into DB
-        :param ofc_data: Dict openflow controller data
-        :return: openflow controller dpid
-        """
-
-        result, ofc_uuid = self.db.new_row('ofcs', ofc_data, True, True)
-        if result < 0:
-            raise ovimException("New ofc Error %s" % ofc_uuid, HTTP_Internal_Server_Error)
-
-        ofc_data['uuid'] = ofc_uuid
-        of_conn = self._load_of_module(ofc_data)
-        self._create_ofc_task(ofc_uuid, ofc_data['dpid'], of_conn)
-
-        return ofc_uuid
-
-    def edit_of_controller(self, of_id, ofc_data):
-        """
-        Edit an openflow controller entry from DB
-        :return:
-        """
-        if not ofc_data:
-            raise ovimException("No data received during uptade OF contorller", http_code=HTTP_Internal_Server_Error)
-
-        old_of_controller = self.show_of_controller(of_id)
-
-        if old_of_controller:
-            result, content = self.db.update_rows('ofcs', ofc_data, WHERE={'uuid': of_id}, log=False)
-            if result >= 0:
-                return ofc_data
-            else:
-                raise ovimException("Error uptating OF contorller with uuid {}".format(of_id),
-                                    http_code=-result)
-        else:
-            raise ovimException("Error uptating OF contorller with uuid {}".format(of_id),
-                                http_code=HTTP_Internal_Server_Error)
-
-    def delete_of_controller(self, of_id):
-        """
-        Delete an openflow controller from DB.
-        :param of_id: openflow controller dpid
-        :return:
-        """
-
-        ofc = self.show_of_controller(of_id)
-
-        result, content = self.db.delete_row("ofcs", of_id)
-        if result < 0:
-            raise ovimException("Cannot delete ofc from database: {}".format(content), http_code=-result)
-        elif result == 0:
-            raise ovimException("ofc {} not found ".format(content), http_code=HTTP_Not_Found)
-
-        ofc_thread = self.config['ofcs_thread'][of_id]
-        del self.config['ofcs_thread'][of_id]
-        for ofc_th in self.config['ofcs_thread_dpid']:
-            if ofc['dpid'] in ofc_th:
-                self.config['ofcs_thread_dpid'].remove(ofc_th)
-
-        ofc_thread.insert_task("exit")
-        #ofc_thread.join()
-
-        return content
-
    def show_of_controller(self, uuid):
        """
        Retrieve a single openflow controller row from the database.
        :param uuid: uuid of the openflow controller
        :return: the openflow controller row. Raises ovimException if missing or on DB error.
        """

        result, content = self.db.get_table(FROM='ofcs', WHERE={"uuid": uuid}, LIMIT=100)

        if result == 0:
            raise ovimException("Openflow controller with uuid '{}' not found".format(uuid),
                                http_code=HTTP_Not_Found)
        elif result < 0:
            raise ovimException("Openflow controller with uuid '{}' error".format(uuid),
                                http_code=HTTP_Internal_Server_Error)
        return content[0]
-
-    def get_of_controllers(self, columns=None, db_filter={}, limit=None):
-        """
-        Show an openflow controllers from DB.
-        :param columns:  List with SELECT query parameters
-        :param db_filter: List with where query parameters
-        :param limit: result Limit
-        :return:
-        """
-        result, content = self.db.get_table(SELECT=columns, FROM='ofcs', WHERE=db_filter, LIMIT=limit)
-
-        if result < 0:
-            raise ovimException(str(content), -result)
-
-        return content
-
-    def get_tenants(self, columns=None, db_filter={}, limit=None):
-        """
-        Retrieve tenant list from DB
-        :param columns:  List with SELECT query parameters
-        :param db_filter: List with where query parameters
-        :param limit: result limit
-        :return:
-        """
-        result, content = self.db.get_table(FROM='tenants', SELECT=columns, WHERE=db_filter, LIMIT=limit)
-        if result < 0:
-            raise ovimException('get_tenatns Error {}'.format(str(content)), -result)
-        else:
-            convert_boolean(content, ('enabled',))
-            return content
-
-    def show_tenant_id(self, tenant_id):
-        """
-        Get tenant from DB by id
-        :param tenant_id: tenant id
-        :return:
-        """
-        result, content = self.db.get_table(FROM='tenants', SELECT=('uuid', 'name', 'description', 'enabled'),
-                                            WHERE={"uuid": tenant_id})
-        if result < 0:
-            raise ovimException(str(content), -result)
-        elif result == 0:
-            raise ovimException("tenant with uuid='{}' not found".format(tenant_id), HTTP_Not_Found)
-        else:
-            convert_boolean(content, ('enabled',))
-            return content[0]
-
-    def new_tentant(self, tenant):
-        """
-        Create a tenant and store in DB
-        :param tenant: Dictionary with tenant data
-        :return: the uuid of created tenant. Raise exception upon error
-        """
-
-        # insert in data base
-        result, tenant_uuid = self.db.new_tenant(tenant)
-
-        if result >= 0:
-            return tenant_uuid
-        else:
-            raise ovimException(str(tenant_uuid), -result)
-
-    def delete_tentant(self, tenant_id):
-        """
-        Delete a tenant from the database.
-        :param tenant_id: Tenant id
-        :return: delete tenant id
-        """
-
-        # check permissions
-        r, tenants_flavors = self.db.get_table(FROM='tenants_flavors', SELECT=('flavor_id', 'tenant_id'),
-                                               WHERE={'tenant_id': tenant_id})
-        if r <= 0:
-            tenants_flavors = ()
-        r, tenants_images = self.db.get_table(FROM='tenants_images', SELECT=('image_id', 'tenant_id'),
-                                              WHERE={'tenant_id': tenant_id})
-        if r <= 0:
-            tenants_images = ()
-
-        result, content = self.db.delete_row('tenants', tenant_id)
-        if result == 0:
-            raise ovimException("tenant '%s' not found" % tenant_id, HTTP_Not_Found)
-        elif result > 0:
-            for flavor in tenants_flavors:
-                self.db.delete_row_by_key("flavors", "uuid", flavor['flavor_id'])
-            for image in tenants_images:
-                self.db.delete_row_by_key("images", "uuid", image['image_id'])
-            return content
-        else:
-            raise ovimException("Error deleting tenant '%s' " % tenant_id, HTTP_Internal_Server_Error)
-
-    def edit_tenant(self, tenant_id, tenant_data):
-        """
-        Update a tenant data identified by tenant id
-        :param tenant_id: tenant id
-        :param tenant_data: Dictionary with tenant data
-        :return:
-        """
-
-        # Look for the previous data
-        result, tenant_data_old = self.db.get_table(FROM='tenants', WHERE={'uuid': tenant_id})
-        if result < 0:
-            raise ovimException("Error updating tenant with uuid='{}': {}".format(tenant_id, tenant_data_old),
-                                HTTP_Internal_Server_Error)
-        elif result == 0:
-            raise ovimException("tenant with uuid='{}' not found".format(tenant_id), HTTP_Not_Found)
-
-        # insert in data base
-        result, content = self.db.update_rows('tenants', tenant_data, WHERE={'uuid': tenant_id}, log=True)
-        if result >= 0:
-            return content
-        else:
-            raise ovimException(str(content), -result)
-
-    def set_of_port_mapping(self, of_maps, ofc_id=None, switch_dpid=None, region=None):
-        """
-        Create new port mapping entry
-        :param of_maps: List with port mapping information
-        # maps =[{"ofc_id": <ofc_id>,"region": datacenter region,"compute_node": compute uuid,"pci": pci adress,
-                "switch_dpid": swith dpid,"switch_port": port name,"switch_mac": mac}]
-        :param ofc_id: ofc id
-        :param switch_dpid: switch  dpid
-        :param region: datacenter region id
-        :return:
-        """
-
-        for map in of_maps:
-            if ofc_id:
-                map['ofc_id'] = ofc_id
-            if switch_dpid:
-                map['switch_dpid'] = switch_dpid
-            if region:
-                map['region'] = region
-
-        for of_map in of_maps:
-            result, uuid = self.db.new_row('of_port_mappings', of_map, True)
-            if result > 0:
-                of_map["uuid"] = uuid
-            else:
-                raise ovimException(str(uuid), -result)
-        return of_maps
-
-    def clear_of_port_mapping(self, db_filter={}):
-        """
-        Clear port mapping filtering using db_filter dict
-        :param db_filter: Parameter to filter during remove process
-        :return:
-        """
-        result, content = self.db.delete_row_by_dict(FROM='of_port_mappings', WHERE=db_filter)
-        # delete_row_by_key
-        if result >= 0:
-            return content
-        else:
-            raise ovimException("Error deleting of_port_mappings with filter='{}'".format(str(db_filter)),
-                                HTTP_Internal_Server_Error)
-
-    def get_of_port_mappings(self, column=None, db_filter=None, db_limit=None):
-        """
-        Retrive port mapping from DB
-        :param column:
-        :param db_filter:
-        :return:
-        """
-        result, content = self.db.get_table(SELECT=column, WHERE=db_filter, FROM='of_port_mappings', LIMIT=db_limit)
-
-        if result < 0:
-            self.logger.error("get_of_port_mappings Error %d %s", result, content)
-            raise ovimException(str(content), -result)
-        else:
-            return content
-
    def get_dhcp_controller(self):
        """
        Create an host_thread object for manage openvim controller and not create a thread for itself
        :return: dhcp_host openvim controller object
        """
        # reuse the cached controller object if it was already created
        if 'openvim_controller' in self.config['host_threads']:
            return self.config['host_threads']['openvim_controller']

        bridge_ifaces = []
        controller_ip = self.config['ovs_controller_ip']
        ovs_controller_user = self.config['ovs_controller_user']

        # in test/'OF only' mode the ssh connection below is skipped
        host_test_mode = True if self.config['mode'] == 'test' or self.config['mode'] == "OF only" else False
        host_develop_mode = True if self.config['mode'] == 'development' else False

        dhcp_host = ht.host_thread(name='openvim_controller', user=ovs_controller_user, host=controller_ip,
                                   db=self.db_of,
                                   db_lock=self.db_lock, test=host_test_mode,
                                   image_path=self.config['image_path'], version=self.config['version'],
                                   host_id='openvim_controller', develop_mode=host_develop_mode,
                                   develop_bridge_iface=bridge_ifaces)

        # cache the object so subsequent calls return the same instance
        self.config['host_threads']['openvim_controller'] = dhcp_host
        if not host_test_mode:
            dhcp_host.ssh_connect()
        return dhcp_host
-
    def launch_dhcp_server(self, vlan, first_ip, last_ip, cidr, gateway):
        """
        Launch a dhcpserver base on dnsmasq attached to the net base on vlan id across the the openvim computes
        :param vlan: vlan identifier
        :param first_ip: First dhcp range ip
        :param last_ip: Last dhcp range ip
        :param cidr: net cidr
        :param gateway: net gateway
        :return: None
        """
        # derive the netmask from the cidr (IPNetwork presumably comes from netaddr
        # imported at file level -- confirm against the module imports)
        ip_tools = IPNetwork(cidr)
        dhcp_netmask = str(ip_tools.netmask)
        ip_range = [first_ip, last_ip]

        dhcp_path = self.config['ovs_controller_file_path']

        # the controller host owns the linux bridge and the dnsmasq process
        controller_host = self.get_dhcp_controller()
        controller_host.create_linux_bridge(vlan)
        controller_host.create_dhcp_interfaces(vlan, first_ip, dhcp_netmask)
        controller_host.launch_dhcp_server(vlan, ip_range, dhcp_netmask, dhcp_path, gateway)
-
if __name__ == "__main__":
    # command line entry point: report library and required database versions
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-v", "--version", help="show ovim library version", action="store_true")
    arg_parser.add_argument("--database-version", help="show required database version", action="store_true")
    parsed_args = arg_parser.parse_args()
    if parsed_args.version:
        print('openvimd version {} {}'.format(ovim.get_version(), ovim.get_version_date()))
        print('(c) Copyright Telefonica')
    elif parsed_args.database_version:
        print('required database version: {}'.format(ovim.get_database_version()))
-
diff --git a/scripts/openvim-report b/scripts/openvim-report
new file mode 100755 (executable)
index 0000000..b53243b
--- /dev/null
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#It generates a report for debugging
+
+DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
+DIRNAME=$(dirname $DIRNAME )
+OVCLIENT=$DIRNAME/../openvim
+
+#get screen log files at the beginning
+echo
+echo "-------------------------------"
+echo "log files"
+echo "-------------------------------"
+echo
+echo "cat $DIRNAME/../logs/openvim.log*"
+cat $DIRNAME/../logs/openvim.log*
+echo
+echo
+
+#get version
+echo
+echo "-------------------------------"
+echo "version"
+echo "-------------------------------"
+echo "cat $DIRNAME/../openvimd.py|grep ^__version__"
+cat $DIRNAME/../openvimd.py|grep ^__version__
+echo
+echo
+
+#get configuration files
+echo "-------------------------------"
+echo "Configuration files"
+echo "-------------------------------"
+echo "cat $DIRNAME/../openvimd.cfg"
+cat $DIRNAME/../openvimd.cfg
+echo
+
+#get list of items
+for verbose in "" "-vvv"
+do
+  echo "-------------------------------"
+  echo "OPENVIM$verbose"
+  echo "-------------------------------"
+  echo "$OVCLIENT config"
+  $OVCLIENT config
+  echo "-------------------------------"
+  echo "$OVCLIENT tenant-list $verbose"
+  $OVCLIENT tenant-list $verbose
+  echo "-------------------------------"
+  echo "$OVCLIENT host-list $verbose"
+  $OVCLIENT host-list $verbose
+  echo "-------------------------------"
+  echo "$OVCLIENT net-list $verbose"
+  $OVCLIENT net-list $verbose
+  echo "-------------------------------"
+  echo "$OVCLIENT port-list $verbose"
+  $OVCLIENT port-list $verbose
+  echo "-------------------------------"
+  echo "$OVCLIENT flavor-list $verbose"
+  $OVCLIENT flavor-list $verbose
+  echo "-------------------------------"
+  echo "$OVCLIENT image-list $verbose"
+  $OVCLIENT image-list $verbose
+  echo "-------------------------------"
+  echo "$OVCLIENT vm-list $verbose"
+  $OVCLIENT vm-list $verbose
+  echo "-------------------------------"
+  echo
+
+done
+echo
diff --git a/scripts/openvim-report.sh b/scripts/openvim-report.sh
deleted file mode 100755 (executable)
index b53243b..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#It generates a report for debugging
-
-DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
-DIRNAME=$(dirname $DIRNAME )
-OVCLIENT=$DIRNAME/../openvim
-
-#get screen log files at the beginning
-echo
-echo "-------------------------------"
-echo "log files"
-echo "-------------------------------"
-echo
-echo "cat $DIRNAME/../logs/openvim.log*"
-cat $DIRNAME/../logs/openvim.log*
-echo
-echo
-
-#get version
-echo
-echo "-------------------------------"
-echo "version"
-echo "-------------------------------"
-echo "cat $DIRNAME/../openvimd.py|grep ^__version__"
-cat $DIRNAME/../openvimd.py|grep ^__version__
-echo
-echo
-
-#get configuration files
-echo "-------------------------------"
-echo "Configuration files"
-echo "-------------------------------"
-echo "cat $DIRNAME/../openvimd.cfg"
-cat $DIRNAME/../openvimd.cfg
-echo
-
-#get list of items
-for verbose in "" "-vvv"
-do
-  echo "-------------------------------"
-  echo "OPENVIM$verbose"
-  echo "-------------------------------"
-  echo "$OVCLIENT config"
-  $OVCLIENT config
-  echo "-------------------------------"
-  echo "$OVCLIENT tenant-list $verbose"
-  $OVCLIENT tenant-list $verbose
-  echo "-------------------------------"
-  echo "$OVCLIENT host-list $verbose"
-  $OVCLIENT host-list $verbose
-  echo "-------------------------------"
-  echo "$OVCLIENT net-list $verbose"
-  $OVCLIENT net-list $verbose
-  echo "-------------------------------"
-  echo "$OVCLIENT port-list $verbose"
-  $OVCLIENT port-list $verbose
-  echo "-------------------------------"
-  echo "$OVCLIENT flavor-list $verbose"
-  $OVCLIENT flavor-list $verbose
-  echo "-------------------------------"
-  echo "$OVCLIENT image-list $verbose"
-  $OVCLIENT image-list $verbose
-  echo "-------------------------------"
-  echo "$OVCLIENT vm-list $verbose"
-  $OVCLIENT vm-list $verbose
-  echo "-------------------------------"
-  echo
-
-done
-echo
diff --git a/scripts/service-floodlight b/scripts/service-floodlight
new file mode 100755 (executable)
index 0000000..b8aa38e
--- /dev/null
@@ -0,0 +1,163 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#launch floodlight inside a screen. It assumes shell variable $FLOODLIGHT_PATH
+# contain the installation path
+
+
+DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
+DIRNAME=$(dirname $DIRNAME )
+DIR_OM=$(dirname $DIRNAME )
+
+function usage(){
+    echo -e "Usage: $0 start|stop|restart|status"
+    echo -e "  Launch|Removes|Restart|Getstatus floodlight on a screen"
+    echo -e "  Shell variable FLOODLIGHT_PATH must indicate floodlight installationpath"
+}
+
+function kill_pid(){
+    #send TERM signal and wait 5 seconds and send KILL signal ir still running
+    #PARAMS: $1: PID of process to terminate
+    kill $1 #send TERM signal
+    WAIT=5
+    while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1
+    do
+        sleep 1
+        WAIT=$((WAIT-1))
+        [ $WAIT -eq 0 ] && echo -n "sending SIGKILL...  " &&  kill -9 $1  #kill when count reach 0
+    done
+    echo "done"
+   
+}
+
+#obtain parameters
+#om_action="start"  #uncoment to get a default action
+for param in $*
+do
+    [ "$param" == "start" -o "$param" == "stop"  -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param  && continue
+    [ "$param" == "openflow" -o "$param" == "flow" -o "$param" == "floodlight" ] && continue
+    [ "$param" == "-h" -o "$param" == "--help" ] && usage && exit 0
+    
+    #if none of above, reach this line because a param is incorrect
+    echo "Unknown param '$param' type $0 --help" >&2
+    exit -1
+done
+
+#check action is provided
+[ -z "$om_action" ] && usage >&2 && exit -1
+
+    om_cmd="floodlight.jar"
+    om_name="floodlight"
+    
+    #obtain PID of program
+    component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'`
+
+    #status
+    if [ "$om_action" == "status" ]
+    then
+        [ -n "$component_id" ] && echo "    $om_name running, pid $component_id"
+        [ -z "$component_id" ] && echo "    $om_name stopped"
+    fi
+
+    #stop
+    if [ "$om_action" == "stop" -o "$om_action" == "restart" ]
+    then
+        #terminates program
+        [ -n "$component_id" ] && echo -n "    stopping $om_name ... " && kill_pid $component_id 
+        component_id=""
+        #terminates screen
+        if screen -wipe | grep -Fq .flow
+        then
+            screen -S flow -p 0 -X stuff "exit\n"
+            sleep 1
+        fi
+    fi
+
+    #start
+    if [ "$om_action" == "start" -o "$om_action" == "restart" ]
+    then
+        [[ -z $FLOODLIGHT_PATH ]] && echo "FLOODLIGHT_PATH shell variable must indicate floodlight installation path" >&2 && exit -1
+        #calculates log file name
+        logfile=""
+        mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/openflow.log || echo "can not create logs directory  $DIR_OM/logs"
+        #check already running
+        [ -n "$component_id" ] && echo "    $om_name is already running. Skipping" && continue
+        #create screen if not created
+        echo -n "    starting $om_name ... "
+        if ! screen -wipe | grep -Fq .flow
+        then
+            pushd ${FLOODLIGHT_PATH} > /dev/null
+            screen -dmS flow  bash
+            sleep 1
+            popd > /dev/null
+        else
+            echo -n " using existing screen 'flow' ... "
+            screen -S flow -p 0 -X log off
+            screen -S flow -p 0 -X stuff "cd ${FLOODLIGHT_PATH}\n"
+            sleep 1
+        fi
+        #move old log file index one number up and log again in index 0
+        if [[ -n $logfile ]]
+        then
+            for index in 8 7 6 5 4 3 2 1
+            do
+                [[ -f ${logfile}.${index} ]] && mv ${logfile}.${index} ${logfile}.$((index+1))
+            done
+            [[ -f ${logfile} ]] && mv ${logfile} ${logfile}.1
+            screen -S flow -p 0 -X logfile ${logfile}
+            screen -S flow -p 0 -X log on
+        fi
+        #launch command to screen
+        screen -S flow -p 0 -X stuff "java  -Dlogback.configurationFile=${DIRNAME}/flow-logback.xml -jar ./target/floodlight.jar -cf ${DIRNAME}/flow.properties_v0.9\n"
+        #check if is running
+        [[ -n $logfile ]] && timeout=120 #2 minute
+        [[ -z $logfile ]] && timeout=20
+        while [[ $timeout -gt 0 ]]
+        do
+           #check if is running
+           #echo timeout $timeout
+           #if !  ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd}
+           log_lines=0
+           [[ -n $logfile ]] && log_lines=`head ${logfile} | wc -l`
+           component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'`
+           if [[ -z $component_id ]]
+           then #process not started or finished
+               [[ $log_lines -ge 2 ]] &&  echo -n "ERROR, it has exited." && break
+               #started because writted serveral lines at log so report error
+           fi
+           [[ -n $logfile ]] && grep -q "Listening for switch connections" ${logfile} && sleep 1 && break
+           sleep 1
+           timeout=$((timeout -1))
+        done
+        if [[ -n $logfile ]] && [[ $timeout == 0 ]] 
+        then 
+           echo -n "timeout!"
+        else
+           echo -n "running on 'screen -x flow'."
+        fi
+        [[ -n $logfile ]] && echo "  Logging at '${logfile}'" || echo
+    fi
+
+
+
+
diff --git a/scripts/service-floodlight.sh b/scripts/service-floodlight.sh
deleted file mode 100755 (executable)
index b8aa38e..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#launch floodlight inside a screen. It assumes shell variable $FLOODLIGHT_PATH
-# contain the installation path
-
-
-DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
-DIRNAME=$(dirname $DIRNAME )
-DIR_OM=$(dirname $DIRNAME )
-
-function usage(){
-    echo -e "Usage: $0 start|stop|restart|status"
-    echo -e "  Launch|Removes|Restart|Getstatus floodlight on a screen"
-    echo -e "  Shell variable FLOODLIGHT_PATH must indicate floodlight installationpath"
-}
-
-function kill_pid(){
-    #send TERM signal and wait 5 seconds and send KILL signal ir still running
-    #PARAMS: $1: PID of process to terminate
-    kill $1 #send TERM signal
-    WAIT=5
-    while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1
-    do
-        sleep 1
-        WAIT=$((WAIT-1))
-        [ $WAIT -eq 0 ] && echo -n "sending SIGKILL...  " &&  kill -9 $1  #kill when count reach 0
-    done
-    echo "done"
-   
-}
-
-#obtain parameters
-#om_action="start"  #uncoment to get a default action
-for param in $*
-do
-    [ "$param" == "start" -o "$param" == "stop"  -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param  && continue
-    [ "$param" == "openflow" -o "$param" == "flow" -o "$param" == "floodlight" ] && continue
-    [ "$param" == "-h" -o "$param" == "--help" ] && usage && exit 0
-    
-    #if none of above, reach this line because a param is incorrect
-    echo "Unknown param '$param' type $0 --help" >&2
-    exit -1
-done
-
-#check action is provided
-[ -z "$om_action" ] && usage >&2 && exit -1
-
-    om_cmd="floodlight.jar"
-    om_name="floodlight"
-    
-    #obtain PID of program
-    component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'`
-
-    #status
-    if [ "$om_action" == "status" ]
-    then
-        [ -n "$component_id" ] && echo "    $om_name running, pid $component_id"
-        [ -z "$component_id" ] && echo "    $om_name stopped"
-    fi
-
-    #stop
-    if [ "$om_action" == "stop" -o "$om_action" == "restart" ]
-    then
-        #terminates program
-        [ -n "$component_id" ] && echo -n "    stopping $om_name ... " && kill_pid $component_id 
-        component_id=""
-        #terminates screen
-        if screen -wipe | grep -Fq .flow
-        then
-            screen -S flow -p 0 -X stuff "exit\n"
-            sleep 1
-        fi
-    fi
-
-    #start
-    if [ "$om_action" == "start" -o "$om_action" == "restart" ]
-    then
-        [[ -z $FLOODLIGHT_PATH ]] && echo "FLOODLIGHT_PATH shell variable must indicate floodlight installation path" >&2 && exit -1
-        #calculates log file name
-        logfile=""
-        mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/openflow.log || echo "can not create logs directory  $DIR_OM/logs"
-        #check already running
-        [ -n "$component_id" ] && echo "    $om_name is already running. Skipping" && continue
-        #create screen if not created
-        echo -n "    starting $om_name ... "
-        if ! screen -wipe | grep -Fq .flow
-        then
-            pushd ${FLOODLIGHT_PATH} > /dev/null
-            screen -dmS flow  bash
-            sleep 1
-            popd > /dev/null
-        else
-            echo -n " using existing screen 'flow' ... "
-            screen -S flow -p 0 -X log off
-            screen -S flow -p 0 -X stuff "cd ${FLOODLIGHT_PATH}\n"
-            sleep 1
-        fi
-        #move old log file index one number up and log again in index 0
-        if [[ -n $logfile ]]
-        then
-            for index in 8 7 6 5 4 3 2 1
-            do
-                [[ -f ${logfile}.${index} ]] && mv ${logfile}.${index} ${logfile}.$((index+1))
-            done
-            [[ -f ${logfile} ]] && mv ${logfile} ${logfile}.1
-            screen -S flow -p 0 -X logfile ${logfile}
-            screen -S flow -p 0 -X log on
-        fi
-        #launch command to screen
-        screen -S flow -p 0 -X stuff "java  -Dlogback.configurationFile=${DIRNAME}/flow-logback.xml -jar ./target/floodlight.jar -cf ${DIRNAME}/flow.properties_v0.9\n"
-        #check if is running
-        [[ -n $logfile ]] && timeout=120 #2 minute
-        [[ -z $logfile ]] && timeout=20
-        while [[ $timeout -gt 0 ]]
-        do
-           #check if is running
-           #echo timeout $timeout
-           #if !  ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd}
-           log_lines=0
-           [[ -n $logfile ]] && log_lines=`head ${logfile} | wc -l`
-           component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'`
-           if [[ -z $component_id ]]
-           then #process not started or finished
-               [[ $log_lines -ge 2 ]] &&  echo -n "ERROR, it has exited." && break
-               #started because writted serveral lines at log so report error
-           fi
-           [[ -n $logfile ]] && grep -q "Listening for switch connections" ${logfile} && sleep 1 && break
-           sleep 1
-           timeout=$((timeout -1))
-        done
-        if [[ -n $logfile ]] && [[ $timeout == 0 ]] 
-        then 
-           echo -n "timeout!"
-        else
-           echo -n "running on 'screen -x flow'."
-        fi
-        [[ -n $logfile ]] && echo "  Logging at '${logfile}'" || echo
-    fi
-
-
-
-
diff --git a/scripts/service-opendaylight b/scripts/service-opendaylight
new file mode 100755 (executable)
index 0000000..a17f319
--- /dev/null
@@ -0,0 +1,164 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#launch opendaylight inside a screen. It assumes shell variable $OPENDAYLIGHT_PATH
+# contain the installation path
+
+
+DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
+DIRNAME=$(dirname $DIRNAME )
+DIR_OM=$(dirname $DIRNAME )
+
+function usage(){
+    echo -e "Usage: $0 start|stop|restart|status"
+    echo -e "  Launch|Removes|Restart|Getstatus opendaylight on a screen"
+    echo -e "  Shell variable OPENDAYLIGHT_PATH must indicate opendaylight installation path"
+}
+
+function kill_pid(){
+    #send TERM signal and wait 5 seconds and send KILL signal ir still running
+    #PARAMS: $1: PID of process to terminate
+    kill $1 #send TERM signal
+    WAIT=5
+    while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1
+    do
+        sleep 1
+        WAIT=$((WAIT-1))
+        [ $WAIT -eq 0 ] && echo -n "sending SIGKILL...  " &&  kill -9 $1  #kill when count reach 0
+    done
+    echo "done"
+   
+}
+
+#obtain parameters
+#om_action="start"  #uncoment to get a default action
+for param in $*
+do
+    [ "$param" == "start" -o "$param" == "stop"  -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param  && continue
+    [ "$param" == "openflow" -o "$param" == "flow" -o "$param" == "opendaylight" ] && continue
+    [ "$param" == "-h" -o "$param" == "--help" ] && usage && exit 0
+    
+    #if none of above, reach this line because a param is incorrect
+    echo "Unknown param '$param' type $0 --help" >&2
+    exit -1
+done
+
+#check action is provided
+[ -z "$om_action" ] && usage >&2 && exit -1
+
+    om_cmd="./karaf"
+    om_name="opendaylight"
+    
+    #obtain PID of program
+    component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'`
+
+    #status
+    if [ "$om_action" == "status" ]
+    then
+        [ -n "$component_id" ] && echo "    $om_name running, pid $component_id"
+        [ -z "$component_id" ] && echo "    $om_name stopped"
+    fi
+
+    #stop
+    if [ "$om_action" == "stop" -o "$om_action" == "restart" ]
+    then
+        #terminates program
+        [ -n "$component_id" ] && echo -n "    stopping $om_name ... " && kill_pid $component_id 
+        component_id=""
+        #terminates screen
+        if screen -wipe | grep -Fq .flow
+        then
+            screen -S flow -p 0 -X stuff "exit\n"
+            sleep 1
+        fi
+    fi
+
+    #start
+    if [ "$om_action" == "start" -o "$om_action" == "restart" ]
+    then
+        [[ -z $OPENDAYLIGHT_PATH ]] && echo "OPENDAYLIGHT_PATH shell variable must indicate opendaylight installation path" >&2 && exit -1
+        #calculates log file name
+        logfile=""
+        mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/openflow.log && logfile_console=$DIR_OM/logs/openflow_console.log || echo "can not create logs directory  $DIR_OM/logs"
+        #check already running
+        [ -n "$component_id" ] && echo "    $om_name is already running. Skipping" && continue
+        #create screen if not created
+        echo -n "    starting $om_name ... "
+        if ! screen -wipe | grep -Fq .flow
+        then
+            pushd ${OPENDAYLIGHT_PATH}/bin > /dev/null
+            screen -dmS flow  bash
+            sleep 1
+            popd > /dev/null
+        else
+            echo -n " using existing screen 'flow' ... "
+            screen -S flow -p 0 -X log off
+            screen -S flow -p 0 -X stuff "cd ${OPENDAYLIGHT_PATH}/bin\n"
+            sleep 1
+        fi
+        #move old log file index one number up and log again in index 0
+        if [[ -n $logfile ]]
+        then
+            for index in .9 .8 .7 .6 .5 .4 .3 .2 .1 ""
+            do
+                rm -f ${logfile}${index}
+                ln -s ${OPENDAYLIGHT_PATH}/data/log/karaf.log${index} ${logfile}${index}
+            done
+            rm -rf ${logfile_console}
+            screen -S flow -p 0 -X logfile ${logfile_console}
+            screen -S flow -p 0 -X log on
+        fi
+        #launch command to screen
+        screen -S flow -p 0 -X stuff "${om_cmd}\n"
+        #check if is running
+        [[ -n $logfile ]] && timeout=120 #2 minute
+        [[ -z $logfile ]] && timeout=20
+        while [[ $timeout -gt 0 ]]
+        do
+           #check if is running
+           #echo timeout $timeout
+           #if !  ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd}
+           log_lines=0
+           [[ -n $logfile_console ]] && log_lines=`head ${logfile_console} | wc -l`
+           component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'`
+           if [[ -z $component_id ]]
+           then #process not started or finished
+               [[ $log_lines -ge 2 ]] &&  echo -n "ERROR, it has exited." && break
+               #started because writted serveral lines at log so report error
+           fi
+           [[ -n $logfile_console ]] && grep -q "Listening on port" ${logfile_console} && sleep 1 && break
+           sleep 1
+           timeout=$((timeout -1))
+        done
+        if [[ -n $logfile_console ]] && [[ $timeout == 0 ]] 
+        then 
+           echo -n "timeout!"
+        else
+           echo -n "running on 'screen -x flow'."
+        fi
+        [[ -n $logfile ]] && echo "  Logging at '${logfile}'" || echo
+    fi
+
+
+
+
diff --git a/scripts/service-opendaylight.sh b/scripts/service-opendaylight.sh
deleted file mode 100755 (executable)
index a17f319..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#launch opendaylight inside a screen. It assumes shell variable $OPENDAYLIGHT_PATH
-# contain the installation path
-
-
-DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
-DIRNAME=$(dirname $DIRNAME )
-DIR_OM=$(dirname $DIRNAME )
-
-function usage(){
-    echo -e "Usage: $0 start|stop|restart|status"
-    echo -e "  Launch|Removes|Restart|Getstatus opendaylight on a screen"
-    echo -e "  Shell variable OPENDAYLIGHT_PATH must indicate opendaylight installation path"
-}
-
-function kill_pid(){
-    #send TERM signal and wait 5 seconds and send KILL signal ir still running
-    #PARAMS: $1: PID of process to terminate
-    kill $1 #send TERM signal
-    WAIT=5
-    while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1
-    do
-        sleep 1
-        WAIT=$((WAIT-1))
-        [ $WAIT -eq 0 ] && echo -n "sending SIGKILL...  " &&  kill -9 $1  #kill when count reach 0
-    done
-    echo "done"
-   
-}
-
-#obtain parameters
-#om_action="start"  #uncoment to get a default action
-for param in $*
-do
-    [ "$param" == "start" -o "$param" == "stop"  -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param  && continue
-    [ "$param" == "openflow" -o "$param" == "flow" -o "$param" == "opendaylight" ] && continue
-    [ "$param" == "-h" -o "$param" == "--help" ] && usage && exit 0
-    
-    #if none of above, reach this line because a param is incorrect
-    echo "Unknown param '$param' type $0 --help" >&2
-    exit -1
-done
-
-#check action is provided
-[ -z "$om_action" ] && usage >&2 && exit -1
-
-    om_cmd="./karaf"
-    om_name="opendaylight"
-    
-    #obtain PID of program
-    component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'`
-
-    #status
-    if [ "$om_action" == "status" ]
-    then
-        [ -n "$component_id" ] && echo "    $om_name running, pid $component_id"
-        [ -z "$component_id" ] && echo "    $om_name stopped"
-    fi
-
-    #stop
-    if [ "$om_action" == "stop" -o "$om_action" == "restart" ]
-    then
-        #terminates program
-        [ -n "$component_id" ] && echo -n "    stopping $om_name ... " && kill_pid $component_id 
-        component_id=""
-        #terminates screen
-        if screen -wipe | grep -Fq .flow
-        then
-            screen -S flow -p 0 -X stuff "exit\n"
-            sleep 1
-        fi
-    fi
-
-    #start
-    if [ "$om_action" == "start" -o "$om_action" == "restart" ]
-    then
-        [[ -z $OPENDAYLIGHT_PATH ]] && echo "OPENDAYLIGHT_PATH shell variable must indicate opendaylight installation path" >&2 && exit -1
-        #calculates log file name
-        logfile=""
-        mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/openflow.log && logfile_console=$DIR_OM/logs/openflow_console.log || echo "can not create logs directory  $DIR_OM/logs"
-        #check already running
-        [ -n "$component_id" ] && echo "    $om_name is already running. Skipping" && continue
-        #create screen if not created
-        echo -n "    starting $om_name ... "
-        if ! screen -wipe | grep -Fq .flow
-        then
-            pushd ${OPENDAYLIGHT_PATH}/bin > /dev/null
-            screen -dmS flow  bash
-            sleep 1
-            popd > /dev/null
-        else
-            echo -n " using existing screen 'flow' ... "
-            screen -S flow -p 0 -X log off
-            screen -S flow -p 0 -X stuff "cd ${OPENDAYLIGHT_PATH}/bin\n"
-            sleep 1
-        fi
-        #move old log file index one number up and log again in index 0
-        if [[ -n $logfile ]]
-        then
-            for index in .9 .8 .7 .6 .5 .4 .3 .2 .1 ""
-            do
-                rm -f ${logfile}${index}
-                ln -s ${OPENDAYLIGHT_PATH}/data/log/karaf.log${index} ${logfile}${index}
-            done
-            rm -rf ${logfile_console}
-            screen -S flow -p 0 -X logfile ${logfile_console}
-            screen -S flow -p 0 -X log on
-        fi
-        #launch command to screen
-        screen -S flow -p 0 -X stuff "${om_cmd}\n"
-        #check if is running
-        [[ -n $logfile ]] && timeout=120 #2 minute
-        [[ -z $logfile ]] && timeout=20
-        while [[ $timeout -gt 0 ]]
-        do
-           #check if is running
-           #echo timeout $timeout
-           #if !  ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd}
-           log_lines=0
-           [[ -n $logfile_console ]] && log_lines=`head ${logfile_console} | wc -l`
-           component_id=`ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep ${om_cmd} | awk '{print $1}'`
-           if [[ -z $component_id ]]
-           then #process not started or finished
-               [[ $log_lines -ge 2 ]] &&  echo -n "ERROR, it has exited." && break
-               #started because writted serveral lines at log so report error
-           fi
-           [[ -n $logfile_console ]] && grep -q "Listening on port" ${logfile_console} && sleep 1 && break
-           sleep 1
-           timeout=$((timeout -1))
-        done
-        if [[ -n $logfile_console ]] && [[ $timeout == 0 ]] 
-        then 
-           echo -n "timeout!"
-        else
-           echo -n "running on 'screen -x flow'."
-        fi
-        [[ -n $logfile ]] && echo "  Logging at '${logfile}'" || echo
-    fi
-
-
-
-
diff --git a/scripts/service-openvim b/scripts/service-openvim
new file mode 100755 (executable)
index 0000000..7eefa8e
--- /dev/null
@@ -0,0 +1,225 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openvim
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#launch openvim (and/or floodlight) inside a screen. 
+#or call service if it is installed on systemd
+#It assumes a relative path '..' for openvim 
+#for floodlight, the variable FLOODLIGHT_PATH indicates the installation path
+
+
+DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
+DIRNAME=$(dirname $DIRNAME )
+DIR_OM=$(dirname $DIRNAME )
+#[[ -z $FLOODLIGHT_PATH ]] && FLOODLIGHT_PATH=$(dirname ${DIR_OM})/floodlight-1.1
+#[[ -z $FLOODLIGHT_PATH ]] && FLOODLIGHT_PATH=$(dirname ${DIR_OM})/floodlight-0.90
+
+function usage(){
+    echo -e "Usage: $0 [openvim/vim] [floodlight/flow] start|stop|restart|status"
+    echo -e "  Launch|Removes|Restart|Getstatus openvim (by default) or/and floodlight on a screen/service"
+    echo -e "  For floodlight variable FLOODLIGHT_PATH must indicate installation path"
+    echo -e "    -h --help: shows this help"
+    echo -e "    -n --screen-name NAME : name of screen to launch openvim (default vim)"
+    echo -e "    -- PARAMS use to separate PARAMS that will be send to the service. e.g. -pPORT -PADMINPORT --dbname=DDBB"
+}
+
+function kill_pid(){
+    #send TERM signal, wait 5 seconds, and send KILL signal if still running
+    #PARAMS: $1: PID of process to terminate
+    kill $1 #send TERM signal
+    WAIT=5
+    while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1
+    do
+        sleep 1
+        WAIT=$((WAIT-1))
+        [ $WAIT -eq 0 ] && echo -n "sending SIGKILL...  " &&  kill -9 $1  #kill when count reach 0
+    done
+    echo "done"
+   
+}
+
+#process options
+source ${DIRNAME}/get-options.sh "screen-name:n= help:h --" $* || exit 1
+
+#help
+[[ -n "$option_help" ]] && usage && exit 0
+
+
+#obtain parameters
+om_list=""
+#om_action="start"  #uncomment to get a default action
+action_list=""
+om_params="$option__"
+
+for param in $params
+do
+    [ "$param" == "start" -o "$param" == "stop"  -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param  && continue
+    [ "$param" == "openvim" -o "$param" == "vim"  ]    && om_list="$om_list vim"              && continue
+    [ "$param" == "openmano" -o "$param" == "mano" ]   && continue #allow and ignore for backwards compatibility
+    [ "$param" == "openflow" -o "$param" == "flow" -o "$param" == "floodlight" ] && om_list="flow $om_list" && continue
+    echo "invalid argument '$param'?  Type -h for help" >&2 && exit 1
+done
+
+[[ -n $option_screen_name ]] && option_screen_name=${option_screen_name#*.} #allow the format 'pid.name' and keep only name
+
+#check action is provided
+[ -z "$om_action" ] && usage >&2 && exit -1
+
+#if no components supplied assume all
+[ -z "$om_list" ] && om_list="vim"
+
+function find_process_id(){ #PARAMS:  command screen-name
+    for process_id in `ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep "${1}" | awk '{print $1}'`
+    do
+        scname=$(ps wwep $process_id | grep -o 'STY=\S*')
+        scname=${scname#STY=}
+        [[ -n "$2" ]] && [[ "${scname#*.}" != "$2" ]] && continue
+        echo -n "${process_id} "
+    done
+    echo    
+}
+
+
+for om_component in $om_list
+do
+    screen_name="${om_component}"
+    [[ -n "$option_screen_name" ]] && screen_name=$option_screen_name
+    [ "${om_component}" == "flow" ] && om_cmd="floodlight.jar" && om_name="floodlight" && om_dir=$FLOODLIGHT_PATH
+    [ "${om_component}" == "vim" ]  && om_cmd="./openvimd.py"  && om_name="openvim   " && om_dir=${DIR_OM}
+    #obtain PID of program
+    component_id=`find_process_id "${om_cmd}" $option_screen_name`
+    processes=$(echo $component_id | wc -w)
+
+    #status
+    if [ "$om_action" == "status" ]
+    then
+       running=""
+        for process_id in $component_id 
+        do
+            scname=$(ps wwep $process_id | grep -o 'STY=\S*')
+            scname=${scname#STY=}
+            [[ -n "$option_screen_name" ]] && [[ "${scname#*.}" != "$option_screen_name" ]] && continue
+            printf "%-15s" "pid: ${process_id},"
+            [[ -n "$scname" ]] && printf "%-25s" "screen: ${scname},"
+            echo cmd: $(ps -o cmd p $process_id | tail -n1 )
+            running=y
+        done
+        #if installed as a service and it is not provided a screen name call service
+        [[ -f /etc/systemd/system/openvim.service ]] && [[ -z $option_screen_name ]] && running=y #&& service openvim status
+        if [ -z "$running" ]
+        then
+            echo -n "    $om_name not running" && [[ -n "$option_screen_name" ]] && echo " on screen '$option_screen_name'" || echo
+        fi
+    fi
+
+    #if installed as a service and it is not provided a screen name call service
+    [[ -f /etc/systemd/system/openvim.service ]] && [[ -z $option_screen_name ]] && service openvim $om_action && ( [[ $om_action == status ]] || sleep 5 ) && exit $?
+
+    #stop
+    if [ "$om_action" == "stop" -o "$om_action" == "restart" ]
+    then
+        #terminates program
+        [ $processes -gt 1 ] && echo "$processes processes are running, specify with --screen-name" && continue  
+        [ $processes -eq 1 ] && echo -n "    stopping $om_name ... " && kill_pid $component_id 
+        component_id=""
+        #terminates screen
+        if screen -wipe | grep -q -e "\.${screen_name}\b"
+        then
+            screen -S $screen_name -p 0 -X stuff "exit\n" || echo
+            sleep 1
+        fi
+    fi
+
+    #start
+    if [ "$om_action" == "start" -o "$om_action" == "restart" ]
+    then
+        [[ -z $FLOODLIGHT_PATH ]] && [[ $om_component == flow ]] && 
+            echo "FLOODLIGHT_PATH shell variable must indicate floodlight installation path" >&2 && exit -1
+        #calculates log file name
+        logfile=""
+        mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/open${screen_name}.log || echo "can not create logs directory  $DIR_OM/logs"
+        #check already running
+        [ -n "$component_id" ] && echo "    $om_name is already running. Skipping" && continue
+        #create screen if not created
+        echo -n "    starting $om_name ... "
+        if ! screen -wipe | grep -q -e "\.${screen_name}\b"
+        then
+            pushd ${om_dir} > /dev/null
+            screen -dmS ${screen_name}  bash
+            sleep 1
+            popd > /dev/null
+        else
+            echo -n " using existing screen '${screen_name}' ... "
+            screen -S ${screen_name} -p 0 -X log off
+            screen -S ${screen_name} -p 0 -X stuff "cd ${om_dir}\n"
+            sleep 1
+        fi
+        #move old log file index one number up and log again in index 0
+        if [[ -n $logfile ]]
+        then
+            for index in 8 7 6 5 4 3 2 1
+            do
+                [[ -f ${logfile}.${index} ]] && mv ${logfile}.${index} ${logfile}.$((index+1))
+            done
+            [[ -f ${logfile} ]] && mv ${logfile} ${logfile}.1
+            screen -S ${screen_name} -p 0 -X logfile ${logfile}
+            screen -S ${screen_name} -p 0 -X log on
+        fi
+        #launch command to screen
+        #[ "${om_component}" != "flow" ] && screen -S ${screen_name} -p 0 -X stuff "cd ${DIR_OM}/open${om_component}\n" && sleep 1
+        [ "${om_component}" == "flow" ] && screen -S ${screen_name} -p 0 -X stuff "java  -Dlogback.configurationFile=${DIRNAME}/flow-logback.xml -jar ./target/floodlight.jar -cf ${DIRNAME}/flow.properties_v0.9\n"
+        #[ "${om_component}" == "flow" ] && screen -S ${screen_name} -p 0 -X stuff "java  -Dlogback.configurationFile=${DIRNAME}/flow-logback.xml -jar ./target/floodlight.jar -cf ${DIRNAME}/flow.properties_v1.1\n" && sleep 5
+        [ "${om_component}" != "flow" ] && screen -S ${screen_name} -p 0 -X stuff "${om_cmd}${om_params}\n"
+        #check if is running
+        [[ -n $logfile ]] && timeout=120 #2 minute
+        [[ -z $logfile ]] && timeout=20
+        while [[ $timeout -gt 0 ]]
+        do
+           #check if is running
+           #echo timeout $timeout
+           #if !  ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd}
+           log_lines=0
+           [[ -n $logfile ]] && log_lines=`head ${logfile} | wc -l`
+           component_id=`find_process_id "${om_cmd}${om_params}" $screen_name`
+           if [[ -z $component_id ]]
+           then #process not started or finished
+               [[ $log_lines -ge 2 ]] &&  echo -n "ERROR, it has exited." && break
+               #if it wrote several lines to the log it did start and then exited, so report the error
+           fi
+           [[ -n $logfile ]] && [[ ${om_component} == flow ]] && grep -q "Listening for switch connections" ${logfile} && sleep 1 && break
+           [[ -n $logfile ]] && [[ ${om_component} != flow ]] && grep -q "open${om_component}d ready" ${logfile} && break
+           sleep 1
+           timeout=$((timeout -1))
+        done
+        if [[ -n $logfile ]] && [[ $timeout == 0 ]] 
+        then 
+           echo -n "timeout!"
+        else
+           echo -n "running on 'screen -x ${screen_name}'."
+        fi
+        [[ -n $logfile ]] && echo "  Logging at '${logfile}'" || echo
+    fi
+done
+
+
+
+
diff --git a/scripts/service-openvim.sh b/scripts/service-openvim.sh
deleted file mode 100755 (executable)
index 7eefa8e..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/bin/bash
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-#launch openvim (and/or floodlight) inside a screen. 
-#or call service if it is installed on systemd
-#It assumes a relative path '..' for openvim 
-#for floodlight, the variable FLOODLIGHT_PATH indicates the installation path
-
-
-DIRNAME=$(readlink -f ${BASH_SOURCE[0]})
-DIRNAME=$(dirname $DIRNAME )
-DIR_OM=$(dirname $DIRNAME )
-#[[ -z $FLOODLIGHT_PATH ]] && FLOODLIGHT_PATH=$(dirname ${DIR_OM})/floodlight-1.1
-#[[ -z $FLOODLIGHT_PATH ]] && FLOODLIGHT_PATH=$(dirname ${DIR_OM})/floodlight-0.90
-
-function usage(){
-    echo -e "Usage: $0 [openvim/vim] [floodlight/flow] start|stop|restart|status"
-    echo -e "  Launch|Removes|Restart|Getstatus openvim (by default) or/and floodlight on a screen/service"
-    echo -e "  For floodlight variable FLOODLIGHT_PATH must indicate installation path"
-    echo -e "    -h --help: shows this help"
-    echo -e "    -n --screen-name NAME : name of screen to launch openvim (default vim)"
-    echo -e "    -- PARAMS use to separate PARAMS that will be send to the service. e.g. -pPORT -PADMINPORT --dbname=DDBB"
-}
-
-function kill_pid(){
-    #send TERM signal and wait 5 seconds and send KILL signal ir still running
-    #PARAMS: $1: PID of process to terminate
-    kill $1 #send TERM signal
-    WAIT=5
-    while [ $WAIT -gt 0 ] && ps -o pid -U $USER -u $USER | grep -q $1
-    do
-        sleep 1
-        WAIT=$((WAIT-1))
-        [ $WAIT -eq 0 ] && echo -n "sending SIGKILL...  " &&  kill -9 $1  #kill when count reach 0
-    done
-    echo "done"
-   
-}
-
-#process options
-source ${DIRNAME}/get-options.sh "screen-name:n= help:h --" $* || exit 1
-
-#help
-[[ -n "$option_help" ]] && usage && exit 0
-
-
-#obtain parameters
-om_list=""
-#om_action="start"  #uncoment to get a default action
-action_list=""
-om_params="$option__"
-
-for param in $params
-do
-    [ "$param" == "start" -o "$param" == "stop"  -o "$param" == "restart" -o "$param" == "status" ] && om_action=$param  && continue
-    [ "$param" == "openvim" -o "$param" == "vim"  ]    && om_list="$om_list vim"              && continue
-    [ "$param" == "openmano" -o "$param" == "mano" ]   && continue #allow and ingore for backwards compatibility
-    [ "$param" == "openflow" -o "$param" == "flow" -o "$param" == "floodlight" ] && om_list="flow $om_list" && continue
-    echo "invalid argument '$param'?  Type -h for help" >&2 && exit 1
-done
-
-[[ -n $option_screen_name ]] && option_screen_name=${option_screen_name#*.} #allow the format 'pid.name' and keep only name
-
-#check action is provided
-[ -z "$om_action" ] && usage >&2 && exit -1
-
-#if no componenets supplied assume all
-[ -z "$om_list" ] && om_list="vim"
-
-function find_process_id(){ #PARAMS:  command screen-name
-    for process_id in `ps -o pid,cmd -U $USER -u $USER | grep -v grep | grep "${1}" | awk '{print $1}'`
-    do
-        scname=$(ps wwep $process_id | grep -o 'STY=\S*')
-        scname=${scname#STY=}
-        [[ -n "$2" ]] && [[ "${scname#*.}" != "$2" ]] && continue
-        echo -n "${process_id} "
-    done
-    echo    
-}
-
-
-for om_component in $om_list
-do
-    screen_name="${om_component}"
-    [[ -n "$option_screen_name" ]] && screen_name=$option_screen_name
-    [ "${om_component}" == "flow" ] && om_cmd="floodlight.jar" && om_name="floodlight" && om_dir=$FLOODLIGHT_PATH
-    [ "${om_component}" == "vim" ]  && om_cmd="./openvimd.py"  && om_name="openvim   " && om_dir=${DIR_OM}
-    #obtain PID of program
-    component_id=`find_process_id "${om_cmd}" $option_screen_name`
-    processes=$(echo $component_id | wc -w)
-
-    #status
-    if [ "$om_action" == "status" ]
-    then
-       running=""
-        for process_id in $component_id 
-        do
-            scname=$(ps wwep $process_id | grep -o 'STY=\S*')
-            scname=${scname#STY=}
-            [[ -n "$option_screen_name" ]] && [[ "${scname#*.}" != "$option_screen_name" ]] && continue
-            printf "%-15s" "pid: ${process_id},"
-            [[ -n "$scname" ]] && printf "%-25s" "screen: ${scname},"
-            echo cmd: $(ps -o cmd p $process_id | tail -n1 )
-            running=y
-        done
-        #if installed as a service and it is not provided a screen name call service
-        [[ -f /etc/systemd/system/openvim.service ]] && [[ -z $option_screen_name ]] && running=y #&& service openvim status
-        if [ -z "$running" ]
-        then
-            echo -n "    $om_name not running" && [[ -n "$option_screen_name" ]] && echo " on screen '$option_screen_name'" || echo
-        fi
-    fi
-
-    #if installed as a service and it is not provided a screen name call service
-    [[ -f /etc/systemd/system/openvim.service ]] && [[ -z $option_screen_name ]] && service openvim $om_action && ( [[ $om_action == status ]] || sleep 5 ) && exit $?
-
-    #stop
-    if [ "$om_action" == "stop" -o "$om_action" == "restart" ]
-    then
-        #terminates program
-        [ $processes -gt 1 ] && echo "$processes processes are running, specify with --screen-name" && continue  
-        [ $processes -eq 1 ] && echo -n "    stopping $om_name ... " && kill_pid $component_id 
-        component_id=""
-        #terminates screen
-        if screen -wipe | grep -q -e "\.${screen_name}\b"
-        then
-            screen -S $screen_name -p 0 -X stuff "exit\n" || echo
-            sleep 1
-        fi
-    fi
-
-    #start
-    if [ "$om_action" == "start" -o "$om_action" == "restart" ]
-    then
-        [[ -z $FLOODLIGHT_PATH ]] && [[ $om_component == flow ]] && 
-            echo "FLOODLIGHT_PATH shell variable must indicate floodlight installation path" >&2 && exit -1
-        #calculates log file name
-        logfile=""
-        mkdir -p $DIR_OM/logs && logfile=$DIR_OM/logs/open${screen_name}.log || echo "can not create logs directory  $DIR_OM/logs"
-        #check already running
-        [ -n "$component_id" ] && echo "    $om_name is already running. Skipping" && continue
-        #create screen if not created
-        echo -n "    starting $om_name ... "
-        if ! screen -wipe | grep -q -e "\.${screen_name}\b"
-        then
-            pushd ${om_dir} > /dev/null
-            screen -dmS ${screen_name}  bash
-            sleep 1
-            popd > /dev/null
-        else
-            echo -n " using existing screen '${screen_name}' ... "
-            screen -S ${screen_name} -p 0 -X log off
-            screen -S ${screen_name} -p 0 -X stuff "cd ${om_dir}\n"
-            sleep 1
-        fi
-        #move old log file index one number up and log again in index 0
-        if [[ -n $logfile ]]
-        then
-            for index in 8 7 6 5 4 3 2 1
-            do
-                [[ -f ${logfile}.${index} ]] && mv ${logfile}.${index} ${logfile}.$((index+1))
-            done
-            [[ -f ${logfile} ]] && mv ${logfile} ${logfile}.1
-            screen -S ${screen_name} -p 0 -X logfile ${logfile}
-            screen -S ${screen_name} -p 0 -X log on
-        fi
-        #launch command to screen
-        #[ "${om_component}" != "flow" ] && screen -S ${screen_name} -p 0 -X stuff "cd ${DIR_OM}/open${om_component}\n" && sleep 1
-        [ "${om_component}" == "flow" ] && screen -S ${screen_name} -p 0 -X stuff "java  -Dlogback.configurationFile=${DIRNAME}/flow-logback.xml -jar ./target/floodlight.jar -cf ${DIRNAME}/flow.properties_v0.9\n"
-        #[ "${om_component}" == "flow" ] && screen -S ${screen_name} -p 0 -X stuff "java  -Dlogback.configurationFile=${DIRNAME}/flow-logback.xml -jar ./target/floodlight.jar -cf ${DIRNAME}/flow.properties_v1.1\n" && sleep 5
-        [ "${om_component}" != "flow" ] && screen -S ${screen_name} -p 0 -X stuff "${om_cmd}${om_params}\n"
-        #check if is running
-        [[ -n $logfile ]] && timeout=120 #2 minute
-        [[ -z $logfile ]] && timeout=20
-        while [[ $timeout -gt 0 ]]
-        do
-           #check if is running
-           #echo timeout $timeout
-           #if !  ps -f -U $USER -u $USER | grep -v grep | grep -q ${om_cmd}
-           log_lines=0
-           [[ -n $logfile ]] && log_lines=`head ${logfile} | wc -l`
-           component_id=`find_process_id "${om_cmd}${om_params}" $screen_name`
-           if [[ -z $component_id ]]
-           then #process not started or finished
-               [[ $log_lines -ge 2 ]] &&  echo -n "ERROR, it has exited." && break
-               #started because writted serveral lines at log so report error
-           fi
-           [[ -n $logfile ]] && [[ ${om_component} == flow ]] && grep -q "Listening for switch connections" ${logfile} && sleep 1 && break
-           [[ -n $logfile ]] && [[ ${om_component} != flow ]] && grep -q "open${om_component}d ready" ${logfile} && break
-           sleep 1
-           timeout=$((timeout -1))
-        done
-        if [[ -n $logfile ]] && [[ $timeout == 0 ]] 
-        then 
-           echo -n "timeout!"
-        else
-           echo -n "running on 'screen -x ${screen_name}'."
-        fi
-        [[ -n $logfile ]] && echo "  Logging at '${logfile}'" || echo
-    fi
-done
-
-
-
-
index 1d3138f..fac60ea 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -1,41 +1,16 @@
 #!/usr/bin/env python
 
-from setuptools import setup, find_packages
-from setuptools.command.install import install
-from os import system
 from setuptools import setup
 
-__name__ = 'lib-osm-openvim'
-__version__ = '1.0.0'
-__description__ = 'OSM Openvim library'
-__author__ = 'ETSI OSM'
-__author_email__ = 'alfonso.tiernosepulveda@telefonica.com'
-__maintainer__ = 'mirabal'
-__maintainer_email__ = 'leonardo.mirabal@altran.com'
-__license__ = 'Apache 2.0'
-__url__ = 'https://osm.etsi.org/gitweb/?p=osm/openvim.git;a=summary'
-
-cmd = 'cp ovim.py ovim'
-system(cmd)
-
-__data_files__ = [('osm/openvim/', ['openvimd.cfg']),
-                  ('osm/openvim/database_utils/', ['database_utils/vim_db_structure.sql',
-                                                   'database_utils/nets.sql',
-                                                   'database_utils/of_ports_pci_correspondence.sql',
-                                                   'database_utils/host_ranking.sql',
-                                                   'database_utils/dump_db.sh',
-                                                   'database_utils/init_vim_db.sh',
-                                                   'database_utils/migrate_vim_db.sh',
-                                                   'database_utils/install-db-server.sh'
-                                                   ]),
-                  ('osm/openvim/scripts/', ['scripts/service-openvim.sh',
-                                            'scripts/openvim-report.sh',
-                                            'scripts/service-floodlight.sh',
-                                            'scripts/service-opendaylight.sh',
-                                            'scripts/initopenvim.sh'
-                                            ]),
-                  ]
-
+__name = 'osm_openvim'
+__version = '1.0.0'
+__description = 'OSM Openvim library'
+__author = 'ETSI OSM'
+__author_email = 'alfonso.tiernosepulveda@telefonica.com'
+__maintainer = 'mirabal'
+__maintainer_email = 'leonardo.mirabal@altran.com'
+__license = 'Apache 2.0'
+__url = 'https://osm.etsi.org/gitweb/?p=osm/openvim.git;a=summary'
 
 _req = [
     "asn1crypto",
@@ -64,66 +39,31 @@ _req = [
     "libvirt-python"
 ]
 
-__scripts__ = ['openflow', 'openvim', 'ovim']
-
-
-class LibOpenvimInstaller(install):
-    lite = None
-    user_options = install.user_options + [('lite', None, "Don't install without Machine Learning modules.")]
-
-    def initialize_options(self):
-        self.lite = None
-        install.initialize_options(self)
-
-    def finalize_options(self):
-        install.finalize_options(self)
-
-    def run(self):
-
-        cmd = 'ln -sf -v /usr/local/osm/openvim/openvimd.cfg /etc/default/openvimd.cfg'
-        system(cmd)
-        cmd = 'ln -sf -v /usr/local/osm/openvim/openflow /usr/bin/openflow'
-        system(cmd)
-        cmd = 'ln -sf -v /usr/local/osm/openvim/ovim.py /usr/bin/ovim'
-        system(cmd)
-
-        install.run(self)
-
-
-setup(name=__name__,
-      version=__version__,
-      description=__description__,
-      long_description=__description__,
-      author=__author__,
-      author_email=__author_email__,
-      license=__license__,
-      maintainer=__maintainer__,
-      maintainer_email=__maintainer_email__,
-      url=__url__,
-      py_modules=['ovim',
-                  'openvimd',
-                  'vim_db',
-                  'httpserver',
-                  'RADclass',
-                  'auxiliary_functions',
-                  'dhcp_thread',
-                  'definitionsClass',
-                  'host_thread',
-                  'vim_schema',
-                  'ovim',
-                  'openflow_thread',
-                  'openflow_conn',
-                  'onos',
-                  'ODL',
-                  'floodlight',
-                  ],
-      packages=find_packages() + ['database_utils'] + ['scripts'],
-      package_dir={__name__: __name__},
-      package_data={'database_utils': ['*'], 'scripts': ['*']},
+__scripts__ = ['openflow',
+               'openvim',
+               'openvimd',
+               'osm_openvim/scripts/service-openvim',
+               'osm_openvim/scripts/service-opendaylight',
+               'osm_openvim/scripts/service-floodlight',
+               'osm_openvim/scripts/service-openvim',
+               'osm_openvim/scripts/openvim-report',
+               'osm_openvim/scripts/get_dhcp_lease.sh']
+
+setup(name=__name,
+      version=__version,
+      description=__description,
+      long_description=__description,
+      author=__author,
+      author_email=__author_email,
+      license=__license,
+      maintainer=__maintainer,
+      maintainer_email=__maintainer_email,
+      url=__url,
+      packages=[__name],
+      package_dir={__name: __name},
       scripts=__scripts__,
-      data_files=__data_files__,
+      package_data={'osm_openvim': ['*']},
       include_package_data=True,
-      cmdclass={'install': LibOpenvimInstaller},
       install_requires=_req
       )
 
diff --git a/setup_lite.py b/setup_lite.py
new file mode 100755 (executable)
index 0000000..cf38014
--- /dev/null
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+from setuptools import setup
+
+__name = 'lib_osm_openvim'
+__version = '1.0.0'
+__description = 'OSM Openvim library'
+__author = 'ETSI OSM'
+__author_email = 'alfonso.tiernosepulveda@telefonica.com'
+__maintainer = 'mirabal'
+__maintainer_email = 'leonardo.mirabal@altran.com'
+__license = 'Apache 2.0'
+__url = 'https://osm.etsi.org/gitweb/?p=osm/openvim.git;a=summary'
+
+_req = [
+    "asn1crypto",
+    "cffi",
+    "enum34",
+    "functools32",
+    "idna",
+    "ipaddress",
+    "packaging",
+    "pbr",
+    "pkgconfig",
+    "pyasn1",
+    "pycparser",
+    "pycrypto",
+    "pyparsing",
+    "six",
+    "jsonschema",
+    "argcomplete",
+    "requests",
+    "PyYAML",
+    "requestsexceptions",
+    "netaddr",
+    "bottle",
+    "MySQL-python",
+    "paramiko",
+]
+
+__scripts__ = ['openflow']
+
+setup(name=__name,
+      version=__version,
+      description=__description,
+      long_description=__description,
+      author=__author,
+      author_email=__author_email,
+      license=__license,
+      maintainer=__maintainer,
+      maintainer_email=__maintainer_email,
+      url=__url,
+      packages=[__name],
+      package_dir={__name: __name},
+      scripts=__scripts__,
+      package_data={'lib_osm_openvim': ['*']},
+      include_package_data=True,
+      install_requires=_req
+      )
+
+
diff --git a/vim_db.py b/vim_db.py
deleted file mode 100644 (file)
index c34160d..0000000
--- a/vim_db.py
+++ /dev/null
@@ -1,1734 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-'''
-This module interact with the openvim database,
-It implements general table management
-and complex writings 'transactional' sures, 
-that is, or all is changed or nothing
-'''
-
-__author__="Alfonso Tierno"
-__date__ ="$10-jul-2014 12:07:15$"
-
-import MySQLdb as mdb
-import uuid as myUuid
-import auxiliary_functions as af
-import json
-import logging
-from netaddr import IPNetwork, IPSet, IPRange, all_matching_cidrs
-
-HTTP_Bad_Request = 400
-HTTP_Unauthorized = 401 
-HTTP_Not_Found = 404 
-HTTP_Method_Not_Allowed = 405 
-HTTP_Request_Timeout = 408
-HTTP_Conflict = 409
-HTTP_Service_Unavailable = 503 
-HTTP_Internal_Server_Error = 500 
-
-
-class vim_db():
-    def __init__(self, vlan_range, logger_name= None, debug=None):
-        '''vlan_range must be a tuple (vlan_ini, vlan_end) with available vlan values for networks
-        every dataplane network contain a unique value, regardless of it is used or not 
-        ''' 
-        #initialization
-        self.net_vlan_range = vlan_range
-        self.net_vlan_usedlist = None
-        self.net_vlan_lastused = self.net_vlan_range[0] -1
-        self.debug=debug
-        if logger_name:
-            self.logger_name = logger_name
-        else:
-            self.logger_name = 'openvim.db'
-        self.logger = logging.getLogger(self.logger_name)
-        if debug:
-            self.logger.setLevel( getattr(logging, debug) )
-
-
-    def connect(self, host=None, user=None, passwd=None, database=None):
-        '''Connect to the concrete data base. 
-        The first time a valid host, user, passwd and database must be provided,
-        Following calls can skip this parameters
-        '''
-        try:
-            if host     is not None: self.host = host
-            if user     is not None: self.user = user
-            if passwd   is not None: self.passwd = passwd
-            if database is not None: self.database = database
-
-            self.con = mdb.connect(self.host, self.user, self.passwd, self.database)
-            self.logger.debug("connected to DB %s at %s@%s", self.database,self.user, self.host)
-            return 0
-        except mdb.Error as e:
-            self.logger.error("Cannot connect to DB %s at %s@%s Error %d: %s", self.database, self.user, self.host, e.args[0], e.args[1])
-            return -1
-
-    def get_db_version(self):
-        ''' Obtain the database schema version.
-        Return: (negative, text) if error or version 0.0 where schema_version table is missing
-                (version_int, version_text) if ok
-        '''
-        cmd = "SELECT version_int,version,openvim_ver FROM schema_version"
-        for retry_ in range(0,2):
-            try:
-                with self.con:
-                    self.cur = self.con.cursor()
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    rows = self.cur.fetchall()
-                    highest_version_int=0
-                    highest_version=""
-                    #print rows
-                    for row in rows: #look for the latest version
-                        if row[0]>highest_version_int:
-                            highest_version_int, highest_version = row[0:2]
-                    return highest_version_int, highest_version
-            except (mdb.Error, AttributeError) as e:
-                self.logger.error("get_db_version DB Exception %d: %s. Command %s",e.args[0], e.args[1], cmd)
-                r,c = self.format_error(e)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c    
-                
-    def disconnect(self):
-        '''disconnect from the data base'''
-        try:
-            self.con.close()
-            del self.con
-        except mdb.Error as e:
-            self.logger.error("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
-            return -1
-        except AttributeError as e: #self.con not defined
-            if e[0][-5:] == "'con'": return -1, "Database internal error, no connection."
-            else: raise
-    
-    def format_error(self, e, func, cmd, command=None, extra=None):
-        '''Creates a text error base on the produced exception
-            Params:
-                e: mdb exception
-                func: name of the function that makes the call, for logging purposes
-                cmd: database command that produce the exception
-                command: if the intention is update or delete
-                extra: extra information to add to some commands
-            Return
-                HTTP error in negative, formatted error text
-        ''' 
-                
-        self.logger.error("%s DB Exception %s. Command %s",func, str(e), cmd)
-        if type(e[0]) is str:
-            if e[0][-5:] == "'con'": return -HTTP_Internal_Server_Error, "DB Exception, no connection."
-            else: raise
-        if e.args[0]==2006 or e.args[0]==2013 : #MySQL server has gone away (((or)))    Exception 2013: Lost connection to MySQL server during query
-            #reconnect
-            self.connect()
-            return -HTTP_Request_Timeout,"Database reconnection. Try Again"
-        fk=e.args[1].find("foreign key constraint fails")
-        if fk>=0:
-            if command=="update": return -HTTP_Bad_Request, "tenant_id %s not found." % extra
-            elif command=="delete":  return -HTTP_Bad_Request, "Resource is not free. There are %s that prevent its deletion." % extra
-        de = e.args[1].find("Duplicate entry")
-        fk = e.args[1].find("for key")
-        uk = e.args[1].find("Unknown column")
-        wc = e.args[1].find("in 'where clause'")
-        fl = e.args[1].find("in 'field list'")
-        #print de, fk, uk, wc,fl
-        if de>=0:
-            if fk>=0: #error 1062
-                return -HTTP_Conflict, "Value %s already in use for %s" % (e.args[1][de+15:fk], e.args[1][fk+7:])
-        if uk>=0:
-            if wc>=0:
-                return -HTTP_Bad_Request, "Field %s cannot be used for filtering" % e.args[1][uk+14:wc]
-            if fl>=0:
-                return -HTTP_Bad_Request, "Field %s does not exist" % e.args[1][uk+14:wc]
-        return -HTTP_Internal_Server_Error, "Database internal Error %d: %s" % (e.args[0], e.args[1])
-
-    def __data2db_format(self, data):
-        '''convert data to database format. If data is None it return the 'Null' text,
-        otherwise it return the text surrounded by quotes ensuring internal quotes are escaped'''
-        if data==None:
-            return 'Null'
-        out=str(data)
-        if "'" not in out:
-            return "'" + out + "'"
-        elif '"' not in out:
-            return '"' + out + '"'
-        else:
-            return json.dumps(out)
-    
-    def __get_used_net_vlan(self):
-        #get used from database if needed
-        try:
-            cmd = "SELECT vlan FROM nets WHERE vlan>='%s' ORDER BY vlan LIMIT 25" % self.net_vlan_lastused
-            with self.con:
-                self.cur = self.con.cursor()
-                self.logger.debug(cmd)
-                self.cur.execute(cmd)
-                vlan_tuple = self.cur.fetchall()
-                #convert a tuple of tuples in a list of numbers
-                self.net_vlan_usedlist = []
-                for k in vlan_tuple:
-                    self.net_vlan_usedlist.append(k[0])
-            return 0
-        except (mdb.Error, AttributeError) as e:
-            return self.format_error(e, "get_free_net_vlan", cmd)
-    
-    def get_free_net_vlan(self):
-        '''obtain a vlan not used in any net'''
-        
-        while True:
-            self.logger.debug("net_vlan_lastused:%d  net_vlan_range:%d-%d  net_vlan_usedlist:%s", 
-                            self.net_vlan_lastused, self.net_vlan_range[0], self.net_vlan_range[1], str(self.net_vlan_usedlist))
-            self.net_vlan_lastused += 1
-            if self.net_vlan_lastused ==  self.net_vlan_range[1]:
-                #start from the begining
-                self.net_vlan_lastused =  self.net_vlan_range[0]
-                self.net_vlan_usedlist = None
-            if self.net_vlan_usedlist is None \
-            or (len(self.net_vlan_usedlist)>0 and self.net_vlan_lastused >= self.net_vlan_usedlist[-1] and len(self.net_vlan_usedlist)==25):
-                r = self.__get_used_net_vlan()
-                if r<0: return r
-                self.logger.debug("new net_vlan_usedlist %s", str(self.net_vlan_usedlist))
-            if self.net_vlan_lastused in self.net_vlan_usedlist:
-                continue
-            else:
-                return self.net_vlan_lastused
-                
-    def get_table(self, **sql_dict):
-        ''' Obtain rows from a table.
-        Atribure sql_dir: dictionary with the following key: value
-            'SELECT': [list of fields to retrieve] (by default all)
-            'FROM': string of table name (Mandatory)
-            'WHERE': dict of key:values, translated to key=value AND ... (Optional)
-            'WHERE_NOT': dict of key:values, translated to key!=value AND ... (Optional)
-            'WHERE_OR': dict of key:values, translated to key=value OR ... (Optional)
-            'WHERE_AND_OR: str 'AND' or 'OR'(by default) mark the priority to 'WHERE AND (WHERE_OR)' or (WHERE) OR WHERE_OR' (Optional)
-            'LIMIT': limit of number of rows (Optional)
-            'DISTINCT': make a select distinct to remove repeated elements
-        Return: a list with dictionarys at each row
-        '''
-        #print sql_dict
-        select_ = "SELECT "
-        if sql_dict.get("DISTINCT"):
-            select_ += "DISTINCT "
-        select_ += ("*" if not sql_dict.get('SELECT') else ",".join(map(str,sql_dict['SELECT'])) )
-        #print 'select_', select_
-        from_  = "FROM " + str(sql_dict['FROM'])
-        #print 'from_', from_
-        
-        where_and = None
-        where_or = None
-        w = sql_dict.get('WHERE')
-        if w:
-            where_and = " AND ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"),  w.keys()) )
-        w = sql_dict.get('WHERE_NOT')
-        if w:
-            where_and_not = " AND ".join(map( lambda x: str(x) + (" is not Null" if w[x] is None else "!='"+str(w[x])+"'"),  w.keys()) )
-            if where_and:
-                where_and += " AND " + where_and_not
-            else:
-                where_and = where_and_not
-        w = sql_dict.get('WHERE_OR')
-        if w:
-            where_or =  " OR ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"),  w.keys()) )
-             
-        if where_and!=None and where_or!=None:
-            if sql_dict.get("WHERE_AND_OR") == "AND":
-                where_ = "WHERE " + where_and + " AND (" + where_or + ")"
-            else:
-                where_ = "WHERE (" + where_and + ") OR " + where_or
-        elif where_and!=None and where_or==None:
-            where_ = "WHERE " + where_and
-        elif where_and==None and where_or!=None:
-            where_ = "WHERE " + where_or
-        else:
-            where_ = ""
-        #print 'where_', where_
-        limit_ = "LIMIT " + str(sql_dict['LIMIT']) if sql_dict.get("LIMIT") else ""
-        #print 'limit_', limit_
-        cmd =  " ".join( (select_, from_, where_, limit_) )
-        for retry_ in range(0,2):
-            try:
-                with self.con:
-                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    rows = self.cur.fetchall()
-                    return self.cur.rowcount, rows
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "get_table", cmd)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-        
-    def new_tenant(self, tenant_dict):
-        ''' Add one row into a table.
-        Attribure 
-            tenant_dict: dictionary with the key: value to insert
-        It checks presence of uuid and add one automatically otherwise
-        Return: (result, uuid) where result can be 0 if error, or 1 if ok
-        '''
-        for retry_ in range(0,2):
-            cmd=""
-            inserted=-1
-            try:
-                #create uuid if not provided
-                if 'uuid' not in tenant_dict:
-                    uuid = tenant_dict['uuid'] = str(myUuid.uuid1()) # create_uuid
-                else: 
-                    uuid = str(tenant_dict['uuid'])
-                #obtain tenant_id for logs
-                tenant_id = uuid
-                with self.con:
-                    self.cur = self.con.cursor()
-                    #inserting new uuid
-                    cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','tenants')" % uuid
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    #insert tenant
-                    cmd= "INSERT INTO tenants (" + \
-                        ",".join(map(str, tenant_dict.keys() ))   + ") VALUES(" + \
-                        ",".join(map(lambda x: "Null" if x is None else "'"+str(x)+"'",tenant_dict.values() )) + ")"
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    inserted = self.cur.rowcount
-                    ##inserting new log
-                    #del tenant_dict['uuid'] # not interested for the log
-                    #cmd = "INSERT INTO logs (related,level,tenant_id,uuid,description) VALUES ('tenants','debug','%s','%s',\"new tenant %s\")" % (uuid, tenant_id, str(tenant_dict))
-                    #self.logger.debug(cmd)
-                    #self.cur.execute(cmd)  
-                    #commit transaction
-                    self.cur.close()
-                if inserted == 0: return 0, uuid
-                with self.con:
-                    self.cur = self.con.cursor()
-                    #adding public flavors
-                    cmd = "INSERT INTO tenants_flavors(flavor_id,tenant_id) SELECT uuid as flavor_id,'"+ tenant_id + "' FROM flavors WHERE public = 'yes'"
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd) 
-                    self.logger.debug("attached public flavors: %s", str(self.cur.rowcount))
-                    #rows = self.cur.fetchall()
-                    #for row in rows:
-                    #    cmd = "INSERT INTO tenants_flavors(flavor_id,tenant_id) VALUES('%s','%s')" % (row[0], tenant_id)
-                    #    self.cur.execute(cmd )
-                    #adding public images
-                    cmd = "INSERT INTO tenants_images(image_id,tenant_id) SELECT uuid as image_id,'"+ tenant_id + "' FROM images WHERE public = 'yes'"
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd) 
-                    self.logger.debug("attached public images: %s", str(self.cur.rowcount))
-                    return 1, uuid
-            except (mdb.Error, AttributeError) as e:
-                if inserted==1:
-                    self.logger.warning("new_tenant DB Exception %d: %s. Command %s",e.args[0], e.args[1], cmd)
-                    return 1, uuid
-                else: 
-                    r,c = self.format_error(e, "new_tenant", cmd)
-                    if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-
-    def new_row(self, table, INSERT, add_uuid=False, log=False):
-        ''' Add one row into a table.
-        Atribure 
-            INSERT: dictionary with the key: value to insert
-            table: table where to insert
-            add_uuid: if True, it will crated an uuid key entry at INSERT if not provided
-        It checks presence of uuid and add one automatically otherwise
-        Return: (result, uuid) where result can be 0 if error, or 1 if ok
-        '''
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                if add_uuid:
-                    #create uuid if not provided
-                    if 'uuid' not in INSERT:
-                        uuid = INSERT['uuid'] = str(myUuid.uuid1()) # create_uuid
-                    else: 
-                        uuid = str(INSERT['uuid'])
-                else:
-                    uuid=None
-                with self.con:
-                    self.cur = self.con.cursor()
-                    if add_uuid:
-                        #inserting new uuid
-                        cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','%s')" % (uuid, table)
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-                    #insertion
-                    cmd= "INSERT INTO " + table +" (" + \
-                        ",".join(map(str, INSERT.keys() ))   + ") VALUES(" + \
-                        ",".join(map(lambda x: 'Null' if x is None else "'"+str(x)+"'", INSERT.values() )) + ")"
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    nb_rows = self.cur.rowcount
-                    #inserting new log
-                    #if nb_rows > 0 and log:                
-                    #    if add_uuid: del INSERT['uuid']
-                    #    #obtain tenant_id for logs
-                    #    if 'tenant_id' in INSERT: 
-                    #        tenant_id = INSERT['tenant_id']
-                    #        del INSERT['tenant_id']
-                    #    elif table == 'tenants':    
-                    #        tenant_id = uuid
-                    #    else:                       
-                    #        tenant_id = None
-                    #    if uuid is None: uuid_k = uuid_v = ""
-                    #    else: uuid_k=",uuid"; uuid_v=",'" + str(uuid) + "'"
-                    #    if tenant_id is None: tenant_k = tenant_v = ""
-                    #    else: tenant_k=",tenant_id"; tenant_v=",'" + str(tenant_id) + "'"
-                    #    cmd = "INSERT INTO logs (related,level%s%s,description) VALUES ('%s','debug'%s%s,\"new %s %s\")" \
-                    #        % (uuid_k, tenant_k, table, uuid_v, tenant_v, table[:-1], str(INSERT))
-                    #    self.logger.debug(cmd)
-                    #    self.cur.execute(cmd)                    
-                    return nb_rows, uuid
-
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "new_row", cmd)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-    
-    def __remove_quotes(self, data):
-        '''remove single quotes ' of any string content of data dictionary'''
-        for k,v in data.items():
-            if type(v) == str:
-                if "'" in v: 
-                    data[k] = data[k].replace("'","_")
-    
-    def _update_rows_internal(self, table, UPDATE, WHERE={}):
-        cmd= "UPDATE " + table +" SET " + \
-            ",".join(map(lambda x: str(x)+'='+ self.__data2db_format(UPDATE[x]),   UPDATE.keys() ));
-        if WHERE:
-            cmd += " WHERE " + " and ".join(map(lambda x: str(x)+ (' is Null' if WHERE[x] is None else"='"+str(WHERE[x])+"'" ),  WHERE.keys() ))
-        self.logger.debug(cmd)
-        self.cur.execute(cmd) 
-        nb_rows = self.cur.rowcount
-        return nb_rows, None
-
-    def update_rows(self, table, UPDATE, WHERE={}, log=False):
-        ''' Update one or several rows into a table.
-        Atributes
-            UPDATE: dictionary with the key-new_value pairs to change
-            table: table to be modified
-            WHERE: dictionary to filter target rows, key-value
-            log:   if true, a log entry is added at logs table
-        Return: (result, None) where result indicates the number of updated files
-        '''
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                #gettting uuid 
-                uuid = WHERE.get('uuid')
-
-                with self.con:
-                    self.cur = self.con.cursor()
-                    cmd= "UPDATE " + table +" SET " + \
-                        ",".join(map(lambda x: str(x)+'='+ self.__data2db_format(UPDATE[x]),   UPDATE.keys() ));
-                    if WHERE:
-                        cmd += " WHERE " + " and ".join(map(lambda x: str(x)+ (' is Null' if WHERE[x] is None else"='"+str(WHERE[x])+"'" ),  WHERE.keys() ))
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd) 
-                    nb_rows = self.cur.rowcount
-                    #if nb_rows > 0 and log:                
-                    #    #inserting new log
-                    #    if uuid is None: uuid_k = uuid_v = ""
-                    #    else: uuid_k=",uuid"; uuid_v=",'" + str(uuid) + "'"
-                    #    cmd = "INSERT INTO logs (related,level%s,description) VALUES ('%s','debug'%s,\"updating %d entry %s\")" \
-                    #        % (uuid_k, table, uuid_v, nb_rows, (str(UPDATE)).replace('"','-')  )
-                    #    self.logger.debug(cmd)
-                    #    self.cur.execute(cmd)                    
-                    return nb_rows, uuid
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "update_rows", cmd)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-            
-    def get_host(self, host_id):
-        if af.check_valid_uuid(host_id):
-            where_filter="uuid='" + host_id + "'"
-        else:
-            where_filter="name='" + host_id + "'"
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:
-                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
-                    #get HOST
-                    cmd = "SELECT uuid, user, name, ip_name, description, ranking, admin_state_up, DATE_FORMAT(created_at,'%Y-%m-%dT%H:%i:%s') as created_at \
-                        FROM hosts WHERE " + where_filter
-                    self.logger.debug(cmd) 
-                    self.cur.execute(cmd)
-                    if self.cur.rowcount == 0 : 
-                        return 0, "host '" + str(host_id) +"'not found."
-                    elif self.cur.rowcount > 1 : 
-                        return 0, "host '" + str(host_id) +"' matches more than one result."
-                    host = self.cur.fetchone()
-                    host_id = host['uuid']
-                    #get numa
-                    cmd = "SELECT id, numa_socket, hugepages, memory, admin_state_up FROM numas WHERE host_id = '" + str(host_id) + "'"
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    host['numas'] = self.cur.fetchall()
-                    for numa in host['numas']:
-                        #print "SELECT core_id, instance_id, status, thread_id, v_thread_id FROM resources_core  WHERE numa_id = '" + str(numa['id']) + "'"
-                        #get cores
-                        cmd = "SELECT core_id, instance_id, status, thread_id, v_thread_id FROM resources_core  WHERE numa_id = '" + str(numa['id']) + "'"
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-                        numa['cores'] = self.cur.fetchall()
-                        for core in numa['cores']: 
-                            if core['instance_id'] == None: del core['instance_id'], core['v_thread_id']
-                            if core['status'] == 'ok': del core['status']
-                        #get used memory
-                        cmd = "SELECT sum(consumed) as hugepages_consumed FROM resources_mem  WHERE numa_id = '" + str(numa['id']) + "' GROUP BY numa_id"
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-                        used = self.cur.fetchone()
-                        used_= int(used['hugepages_consumed']) if used != None else 0
-                        numa['hugepages_consumed'] = used_
-                        #get ports
-                        #cmd = "CALL GetPortsFromNuma(%s)'" % str(numa['id'])
-                        #self.cur.callproc('GetPortsFromNuma', (numa['id'],) )
-                        #every time a Procedure is launched you need to close and open the cursor 
-                        #under Error 2014: Commands out of sync; you can't run this command now
-                        #self.cur.close()   
-                        #self.cur = self.con.cursor(mdb.cursors.DictCursor)
-                        cmd="SELECT Mbps, pci, status, Mbps_used, instance_id, if(id=root_id,'PF','VF') as type_,\
-                             switch_port, switch_dpid, mac, source_name\
-                             FROM resources_port WHERE numa_id=%d ORDER BY root_id, type_ DESC" %  (numa['id'])
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-                        ifaces = self.cur.fetchall()
-                        #The SQL query will ensure to have SRIOV interfaces from a port first
-                        sriovs=[]
-                        Mpbs_consumed = 0
-                        numa['interfaces'] = []
-                        for iface in ifaces:
-                            if not iface["instance_id"]:
-                                del iface["instance_id"]
-                            if iface['status'] == 'ok':
-                                del iface['status']
-                            Mpbs_consumed += int(iface["Mbps_used"])
-                            del iface["Mbps_used"]
-                            if iface["type_"]=='PF':
-                                if not iface["switch_dpid"]:
-                                    del iface["switch_dpid"]
-                                if not iface["switch_port"]:
-                                    del iface["switch_port"]
-                                if sriovs:
-                                    iface["sriovs"] = sriovs
-                                if Mpbs_consumed:
-                                    iface["Mpbs_consumed"] = Mpbs_consumed
-                                del iface["type_"]
-                                numa['interfaces'].append(iface)
-                                sriovs=[]
-                                Mpbs_consumed = 0
-                            else: #VF, SRIOV
-                                del iface["switch_port"]
-                                del iface["switch_dpid"]
-                                del iface["type_"]
-                                del iface["Mbps"]
-                                sriovs.append(iface)
-
-                        #delete internal field
-                        del numa['id']
-                    return 1, host
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "get_host", cmd)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-        
-    def new_uuid(self):
-        max_retries=10
-        while max_retries>0:
-            uuid =  str( myUuid.uuid1() )
-            if self.check_uuid(uuid)[0] == 0:
-                return uuid
-            max_retries-=1
-        return uuid
-
-    def check_uuid(self, uuid):
-        '''check in the database if this uuid is already present'''
-        try:
-            cmd = "SELECT * FROM uuids where uuid='" + str(uuid) + "'"
-            with self.con:
-                self.cur = self.con.cursor(mdb.cursors.DictCursor)
-                self.logger.debug(cmd)
-                self.cur.execute(cmd)
-                rows = self.cur.fetchall()
-                return self.cur.rowcount, rows
-        except (mdb.Error, AttributeError) as e:
-            return self.format_error(e, "check_uuid", cmd)
-            
-    def __get_next_ids(self):
-        '''get next auto increment index of all table in the database'''
-        self.cur.execute("SELECT table_name,AUTO_INCREMENT FROM information_schema.tables WHERE AUTO_INCREMENT IS NOT NULL AND table_schema = DATABASE()") 
-        rows = self.cur.fetchall()
-        return self.cur.rowcount, dict(rows)
-    
-    def edit_host(self, host_id, host_dict):
-        #get next port index
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:
-                    self.cur = self.con.cursor()
-
-                    #update table host
-                    numa_list = host_dict.pop('numas', () )                    
-                    if host_dict:
-                        self._update_rows_internal("hosts", host_dict, {"uuid": host_id})
-                        
-                    where = {"host_id": host_id} 
-                    for numa_dict in numa_list:
-                        where["numa_socket"] = str(numa_dict.pop('numa_socket'))
-                        interface_list = numa_dict.pop('interfaces', () )
-                        if numa_dict:
-                            self._update_rows_internal("numas", numa_dict, where)
-                        for interface in interface_list:
-                            source_name = str(interface.pop("source_name") )
-                            if interface:
-                            #get interface id from resources_port
-                                cmd= "SELECT rp.id as id FROM resources_port as rp join numas as n on n.id=rp.numa_id join hosts as h on h.uuid=n.host_id " +\
-                                    "WHERE host_id='%s' and rp.source_name='%s'" %(host_id, source_name)
-                                self.logger.debug(cmd)
-                                self.cur.execute(cmd)
-                                row = self.cur.fetchone()
-                                if self.cur.rowcount<=0:
-                                    return -HTTP_Bad_Request, "Interface source_name='%s' from numa_socket='%s' not found" % (source_name, str(where["numa_socket"]))
-                                interface_id = row[0]
-                                self._update_rows_internal("resources_port", interface, {"root_id": interface_id})
-                return self.get_host(host_id)
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "edit_host", cmd)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-
    def new_host(self, host_dict):
        """Insert a new host, with all its numas, cores, ports and sriovs, into the database.

        Attributes:
            host_dict: dictionary with valid 'hosts' table columns. May contain a
                'numas' list; each numa may contain 'cores' and 'interfaces' lists,
                and each interface may contain an 'sriovs' list. A 'uuid' entry is
                generated when not provided. NOTE: host_dict is modified in place
                (totals are added, 'numas' is popped).
        Return: as get_host(): (positive, host-description dict) if ok,
            (negative, error text) on error. Two attempts are made when the
            database reports a request timeout.
        """
        #get next port index
        for retry_ in range(0,2):
            cmd=""
            try:
                with self.con:
                    self.cur = self.con.cursor()

                    # pre-allocate the next autoincrement ids of the involved tables;
                    # needed because sriov rows must reference their parent port id (root_id)
                    # before the parent row's id would otherwise be known
                    result, next_ids = self.__get_next_ids()
                    #print "next_ids: " + str(next_ids)
                    if result <= 0: return result, "Internal DataBase error getting next id of tables"

                    #create uuid if not provided
                    if 'uuid' not in host_dict:
                        uuid = host_dict['uuid'] = str(myUuid.uuid1()) # create_uuid
                    else: #check uuid is valid
                        uuid = str(host_dict['uuid'])
                    #    result, data = self.check_uuid(uuid)
                    #    if (result == 1):
                    #        return -1, "UUID '%s' already in use" % uuid

                    #inserting new uuid
                    cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','hosts')" % uuid
                    self.logger.debug(cmd)
                    result = self.cur.execute(cmd)

                    #insert in table host
                    numa_list = host_dict.pop('numas', [])
                    # aggregate host totals: RAM is the non-hugepages memory, cpus are
                    # the cores marked "noteligible" (not isolated for guest use)
                    #get nonhupages and nonisolated cpus
                    host_dict['RAM']=0
                    host_dict['cpus']=0
                    for numa in numa_list:
                        mem_numa = numa.get('memory', 0) - numa.get('hugepages',0)
                        if mem_numa>0:
                            host_dict['RAM'] += mem_numa
                        for core in numa.get("cores", []):
                            if "status" in core and core["status"]=="noteligible":
                                host_dict['cpus']+=1
                    host_dict['RAM']*=1024 # from GB to MB

                    keys    = ",".join(host_dict.keys())
                    values  = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", host_dict.values() ) )
                    cmd = "INSERT INTO hosts (" + keys + ") VALUES (" + values + ")"
                    self.logger.debug(cmd)
                    result = self.cur.execute(cmd)
                    #if result != 1: return -1, "Database Error while inserting at hosts table"

                    #insert numas
                    nb_numas = nb_cores = nb_ifaces = 0
                    for numa_dict in numa_list:
                        nb_numas += 1
                        interface_list = numa_dict.pop('interfaces', [])
                        core_list = numa_dict.pop('cores', [])
                        numa_dict['id'] = next_ids['numas'];   next_ids['numas'] += 1
                        numa_dict['host_id'] = uuid
                        keys    = ",".join(numa_dict.keys())
                        values  = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", numa_dict.values() ) )
                        cmd = "INSERT INTO numas (" + keys + ") VALUES (" + values + ")"
                        self.logger.debug(cmd)
                        result = self.cur.execute(cmd)

                        #insert cores
                        for core_dict in core_list:
                            nb_cores += 1
                            core_dict['numa_id'] = numa_dict['id']
                            keys    = ",".join(core_dict.keys())
                            values  = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", core_dict.values() ) )
                            cmd = "INSERT INTO resources_core (" + keys + ") VALUES (" + values + ")"
                            self.logger.debug(cmd)
                            result = self.cur.execute(cmd)

                        #insert ports
                        for port_dict in interface_list:
                            nb_ifaces += 1
                            sriov_list = port_dict.pop('sriovs', [])
                            port_dict['numa_id'] = numa_dict['id']
                            # a physical port is its own root (id == root_id)
                            port_dict['id'] = port_dict['root_id'] = next_ids['resources_port']
                            next_ids['resources_port'] += 1
                            switch_port = port_dict.get('switch_port', None)
                            switch_dpid = port_dict.get('switch_dpid', None)
                            keys    = ",".join(port_dict.keys())
                            values  = ",".join( map(lambda x:  "Null" if x is None else "'"+str(x)+"'", port_dict.values() ) )
                            cmd = "INSERT INTO resources_port (" + keys + ") VALUES (" + values + ")"
                            self.logger.debug(cmd)
                            result = self.cur.execute(cmd)

                            #insert sriovs into port table
                            # sriov virtual functions live in the same resources_port table,
                            # inheriting switch info and bandwidth from their parent port
                            for sriov_dict in sriov_list:
                                sriov_dict['switch_port'] = switch_port
                                sriov_dict['switch_dpid'] = switch_dpid
                                sriov_dict['numa_id'] = port_dict['numa_id']
                                sriov_dict['Mbps'] = port_dict['Mbps']
                                sriov_dict['root_id'] = port_dict['id']
                                sriov_dict['id'] = next_ids['resources_port']
                                # 'vlan' is not a resources_port column, drop it
                                if "vlan" in sriov_dict:
                                    del sriov_dict["vlan"]
                                next_ids['resources_port'] += 1
                                keys    = ",".join(sriov_dict.keys())
                                values  = ",".join( map(lambda x:  "Null" if x is None else "'"+str(x)+"'", sriov_dict.values() ) )
                                cmd = "INSERT INTO resources_port (" + keys + ") VALUES (" + values + ")"
                                self.logger.debug(cmd)
                                result = self.cur.execute(cmd)

                    #inserting new log
                    #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('hosts','debug','%s','new host: %d numas, %d theads, %d ifaces')" % (uuid, nb_numas, nb_cores, nb_ifaces)
                    #self.logger.debug(cmd)
                    #result = self.cur.execute(cmd)

                    #inseted ok
                # second transaction: refresh switch-port info via stored procedure
                with self.con:
                    self.cur = self.con.cursor()
                    self.logger.debug("callproc('UpdateSwitchPort', () )")
                    self.cur.callproc('UpdateSwitchPort', () )

                self.logger.debug("getting host '%s'",str(host_dict['uuid']))
                return self.get_host(host_dict['uuid'])
            except (mdb.Error, AttributeError) as e:
                r,c = self.format_error(e, "new_host", cmd)
                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-
-    def new_flavor(self, flavor_dict, tenant_id ):
-        '''Add new flavor into the database. Create uuid if not provided
-        Atributes
-            flavor_dict: flavor dictionary with the key: value to insert. Must be valid flavors columns
-            tenant_id: if not 'any', it matches this flavor/tenant inserting at tenants_flavors table
-        Return: (result, data) where result can be
-            negative: error at inserting. data contain text
-            1, inserted, data contain inserted uuid flavor
-        '''
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:
-                    self.cur = self.con.cursor()
-
-                    #create uuid if not provided
-                    if 'uuid' not in flavor_dict:
-                        uuid = flavor_dict['uuid'] = str(myUuid.uuid1()) # create_uuid
-                    else: #check uuid is valid
-                        uuid = str(flavor_dict['uuid'])
-                    #    result, data = self.check_uuid(uuid)
-                    #    if (result == 1):
-                    #        return -1, "UUID '%s' already in use" % uuid
-
-                    #inserting new uuid
-                    cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','flavors')" % uuid
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-
-                    #insert in table flavor
-                    keys    = ",".join(flavor_dict.keys())
-                    values  = ",".join( map(lambda x:  "Null" if x is None else "'"+str(x)+"'", flavor_dict.values() ) )
-                    cmd = "INSERT INTO flavors (" + keys + ") VALUES (" + values + ")"
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    #if result != 1: return -1, "Database Error while inserting at flavors table"
-
-                    #insert tenants_flavors
-                    if tenant_id != 'any':
-                        cmd = "INSERT INTO tenants_flavors (tenant_id,flavor_id) VALUES ('%s','%s')" % (tenant_id, uuid)
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-
-                    #inserting new log
-                    #del flavor_dict['uuid']
-                    #if 'extended' in flavor_dict: del flavor_dict['extended'] #remove two many information
-                    #cmd = "INSERT INTO logs (related,level,uuid, tenant_id, description) VALUES ('flavors','debug','%s','%s',\"new flavor: %s\")" \
-                    #    % (uuid, tenant_id, str(flavor_dict))
-                    #self.logger.debug(cmd)
-                    #self.cur.execute(cmd)                    
-
-                    #inseted ok
-                return 1, uuid
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "new_flavor", cmd, "update", tenant_id)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-        
-    def new_image(self, image_dict, tenant_id):
-        '''Add new image into the database. Create uuid if not provided
-        Atributes
-            image_dict: image dictionary with the key: value to insert. Must be valid images columns
-            tenant_id: if not 'any', it matches this image/tenant inserting at tenants_images table
-        Return: (result, data) where result can be
-            negative: error at inserting. data contain text
-            1, inserted, data contain inserted uuid image
-        '''
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:
-                    self.cur = self.con.cursor()
-
-                    #create uuid if not provided
-                    if 'uuid' not in image_dict:
-                        uuid = image_dict['uuid'] = str(myUuid.uuid1()) # create_uuid
-                    else: #check uuid is valid
-                        uuid = str(image_dict['uuid'])
-                    #    result, data = self.check_uuid(uuid)
-                    #    if (result == 1):
-                    #        return -1, "UUID '%s' already in use" % uuid
-
-                    #inserting new uuid
-                    cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','images')" % uuid
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-
-                    #insert in table image
-                    keys    = ",".join(image_dict.keys())
-                    values  = ",".join( map(lambda x:  "Null" if x is None else "'"+str(x)+"'", image_dict.values() ) )
-                    cmd = "INSERT INTO images (" + keys + ") VALUES (" + values + ")"
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    #if result != 1: return -1, "Database Error while inserting at images table"
-
-                    #insert tenants_images
-                    if tenant_id != 'any':
-                        cmd = "INSERT INTO tenants_images (tenant_id,image_id) VALUES ('%s','%s')" % (tenant_id, uuid)
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-
-                    ##inserting new log
-                    #cmd = "INSERT INTO logs (related,level,uuid, tenant_id, description) VALUES ('images','debug','%s','%s',\"new image: %s path: %s\")" % (uuid, tenant_id, image_dict['name'], image_dict['path'])
-                    #self.logger.debug(cmd)
-                    #self.cur.execute(cmd)                    
-
-                    #inseted ok
-                return 1, uuid
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "new_image", cmd, "update", tenant_id)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-        
    def delete_image_flavor(self, item_type, item_id, tenant_id):
        '''Delete an image or flavor from the database.
        Attributes:
            item_type: must be 'image' or 'flavor'
            item_id: the uuid of the item
            tenant_id: the associated tenant; 'any' means all tenants
        Behaviour:
            If tenant_id is 'any', the tenants_* link rows and the item itself are
            deleted in the SAME transaction, so the item is completely removed from
            all tenants or nothing is changed.
            Otherwise the link to this tenant is removed first and then, in a
            SECOND best-effort transaction, the item itself is deleted only when it
            is private ('public'='no') and no other tenant depends on it.
        Return: (n, text) with n>=1 rows deleted, 0 when not found,
            negative on error.
        '''
        for retry_ in range(0,2):
            deleted = -1       # rows removed from the tenants_* link table (-1: not attempted / failed)
            deleted_item = -1  # rows removed from the images/flavors table itself
            result = (-HTTP_Internal_Server_Error, "internal error")
            cmd=""
            try:
                with self.con:
                    self.cur = self.con.cursor()
                    cmd = "DELETE FROM tenants_%ss WHERE %s_id = '%s'" % (item_type, item_type, item_id)
                    if tenant_id != 'any':
                        cmd += " AND tenant_id = '%s'" % tenant_id
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)
                    deleted = self.cur.rowcount
                    if tenant_id == 'any': #delete from images/flavors in the SAME transaction
                        cmd = "DELETE FROM %ss WHERE uuid = '%s'" % (item_type, item_id)
                        self.logger.debug(cmd)
                        self.cur.execute(cmd)
                        deleted = self.cur.rowcount
                        if deleted>=1:
                            #delete uuid
                            cmd = "DELETE FROM uuids WHERE uuid = '%s'" % item_id
                            self.logger.debug(cmd)
                            self.cur.execute(cmd)
                            ##inserting new log
                            #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \
                            #       VALUES ('%ss','debug','%s','%s','delete %s completely')" % \
                            #       (item_type, item_id, tenant_id, item_type)
                            #self.logger.debug(cmd)
                            #self.cur.execute(cmd)
                            return deleted, "%s '%s' completely deleted" % (item_type, item_id)
                        return 0, "%s '%s' not found" % (item_type, item_id)

                    if deleted == 1:
                        ##inserting new log
                        #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \
                        #        VALUES ('%ss','debug','%s','%s','delete %s reference for this tenant')" % \
                        #        (item_type, item_id, tenant_id, item_type)
                        #self.logger.debug(cmd)
                        #self.cur.execute(cmd)

                        #commit transaction
                        self.cur.close()
                #if tenant!=any  delete from images/flavors in OTHER transaction. If fails is because dependencies so that not return error
                if deleted==1:
                    with self.con:
                        self.cur = self.con.cursor()

                        #delete image/flavor if not public
                        cmd = "DELETE FROM %ss WHERE uuid = '%s' AND public = 'no'" % (item_type, item_id)
                        self.logger.debug(cmd)
                        self.cur.execute(cmd)
                        deleted_item = self.cur.rowcount
                        if deleted_item == 1:
                            #delete uuid
                            cmd = "DELETE FROM uuids WHERE uuid = '%s'" % item_id
                            self.logger.debug(cmd)
                            self.cur.execute(cmd)
                            ##inserting new log
                            #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \
                            #       VALUES ('%ss','debug','%s','%s','delete %s completely')" % \
                            #       (item_type, item_id, tenant_id, item_type)
                            #self.logger.debug(cmd)
                            #self.cur.execute(cmd)
            except (mdb.Error, AttributeError) as e:
                #print "delete_%s DB Exception %d: %s" % (item_type, e.args[0], e.args[1])
                # only record the error if the link delete itself failed
                if deleted <0:
                    result = self.format_error(e, "delete_"+item_type, cmd, "delete", "servers")
            finally:
                # NOTE(review): the returns inside this 'finally' decide the outcome of
                # both the success and the exception paths; when the error is a request
                # timeout on the first attempt, control falls through and the loop retries.
                if deleted==1:
                    return 1, "%s '%s' from tenant '%s' %sdeleted" % \
                    (item_type, item_id, tenant_id, "completely " if deleted_item==1 else "")
                elif deleted==0:
                    return 0, "%s '%s' from tenant '%s' not found" % (item_type, item_id, tenant_id)
                else:
                    if result[0]!=-HTTP_Request_Timeout or retry_==1: return result
-            
-    def delete_row(self, table, uuid):
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:
-                    #delete host
-                    self.cur = self.con.cursor()
-                    cmd = "DELETE FROM %s WHERE uuid = '%s'" % (table, uuid)
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    deleted = self.cur.rowcount
-                    if deleted == 1:
-                        #delete uuid
-                        if table == 'tenants': tenant_str=uuid
-                        else: tenant_str='Null'
-                        self.cur = self.con.cursor()
-                        cmd = "DELETE FROM uuids WHERE uuid = '%s'" % uuid
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-                        ##inserting new log
-                        #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) VALUES ('%s','debug','%s','%s','delete %s')" % (table, uuid, tenant_str, table[:-1])
-                        #self.logger.debug(cmd)
-                        #self.cur.execute(cmd)                    
-                return deleted, table[:-1] + " '%s' %s" %(uuid, "deleted" if deleted==1 else "not found")
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "delete_row", cmd, "delete", 'instances' if table=='hosts' or table=='tenants' else 'dependencies')
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-
-    def delete_row_by_key(self, table, key, value):
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:
-                    #delete host
-                    self.cur = self.con.cursor()
-                    cmd = "DELETE FROM %s" % (table)
-                    if key!=None:
-                        if value!=None:
-                            cmd += " WHERE %s = '%s'" % (key, value)
-                        else:
-                            cmd += " WHERE %s is null" % (key)
-                    else: #delete all
-                        pass
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    deleted = self.cur.rowcount
-                    if deleted < 1:
-                        return -1, 'Not found'
-                        #delete uuid
-                    return 0, deleted
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "delete_row_by_key", cmd, "delete", 'instances' if table=='hosts' or table=='tenants' else 'dependencies')
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-                
-    def delete_row_by_dict(self, **sql_dict):
-        ''' Deletes rows from a table.
-        Attribute sql_dir: dictionary with the following key: value
-            'FROM': string of table name (Mandatory)
-            'WHERE': dict of key:values, translated to key=value AND ... (Optional)
-            'WHERE_NOT': dict of key:values, translated to key<>value AND ... (Optional)
-            'WHERE_NOTNULL': (list or tuple of items that must not be null in a where ... (Optional)
-            'LIMIT': limit of number of rows (Optional)
-        Return: the (number of items deleted, descriptive test) if ok; (negative, descriptive text) if error
-        '''
-        #print sql_dict
-        from_  = "FROM " + str(sql_dict['FROM'])
-        #print 'from_', from_
-        if 'WHERE' in sql_dict and len(sql_dict['WHERE']) > 0:
-            w=sql_dict['WHERE']
-            where_ = "WHERE " + " AND ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"),  w.keys()) ) 
-        else: where_ = ""
-        if 'WHERE_NOT' in sql_dict and len(sql_dict['WHERE_NOT']) > 0: 
-            w=sql_dict['WHERE_NOT']
-            where_2 = " AND ".join(map( lambda x: str(x) + (" is not Null" if w[x] is None else "<>'"+str(w[x])+"'"),  w.keys()) )
-            if len(where_)==0:   where_ = "WHERE " + where_2
-            else:                where_ = where_ + " AND " + where_2
-        if 'WHERE_NOTNULL' in sql_dict and len(sql_dict['WHERE_NOTNULL']) > 0: 
-            w=sql_dict['WHERE_NOTNULL']
-            where_2 = " AND ".join(map( lambda x: str(x) + " is not Null",  w) )
-            if len(where_)==0:   where_ = "WHERE " + where_2
-            else:                where_ = where_ + " AND " + where_2
-        #print 'where_', where_
-        limit_ = "LIMIT " + str(sql_dict['LIMIT']) if 'LIMIT' in sql_dict else ""
-        #print 'limit_', limit_
-        cmd =  " ".join( ("DELETE", from_, where_, limit_) )
-        self.logger.debug(cmd)
-        for retry_ in range(0,2):
-            try:
-                with self.con:
-                    #delete host
-                    self.cur = self.con.cursor()
-                    self.cur.execute(cmd)
-                    deleted = self.cur.rowcount
-                return deleted, "%d deleted from %s" % (deleted, sql_dict['FROM'][:-1] )
-            except (mdb.Error, AttributeError) as e:
-                r,c =  self.format_error(e, "delete_row_by_dict", cmd, "delete", 'dependencies')
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-
-    
    def get_instance(self, instance_id):
        """Return the full description of an instance.

        Builds a dictionary with the instances row, its bridge/ovs networks, and
        an 'extended' section containing devices and the per-numa resources
        (memory, cores/paired-threads/threads and dedicated/SRIOV interfaces)
        that the instance consumes.
        Return: (1, instance_dict) if found, (0, text) if not found,
            (negative, text) on database error.
        """
        for retry_ in range(0,2):
            cmd=""
            try:
                with self.con:
                    # DictCursor: fetched rows are dictionaries keyed by column alias
                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
                    #get INSTANCE
                    cmd = "SELECT uuid, name, description, progress, host_id, flavor_id, image_id, status, last_error, "\
                        "tenant_id, ram, vcpus, created_at FROM instances WHERE uuid='{}'".format(instance_id)
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)
                    # NOTE(review): message lacks a space before "not found"
                    if self.cur.rowcount == 0 : return 0, "instance '" + str(instance_id) +"'not found."
                    instance = self.cur.fetchone()
                    #get networks
                    cmd = "SELECT uuid as iface_id, net_id, mac as mac_address, ip_address, name, Mbps as bandwidth, "\
                        "vpci, model FROM ports WHERE (type='instance:bridge' or type='instance:ovs') AND "\
                        "instance_id= '{}'".format(instance_id)
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)
                    if self.cur.rowcount > 0 :
                        instance['networks'] = self.cur.fetchall()

                    #get extended
                    extended = {}
                    #get devices
                    cmd = "SELECT type, vpci, image_id, xml,dev FROM instance_devices WHERE instance_id = '%s' " %  str(instance_id)
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)
                    if self.cur.rowcount > 0 :
                        extended['devices'] = self.cur.fetchall()
                    #get numas
                    numas = []
                    cmd = "SELECT id, numa_socket as source FROM numas WHERE host_id = '" + str(instance['host_id']) + "'"
                    self.logger.debug(cmd)
                    self.cur.execute(cmd)
                    host_numas = self.cur.fetchall()
                    #print 'host_numas', host_numas
                    # walk every numa of the hosting host and collect what this
                    # instance consumes on each of them
                    for k in host_numas:
                        numa_id = str(k['id'])
                        numa_dict ={}
                        #get memory
                        cmd = "SELECT consumed FROM resources_mem WHERE instance_id = '%s' AND numa_id = '%s'" % ( instance_id, numa_id)
                        self.logger.debug(cmd)
                        self.cur.execute(cmd)
                        if self.cur.rowcount > 0:
                            mem_dict = self.cur.fetchone()
                            numa_dict['memory'] = mem_dict['consumed']
                        #get full cores
                        # plain (tuple) cursor here, so rows are indexed. Columns:
                        # 0 core_id, 1 paired, 2 v1(min v_thread_id), 3 v2(max v_thread_id),
                        # 4 nb(count of used threads), 5 t1(min thread_id), 6 t2(max thread_id)
                        cursor2 = self.con.cursor()
                        cmd = "SELECT core_id, paired, MIN(v_thread_id) as v1, MAX(v_thread_id) as v2, COUNT(instance_id) as nb, MIN(thread_id) as t1, MAX(thread_id) as t2 FROM resources_core WHERE instance_id = '%s' AND numa_id = '%s' GROUP BY core_id,paired" % ( str(instance_id), numa_id)
                        self.logger.debug(cmd)
                        cursor2.execute(cmd)
                        core_list = [];     core_source = []
                        paired_list = [];   paired_source = []
                        thread_list = [];   thread_source = []
                        if cursor2.rowcount > 0:
                            cores = cursor2.fetchall()
                            # classify each physical core by how its two hw threads are used
                            for core in cores:
                                if core[4] == 2: #number of used threads from core
                                    if core[3] == core[2]:  #only one thread asigned to VM, so completely core
                                        core_list.append(core[2])
                                        core_source.append(core[5])
                                    elif core[1] == 'Y':
                                        # both hw threads used as a declared pair
                                        paired_list.append(core[2:4])
                                        paired_source.append(core[5:7])
                                    else:
                                        # both hw threads used as independent vcpus
                                        thread_list.extend(core[2:4])
                                        thread_source.extend(core[5:7])

                                else:
                                    thread_list.append(core[2])
                                    thread_source.append(core[5])
                            if len(core_list) > 0:
                                numa_dict['cores'] = len(core_list)
                                numa_dict['cores-id'] = core_list
                                numa_dict['cores-source'] = core_source
                            if len(paired_list) > 0:
                                numa_dict['paired-threads'] = len(paired_list)
                                numa_dict['paired-threads-id'] = paired_list
                                numa_dict['paired-threads-source'] = paired_source
                            if len(thread_list) > 0:
                                numa_dict['threads'] = len(thread_list)
                                numa_dict['threads-id'] = thread_list
                                numa_dict['threads-source'] = thread_source

                        #get dedicated ports and SRIOV
                        cmd = "SELECT port_id as iface_id, p.vlan as vlan, p.mac as mac_address, net_id, if(model='PF',\
                            'yes',if(model='VF','no','yes:sriov')) as dedicated, rp.Mbps as bandwidth, name, vpci, \
                            pci as source \
                            FROM resources_port as rp join ports as p on port_id=uuid  WHERE p.instance_id = '%s' AND numa_id = '%s' and p.type='instance:data'" % (instance_id, numa_id)
                        self.logger.debug(cmd)
                        self.cur.execute(cmd)
                        if self.cur.rowcount > 0:
                            numa_dict['interfaces'] = self.cur.fetchall()
                            #print 'interfaces', numa_dict

                        # only report numas where the instance actually uses resources
                        if len(numa_dict) > 0 :
                            numa_dict['source'] = k['source'] #numa socket
                            numas.append(numa_dict)

                    if len(numas) > 0 :  extended['numas'] = numas
                    if len(extended) > 0 :  instance['extended'] = extended
                    # presumably strips None-valued entries in place -- see auxiliary_functions (TODO confirm)
                    af.DeleteNone(instance)
                    return 1, instance
            except (mdb.Error, AttributeError) as e:
                r,c = self.format_error(e, "get_instance", cmd)
                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-        
-    def get_numas(self, requirements, prefered_host_id=None, only_of_ports=True):
-        '''Obtain a valid NUMA/HOST for deployment a VM
-        requirements: contain requirement regarding:
-            requirements['ram']: Non huge page memory in MB; 0 to skip 
-            requirements['vcpus']: Non isolated cpus; 0 to skip 
-            requirements['numa']: Requiremets to be fixed in ONE Numa node
-                requirements['numa']['memory']: Huge page memory in GB at ; 0 for any 
-                requirements['numa']['proc_req_type']: Type of processor, cores or threads 
-                requirements['numa']['proc_req_nb']: Number of isolated cpus  
-                requirements['numa']['port_list']: Physical NIC ports list ; [] for any 
-                requirements['numa']['sriov_list']: Virtual function NIC ports list ; [] for any
-        prefered_host_id: if not None return this host if it match 
-        only_of_ports: if True only those ports conected to the openflow (of) are valid,
-            that is, with switch_port information filled; if False, all NIC ports are valid. 
-        Return a valid numa and host
-        '''
-         
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:              
-#                     #Find numas of prefered host
-#                     prefered_numas = ()
-#                     if prefered_host_id != None:
-#                         self.cur = self.con.cursor()
-#                         self.cur.execute("SELECT id FROM numas WHERE host_id='%s'" + prefered_host_id)
-#                         prefered_numas = self.cur.fetchall()
-#                         self.cur.close()
-                        
-                    #Find valid host for the ram and vcpus
-                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
-                    cmd = "CALL GetHostByMemCpu(%s, %s)" % (str(requirements['ram']), str(requirements['vcpus']))
-                    self.logger.debug(cmd)   
-                    self.cur.callproc('GetHostByMemCpu', (str(requirements['ram']), str(requirements['vcpus'])) )
-                    valid_hosts = self.cur.fetchall()
-                    self.cur.close()   
-                    self.cur = self.con.cursor()
-                    match_found = False
-                    if len(valid_hosts)<=0:
-                        error_text = 'No room at data center. Cannot find a host with %s MB memory and %s cpus available' % (str(requirements['ram']), str(requirements['vcpus'])) 
-                        #self.logger.debug(error_text)
-                        return -1, error_text
-                    
-                    #elif req_numa != None:
-                    #Find valid numa nodes for memory requirements
-                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
-                    cmd = "CALL GetNumaByMemory(%s)" % str(requirements['numa']['memory'])
-                    self.logger.debug(cmd)   
-                    self.cur.callproc('GetNumaByMemory', (requirements['numa']['memory'],) )
-                    valid_for_memory = self.cur.fetchall()
-                    self.cur.close()   
-                    self.cur = self.con.cursor()
-                    if len(valid_for_memory)<=0:
-                        error_text = 'No room at data center. Cannot find a host with %s GB Hugepages memory available' % str(requirements['numa']['memory']) 
-                        #self.logger.debug(error_text)
-                        return -1, error_text
-
-                    #Find valid numa nodes for processor requirements
-                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
-                    if requirements['numa']['proc_req_type'] == 'threads':
-                        cpu_requirement_text='cpu-threads'
-                        cmd = "CALL GetNumaByThread(%s)" % str(requirements['numa']['proc_req_nb'])
-                        self.logger.debug(cmd) 
-                        self.cur.callproc('GetNumaByThread', (requirements['numa']['proc_req_nb'],) )
-                    else:
-                        cpu_requirement_text='cpu-cores'
-                        cmd = "CALL GetNumaByCore(%s)" % str(requirements['numa']['proc_req_nb'])
-                        self.logger.debug(cmd) 
-                        self.cur.callproc('GetNumaByCore', (requirements['numa']['proc_req_nb'],) )
-                    valid_for_processor = self.cur.fetchall()
-                    self.cur.close()   
-                    self.cur = self.con.cursor()
-                    if len(valid_for_processor)<=0:
-                        error_text = 'No room at data center. Cannot find a host with %s %s available' % (str(requirements['numa']['proc_req_nb']),cpu_requirement_text)  
-                        #self.logger.debug(error_text)
-                        return -1, error_text
-
-                    #Find the numa nodes that comply for memory and processor requirements
-                    #sorting from less to more memory capacity
-                    valid_numas = []
-                    for m_numa in valid_for_memory:
-                        numa_valid_for_processor = False
-                        for p_numa in valid_for_processor:
-                            if m_numa['numa_id'] == p_numa['numa_id']:
-                                numa_valid_for_processor = True
-                                break
-                        numa_valid_for_host = False
-                        prefered_numa = False
-                        for p_host in valid_hosts:
-                            if m_numa['host_id'] == p_host['uuid']:
-                                numa_valid_for_host = True
-                                if p_host['uuid'] == prefered_host_id:
-                                    prefered_numa = True
-                                break
-                        if numa_valid_for_host and numa_valid_for_processor:
-                            if prefered_numa:
-                                valid_numas.insert(0, m_numa['numa_id'])
-                            else:
-                                valid_numas.append(m_numa['numa_id'])
-                    if len(valid_numas)<=0:
-                        error_text = 'No room at data center. Cannot find a host with %s MB hugepages memory and %s %s available in the same numa' %\
-                            (requirements['numa']['memory'], str(requirements['numa']['proc_req_nb']),cpu_requirement_text)  
-                        #self.logger.debug(error_text)
-                        return -1, error_text
-                    
-    #                 print 'Valid numas list: '+str(valid_numas)
-
-                    #Find valid numa nodes for interfaces requirements
-                    #For each valid numa we will obtain the number of available ports and check if these are valid          
-                    match_found = False    
-                    for numa_id in valid_numas:
-    #                     print 'Checking '+str(numa_id)
-                        match_found = False
-                        self.cur = self.con.cursor(mdb.cursors.DictCursor)
-                        if only_of_ports:
-                            cmd="CALL GetAvailablePorts(%s)" % str(numa_id) 
-                            self.logger.debug(cmd)
-                            self.cur.callproc('GetAvailablePorts', (numa_id,) )
-                        else:
-                            cmd="CALL GetAllAvailablePorts(%s)" % str(numa_id) 
-                            self.logger.debug(cmd)
-                            self.cur.callproc('GetAllAvailablePorts', (numa_id,) )
-                        available_ports = self.cur.fetchall()
-                        self.cur.close()   
-                        self.cur = self.con.cursor()
-
-                        #Set/reset reservations
-                        for port in available_ports:
-                            port['Mbps_reserved'] = 0
-                            port['SRIOV_reserved'] = 0
-
-                        #Try to allocate physical ports
-                        physical_ports_found = True
-                        for iface in requirements['numa']['port_list']:
-    #                         print '\t\tchecking iface: '+str(iface)
-                            portFound = False
-                            for port in available_ports:
-    #                             print '\t\t\tfor port: '+str(port)
-                                #If the port is not empty continue
-                                if port['Mbps_free'] != port['Mbps'] or port['Mbps_reserved'] != 0:
-    #                                 print '\t\t\t\t Not empty port'
-                                    continue;
-                                #If the port speed is not enough continue
-                                if port['Mbps'] < iface['bandwidth']:
-    #                                 print '\t\t\t\t Not enough speed'
-                                    continue;
-
-                                #Otherwise this is a valid port  
-                                port['Mbps_reserved'] = port['Mbps']
-                                port['SRIOV_reserved'] = 0
-                                iface['port_id'] = port['port_id']
-                                iface['vlan'] = None
-                                iface['mac'] = port['mac']
-                                iface['switch_port'] = port['switch_port']
-    #                             print '\t\t\t\t Dedicated port found '+str(port['port_id'])
-                                portFound = True
-                                break;
-
-                            #if all ports have been checked and no match has been found
-                            #this is not a valid numa
-                            if not portFound:
-    #                             print '\t\t\t\t\tAll ports have been checked and no match has been found for numa '+str(numa_id)+'\n\n'
-                                physical_ports_found = False
-                                break
-
-                        #if there is no match continue checking the following numa
-                        if not physical_ports_found:
-                            continue
-
-                        #Try to allocate SR-IOVs
-                        sriov_ports_found = True
-                        for iface in requirements['numa']['sriov_list']:
-    #                         print '\t\tchecking iface: '+str(iface)
-                            portFound = False
-                            for port in available_ports:
-    #                             print '\t\t\tfor port: '+str(port)
-                                #If there are not available SR-IOVs continue
-                                if port['availableSRIOV'] - port['SRIOV_reserved'] <= 0:
-    #                                 print '\t\t\t\t Not enough SR-IOV'
-                                    continue;
-                                #If the port free speed is not enough continue
-                                if port['Mbps_free'] - port['Mbps_reserved'] < iface['bandwidth']:
-    #                                 print '\t\t\t\t Not enough speed'
-                                    continue;
-
-                                #Otherwise this is a valid port  
-                                port['Mbps_reserved'] += iface['bandwidth']
-                                port['SRIOV_reserved'] += 1
-    #                             print '\t\t\t\t SR-IOV found '+str(port['port_id'])
-                                iface['port_id'] = port['port_id']
-                                iface['vlan'] = None
-                                iface['mac'] = port['mac']
-                                iface['switch_port'] = port['switch_port']
-                                portFound = True
-                                break;
-
-                            #if all ports have been checked and no match has been found
-                            #this is not a valid numa
-                            if not portFound:
-    #                             print '\t\t\t\t\tAll ports have been checked and no match has been found for numa '+str(numa_id)+'\n\n'
-                                sriov_ports_found = False
-                                break
-
-                        #if there is no match continue checking the following numa
-                        if not sriov_ports_found:
-                            continue
-
-
-                        if sriov_ports_found and physical_ports_found:
-                            match_found = True
-                            break
-
-                    if not match_found:
-                        error_text = 'No room at data center. Cannot find a host with the required hugepages, vcpus and interfaces'  
-                        #self.logger.debug(error_text)
-                        return -1, error_text
-
-                    #self.logger.debug('Full match found in numa %s', str(numa_id))
-
-                for numa in valid_for_processor:
-                    if numa_id==numa['numa_id']:
-                        host_id=numa['host_id']
-                        break
-                return 0, {'numa_id':numa_id, 'host_id': host_id, }
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "get_numas", cmd)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-
-    def new_instance(self, instance_dict, nets, ports_to_free):
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:
-                    self.cur = self.con.cursor()
-
-                    #create uuid if not provided
-                    if 'uuid' not in instance_dict:
-                        uuid = instance_dict['uuid'] = str(myUuid.uuid1()) # create_uuid
-                    else: #check uuid is valid
-                        uuid = str(instance_dict['uuid'])
-
-
-                    #inserting new uuid
-                    cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'instances')" % (uuid, uuid)
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-
-                    #insert in table instance
-                    extended = instance_dict.pop('extended', None);
-                    bridgedifaces = instance_dict.pop('bridged-ifaces', () );
-
-                    keys    = ",".join(instance_dict.keys())
-                    values  = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", instance_dict.values() ) )
-                    cmd = "INSERT INTO instances (" + keys + ") VALUES (" + values + ")"
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    #if result != 1: return -1, "Database Error while inserting at instances table"
-
-                    #insert resources
-                    nb_bridge_ifaces = nb_cores = nb_ifaces = nb_numas = 0
-                    #insert bridged_ifaces
-
-                    for iface in bridgedifaces:
-                        #generate and insert an iface uuid
-                        if 'enable_dhcp' in iface and iface['enable_dhcp']:
-                            dhcp_first_ip = iface["dhcp_first_ip"]
-                            del iface["dhcp_first_ip"]
-                            dhcp_last_ip = iface["dhcp_last_ip"]
-                            del iface["dhcp_last_ip"]
-                            dhcp_cidr = iface["cidr"]
-                            del iface["cidr"]
-                            del iface["enable_dhcp"]
-                            used_dhcp_ips = self._get_dhcp_ip_used_list(iface["net_id"])
-                            iface["ip_address"] = self.get_free_ip_from_range(dhcp_first_ip, dhcp_last_ip,
-                                                                              dhcp_cidr, used_dhcp_ips)
-
-                        iface['uuid'] = str(myUuid.uuid1()) # create_uuid
-                        cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'ports')" % (iface['uuid'], uuid)
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-                        #insert iface
-                        iface['instance_id'] = uuid
-                        # iface['type'] = 'instance:bridge'
-                        if 'name' not in iface: iface['name']="br"+str(nb_bridge_ifaces)
-                        iface['Mbps']=iface.pop('bandwidth', None)
-                        if 'mac_address' not in iface:
-                            iface['mac'] = af.gen_random_mac()
-                        else:
-                            iface['mac'] = iface['mac_address']
-                            del iface['mac_address']
-                        #iface['mac']=iface.pop('mac_address', None)  #for leaving mac generation to libvirt
-                        keys    = ",".join(iface.keys())
-                        values  = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", iface.values() ) )
-                        cmd = "INSERT INTO ports (" + keys + ") VALUES (" + values + ")"
-                        self.logger.debug(cmd)
-                        self.cur.execute(cmd)
-                        nb_bridge_ifaces += 1
-
-                    if extended is not None:
-                        if 'numas' not in extended or extended['numas'] is None: extended['numas'] = ()
-                        for numa in extended['numas']:
-                            nb_numas += 1
-                            #cores
-                            if 'cores' not in numa or numa['cores'] is None: numa['cores'] = ()
-                            for core in numa['cores']:
-                                nb_cores += 1
-                                cmd = "UPDATE resources_core SET instance_id='%s'%s%s WHERE id='%s'" \
-                                    % (uuid, \
-                                    (",v_thread_id='" + str(core['vthread']) + "'") if 'vthread' in core else '', \
-                                    (",paired='"      + core['paired']  + "'") if 'paired' in core else '', \
-                                    core['id'] )
-                                self.logger.debug(cmd)
-                                self.cur.execute(cmd)
-                            #interfaces
-                            if 'interfaces' not in numa or numa['interfaces'] is None: numa['interfaces'] = ()
-                            for iface in numa['interfaces']:
-                                #generate and insert an uuid; iface[id]=iface_uuid; iface[uuid]= net_id
-                                iface['id'] = str(myUuid.uuid1()) # create_uuid
-                                cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'ports')" % (iface['id'], uuid)
-                                self.logger.debug(cmd)
-                                self.cur.execute(cmd)
-                                nb_ifaces += 1
-                                mbps_=("'"+str(iface['Mbps_used'])+"'") if 'Mbps_used' in iface and iface['Mbps_used'] is not None else "Mbps"
-                                if iface["dedicated"]=="yes": 
-                                    iface_model="PF"
-                                elif iface["dedicated"]=="yes:sriov": 
-                                    iface_model="VFnotShared"
-                                elif iface["dedicated"]=="no": 
-                                    iface_model="VF"
-                                #else error
-                                INSERT=(iface['mac_address'], iface['switch_port'], iface.get('vlan',None), 'instance:data', iface['Mbps_used'], iface['id'],
-                                        uuid, instance_dict['tenant_id'], iface.get('name',None), iface.get('vpci',None), iface.get('uuid',None), iface_model )
-                                cmd = "INSERT INTO ports (mac,switch_port,vlan,type,Mbps,uuid,instance_id,tenant_id,name,vpci,net_id, model) " + \
-                                       " VALUES (" + ",".join(map(lambda x: 'Null' if x is None else "'"+str(x)+"'", INSERT )) + ")"
-                                self.logger.debug(cmd)
-                                self.cur.execute(cmd)
-                                if 'uuid' in iface:
-                                    nets.append(iface['uuid'])
-                                    
-                                #discover if this port is not used by anyone
-                                cmd = "SELECT source_name, mac FROM ( SELECT root_id, count(instance_id) as used FROM resources_port" \
-                                      " WHERE root_id=(SELECT root_id from resources_port WHERE id='%s')"\
-                                      " GROUP BY root_id ) AS A JOIN resources_port as B ON A.root_id=B.id AND A.used=0" % iface['port_id'] 
-                                self.logger.debug(cmd)
-                                self.cur.execute(cmd)
-                                ports_to_free += self.cur.fetchall()
-
-                                cmd = "UPDATE resources_port SET instance_id='%s', port_id='%s',Mbps_used=%s WHERE id='%s'" \
-                                    % (uuid, iface['id'], mbps_, iface['port_id'])
-                                #if Mbps_used not supplied, set the same value as 'Mbps', that is the total
-                                self.logger.debug(cmd)
-                                self.cur.execute(cmd)
-                            #memory
-                            if 'memory' in numa and numa['memory'] is not None and numa['memory']>0:
-                                cmd = "INSERT INTO resources_mem (numa_id, instance_id, consumed) VALUES ('%s','%s','%s')" % (numa['numa_id'], uuid, numa['memory'])
-                                self.logger.debug(cmd)
-                                self.cur.execute(cmd)
-                        if 'devices' not in extended or extended['devices'] is None: extended['devices'] = ()
-                        for device in extended['devices']:
-                            if 'vpci' in device:    vpci = "'" + device['vpci'] + "'"
-                            else:                   vpci = 'Null'
-                            if 'image_id' in device: image_id = "'" + device['image_id'] + "'"
-                            else:                    image_id = 'Null'
-                            if 'xml' in device: xml = "'" + device['xml'] + "'"
-                            else:                    xml = 'Null'
-                            if 'dev' in device: dev = "'" + device['dev'] + "'"
-                            else:                    dev = 'Null'
-                            cmd = "INSERT INTO instance_devices (type, instance_id, image_id, vpci, xml, dev) VALUES ('%s','%s', %s, %s, %s, %s)" % \
-                                (device['type'], uuid, image_id, vpci, xml, dev)
-                            self.logger.debug(cmd)
-                            self.cur.execute(cmd)
-                    ##inserting new log
-                    #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('instances','debug','%s','new instance: %d numas, %d theads, %d ifaces %d bridge_ifaces')" % (uuid, nb_numas, nb_cores, nb_ifaces, nb_bridge_ifaces)
-                    #self.logger.debug(cmd)
-                    #self.cur.execute(cmd)                    
-
-                    #inserted ok
-                return 1, uuid 
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "new_instance", cmd)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-
-    def get_free_ip_from_range(self, first_ip, last_ip, cidr, ip_used_list):
-        """
-        Calculate a free IP from a range given
-        :param first_ip: First dhcp ip range
-        :param last_ip: Last dhcp ip range
-        :param cidr: net cidr
-        :param ip_used_list: contain all used ips to avoid ip collisions
-        :return:
-        """
-
-        ip_tools = IPNetwork(cidr)
-        cidr_len = ip_tools.prefixlen
-        ips = IPNetwork(first_ip + '/' + str(cidr_len))
-        ip_used_list.append(str(ips[0])) # first ip
-        ip_used_list.append(str(ips[1])) # gw ip
-        ip_used_list.append(str(ips[-1])) # broadcast ip
-        for vm_ip in ips:
-            if str(vm_ip) not in ip_used_list:
-                return vm_ip
-
-        return None
-
-    def _get_dhcp_ip_used_list(self, net_id):
-        """
-        Retrieve from DB all IPs already used by the DHCP server for a given net
-        :param net_id:
-        :return:
-        """
-        WHERE={'type': 'instance:ovs', 'net_id': net_id}
-        for retry_ in range(0, 2):
-            cmd = ""
-            self.cur = self.con.cursor(mdb.cursors.DictCursor)
-            select_ = "SELECT uuid, ip_address FROM ports "
-
-            if WHERE is None or len(WHERE) == 0:
-                where_ = ""
-            else:
-                where_ = "WHERE " + " AND ".join(
-                    map(lambda x: str(x) + (" is Null" if WHERE[x] is None else "='" + str(WHERE[x]) + "'"),
-                        WHERE.keys()))
-            limit_ = "LIMIT 100"
-            cmd = " ".join((select_, where_, limit_))
-            self.logger.debug(cmd)
-            self.cur.execute(cmd)
-            ports = self.cur.fetchall()
-            ip_address_list = []
-            for port in ports:
-                ip_address_list.append(port['ip_address'])
-
-            return ip_address_list
-
-
-    def delete_instance(self, instance_id, tenant_id, net_dataplane_list, ports_to_free, net_ovs_list, logcause="requested by http"):
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:
-                    self.cur = self.con.cursor()
-                    #get INSTANCE
-                    cmd = "SELECT uuid FROM instances WHERE uuid='%s' AND tenant_id='%s'" % (instance_id, tenant_id)
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    if self.cur.rowcount == 0 : return 0, "instance %s not found in tenant %s" % (instance_id, tenant_id)
-
-                    #delete bridged ifaces, instance_devices, resources_mem; done by database: it is automatic by Database; FOREIGN KEY DELETE CASCADE
-                    
-                    #get nets affected
-                    cmd = "SELECT DISTINCT net_id from ports WHERE instance_id = '%s' AND net_id is not Null AND type='instance:data'" % instance_id
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    net_list__ = self.cur.fetchall()
-                    for net in net_list__:
-                        net_dataplane_list.append(net[0])
-
-                    # get ovs management nets
-                    cmd = "SELECT DISTINCT net_id, vlan, ip_address, mac FROM ports WHERE instance_id='{}' AND net_id is not Null AND "\
-                            "type='instance:ovs'".format(instance_id)
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    net_ovs_list += self.cur.fetchall()
-
-                    #get dataplane interfaces released by this VM; both PF and VF with no other VF
-                    cmd="SELECT source_name, mac FROM (SELECT root_id, count(instance_id) as used FROM resources_port WHERE instance_id='%s' GROUP BY root_id ) AS A" % instance_id \
-                        +  " JOIN (SELECT root_id, count(instance_id) as used FROM resources_port GROUP BY root_id) AS B ON A.root_id=B.root_id AND A.used=B.used"\
-                        +  " JOIN resources_port as C ON A.root_id=C.id" 
-#                    cmd = "SELECT DISTINCT root_id FROM resources_port WHERE instance_id = '%s'" % instance_id
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    ports_to_free += self.cur.fetchall()
-
-                    #update resources port
-                    cmd = "UPDATE resources_port SET instance_id=Null, port_id=Null, Mbps_used='0' WHERE instance_id = '%s'" % instance_id
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    
-#                     #filter dataplane ports used by this VM that now are free
-#                     for port in ports_list__:
-#                         cmd = "SELECT mac, count(instance_id) FROM resources_port WHERE root_id = '%s'" % port[0]
-#                         self.logger.debug(cmd)
-#                         self.cur.execute(cmd)
-#                         mac_list__ = self.cur.fetchone()
-#                         if mac_list__ and mac_list__[1]==0:
-#                             ports_to_free.append(mac_list__[0])
-                        
-
-                    #update resources core
-                    cmd = "UPDATE resources_core SET instance_id=Null, v_thread_id=Null, paired='N' WHERE instance_id = '%s'" % instance_id
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-
-                    #delete all related uuids
-                    cmd = "DELETE FROM uuids WHERE root_uuid='%s'" % instance_id
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-
-                    ##insert log
-                    #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('instances','debug','%s','delete instance %s')" % (instance_id, logcause)
-                    #self.logger.debug(cmd)
-                    #self.cur.execute(cmd)                    
-
-                    #delete instance
-                    cmd = "DELETE FROM instances WHERE uuid='%s' AND tenant_id='%s'" % (instance_id, tenant_id)
-                    self.cur.execute(cmd)
-                    return 1, "instance %s from tenant %s DELETED" % (instance_id, tenant_id)
-
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "delete_instance", cmd)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-
-    def get_ports(self, WHERE):
-        ''' Obtain ports using the WHERE filtering.
-        Attributes:
-            'where_': dict of key:values, translated to key=value AND ... (Optional)
-        Return: a list with dictionarys at each row
-        '''
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:
-
-                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
-                    select_ = "SELECT uuid,'ACTIVE' as status,admin_state_up,name,net_id,\
-                        tenant_id,type,mac,vlan,switch_port,instance_id,Mbps FROM ports "
-
-                    if WHERE is None or len(WHERE) == 0:  where_ = ""
-                    else:
-                        where_ = "WHERE " + " AND ".join(map( lambda x: str(x) + (" is Null" if WHERE[x] is None else "='"+str(WHERE[x])+"'"),  WHERE.keys()) ) 
-                    limit_ = "LIMIT 100"
-                    cmd =  " ".join( (select_, where_, limit_) )
-    #                print "SELECT multiple de instance_ifaces, iface_uuid, external_ports" #print cmd
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    ports = self.cur.fetchall()
-                    if self.cur.rowcount>0:  af.DeleteNone(ports)
-                    return self.cur.rowcount, ports
-    #                return self.get_table(FROM=from_, SELECT=select_,WHERE=where_,LIMIT=100)
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "get_ports", cmd)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-        
-    def check_target_net(self, net_id, tenant_id, port_type):
-        '''check if valid attachment of a port into a target net
-        Attributes:
-            net_id: target net uuid
-            tenant_id: client where tenant belongs. Not used in this version
-            port_type: string with the option 'instance:bridge', 'instance:data', 'external'
-        Return: 
-            (0,net_dict) if ok,   where net_dict contain 'uuid','type','vlan', ...
-            (negative,string-error) if error
-        '''
-        for retry_ in range(0,2):
-            cmd=""
-            try:
-                with self.con:
-                    self.cur = self.con.cursor(mdb.cursors.DictCursor)
-                    cmd = "SELECT * FROM nets WHERE uuid='%s'" % net_id
-                    self.logger.debug(cmd)
-                    self.cur.execute(cmd)
-                    if self.cur.rowcount == 0 : return -1, "network_id %s does not match any net" % net_id
-                    net = self.cur.fetchone()
-                    break
-
-            except (mdb.Error, AttributeError) as e:
-                r,c = self.format_error(e, "check_target_net", cmd)
-                if r!=-HTTP_Request_Timeout or retry_==1: return r,c
-        #check permissions
-        if tenant_id is not None and tenant_id is not "admin":
-            if net['tenant_id']==tenant_id and net['shared']=='false':
-                return -1, "needed admin privileges to attach to the net %s" % net_id
-        #check types
-        if (net['type'] in ('ptp','data') and port_type not in ('instance:data','external')) or \
-            (net['type'] in ('bridge_data','bridge_man') and port_type not in ('instance:bridge', 'instance:ovs')):
-            return -1, "Cannot attach a port of type %s into a net of type %s" % (port_type, net['type'])
-        if net['type'] == 'ptp':
-            #look how many 
-            nb_ports, data = self.get_ports( {'net_id':net_id} )
-            if nb_ports<0:
-                return -1, data
-            else:
-                if net['provider']:
-                    nb_ports +=1
-                if nb_ports >=2:
-                    return -1, "net of type p2p already contain two ports attached. No room for another"
-            
-        return 0, net
-
-if __name__ == "__main__":
-    print "Hello World"
diff --git a/vim_schema.py b/vim_schema.py
deleted file mode 100644 (file)
index c2dc1e2..0000000
+++ /dev/null
@@ -1,767 +0,0 @@
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openvim
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-
-''' Definition of dictionaries schemas used by validating input
-    These dictionaries are validated using jsonschema library
-'''
-__author__="Alfonso Tierno"
-__date__ ="$10-jul-2014 12:07:15$"
-
-#
-# SCHEMAS to validate input data
-#
-
-path_schema={"type":"string", "pattern":"^(\.){0,2}(/[^/\"':{}\(\)]+)+$"}
-http_schema={"type":"string", "pattern":"^https?://[^'\"=]+$"}
-port_schema={"type":"integer","minimum":1,"maximun":65534}
-ip_schema={"type":"string","pattern":"^([0-9]{1,3}.){3}[0-9]{1,3}$"}
-cidr_schema={"type":"string","pattern":"^([0-9]{1,3}.){3}[0-9]{1,3}/[0-9]{1,2}$"}
-name_schema={"type" : "string", "minLength":1, "maxLength":255, "pattern" : "^[^,;()'\"]+$"}
-nameshort_schema={"type" : "string", "minLength":1, "maxLength":64, "pattern" : "^[^,;()'\"]+$"}
-nametiny_schema={"type" : "string", "minLength":1, "maxLength":12, "pattern" : "^[^,;()'\"]+$"}
-xml_text_schema={"type" : "string", "minLength":1, "maxLength":1000, "pattern" : "^[^']+$"}
-description_schema={"type" : ["string","null"], "maxLength":255, "pattern" : "^[^'\"]+$"}
-id_schema_fake = {"type" : "string", "minLength":2, "maxLength":36 }  #"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
-id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
-pci_schema={"type":"string", "pattern":"^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\.[0-9a-fA-F]$"}
-bandwidth_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]bps)?$"}
-integer0_schema={"type":"integer","minimum":0}
-integer1_schema={"type":"integer","minimum":1}
-vlan_schema={"type":"integer","minimum":1,"maximun":4095}
-vlan1000_schema={"type":"integer","minimum":1000,"maximun":4095}
-mac_schema={"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}  #must be unicast LSB bit of MSB byte ==0 
-net_bind_schema={"oneOf":[{"type":"null"},{"type":"string", "pattern":"^(default|((bridge|macvtap):[0-9a-zA-Z\.\-]{1,50})|openflow:[/0-9a-zA-Z\.\-]{1,50}(:vlan)?)$"}]}
-yes_no_schema={"type":"string", "enum":["yes", "no"]}
-log_level_schema={"type":"string", "enum":["DEBUG", "INFO", "WARNING","ERROR","CRITICAL"]}
-
-config_schema = {
-    "title":"main configuration information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "http_port": port_schema,
-        "http_admin_port": port_schema,
-        "http_host": nameshort_schema,
-        "http_url_prefix": path_schema, # it does not work yet; it's supposed to be the base path to be used by bottle, but it must be explicitly declared
-        "db_host": nameshort_schema,
-        "db_user": nameshort_schema,
-        "db_passwd": {"type":"string"},
-        "db_name": nameshort_schema,
-        "of_controller_ip": ip_schema,
-        "of_controller_port": port_schema,
-        "of_controller_dpid": nameshort_schema,
-        "of_controller_nets_with_same_vlan": {"type" : "boolean"},
-        "of_controller": nameshort_schema, #{"type":"string", "enum":["floodlight", "opendaylight"]},
-        "of_controller_module": {"type":"string"},
-        "of_user": nameshort_schema,
-        "of_password": nameshort_schema,
-        "test_mode": {"type": "boolean"}, #leave for backward compatibility
-        "mode": {"type":"string", "enum":["normal", "host only", "OF only", "development", "test"] },
-        "development_bridge": {"type":"string"},
-        "tenant_id": {"type" : "string"},
-        "image_path": path_schema,
-        "network_vlan_range_start": vlan_schema,
-        "network_vlan_range_end": vlan_schema,
-        "bridge_ifaces": {
-            "type": "object",
-            "patternProperties": {
-                "." : {
-                    "type": "array", 
-                    "items": integer0_schema,
-                    "minItems":2,
-                    "maxItems":2,
-                },
-            },
-            "minProperties": 2
-        },
-        "dhcp_server": {
-            "type": "object",
-            "properties": {
-                "host" : name_schema,
-                "port" : port_schema,
-                "provider" : {"type": "string", "enum": ["isc-dhcp-server"]},
-                "user" : nameshort_schema,
-                "password" : {"type": "string"},
-                "key" : {"type": "string"},
-                "bridge_ifaces" :{
-                    "type": "array", 
-                    "items": nameshort_schema,
-                },
-                "nets" :{
-                    "type": "array", 
-                    "items": name_schema,
-                },
-            },
-            "required": ['host', 'provider', 'user']
-        },
-        "log_level": log_level_schema,
-        "log_level_db": log_level_schema,
-        "log_level_of": log_level_schema,
-        "network_type": {"type": "string", "enum": ["ovs", "bridge"]},
-        "ovs_controller_file_path": path_schema,
-        "ovs_controller_user": nameshort_schema,
-
-        "ovs_controller_ip": nameshort_schema
-    },
-    "patternProperties": {
-        "of_*" : {"type": ["string", "integer", "boolean"]}
-    },
-    "required": ['db_host', 'db_user', 'db_passwd', 'db_name'],
-    "additionalProperties": False
-}
-
-
-
-metadata_schema={
-    "type":"object",
-    "properties":{
-        "architecture": {"type":"string"},
-        "use_incremental": yes_no_schema,
-        "vpci": pci_schema,
-        "os_distro": {"type":"string"},
-        "os_type": {"type":"string"},
-        "os_version": {"type":"string"},
-        "bus": {"type":"string"},
-        "topology": {"type":"string", "enum": ["oneSocket"]}
-    }
-}
-
-tenant_new_schema = {
-    "title":"tenant creation information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "tenant":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-                "name": nameshort_schema,
-                "description":description_schema,
-                "enabled":{"type" : "boolean"}
-            },
-            "required": ["name"]
-        }
-    },
-    "required": ["tenant"],
-    "additionalProperties": False
-}
-
-tenant_edit_schema = {
-    "title":"tenant edition information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "tenant":{
-            "type":"object",
-            "minProperties":1,
-            "properties":{
-                "name":nameshort_schema,
-                "description":description_schema,
-                "enabled":{"type" : "boolean"}
-            },
-            "additionalProperties": False,
-        }
-    },
-    "required": ["tenant"],
-    "additionalProperties": False
-}
-interfaces_schema={
-    "type":"array",
-    "minItems":0,
-    "items":{
-        "type":"object",
-        "properties":{
-            "name":name_schema,
-            "dedicated":{"type":"string","enum":["yes","no","yes:sriov"]},
-            "bandwidth":bandwidth_schema,
-            "vpci":pci_schema,
-            "uuid":id_schema,
-            "mac_address":mac_schema
-        },
-        "additionalProperties": False,
-        "required": ["dedicated", "bandwidth"]
-    }
-}
-
-extended_schema={
-    "type":"object", 
-    "properties":{                  
-        "processor_ranking":integer0_schema,
-        "devices":{
-            "type": "array", 
-            "items":{
-                "type": "object",
-                "properties":{
-                    "type":{"type":"string", "enum":["usb","disk","cdrom","xml"]},
-                    "vpci":pci_schema,
-                    "imageRef":id_schema,
-                    "xml":xml_text_schema,
-                    "dev":nameshort_schema
-                },
-                "additionalProperties": False,
-                "required": ["type"]
-            }
-        },
-        "numas":{
-            "type": "array",
-            "items":{
-                "type": "object",
-                "properties":{
-                    "memory":integer1_schema,
-                    "cores":integer1_schema,
-                    "paired-threads":integer1_schema,
-                    "threads":integer1_schema,
-                    "cores-id":{"type":"array","items":integer0_schema},
-                    "paired-threads-id":{"type":"array","items":{"type":"array","minItems":2,"maxItems":2,"items":integer0_schema}},
-                    "threads-id":{"type":"array","items":integer0_schema},
-                    "interfaces":interfaces_schema
-                },
-                "additionalProperties": False,
-                "minProperties": 1,
-                #"required": ["memory"]
-            }
-        }
-    },
-    #"additionalProperties": False,
-    #"required": ["processor_ranking"]
-}
-
-host_data_schema={
-    "title":"hosts manual insertion information schema",
-    "type":"object", 
-    "properties":{                  
-        "ip_name":nameshort_schema,
-        "name": name_schema,
-        "description":description_schema,
-        "user":nameshort_schema,
-        "password":nameshort_schema,
-        "features":description_schema,
-        "ranking":integer0_schema,
-        "devices":{
-            "type": "array", 
-            "items":{
-                "type": "object",
-                "properties":{
-                    "type":{"type":"string", "enum":["usb","disk"]},
-                    "vpci":pci_schema
-                },
-                "additionalProperties": False,
-                "required": ["type"]
-            }
-        },
-        "numas":{
-            "type": "array",
-            "minItems":1,
-            "items":{
-                "type": "object",
-                "properties":{
-                    "admin_state_up":{"type":"boolean"},
-                    "hugepages":integer0_schema,
-                    "cores":{
-                        "type": "array",
-                        "minItems":2,
-                        "items":{
-                            "type": "object",
-                            "properties":{
-                                "core_id":integer0_schema,
-                                "thread_id":integer0_schema,
-                                "status": {"type":"string", "enum":["noteligible"]}
-                            },
-                            "additionalProperties": False,
-                            "required": ["core_id","thread_id"]
-                        }
-                    },
-                    "interfaces":{
-                        "type": "array",
-                        "minItems":1,
-                        "items":{
-                            "type": "object",
-                            "properties":{
-                                "source_name":nameshort_schema,
-                                "mac":mac_schema,
-                                "Mbps":integer0_schema,
-                                "pci":pci_schema,
-                                "sriovs":{
-                                    "type": "array",
-                                    "minItems":1,
-                                    "items":{
-                                        "type": "object",
-                                        "properties":{
-                                            "source_name":{"oneOf":[integer0_schema, nameshort_schema]},
-                                            "mac":mac_schema,
-                                            "vlan":integer0_schema, 
-                                            "pci":pci_schema,
-                                        },
-                                        "additionalProperties": False,
-                                        "required": ["source_name","mac","pci"]
-                                    }
-                                },
-                                "switch_port": nameshort_schema,
-                                "switch_dpid": nameshort_schema,
-                            },
-                            "additionalProperties": False,
-                            "required": ["source_name","mac","Mbps","pci"]
-                        }
-                    },
-                    "numa_socket":integer0_schema,
-                    "memory":integer1_schema
-                },
-                "additionalProperties": False,
-                "required": ["cores","numa_socket"]
-            }
-        }
-    },
-    "additionalProperties": False,
-    "required": ["ranking", "numas","ip_name","user"]
-}
-
-host_edit_schema={
-    "title":"hosts creation information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "host":{
-            "type":"object",
-            "properties":{
-                "ip_name":nameshort_schema,
-                "name": name_schema,
-                "description":description_schema,
-                "user":nameshort_schema,
-                "password":nameshort_schema,
-                "admin_state_up":{"type":"boolean"},
-                "numas":{
-                    "type":"array", 
-                    "items":{
-                        "type": "object",
-                        "properties":{
-                            "numa_socket": integer0_schema,
-                            "admin_state_up":{"type":"boolean"},
-                            "interfaces":{
-                                "type":"array", 
-                                "items":{
-                                    "type": "object",
-                                    "properties":{
-                                        "source_name": nameshort_schema,
-                                        "switch_dpid": nameshort_schema,
-                                        "switch_port": nameshort_schema,
-                                    },
-                                    "required": ["source_name"],
-                                }
-                            }
-                        }, 
-                        "required": ["numa_socket"],
-                        "additionalProperties": False,
-                    }
-                }
-            },
-            "minProperties": 1,
-            "additionalProperties": False
-        },
-    },
-    "required": ["host"],
-    "minProperties": 1,
-    "additionalProperties": False
-}
-
-host_new_schema = {
-    "title":"hosts creation information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "host":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-                "ip_name":nameshort_schema,
-                "name": name_schema,
-                "description":description_schema,
-                "user":nameshort_schema,
-                "password":nameshort_schema,
-                "admin_state_up":{"type":"boolean"},
-            },
-            "required": ["name","ip_name","user"]
-        },
-        "host-data":host_data_schema
-    },
-    "required": ["host"],
-    "minProperties": 1,
-    "maxProperties": 2,
-    "additionalProperties": False
-}
-
-
-flavor_new_schema = {
-    "title":"flavor creation information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "flavor":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-                "name":name_schema,
-                "description":description_schema,
-                "ram":integer0_schema,
-                "vcpus":integer0_schema,
-                "extended": extended_schema,
-                "public": yes_no_schema
-            },
-            "required": ["name"]
-        }
-    },
-    "required": ["flavor"],
-    "additionalProperties": False
-}
-flavor_update_schema = {
-    "title":"flavor update information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "flavor":{
-            "type":"object",
-            "properties":{
-                "name":name_schema,
-                "description":description_schema,
-                "ram":integer0_schema,
-                "vcpus":integer0_schema,
-                "extended": extended_schema,
-                "public": yes_no_schema
-            },
-            "minProperties": 1,
-            "additionalProperties": False
-        }
-    },
-    "required": ["flavor"],
-    "additionalProperties": False
-}
-
-image_new_schema = {
-    "title":"image creation information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "image":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-                "path": {"oneOf": [path_schema, http_schema]},
-                "description":description_schema,
-                "name":name_schema,
-                "metadata":metadata_schema,
-                "public": yes_no_schema
-            },
-            "required": ["name","path"]
-        }
-    },
-    "required": ["image"],
-    "additionalProperties": False
-}
-
-image_update_schema = {
-    "title":"image update information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "image":{
-            "type":"object",
-            "properties":{
-                "path":{"oneOf": [path_schema, http_schema]},
-                "description":description_schema,
-                "name":name_schema,
-                "metadata":metadata_schema,
-                "public": yes_no_schema
-            },
-            "minProperties": 1,
-            "additionalProperties": False
-        }
-    },
-    "required": ["image"],
-    "additionalProperties": False
-}
-
-networks_schema={
-    "type":"array",
-    "items":{
-        "type":"object",
-        "properties":{
-            "name":name_schema,
-            "bandwidth":bandwidth_schema,
-            "vpci":pci_schema,
-            "uuid":id_schema,
-            "mac_address": mac_schema,
-            "model": {"type":"string", "enum":["virtio","e1000","ne2k_pci","pcnet","rtl8139"]},
-            "type": {"type":"string", "enum":["virtual","PF","VF","VFnotShared"]}
-        },
-        "additionalProperties": False,
-        "required": ["uuid"]
-    }
-}
-
-server_new_schema = {
-    "title":"server creation information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "server":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-                "name":name_schema,
-                "description":description_schema,
-                "start":{"type":"string", "enum":["yes","no","paused"]},
-                "hostId":id_schema,
-                "flavorRef":id_schema,
-                "imageRef":id_schema,
-                "extended": extended_schema,
-                "networks":networks_schema
-            },
-            "required": ["name","flavorRef","imageRef"]
-        }
-    },
-    "required": ["server"],
-    "additionalProperties": False
-}
-
-server_action_schema = {
-    "title":"server action information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "start":{"oneOf":[{"type": "null"}, {"type":"string", "enum":["rebuild","null"] }]},
-        "pause":{"type": "null"},
-        "resume":{"type": "null"},
-        "shutoff":{"type": "null"},
-        "shutdown":{"type": "null"},
-        "forceOff":{"type": "null"},
-        "terminate":{"type": "null"},
-        "createImage":{
-            "type":"object",
-            "properties":{ 
-                "path":path_schema,
-                "description":description_schema,
-                "name":name_schema,
-                "metadata":metadata_schema,
-                "imageRef": id_schema,
-                "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
-            },
-            "required": ["name"]
-        },
-        "rebuild":{"type": ["object","null"]},
-        "reboot":{
-            "type": ["object","null"],
-#            "properties": {
-#                "type":{"type":"string", "enum":["SOFT"] }
-#            }, 
-#            "minProperties": 1,
-#            "maxProperties": 1,
-#            "additionalProperties": False
-        }
-    },
-    "minProperties": 1,
-    "maxProperties": 1,
-    "additionalProperties": False
-}
-
-network_new_schema = {
-    "title":"network creation information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "network":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-                "name":name_schema,
-                "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
-                "shared":{"type":"boolean"},
-                "tenant_id":id_schema,
-                "admin_state_up":{"type":"boolean"},
-                "provider:vlan":vlan_schema,
-                "provider:physical":net_bind_schema,
-                "cidr":cidr_schema,
-                "enable_dhcp": {"type":"boolean"},
-                "dhcp_first_ip": ip_schema,
-                "dhcp_last_ip": ip_schema,
-                "bind_net":name_schema, #can be name, or uuid
-                "bind_type":{"oneOf":[{"type":"null"},{"type":"string", "pattern":"^vlan:[0-9]{1,4}$"}]}
-            },
-            "required": ["name"]
-        }
-    },
-    "required": ["network"],
-    "additionalProperties": False
-}
-
-network_update_schema = {
-    "title":"network update information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "network":{
-            "type":"object",
-            "properties":{
-                "name":name_schema,
-                "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
-                "shared":{"type":"boolean"},
-                "tenant_id":id_schema,
-                "admin_state_up":{"type":"boolean"},
-                "provider:vlan":vlan_schema, 
-                "provider:physical":net_bind_schema,
-                "cidr":cidr_schema,
-                "enable_dhcp": {"type":"boolean"},
-                # "dhcp_first_ip": ip_schema,
-                # "dhcp_last_ip": ip_schema,
-                "bind_net":name_schema, #can be name, or uuid
-                "bind_type":{"oneOf":[{"type":"null"},{"type":"string", "pattern":"^vlan:[0-9]{1,4}$"}]}
-            },
-            "minProperties": 1,
-            "additionalProperties": False
-        }
-    },
-    "required": ["network"],
-    "additionalProperties": False
-}
-
-
-port_new_schema = {
-    "title":"port creation information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "port":{
-            "type":"object",
-            "properties":{
-                "id":id_schema,
-                "name":nameshort_schema,
-                "network_id":{"oneOf":[{"type": "null"}, id_schema ]},
-                "tenant_id":id_schema,
-                "mac_address": {"oneOf":[{"type": "null"}, mac_schema] },
-                "admin_state_up":{"type":"boolean"},
-                "bandwidth":bandwidth_schema,
-                "binding:switch_port":nameshort_schema,
-                "binding:vlan": {"oneOf":[{"type": "null"}, vlan_schema ]}
-            },
-            "required": ["name"]
-        }
-    },
-    "required": ["port"],
-    "additionalProperties": False
-}
-
-port_update_schema = {
-    "title":"port update information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "port":{
-            "type":"object",
-            "properties":{
-                "name":nameshort_schema,
-                "network_id":{"anyOf":[{"type":"null"}, id_schema ] }
-            },
-            "minProperties": 1,
-            "additionalProperties": False
-        }
-    },
-    "required": ["port"],
-    "additionalProperties": False
-}
-
-localinfo_schema = {
-    "title":"localinfo information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "files":{ "type": "object"},
-        "inc_files":{ "type": "object"},
-        "server_files":{ "type": "object"}
-    },
-    "required": ["files"]
-}
-
-hostinfo_schema = {
-    "title":"host information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
-        "iface_names":{
-            "type":"object",
-            "patternProperties":{
-                ".":{ "type": "string"}
-            },
-            "minProperties": 1
-        }
-    },
-    "required": ["iface_names"]
-}
-
-openflow_controller_schema = {
-    "title": "network creation information schema",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties": {
-        "ofc": {
-            "type": "object",
-            "properties": {
-                "name": name_schema,
-                "dpid": nameshort_schema,
-                "ip": nameshort_schema,
-                "port": port_schema,
-                "type": nameshort_schema,
-                "version": nametiny_schema,
-                "user": nameshort_schema,
-                "password": nameshort_schema
-            },
-            "required": ["dpid", "type", "ip", "port", "name"]
-        }
-    },
-    "required": ["ofc"],
-    "additionalProperties": False
-}
-
-of_port_new_schema = {
-    "title": "OF port mapping",
-    "type": "object",
-    "properties": {
-        "ofc_id": id_schema,
-        "region": nameshort_schema,
-        "compute_node": nameshort_schema,
-        "pci": pci_schema,
-        "switch_dpid": nameshort_schema,
-        "switch_port": nameshort_schema,
-        "switch_mac": mac_schema
-    },
-    "required": ["region", "compute_node",  "pci", "switch_dpid"]
-}
-
-of_port_map_new_schema = {
-    "title": "OF port mapping",
-    "$schema": "http://json-schema.org/draft-04/schema#",
-    "type": "object",
-    "properties": {
-        "of_port_mapings": {"type": "array", "items": of_port_new_schema, "minLenght":1},
-    },
-    "required": ["of_port_mapings"],
-    "additionalProperties": False
-
-}
\ No newline at end of file