From a62249e0c8447a500a699e17597e239d35b8c37b Mon Sep 17 00:00:00 2001 From: tierno Date: Mon, 17 Sep 2018 17:57:30 +0200 Subject: [PATCH] Making vim_db thread-safe with a Lock. PEP formatting. Removing non-ASCII characters Change-Id: I2b695c318efc1cdbb18173a5e0c617b4932d81e2 Signed-off-by: tierno --- openflow | 2 +- osm_openvim/ODL.py | 2 +- osm_openvim/RADclass.py | 2 +- osm_openvim/auxiliary_functions.py | 2 +- osm_openvim/definitionsClass.py | 2 +- osm_openvim/dhcp_thread.py | 17 +- osm_openvim/floodlight.py | 2 +- osm_openvim/host_thread.py | 53 +- osm_openvim/httpserver.py | 6 +- osm_openvim/openflow_conn.py | 2 +- osm_openvim/openflow_thread.py | 23 +- osm_openvim/ovim.py | 15 +- osm_openvim/vim_db.py | 1731 +++++++++++++++------- osm_openvim/vim_schema.py | 2 +- test/test_openvim.py | 2 +- 15 files changed, 983 insertions(+), 880 deletions(-) diff --git a/openflow b/openflow index 91ccf91..cac14de 100755 --- a/openflow +++ b/openflow @@ -3,7 +3,7 @@ # PYTHON_ARGCOMPLETE_OK ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openmano # All Rights Reserved. # diff --git a/osm_openvim/ODL.py b/osm_openvim/ODL.py index 588409e..e258c9d 100644 --- a/osm_openvim/ODL.py +++ b/osm_openvim/ODL.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # diff --git a/osm_openvim/RADclass.py b/osm_openvim/RADclass.py index a4c10ec..397bd65 100644 --- a/osm_openvim/RADclass.py +++ b/osm_openvim/RADclass.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # diff --git a/osm_openvim/auxiliary_functions.py b/osm_openvim/auxiliary_functions.py index 795d84a..140a8e8 100644 --- a/osm_openvim/auxiliary_functions.py +++ b/osm_openvim/auxiliary_functions.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # diff --git a/osm_openvim/definitionsClass.py b/osm_openvim/definitionsClass.py index 70168e8..c3cf100 100644 --- a/osm_openvim/definitionsClass.py +++ b/osm_openvim/definitionsClass.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # diff --git a/osm_openvim/dhcp_thread.py b/osm_openvim/dhcp_thread.py index 8cf50dd..5df1c6b 100644 --- a/osm_openvim/dhcp_thread.py +++ b/osm_openvim/dhcp_thread.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # @@ -40,19 +40,18 @@ import logging #TODO: insert a logging system class dhcp_thread(threading.Thread): - def __init__(self, dhcp_params, db, db_lock, test, dhcp_nets, logger_name=None, debug=None): + def __init__(self, dhcp_params, db, test, dhcp_nets, logger_name=None, debug=None): '''Init a thread. 
Arguments: thread_info must be a dictionary with: 'dhcp_params' dhcp server parameters with the following keys: mandatory : user, host, port, key, ifaces(interface name list of the one managed by the dhcp) optional: password, key, port(22) - 'db' 'db_lock': database class and lock for accessing it + 'db': database class threading safe 'test': in test mode no acces to a server is done, and ip is invented ''' threading.Thread.__init__(self) self.dhcp_params = dhcp_params self.db = db - self.db_lock = db_lock self.test = test self.dhcp_nets = dhcp_nets self.ssh_conn = None @@ -90,11 +89,9 @@ class dhcp_thread(threading.Thread): def load_mac_from_db(self): #TODO get macs to follow from the database self.logger.debug("load macs from db") - self.db_lock.acquire() r,c = self.db.get_table(SELECT=('mac','ip_address','nets.uuid as net_id', ), FROM='ports join nets on ports.net_id=nets.uuid', WHERE_NOT={'ports.instance_id': None, 'nets.provider': None}) - self.db_lock.release() now = time.time() self.mac_status ={} if r<0: @@ -175,10 +172,8 @@ class dhcp_thread(threading.Thread): if self.mac_status[mac_address].get("active") == None: #check from db if already active - self.db_lock.acquire() r,c = self.db.get_table(FROM="ports as p join instances as i on p.instance_id=i.uuid", WHERE={"p.mac": mac_address, "i.status": "ACTIVE"}) - self.db_lock.release() if r>0: self.mac_status[mac_address]["active"] = now self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2 @@ -189,9 +184,7 @@ class dhcp_thread(threading.Thread): if now - self.mac_status[mac_address]["created"] > 300: #modify Database to tell openmano that we can not get dhcp from the machine if not self.mac_status[mac_address].get("ip"): - self.db_lock.acquire() r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address}) - self.db_lock.release() self.mac_status[mac_address]["ip"] = "0.0.0.0" self.logger.debug("mac %s >> set to 0.0.0.0 because of timeout", mac_address) self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60 @@ -233,9 +226,7 @@ class dhcp_thread(threading.Thread): if content: self.mac_status[mac_address]["ip"] = content #modify Database - self.db_lock.acquire() r,c = self.db.update_rows("ports", {"ip_address": content}, {"mac": mac_address}) - self.db_lock.release() if r<0: self.logger.error("Database update error: " + c) else: @@ -251,9 +242,7 @@ class dhcp_thread(threading.Thread): if now - self.mac_status[mac_address]["active"] > 120: #modify Database to tell openmano that we can not get dhcp from the machine if not self.mac_status[mac_address].get("ip"): - self.db_lock.acquire() r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address}) - self.db_lock.release() self.mac_status[mac_address]["ip"] = "0.0.0.0" self.logger.debug("mac %s >> set to 0.0.0.0 because of timeout", mac_address) diff --git a/osm_openvim/floodlight.py b/osm_openvim/floodlight.py index 810572b..ee94f02 100644 --- a/osm_openvim/floodlight.py +++ b/osm_openvim/floodlight.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # diff --git a/osm_openvim/host_thread.py b/osm_openvim/host_thread.py index 34e8f07..ed6551f 100644 --- a/osm_openvim/host_thread.py +++ b/osm_openvim/host_thread.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. 
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # @@ -49,21 +49,20 @@ class RunCommandException(Exception): class host_thread(threading.Thread): lvirt_module = None - def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode, + def __init__(self, name, host, user, db, test, image_path, host_id, version, develop_mode, develop_bridge_iface, password=None, keyfile = None, logger_name=None, debug=None, hypervisors=None): """Init a thread to communicate with compute node or ovs_controller. :param host_id: host identity :param name: name of the thread :param host: host ip or name to manage and user :param user, password, keyfile: user and credentials to connect to host - :param db, db_lock': database class and lock to use it in exclusion + :param db: database class, threading safe """ threading.Thread.__init__(self) self.name = name self.host = host self.user = user self.db = db - self.db_lock = db_lock self.test = test self.password = password self.keyfile = keyfile @@ -322,9 +321,7 @@ class host_thread(threading.Thread): self.logger.error("save_localinfo Exception: " + text) def load_servers_from_db(self): - self.db_lock.acquire() r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id}) - self.db_lock.release() self.server_status = {} if r<0: @@ -722,9 +719,7 @@ class host_thread(threading.Thread): bridge_interfaces = server.get('networks', []) for v in bridge_interfaces: #Get the brifge name - self.db_lock.acquire() result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} ) - self.db_lock.release() if result <= 0: self.logger.error("create_xml_server ERROR %d getting nets %s", result, content) return -1, content @@ -917,12 +912,10 @@ class host_thread(threading.Thread): :param net_uuid: network id :return: True if is not free """ - self.db_lock.acquire() result, content = self.db.get_table( FROM='ports', WHERE={'type': 'instance:ovs', 'net_id': net_uuid} ) - self.db_lock.release() if len(content) > 0: return False @@ -937,12 +930,10 @@ class host_thread(threading.Thread): :return: True if is not free """ - self.db_lock.acquire() result, content = self.db.get_table( FROM='ports as p join instances as i on p.instance_id=i.uuid', WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid} ) - self.db_lock.release() if len(content) > 0: return False @@ -1825,9 +1816,7 @@ class host_thread(threading.Thread): #self.server_status[server_id] = 'ACTIVE' return 0, 'Success' - self.db_lock.acquire() result, server_data = self.db.get_instance(server_id) - self.db_lock.release() if result <= 0: self.logger.error("launch_server ERROR getting server from DB %d %s", result, server_data) return result, server_data @@ -1871,10 +1860,8 @@ class host_thread(threading.Thread): continue else: - self.db_lock.acquire() result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'), WHERE={'uuid': image_id}) - self.db_lock.release() if result <= 0: error_text = "ERROR", result, content, "when getting image", dev['image_id'] self.logger.error("launch_server " + error_text) @@ -1998,9 +1985,7 @@ class host_thread(threading.Thread): STATUS={'progress':100, 'status':new_status} if new_status == 'ERROR': STATUS['last_error'] = 'machine has crashed' - self.db_lock.acquire() r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False) - self.db_lock.release() if 
r>=0: self.server_status[server_id] = new_status @@ -2206,22 +2191,18 @@ class host_thread(threading.Thread): elif 'terminate' in req['action']: #PUT a log in the database self.logger.error("PANIC deleting server id='%s' %s", server_id, last_error) - self.db_lock.acquire() - self.db.new_row('logs', + self.db.new_row('logs', {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic', 'description':'PANIC deleting server from host '+self.name+': '+last_error} ) - self.db_lock.release() if server_id in self.server_status: del self.server_status[server_id] return -1 else: UPDATE['last_error'] = last_error if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') : - self.db_lock.acquire() self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True) self.server_status[server_id] = new_status - self.db_lock.release() if new_status == 'ERROR': return -1 return 1 @@ -2308,21 +2289,17 @@ class host_thread(threading.Thread): self.logger.error("create_image id='%s' Exception: %s", server_id, error_text) #TODO insert a last_error at database - self.db_lock.acquire() - self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst}, + self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst}, {'uuid':req['new_image']['uuid']}, log=True) - self.db_lock.release() - + def edit_iface(self, port_id, old_net, new_net): #This action imply remove and insert interface to put proper parameters if self.test: time.sleep(1) else: #get iface details - self.db_lock.acquire() r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id', WHERE={'port_id': port_id}) - self.db_lock.release() if r<0: self.logger.error("edit_iface %s DDBB error: %s", port_id, c) return @@ -2367,7 +2344,7 @@ class host_thread(threading.Thread): if conn is not None: conn.close() -def create_server(server, db, db_lock, only_of_ports): +def create_server(server, db, only_of_ports): extended = server.get('extended', None) requirements={} requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]} @@ -2454,10 +2431,8 @@ def create_server(server, db, db_lock, only_of_ports): if 'hypervisor' in server: requirements['hypervisor'] = server['hypervisor'] #Unikernels extension - db_lock.acquire() result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports) - db_lock.release() - + if result == -1: return (-1, content) @@ -2468,11 +2443,9 @@ def create_server(server, db, db_lock, only_of_ports): cpu_pinning = [] reserved_threads=[] if requirements['numa']['proc_req_nb']>0: - db_lock.acquire() - result, content = db.get_table(FROM='resources_core', + result, content = db.get_table(FROM='resources_core', SELECT=('id','core_id','thread_id'), WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} ) - db_lock.release() if result <= 0: #print content return -1, content @@ -2556,9 +2529,7 @@ def create_server(server, db, db_lock, only_of_ports): #Get the source pci addresses for the selected numa used_sriov_ports = [] for port in requirements['numa']['sriov_list']: - db_lock.acquire() result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} ) - db_lock.release() if result <= 0: #print content return -1, content @@ -2580,9 +2551,7 @@ def create_server(server, db, db_lock, only_of_ports): port['mac_address'] = port['mac'] del 
port['mac'] continue - db_lock.acquire() result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} ) - db_lock.release() if result <= 0: #print content return -1, content @@ -2621,13 +2590,11 @@ def create_server(server, db, db_lock, only_of_ports): for control_iface in server.get('networks', []): control_iface['net_id']=control_iface.pop('uuid') #Get the brifge name - db_lock.acquire() result, content = db.get_table(FROM='nets', SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp','dhcp_first_ip', 'dhcp_last_ip', 'cidr', 'gateway_ip', 'dns', 'links', 'routes'), WHERE={'uuid': control_iface['net_id']}) - db_lock.release() - if result < 0: + if result < 0: pass elif result==0: return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface['net_id'] diff --git a/osm_openvim/httpserver.py b/osm_openvim/httpserver.py index 3de9409..0e5bf39 100644 --- a/osm_openvim/httpserver.py +++ b/osm_openvim/httpserver.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # @@ -653,7 +653,7 @@ def http_post_hosts(): thread = ht.host_thread(name=host.get('name',ip_name), user=user, host=ip_name, password=host.get('password'), keyfile=host.get('keyfile', config_dic["host_ssh_keyfile"]), - db=config_dic['db'], db_lock=config_dic['db_lock'], + db=config_dic['db'], test=host_test_mode, image_path=config_dic['host_image_path'], version=config_dic['version'], host_id=content['uuid'], develop_mode=host_develop_mode, develop_bridge_iface=host_develop_bridge_iface, @@ -1622,7 +1622,7 @@ def http_post_server_id(tenant_id): return #print json.dumps(server, indent=4) - result, content = ht.create_server(server, config_dic['db'], config_dic['db_lock'], config_dic['mode']=='normal') + result, content = ht.create_server(server, config_dic['db'], config_dic['mode']=='normal') if result >= 0: #Insert instance to database diff --git a/osm_openvim/openflow_conn.py b/osm_openvim/openflow_conn.py index f42f4dc..e16145b 100644 --- a/osm_openvim/openflow_conn.py +++ b/osm_openvim/openflow_conn.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openmano # All Rights Reserved. # diff --git a/osm_openvim/openflow_thread.py b/osm_openvim/openflow_thread.py index e41fb6b..a26b48e 100644 --- a/osm_openvim/openflow_thread.py +++ b/osm_openvim/openflow_thread.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. 
# @@ -93,14 +93,13 @@ class openflow_thread(threading.Thread): """ This thread interacts with a openflow controller to create dataplane connections """ - def __init__(self, of_uuid, of_connector, db, db_lock, of_test, pmp_with_same_vlan=False, logger_name=None, + def __init__(self, of_uuid, of_connector, db, of_test, pmp_with_same_vlan=False, logger_name=None, debug=None): threading.Thread.__init__(self) self.of_uuid = of_uuid self.db = db self.pmp_with_same_vlan = pmp_with_same_vlan self.test = of_test - self.db_lock = db_lock self.OF_connector = of_connector if logger_name: self.logger_name = logger_name @@ -155,9 +154,7 @@ class openflow_thread(threading.Thread): UPDATE={'status':'ACTIVE', 'last_error': None} self.logger.debug("processing task 'update-net' %s: OK", str(task[1])) self.set_openflow_controller_status(OFC_STATUS_ACTIVE) - self.db_lock.acquire() self.db.update_rows('nets', UPDATE, WHERE={'uuid': task[1]}) - self.db_lock.release() elif task[0] == 'clear-all': r,c = self.clear_all_flows() @@ -186,7 +183,6 @@ class openflow_thread(threading.Thread): def update_of_flows(self, net_id): ports=() - self.db_lock.acquire() select_= ('type','admin_state_up', 'vlan', 'provider', 'bind_net','bind_type','uuid') result, nets = self.db.get_table(FROM='nets', SELECT=select_, WHERE={'uuid':net_id} ) #get all the networks binding to this @@ -199,7 +195,6 @@ class openflow_thread(threading.Thread): result, nets = self.db.get_table(FROM='nets', SELECT=select_, WHERE_OR={'bind_net':bind_id, 'uuid':bind_id} ) - self.db_lock.release() if result < 0: return -1, "DB error getting net: " + nets #elif result==0: @@ -211,12 +206,10 @@ class openflow_thread(threading.Thread): if net['admin_state_up'] == 'false': net['ports'] = () else: - self.db_lock.acquire() nb_ports, net_ports = self.db.get_table( FROM='ports', SELECT=('switch_port','vlan','uuid','mac','type','model'), WHERE={'net_id':net_id, 'admin_state_up':'true', 'status':'ACTIVE'} ) - self.db_lock.release() if nb_ports < 0: #print self.name, ": update_of_flows() ERROR getting ports", ports @@ -238,18 +231,14 @@ class openflow_thread(threading.Thread): ifaces_nb += nb_ports # Get the name of flows that will be affected by this NET - self.db_lock.acquire() result, database_net_flows = self.db.get_table(FROM='of_flows', WHERE={'net_id':net_id}) - self.db_lock.release() if result < 0: error_msg = "DB error getting flows from net '{}': {}".format(net_id, database_net_flows) # print self.name, ": update_of_flows() ERROR getting flows from database", database_flows return -1, error_msg database_flows += database_net_flows # Get the name of flows where net_id==NULL that means net deleted (At DB foreign key: On delete set null) - self.db_lock.acquire() result, database_net_flows = self.db.get_table(FROM='of_flows', WHERE={'net_id':None}) - self.db_lock.release() if result < 0: error_msg = "DB error getting flows from net 'null': {}".format(database_net_flows) # print self.name, ": update_of_flows() ERROR getting flows from database", database_flows @@ -355,9 +344,7 @@ class openflow_thread(threading.Thread): except FlowBadFormat as e: # print self.name, ": Error Exception FlowBadFormat '%s'" % str(e), flow return -1, str(e) - self.db_lock.acquire() result, content = self.db.new_row('of_flows', flow) - self.db_lock.release() if result < 0: # print self.name, ": Error '%s' at database insertion" % content, flow return -1, content @@ -384,9 +371,7 @@ class openflow_thread(threading.Thread): continue # delete from database - self.db_lock.acquire() result, 
content = self.db.delete_row_by_key('of_flows', 'id', flow['id']) - self.db_lock.release() if result<0: self.logger.error("cannot delete flow '%s' from DB: %s", flow['name'], content ) @@ -398,9 +383,7 @@ class openflow_thread(threading.Thread): self.OF_connector.clear_all_flows() # remove from database - self.db_lock.acquire() self.db.delete_row_by_key('of_flows', None, None) #this will delete all lines - self.db_lock.release() return 0, None except openflow_conn.OpenflowconnException as e: return -1, self.logger.error("Error deleting all flows {}", str(e)) @@ -595,9 +578,7 @@ class openflow_thread(threading.Thread): ofc = {} ofc['status'] = status ofc['last_error'] = self._format_error_msg(error_text, 255) - self.db_lock.acquire() result, content = self.db.update_rows('ofcs', ofc, WHERE={'uuid': self.of_uuid}, log=False) - self.db_lock.release() if result >= 0: return True else: diff --git a/osm_openvim/ovim.py b/osm_openvim/ovim.py index e3581b7..612e4df 100755 --- a/osm_openvim/ovim.py +++ b/osm_openvim/ovim.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # @@ -43,8 +43,8 @@ import openflow_conn __author__ = "Alfonso Tierno, Leonardo Mirabal" __date__ = "$06-Feb-2017 12:07:15$" -__version__ = "0.5.28-r548" -version_date = "Jul 2018" +__version__ = "0.5.29-r549" +version_date = "Sep 2018" database_version = 23 #needed database schema version HTTP_Bad_Request = 400 @@ -189,7 +189,6 @@ class ovim(): self.get_version(), self.get_version_date(), self.get_database_version())) # create database connection for openflow threads self.config["db"] = self._create_database_connection() - self.config["db_lock"] = threading.Lock() self.of_test_mode = False if self.config['mode'] == 'normal' or self.config['mode'] == "OF only" else True @@ -210,7 +209,7 @@ class ovim(): thread = ht.host_thread(name=host['name'], user=host['user'], host=host['ip_name'], db=self.config["db"], password=host['password'], keyfile=host.get('keyfile', self.config["host_ssh_keyfile"]), - db_lock=self.config["db_lock"], test=host_test_mode, + test=host_test_mode, image_path=self.config['host_image_path'], version=self.config['version'], host_id=host['uuid'], develop_mode=host_develop_mode, @@ -268,7 +267,7 @@ class ovim(): dhcp_params = self.config.get("dhcp_server") if dhcp_params: thread = dt.dhcp_thread(dhcp_params=dhcp_params, test=host_test_mode, dhcp_nets=self.config["dhcp_nets"], - db=self.config["db"], db_lock=self.config["db_lock"], + db=self.config["db"], logger_name=self.logger_name + ".dhcp", debug=self.config.get('log_level_of')) thread.start() @@ -325,7 +324,6 @@ class ovim(): """ Start ofc task for existing ofcs in database :param db_of: - :param db_lock: :return: """ ofcs = self.get_of_controllers() @@ -453,7 +451,6 @@ class ovim(): ofc_net_same_vlan = False thread = oft.openflow_thread(ofc_uuid, of_conn, of_test=self.of_test_mode, db=self.config["db"], - db_lock=self.config["db_lock"], pmp_with_same_vlan=ofc_net_same_vlan, logger_name=self.logger_name + ".ofc." 
+ ofc_uuid, debug=self.config.get('log_level_of')) @@ -1436,7 +1433,7 @@ class ovim(): dhcp_host = ht.host_thread(name='openvim_controller', user=ovs_controller_user, host=controller_ip, password=self.config.get('ovs_controller_password'), keyfile=self.config.get('ovs_controller_keyfile'), - db=self.config["db"], db_lock=self.config["db_lock"], test=host_test_mode, + db=self.config["db"], test=host_test_mode, image_path=self.config['host_image_path'], version=self.config['version'], host_id='openvim_controller', develop_mode=host_develop_mode, develop_bridge_iface=bridge_ifaces, diff --git a/osm_openvim/vim_db.py b/osm_openvim/vim_db.py index abeb0fd..a2f1f1f 100644 --- a/osm_openvim/vim_db.py +++ b/osm_openvim/vim_db.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # @@ -22,15 +22,11 @@ # contact with: nfvlabs@tid.es ## -''' +""" This module interact with the openvim database, -It implements general table management -and complex writings 'transactional' sures, -that is, or all is changed or nothing -''' - -__author__="Alfonso Tierno" -__date__ ="$10-jul-2014 12:07:15$" +It implements general table management and transactional writes, that is, or all is changed or nothing. +It is threading safe using a Lock +""" import MySQLdb as mdb import uuid as myUuid @@ -38,92 +34,127 @@ import auxiliary_functions as af import json import logging from netaddr import IPNetwork, IPAddress +from threading import Lock + +__author__ = "Alfonso Tierno" +__date__ = "$10-jul-2014 12:07:15$" HTTP_Bad_Request = 400 -HTTP_Unauthorized = 401 -HTTP_Not_Found = 404 -HTTP_Method_Not_Allowed = 405 +HTTP_Unauthorized = 401 +HTTP_Not_Found = 404 +HTTP_Method_Not_Allowed = 405 HTTP_Request_Timeout = 408 HTTP_Conflict = 409 -HTTP_Service_Unavailable = 503 -HTTP_Internal_Server_Error = 500 +HTTP_Service_Unavailable = 503 +HTTP_Internal_Server_Error = 500 class vim_db(): - def __init__(self, vlan_range, logger_name= None, debug=None): - '''vlan_range must be a tuple (vlan_ini, vlan_end) with available vlan values for networks + def __init__(self, vlan_range, logger_name=None, debug=None, lock=None): + """vlan_range must be a tuple (vlan_ini, vlan_end) with available vlan values for networks every dataplane network contain a unique value, regardless of it is used or not - ''' - #initialization + """ + # initialization self.net_vlan_range = vlan_range self.vlan_config = {} - self.debug=debug + self.host = None + self.user = None + self.passwd = None + self.database = None + self.con = None + self.cur = None + self.debug = debug + self.lock = lock or Lock() if logger_name: self.logger_name = logger_name else: self.logger_name = 'openvim.db' self.logger = logging.getLogger(self.logger_name) if debug: - self.logger.setLevel( getattr(logging, debug) ) - + self.logger.setLevel(getattr(logging, debug)) def connect(self, host=None, user=None, passwd=None, database=None): - '''Connect to the concrete data base. + """Connect to the concrete data base. 
The first time a valid host, user, passwd and database must be provided, Following calls can skip this parameters - ''' + """ try: - if host is not None: self.host = host - if user is not None: self.user = user - if passwd is not None: self.passwd = passwd - if database is not None: self.database = database - - self.con = mdb.connect(self.host, self.user, self.passwd, self.database) - self.logger.debug("connected to DB %s at %s@%s", self.database,self.user, self.host) - return 0 + with self.lock, self.con: + if host: + self.host = host + self.con = None + if user: + self.user = user + self.con = None + if passwd: + self.passwd = passwd + self.con = None + if database: + self.database = database + self.con = None + if self.con: + # try to connect + try: + with self.con: + self.cur = self.con.cursor() + cmd = "SELECT version_int,version,openvim_ver FROM schema_version" + self.logger.debug("method connect: " + cmd) + self.cur.execute(cmd) + return 0 + except Exception: + pass + self.con = mdb.connect(self.host, self.user, self.passwd, self.database) + self.logger.debug("connected to DB %s at %s@%s", self.database, self.user, self.host) + return 0 except mdb.Error as e: - self.logger.error("Cannot connect to DB %s at %s@%s Error %d: %s", self.database, self.user, self.host, e.args[0], e.args[1]) + self.logger.error("Cannot connect to DB %s at %s@%s Error %d: %s", self.database, self.user, self.host, + e.args[0], e.args[1]) return -1 def get_db_version(self): - ''' Obtain the database schema version. + """ Obtain the database schema version. Return: (negative, text) if error or version 0.0 where schema_version table is missing (version_int, version_text) if ok - ''' + """ cmd = "SELECT version_int,version,openvim_ver FROM schema_version" - for retry_ in range(0,2): + for retry_ in range(0, 2): try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor() self.logger.debug(cmd) self.cur.execute(cmd) rows = self.cur.fetchall() - highest_version_int=0 - highest_version="" - #print rows - for row in rows: #look for the latest version - if row[0]>highest_version_int: - highest_version_int, highest_version = row[0:2] - return highest_version_int, highest_version + highest_version_int = 0 + highest_version = "" + # print rows + for row in rows: # look for the latest version + if row[0] > highest_version_int: + highest_version_int, highest_version = row[0:2] + return highest_version_int, highest_version except (mdb.Error, AttributeError) as e: - self.logger.error("get_db_version DB Exception %d: %s. Command %s",e.args[0], e.args[1], cmd) - r,c = self.format_error(e) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c - + self.logger.error("get_db_version DB Exception %d: %s. Command %s", e.args[0], e.args[1], cmd) + r, c = self.format_error(e) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c + def disconnect(self): - '''disconnect from the data base''' + """disconnect from the data base""" try: - self.con.close() - del self.con + with self.lock: + if not self.con: + self.con.close() + self.con = None except mdb.Error as e: - self.logger.error("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1]) + self.logger.error("while disconnecting from DB: Error: %s", str(e)) return -1 - except AttributeError as e: #self.con not defined - if e[0][-5:] == "'con'": return -1, "Database internal error, no connection." - else: raise - + except AttributeError as e: # self.con not defined + if e[0][-5:] == "'con'": + return -1, "Database internal error, no connection." 
+ else: + raise + def format_error(self, e, func, cmd, command=None, extra=None): - '''Creates a text error base on the produced exception + """Creates a text error base on the produced exception Params: e: mdb exception func: name of the function that makes the call, for logging purposes @@ -132,57 +163,61 @@ class vim_db(): extra: extra information to add to some commands Return HTTP error in negative, formatted error text - ''' - - self.logger.error("%s DB Exception %s. Command %s",func, str(e), cmd) + """ + + self.logger.error("%s DB Exception %s. Command %s", func, str(e), cmd) if type(e[0]) is str: if e[0][-5:] == "'con'": return -HTTP_Internal_Server_Error, "DB Exception, no connection." else: return -HTTP_Internal_Server_Error, e.args[1] - if e.args[0]==2006 or e.args[0]==2013 : #MySQL server has gone away (((or))) Exception 2013: Lost connection to MySQL server during query - #reconnect + if e.args[0] == 2006 or e.args[0] == 2013: + # MySQL server has gone away (((or))) Exception 2013: Lost connection to MySQL server during query + # reconnect self.connect() - return -HTTP_Request_Timeout,"Database reconnection. Try Again" - fk=e.args[1].find("foreign key constraint fails") - if fk>=0: - if command=="update": return -HTTP_Bad_Request, "tenant_id %s not found." % extra - elif command=="delete": return -HTTP_Bad_Request, "Resource is not free. There are %s that prevent its deletion." % extra + return -HTTP_Request_Timeout, "Database reconnection. Try Again" + fk = e.args[1].find("foreign key constraint fails") + if fk >= 0: + if command == "update": + return -HTTP_Bad_Request, "tenant_id %s not found." % extra + elif command == "delete": + return -HTTP_Bad_Request, "Resource is not free. There are %s that prevent its deletion." % extra de = e.args[1].find("Duplicate entry") fk = e.args[1].find("for key") uk = e.args[1].find("Unknown column") wc = e.args[1].find("in 'where clause'") fl = e.args[1].find("in 'field list'") - #print de, fk, uk, wc,fl - if de>=0: - if fk>=0: #error 1062 - return -HTTP_Conflict, "Value %s already in use for %s" % (e.args[1][de+15:fk], e.args[1][fk+7:]) - if uk>=0: - if wc>=0: - return -HTTP_Bad_Request, "Field %s cannot be used for filtering" % e.args[1][uk+14:wc] - if fl>=0: - return -HTTP_Bad_Request, "Field %s does not exist" % e.args[1][uk+14:wc] + # print de, fk, uk, wc,fl + if de >= 0: + if fk >= 0: # error 1062 + return -HTTP_Conflict, "Value %s already in use for %s" % (e.args[1][de + 15:fk], e.args[1][fk + 7:]) + if uk >= 0: + if wc >= 0: + return -HTTP_Bad_Request, "Field %s cannot be used for filtering" % e.args[1][uk + 14:wc] + if fl >= 0: + return -HTTP_Bad_Request, "Field %s does not exist" % e.args[1][uk + 14:wc] return -HTTP_Internal_Server_Error, "Database internal Error %d: %s" % (e.args[0], e.args[1]) - def __data2db_format(self, data): - '''convert data to database format. If data is None it return the 'Null' text, - otherwise it return the text surrounded by quotes ensuring internal quotes are escaped''' - if data==None: + @staticmethod + def __data2db_format(data): + """convert data to database format. 
If data is None it return the 'Null' text, + otherwise it return the text surrounded by quotes ensuring internal quotes are escaped""" + if data is None: return 'Null' - out=str(data) + out = str(data) if "'" not in out: return "'" + out + "'" elif '"' not in out: return '"' + out + '"' else: return json.dumps(out) - + def __get_used_net_vlan(self, region=None): - #get used from database if needed + # get used from database if needed vlan_region = self.vlan_config[region] try: cmd = "SELECT vlan FROM nets WHERE vlan>='{}' and region{} ORDER BY vlan LIMIT 25".format( - vlan_region["lastused"], "='"+region+"'" if region else " is NULL") + vlan_region["lastused"], "='" + region + "'" if region else " is NULL") with self.con: self.cur = self.con.cursor() self.logger.debug(cmd) @@ -194,205 +229,218 @@ class vim_db(): vlan_region["usedlist"].append(k[0]) except (mdb.Error, AttributeError) as e: return self.format_error(e, "get_free_net_vlan", cmd) - + def get_free_net_vlan(self, region=None): - '''obtain a vlan not used in any net''' - if region not in self.vlan_config: - self.vlan_config[region] = { - "usedlist": None, - "lastused": self.net_vlan_range[0] - 1 - } - vlan_region = self.vlan_config[region] + """obtain a vlan not used in any net""" + with self.lock: + if region not in self.vlan_config: + self.vlan_config[region] = { + "usedlist": None, + "lastused": self.net_vlan_range[0] - 1 + } + vlan_region = self.vlan_config[region] + + while True: + self.logger.debug("get_free_net_vlan() region[%s]=%s, net_vlan_range:%s-%s", str(region), + str(vlan_region), str(self.net_vlan_range[0]), str(self.net_vlan_range[1])) + vlan_region["lastused"] += 1 + if vlan_region["lastused"] == self.net_vlan_range[1]: + # start from the begining + vlan_region["lastused"] = self.net_vlan_range[0] + vlan_region["usedlist"] = None + if vlan_region["usedlist"] is None or \ + (len(vlan_region["usedlist"]) == 25 and vlan_region["lastused"] >= vlan_region["usedlist"][-1]): + self.__get_used_net_vlan(region) + self.logger.debug("new net_vlan_usedlist %s", str(vlan_region["usedlist"])) + if vlan_region["lastused"] in vlan_region["usedlist"]: + continue + else: + return vlan_region["lastused"] - while True: - self.logger.debug("get_free_net_vlan() region[{}]={}, net_vlan_range:{}-{} ".format(region, vlan_region, - self.net_vlan_range[0], self.net_vlan_range[1])) - vlan_region["lastused"] += 1 - if vlan_region["lastused"] == self.net_vlan_range[1]: - # start from the begining - vlan_region["lastused"] = self.net_vlan_range[0] - vlan_region["usedlist"] = None - if vlan_region["usedlist"] is None or \ - (len(vlan_region["usedlist"])==25 and vlan_region["lastused"] >= vlan_region["usedlist"][-1]): - self.__get_used_net_vlan(region) - self.logger.debug("new net_vlan_usedlist %s", str(vlan_region["usedlist"])) - if vlan_region["lastused"] in vlan_region["usedlist"]: - continue - else: - return vlan_region["lastused"] - def get_table(self, **sql_dict): - ''' Obtain rows from a table. + """ Obtain rows from a table. Atribure sql_dir: dictionary with the following key: value 'SELECT': [list of fields to retrieve] (by default all) 'FROM': string of table name (Mandatory) 'WHERE': dict of key:values, translated to key=value AND ... (Optional) 'WHERE_NOT': dict of key:values, translated to key!=value AND ... (Optional) 'WHERE_OR': dict of key:values, translated to key=value OR ... 
(Optional) - 'WHERE_AND_OR: str 'AND' or 'OR'(by default) mark the priority to 'WHERE AND (WHERE_OR)' or (WHERE) OR WHERE_OR' (Optional) + 'WHERE_AND_OR: str 'AND' or 'OR'(by default) mark the priority to 'WHERE AND (WHERE_OR)' or (WHERE) OR + WHERE_OR' (Optional) 'LIMIT': limit of number of rows (Optional) 'DISTINCT': make a select distinct to remove repeated elements Return: a list with dictionarys at each row - ''' - #print sql_dict + """ + # print sql_dict select_ = "SELECT " if sql_dict.get("DISTINCT"): select_ += "DISTINCT " - select_ += ("*" if not sql_dict.get('SELECT') else ",".join(map(str,sql_dict['SELECT'])) ) - #print 'select_', select_ - from_ = "FROM " + str(sql_dict['FROM']) - #print 'from_', from_ - + select_ += ("*" if not sql_dict.get('SELECT') else ",".join(map(str, sql_dict['SELECT']))) + # print 'select_', select_ + from_ = "FROM " + str(sql_dict['FROM']) + # print 'from_', from_ + where_and = None where_or = None w = sql_dict.get('WHERE') if w: - where_and = " AND ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"), w.keys()) ) - w = sql_dict.get('WHERE_LIKE') #Unikernels extension -START- + where_and = " AND ".join(map(lambda x: str(x) + (" is Null" if w[x] is None else "='" + str(w[x]) + "'"), + w.keys())) + w = sql_dict.get('WHERE_LIKE') # Unikernels extension -START- if w: - where_and_like = " AND ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else " LIKE '"+str(w[x])+"'"), w.keys()) ) + where_and_like = " AND ".join( + map(lambda x: str(x) + (" is Null" if w[x] is None else " LIKE '" + str(w[x]) + "'"), w.keys())) if where_and: where_and += " AND " + where_and_like else: - where_and = where_and_like #Unikernels extension -END- + where_and = where_and_like # Unikernels extension -END- w = sql_dict.get('WHERE_NOT') if w: - where_and_not = " AND ".join(map( lambda x: str(x) + (" is not Null" if w[x] is None else "!='"+str(w[x])+"'"), w.keys()) ) + where_and_not = " AND ".join( + map(lambda x: str(x) + (" is not Null" if w[x] is None else "!='" + str(w[x]) + "'"), w.keys())) if where_and: where_and += " AND " + where_and_not else: where_and = where_and_not w = sql_dict.get('WHERE_OR') if w: - where_or = " OR ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"), w.keys()) ) - - if where_and!=None and where_or!=None: + where_or = " OR ".join(map(lambda x: str(x) + (" is Null" if w[x] is None else "='" + str(w[x]) + "'"), + w.keys())) + + if where_and and where_or: if sql_dict.get("WHERE_AND_OR") == "AND": where_ = "WHERE " + where_and + " AND (" + where_or + ")" else: where_ = "WHERE (" + where_and + ") OR " + where_or - elif where_and!=None and where_or==None: + elif where_and and where_or is None: where_ = "WHERE " + where_and - elif where_and==None and where_or!=None: + elif where_and is None and where_or: where_ = "WHERE " + where_or else: where_ = "" - #print 'where_', where_ + # print 'where_', where_ limit_ = "LIMIT " + str(sql_dict['LIMIT']) if sql_dict.get("LIMIT") else "" - #print 'limit_', limit_ - cmd = " ".join( (select_, from_, where_, limit_) ) - for retry_ in range(0,2): + # print 'limit_', limit_ + cmd = " ".join((select_, from_, where_, limit_)) + for retry_ in range(0, 2): try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor(mdb.cursors.DictCursor) self.logger.debug(cmd) self.cur.execute(cmd) rows = self.cur.fetchall() return self.cur.rowcount, rows except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "get_table", cmd) - if 
r!=-HTTP_Request_Timeout or retry_==1: return r,c - + r, c = self.format_error(e, "get_table", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c + def new_tenant(self, tenant_dict): - ''' Add one row into a table. + """ Add one row into a table. Attribure tenant_dict: dictionary with the key: value to insert It checks presence of uuid and add one automatically otherwise Return: (result, uuid) where result can be 0 if error, or 1 if ok - ''' - for retry_ in range(0,2): - cmd="" - inserted=-1 + """ + for retry_ in range(0, 2): + cmd = "" + inserted = -1 try: - #create uuid if not provided + # create uuid if not provided if 'uuid' not in tenant_dict: - uuid = tenant_dict['uuid'] = str(myUuid.uuid1()) # create_uuid - else: + uuid = tenant_dict['uuid'] = str(myUuid.uuid1()) # create_uuid + else: uuid = str(tenant_dict['uuid']) - #obtain tenant_id for logs + # obtain tenant_id for logs tenant_id = uuid - with self.con: + with self.lock, self.con: self.cur = self.con.cursor() - #inserting new uuid + # inserting new uuid cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','tenants')" % uuid self.logger.debug(cmd) self.cur.execute(cmd) - #insert tenant - cmd= "INSERT INTO tenants (" + \ - ",".join(map(str, tenant_dict.keys() )) + ") VALUES(" + \ - ",".join(map(lambda x: "Null" if x is None else "'"+str(x)+"'",tenant_dict.values() )) + ")" + # insert tenant + cmd = "INSERT INTO tenants (" + \ + ",".join(map(str, tenant_dict.keys())) + ") VALUES(" + \ + ",".join( + map(lambda x: "Null" if x is None else "'" + str(x) + "'", tenant_dict.values())) + ")" self.logger.debug(cmd) self.cur.execute(cmd) inserted = self.cur.rowcount - ##inserting new log - #del tenant_dict['uuid'] # not interested for the log - #cmd = "INSERT INTO logs (related,level,tenant_id,uuid,description) VALUES ('tenants','debug','%s','%s',\"new tenant %s\")" % (uuid, tenant_id, str(tenant_dict)) - #self.logger.debug(cmd) - #self.cur.execute(cmd) - #commit transaction + # #inserting new log + # del tenant_dict['uuid'] # not interested for the log + # cmd = "INSERT INTO logs (related,level,tenant_id,uuid,description) VALUES + # ('tenants','debug','%s','%s',\"new tenant %s\")" % (uuid, tenant_id, str(tenant_dict)) + # self.logger.debug(cmd) + # self.cur.execute(cmd) + # commit transaction self.cur.close() - if inserted == 0: return 0, uuid - with self.con: + if inserted == 0: + return 0, uuid + with self.lock, self.con: self.cur = self.con.cursor() - #adding public flavors - cmd = "INSERT INTO tenants_flavors(flavor_id,tenant_id) SELECT uuid as flavor_id,'"+ tenant_id + "' FROM flavors WHERE public = 'yes'" + # adding public flavors + cmd = "INSERT INTO tenants_flavors(flavor_id,tenant_id) SELECT uuid as flavor_id,'" + tenant_id + \ + "' FROM flavors WHERE public = 'yes'" self.logger.debug(cmd) - self.cur.execute(cmd) + self.cur.execute(cmd) self.logger.debug("attached public flavors: %s", str(self.cur.rowcount)) - #rows = self.cur.fetchall() - #for row in rows: - # cmd = "INSERT INTO tenants_flavors(flavor_id,tenant_id) VALUES('%s','%s')" % (row[0], tenant_id) + # rows = self.cur.fetchall() + # for row in rows: + # cmd = "INSERT INTO tenants_flavors(flavor_id,tenant_id) VALUES('%s','%s')"%(row[0], tenant_id) # self.cur.execute(cmd ) - #adding public images - cmd = "INSERT INTO tenants_images(image_id,tenant_id) SELECT uuid as image_id,'"+ tenant_id + "' FROM images WHERE public = 'yes'" + # adding public images + cmd = "INSERT INTO tenants_images(image_id,tenant_id) SELECT uuid as image_id,'" + tenant_id + \ + "' FROM 
images WHERE public = 'yes'" self.logger.debug(cmd) - self.cur.execute(cmd) + self.cur.execute(cmd) self.logger.debug("attached public images: %s", str(self.cur.rowcount)) return 1, uuid except (mdb.Error, AttributeError) as e: - if inserted==1: - self.logger.warning("new_tenant DB Exception %d: %s. Command %s",e.args[0], e.args[1], cmd) + if inserted == 1: + self.logger.warning("new_tenant DB Exception %d: %s. Command %s", e.args[0], e.args[1], cmd) return 1, uuid - else: - r,c = self.format_error(e, "new_tenant", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c + else: + r, c = self.format_error(e, "new_tenant", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c def new_row(self, table, INSERT, add_uuid=False, log=False): - ''' Add one row into a table. + """ Add one row into a table. Atribure INSERT: dictionary with the key: value to insert table: table where to insert add_uuid: if True, it will crated an uuid key entry at INSERT if not provided It checks presence of uuid and add one automatically otherwise Return: (result, uuid) where result can be 0 if error, or 1 if ok - ''' - for retry_ in range(0,2): - cmd="" + """ + for retry_ in range(0, 2): + cmd = "" try: if add_uuid: - #create uuid if not provided + # create uuid if not provided if 'uuid' not in INSERT: - uuid = INSERT['uuid'] = str(myUuid.uuid1()) # create_uuid - else: + uuid = INSERT['uuid'] = str(myUuid.uuid1()) # create_uuid + else: uuid = str(INSERT['uuid']) else: - uuid=None - with self.con: + uuid = None + with self.lock, self.con: self.cur = self.con.cursor() if add_uuid: - #inserting new uuid + # inserting new uuid cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','%s')" % (uuid, table) self.logger.debug(cmd) self.cur.execute(cmd) - #insertion - cmd= "INSERT INTO " + table +" (" + \ - ",".join(map(str, INSERT.keys() )) + ") VALUES(" + \ - ",".join(map(lambda x: 'Null' if x is None else "'"+str(x)+"'", INSERT.values() )) + ")" + # insertion + cmd = "INSERT INTO " + table + " (" + \ + ",".join(map(str, INSERT.keys())) + ") VALUES(" + \ + ",".join(map(lambda x: 'Null' if x is None else "'" + str(x) + "'", INSERT.values())) + ")" self.logger.debug(cmd) self.cur.execute(cmd) nb_rows = self.cur.rowcount - #inserting new log - #if nb_rows > 0 and log: + # inserting new log + # if nb_rows > 0 and log: # if add_uuid: del INSERT['uuid'] # #obtain tenant_id for logs # if 'tenant_id' in INSERT: @@ -406,115 +454,131 @@ class vim_db(): # else: uuid_k=",uuid"; uuid_v=",'" + str(uuid) + "'" # if tenant_id is None: tenant_k = tenant_v = "" # else: tenant_k=",tenant_id"; tenant_v=",'" + str(tenant_id) + "'" - # cmd = "INSERT INTO logs (related,level%s%s,description) VALUES ('%s','debug'%s%s,\"new %s %s\")" \ + # cmd = "INSERT INTO logs (related,level%s%s,description) VALUES + # ('%s','debug'%s%s,\"new %s %s\")" \ # % (uuid_k, tenant_k, table, uuid_v, tenant_v, table[:-1], str(INSERT)) # self.logger.debug(cmd) # self.cur.execute(cmd) return nb_rows, uuid except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "new_row", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c - - def __remove_quotes(self, data): - '''remove single quotes ' of any string content of data dictionary''' - for k,v in data.items(): + r, c = self.format_error(e, "new_row", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c + + @staticmethod + def __remove_quotes(data): + """remove single quotes ' of any string content of data dictionary""" + for k, v in data.items(): if type(v) == str: - if "'" in 
v: - data[k] = data[k].replace("'","_") - - def _update_rows_internal(self, table, UPDATE, WHERE={}): - cmd= "UPDATE " + table +" SET " + \ - ",".join(map(lambda x: str(x)+'='+ self.__data2db_format(UPDATE[x]), UPDATE.keys() )); + if "'" in v: + data[k] = data[k].replace("'", "_") + + def _update_rows_internal(self, table, UPDATE, WHERE=None): + cmd = "UPDATE " + table + " SET " + \ + ",".join(map(lambda x: str(x) + '=' + self.__data2db_format(UPDATE[x]), UPDATE.keys())) if WHERE: - cmd += " WHERE " + " and ".join(map(lambda x: str(x)+ (' is Null' if WHERE[x] is None else"='"+str(WHERE[x])+"'" ), WHERE.keys() )) + cmd += " WHERE " + " and ".join( + map(lambda x: str(x) + (' is Null' if WHERE[x] is None else "='" + str(WHERE[x]) + "'"), WHERE.keys())) self.logger.debug(cmd) - self.cur.execute(cmd) + self.cur.execute(cmd) nb_rows = self.cur.rowcount return nb_rows, None - def update_rows(self, table, UPDATE, WHERE={}, log=False): - ''' Update one or several rows into a table. + def update_rows(self, table, UPDATE, WHERE=None, log=False): + """ Update one or several rows into a table. Atributes UPDATE: dictionary with the key-new_value pairs to change table: table to be modified WHERE: dictionary to filter target rows, key-value log: if true, a log entry is added at logs table Return: (result, None) where result indicates the number of updated files - ''' - for retry_ in range(0,2): - cmd="" + """ + for retry_ in range(0, 2): + cmd = "" try: - #gettting uuid - uuid = WHERE.get('uuid') + # gettting uuid + uuid = None + if WHERE: + uuid = WHERE.get('uuid') - with self.con: + with self.lock, self.con: self.cur = self.con.cursor() - cmd= "UPDATE " + table +" SET " + \ - ",".join(map(lambda x: str(x)+'='+ self.__data2db_format(UPDATE[x]), UPDATE.keys() )); + cmd = "UPDATE " + table + " SET " + \ + ",".join(map(lambda x: str(x) + '=' + self.__data2db_format(UPDATE[x]), UPDATE.keys())) if WHERE: - cmd += " WHERE " + " and ".join(map(lambda x: str(x)+ (' is Null' if WHERE[x] is None else"='"+str(WHERE[x])+"'" ), WHERE.keys() )) + cmd += " WHERE " + " and ".join( + map(lambda x: str(x) + (' is Null' if WHERE[x] is None else "='" + str(WHERE[x]) + "'"), + WHERE.keys())) self.logger.debug(cmd) - self.cur.execute(cmd) + self.cur.execute(cmd) nb_rows = self.cur.rowcount - #if nb_rows > 0 and log: + # if nb_rows > 0 and log: # #inserting new log # if uuid is None: uuid_k = uuid_v = "" # else: uuid_k=",uuid"; uuid_v=",'" + str(uuid) + "'" - # cmd = "INSERT INTO logs (related,level%s,description) VALUES ('%s','debug'%s,\"updating %d entry %s\")" \ + # cmd = "INSERT INTO logs (related,level%s,description) + # VALUES ('%s','debug'%s,\"updating %d entry %s\")" \ # % (uuid_k, table, uuid_v, nb_rows, (str(UPDATE)).replace('"','-') ) # self.logger.debug(cmd) # self.cur.execute(cmd) return nb_rows, uuid except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "update_rows", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c - + r, c = self.format_error(e, "update_rows", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c + def get_host(self, host_id): if af.check_valid_uuid(host_id): - where_filter="uuid='" + host_id + "'" + where_filter = "uuid='" + host_id + "'" else: - where_filter="name='" + host_id + "'" - for retry_ in range(0,2): - cmd="" + where_filter = "name='" + host_id + "'" + for retry_ in range(0, 2): + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor(mdb.cursors.DictCursor) - #get HOST - cmd = "SELECT uuid, user, password, 
keyfile, name, ip_name, description, hypervisors, ranking, admin_state_up, "\ - "DATE_FORMAT(created_at,'%Y-%m-%dT%H:%i:%s') as created_at "\ - "FROM hosts WHERE " + where_filter #Unikernels extension - self.logger.debug(cmd) + # get HOST + cmd = "SELECT uuid, user, password, keyfile, name, ip_name, description, hypervisors, " \ + "ranking, admin_state_up, DATE_FORMAT(created_at,'%Y-%m-%dT%H:%i:%s') as created_at " \ + "FROM hosts WHERE " + where_filter # Unikernels extension + self.logger.debug(cmd) self.cur.execute(cmd) if self.cur.rowcount == 0: - return 0, "host '" + str(host_id) +"'not found." - elif self.cur.rowcount > 1 : - return 0, "host '" + str(host_id) +"' matches more than one result." + return 0, "host '" + str(host_id) + "'not found." + elif self.cur.rowcount > 1: + return 0, "host '" + str(host_id) + "' matches more than one result." host = self.cur.fetchone() host_id = host['uuid'] if host.get("password"): host["password"] = "*****" - #get numa - cmd = "SELECT id, numa_socket, hugepages, memory, admin_state_up FROM numas WHERE host_id = '" + str(host_id) + "'" + # get numa + cmd = "SELECT id, numa_socket, hugepages, memory, admin_state_up FROM numas " \ + "WHERE host_id = '" + str(host_id) + "'" self.logger.debug(cmd) self.cur.execute(cmd) host['numas'] = self.cur.fetchall() for numa in host['numas']: - #print "SELECT core_id, instance_id, status, thread_id, v_thread_id FROM resources_core WHERE numa_id = '" + str(numa['id']) + "'" - #get cores - cmd = "SELECT core_id, instance_id, status, thread_id, v_thread_id FROM resources_core WHERE numa_id = '" + str(numa['id']) + "'" + # print "SELECT core_id, instance_id, status, thread_id, v_thread_id FROM resources_core + # WHERE numa_id = '" + str(numa['id']) + "'" + # get cores + cmd = "SELECT core_id, instance_id, status, thread_id, v_thread_id FROM resources_core " \ + "WHERE numa_id = '" + str(numa['id']) + "'" self.logger.debug(cmd) self.cur.execute(cmd) numa['cores'] = self.cur.fetchall() - for core in numa['cores']: - if core['instance_id'] == None: del core['instance_id'], core['v_thread_id'] - if core['status'] == 'ok': del core['status'] - #get used memory - cmd = "SELECT sum(consumed) as hugepages_consumed FROM resources_mem WHERE numa_id = '" + str(numa['id']) + "' GROUP BY numa_id" + for core in numa['cores']: + if core['instance_id']is None: + del core['instance_id'], core['v_thread_id'] + if core['status'] == 'ok': + del core['status'] + # get used memory + cmd = "SELECT sum(consumed) as hugepages_consumed FROM resources_mem " \ + "WHERE numa_id = '" + str(numa['id']) + "' GROUP BY numa_id" self.logger.debug(cmd) self.cur.execute(cmd) used = self.cur.fetchone() - used_= int(used['hugepages_consumed']) if used != None else 0 + used_ = int(used['hugepages_consumed']) if used is not None else 0 numa['hugepages_consumed'] = used_ # get ports # cmd = "CALL GetPortsFromNuma(%s)'" % str(numa['id']) @@ -523,14 +587,14 @@ class vim_db(): # under Error 2014: Commands out of sync; you can't run this command now # self.cur.close() # self.cur = self.con.cursor(mdb.cursors.DictCursor) - cmd = "SELECT Mbps, pci, status, Mbps_used, instance_id, if(id=root_id,'PF','VF') as type_, "\ - "switch_port, switch_dpid, switch_mac, mac, source_name "\ + cmd = "SELECT Mbps, pci, status, Mbps_used, instance_id, if(id=root_id,'PF','VF') as type_, " \ + "switch_port, switch_dpid, switch_mac, mac, source_name " \ "FROM resources_port WHERE numa_id={} ORDER BY root_id, type_ DESC".format(numa['id']) self.logger.debug(cmd) self.cur.execute(cmd) 
ifaces = self.cur.fetchall() # The SQL query will ensure to have SRIOV interfaces from a port first - sriovs=[] + sriovs = [] Mpbs_consumed = 0 numa['interfaces'] = [] for iface in ifaces: @@ -540,7 +604,7 @@ class vim_db(): del iface['status'] Mpbs_consumed += int(iface["Mbps_used"]) del iface["Mbps_used"] - if iface["type_"]=='PF': + if iface["type_"] == 'PF': if not iface["switch_dpid"]: del iface["switch_dpid"] if not iface["switch_port"]: @@ -553,9 +617,9 @@ class vim_db(): iface["Mpbs_consumed"] = Mpbs_consumed del iface["type_"] numa['interfaces'].append(iface) - sriovs=[] + sriovs = [] Mpbs_consumed = 0 - else: #VF, SRIOV + else: # VF, SRIOV del iface["switch_port"] del iface["switch_dpid"] del iface["switch_mac"] @@ -563,27 +627,28 @@ class vim_db(): del iface["Mbps"] sriovs.append(iface) - #delete internal field + # delete internal field del numa['id'] return 1, host except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "get_host", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c - + r, c = self.format_error(e, "get_host", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c + def new_uuid(self): - max_retries=10 - while max_retries>0: - uuid = str( myUuid.uuid1() ) + max_retries = 10 + while max_retries > 0: + uuid = str(myUuid.uuid1()) if self.check_uuid(uuid)[0] == 0: return uuid - max_retries-=1 + max_retries -= 1 return uuid def check_uuid(self, uuid): - '''check in the database if this uuid is already present''' + """check in the database if this uuid is already present""" try: cmd = "SELECT * FROM uuids where uuid='" + str(uuid) + "'" - with self.con: + with self.lock, self.con: self.cur = self.con.cursor(mdb.cursors.DictCursor) self.logger.debug(cmd) self.cur.execute(cmd) @@ -591,122 +656,131 @@ class vim_db(): return self.cur.rowcount, rows except (mdb.Error, AttributeError) as e: return self.format_error(e, "check_uuid", cmd) - + def __get_next_ids(self): - '''get next auto increment index of all table in the database''' - self.cur.execute("SELECT table_name,AUTO_INCREMENT FROM information_schema.tables WHERE AUTO_INCREMENT IS NOT NULL AND table_schema = DATABASE()") + """get next auto increment index of all table in the database""" + self.cur.execute("SELECT table_name,AUTO_INCREMENT FROM information_schema.tables " + "WHERE AUTO_INCREMENT IS NOT NULL AND table_schema = DATABASE()") rows = self.cur.fetchall() return self.cur.rowcount, dict(rows) - + def edit_host(self, host_id, host_dict): - #get next port index - for retry_ in range(0,2): - cmd="" + # get next port index + for retry_ in range(0, 2): + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor() - #update table host - numa_list = host_dict.pop('numas', () ) + # update table host + numa_list = host_dict.pop('numas', ()) if host_dict: self._update_rows_internal("hosts", host_dict, {"uuid": host_id}) - - where = {"host_id": host_id} + + where = {"host_id": host_id} for numa_dict in numa_list: where["numa_socket"] = str(numa_dict.pop('numa_socket')) - interface_list = numa_dict.pop('interfaces', () ) + interface_list = numa_dict.pop('interfaces', ()) if numa_dict: self._update_rows_internal("numas", numa_dict, where) for interface in interface_list: - source_name = str(interface.pop("source_name") ) + source_name = str(interface.pop("source_name")) if interface: - #get interface id from resources_port - cmd= "SELECT rp.id as id FROM resources_port as rp join numas as n on n.id=rp.numa_id join hosts as h on h.uuid=n.host_id " +\ - "WHERE 
host_id='%s' and rp.source_name='%s'" %(host_id, source_name) + # get interface id from resources_port + cmd = "SELECT rp.id as id " \ + "FROM resources_port as rp join numas as n on n.id=rp.numa_id join " \ + "hosts as h on h.uuid=n.host_id " + \ + "WHERE host_id='{}' and rp.source_name='{}'".format(host_id, source_name) self.logger.debug(cmd) self.cur.execute(cmd) row = self.cur.fetchone() - if self.cur.rowcount<=0: - return -HTTP_Bad_Request, "Interface source_name='%s' from numa_socket='%s' not found" % (source_name, str(where["numa_socket"])) + if self.cur.rowcount <= 0: + return -HTTP_Bad_Request, "Interface source_name='{}s' from numa_socket='{}' " \ + "not found".format(source_name, where["numa_socket"]) interface_id = row[0] self._update_rows_internal("resources_port", interface, {"root_id": interface_id}) return self.get_host(host_id) except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "edit_host", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c + r, c = self.format_error(e, "edit_host", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c def new_host(self, host_dict): - #get next port index - for retry_ in range(0,2): - cmd="" + # get next port index + for retry_ in range(0, 2): + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor() result, next_ids = self.__get_next_ids() - #print "next_ids: " + str(next_ids) - if result <= 0: return result, "Internal DataBase error getting next id of tables" + # print "next_ids: " + str(next_ids) + if result <= 0: + return result, "Internal DataBase error getting next id of tables" - #create uuid if not provided + # create uuid if not provided if 'uuid' not in host_dict: - uuid = host_dict['uuid'] = str(myUuid.uuid1()) # create_uuid - else: #check uuid is valid + uuid = host_dict['uuid'] = str(myUuid.uuid1()) # create_uuid + else: # check uuid is valid uuid = str(host_dict['uuid']) # result, data = self.check_uuid(uuid) # if (result == 1): # return -1, "UUID '%s' already in use" % uuid - #inserting new uuid + # inserting new uuid cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','hosts')" % uuid self.logger.debug(cmd) result = self.cur.execute(cmd) - #insert in table host + # insert in table host numa_list = host_dict.pop('numas', []) - #get nonhupages and nonisolated cpus - host_dict['RAM']=0 - host_dict['cpus']=0 + # get nonhupages and nonisolated cpus + host_dict['RAM'] = 0 + host_dict['cpus'] = 0 for numa in numa_list: - mem_numa = numa.get('memory', 0) - numa.get('hugepages',0) - if mem_numa>0: - host_dict['RAM'] += mem_numa + mem_numa = numa.get('memory', 0) - numa.get('hugepages', 0) + if mem_numa > 0: + host_dict['RAM'] += mem_numa for core in numa.get("cores", []): - if "status" in core and core["status"]=="noteligible": - host_dict['cpus']+=1 - host_dict['RAM']*=1024 # from GB to MB - - keys = ",".join(host_dict.keys()) - values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", host_dict.values() ) ) + if "status" in core and core["status"] == "noteligible": + host_dict['cpus'] += 1 + host_dict['RAM'] *= 1024 # from GB to MB + + keys = ",".join(host_dict.keys()) + values = ",".join(map(lambda x: "Null" if x is None else "'" + str(x) + "'", host_dict.values())) cmd = "INSERT INTO hosts (" + keys + ") VALUES (" + values + ")" self.logger.debug(cmd) result = self.cur.execute(cmd) - #if result != 1: return -1, "Database Error while inserting at hosts table" + # if result != 1: return -1, "Database Error while inserting at hosts table" - 
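# Illustrative sketch, not part of the patch: new_host() derives the plain
# (non-hugepage) RAM and the non-isolated cpu count from the numa description,
# adding (memory - hugepages) per numa only when positive, counting cores whose
# status is "noteligible", and converting GB to MB at the end. Worked example
# with invented numbers:
example_numa_list = [
    {"memory": 64, "hugepages": 48, "cores": [{"status": "noteligible"}, {"status": "ok"}]},
    {"memory": 64, "hugepages": 60, "cores": [{"status": "noteligible"}]},
]
example_ram_gb = sum(max(n.get("memory", 0) - n.get("hugepages", 0), 0) for n in example_numa_list)
example_cpus = sum(1 for n in example_numa_list
                   for c in n.get("cores", []) if c.get("status") == "noteligible")
example_ram_mb = example_ram_gb * 1024  # (16 + 4) GB -> 20480 MB; example_cpus == 2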
#insert numas + # insert numas nb_numas = nb_cores = nb_ifaces = 0 for numa_dict in numa_list: nb_numas += 1 interface_list = numa_dict.pop('interfaces', []) core_list = numa_dict.pop('cores', []) - numa_dict['id'] = next_ids['numas']; next_ids['numas'] += 1 + numa_dict['id'] = next_ids['numas'] + next_ids['numas'] += 1 numa_dict['host_id'] = uuid - keys = ",".join(numa_dict.keys()) - values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", numa_dict.values() ) ) + keys = ",".join(numa_dict.keys()) + values = ",".join( + map(lambda x: "Null" if x is None else "'" + str(x) + "'", numa_dict.values())) cmd = "INSERT INTO numas (" + keys + ") VALUES (" + values + ")" self.logger.debug(cmd) - result = self.cur.execute(cmd) + self.cur.execute(cmd) - #insert cores + # insert cores for core_dict in core_list: nb_cores += 1 core_dict['numa_id'] = numa_dict['id'] - keys = ",".join(core_dict.keys()) - values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", core_dict.values() ) ) + keys = ",".join(core_dict.keys()) + values = ",".join(map(lambda x: "Null" if x is None else "'" + str(x) + "'", + core_dict.values())) cmd = "INSERT INTO resources_core (" + keys + ") VALUES (" + values + ")" self.logger.debug(cmd) - result = self.cur.execute(cmd) + self.cur.execute(cmd) - #insert ports + # insert ports for port_dict in interface_list: nb_ifaces += 1 sriov_list = port_dict.pop('sriovs', []) @@ -715,13 +789,14 @@ class vim_db(): next_ids['resources_port'] += 1 switch_port = port_dict.get('switch_port', None) switch_dpid = port_dict.get('switch_dpid', None) - keys = ",".join(port_dict.keys()) - values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", port_dict.values() ) ) + keys = ",".join(port_dict.keys()) + values = ",".join(map(lambda x: "Null" if x is None else "'" + str(x) + "'", + port_dict.values())) cmd = "INSERT INTO resources_port (" + keys + ") VALUES (" + values + ")" self.logger.debug(cmd) - result = self.cur.execute(cmd) + self.cur.execute(cmd) - #insert sriovs into port table + # insert sriovs into port table for sriov_dict in sriov_list: sriov_dict['switch_port'] = switch_port sriov_dict['switch_dpid'] = switch_dpid @@ -732,142 +807,150 @@ class vim_db(): if "vlan" in sriov_dict: del sriov_dict["vlan"] next_ids['resources_port'] += 1 - keys = ",".join(sriov_dict.keys()) - values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", sriov_dict.values() ) ) + keys = ",".join(sriov_dict.keys()) + values = ",".join(map(lambda x: "Null" if x is None else "'" + str(x) + "'", + sriov_dict.values())) cmd = "INSERT INTO resources_port (" + keys + ") VALUES (" + values + ")" self.logger.debug(cmd) - result = self.cur.execute(cmd) + self.cur.execute(cmd) - #inserting new log - #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('hosts','debug','%s','new host: %d numas, %d theads, %d ifaces')" % (uuid, nb_numas, nb_cores, nb_ifaces) - #self.logger.debug(cmd) - #result = self.cur.execute(cmd) + # inserting new log + # cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('hosts','debug','%s','new host: + # %d numas, %d theads, %d ifaces')" % (uuid, nb_numas, nb_cores, nb_ifaces) + # self.logger.debug(cmd) + # result = self.cur.execute(cmd) - #inseted ok - with self.con: + # inseted ok + with self.lock, self.con: self.cur = self.con.cursor() self.logger.debug("callproc('UpdateSwitchPort', () )") - self.cur.callproc('UpdateSwitchPort', () ) + self.cur.callproc('UpdateSwitchPort', ()) - self.logger.debug("getting host 
'%s'",str(host_dict['uuid'])) + self.logger.debug("getting host '%s'", str(host_dict['uuid'])) return self.get_host(host_dict['uuid']) except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "new_host", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c + r, c = self.format_error(e, "new_host", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c - def new_flavor(self, flavor_dict, tenant_id ): - '''Add new flavor into the database. Create uuid if not provided + def new_flavor(self, flavor_dict, tenant_id): + """Add new flavor into the database. Create uuid if not provided Atributes flavor_dict: flavor dictionary with the key: value to insert. Must be valid flavors columns tenant_id: if not 'any', it matches this flavor/tenant inserting at tenants_flavors table Return: (result, data) where result can be negative: error at inserting. data contain text 1, inserted, data contain inserted uuid flavor - ''' - for retry_ in range(0,2): - cmd="" + """ + for retry_ in range(0, 2): + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor() - #create uuid if not provided + # create uuid if not provided if 'uuid' not in flavor_dict: - uuid = flavor_dict['uuid'] = str(myUuid.uuid1()) # create_uuid - else: #check uuid is valid + uuid = flavor_dict['uuid'] = str(myUuid.uuid1()) # create_uuid + else: # check uuid is valid uuid = str(flavor_dict['uuid']) # result, data = self.check_uuid(uuid) # if (result == 1): # return -1, "UUID '%s' already in use" % uuid - #inserting new uuid + # inserting new uuid cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','flavors')" % uuid self.logger.debug(cmd) self.cur.execute(cmd) - #insert in table flavor - keys = ",".join(flavor_dict.keys()) - values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", flavor_dict.values() ) ) + # insert in table flavor + keys = ",".join(flavor_dict.keys()) + values = ",".join(map(lambda x: "Null" if x is None else "'" + str(x) + "'", flavor_dict.values())) cmd = "INSERT INTO flavors (" + keys + ") VALUES (" + values + ")" self.logger.debug(cmd) self.cur.execute(cmd) - #if result != 1: return -1, "Database Error while inserting at flavors table" + # if result != 1: return -1, "Database Error while inserting at flavors table" - #insert tenants_flavors + # insert tenants_flavors if tenant_id != 'any': cmd = "INSERT INTO tenants_flavors (tenant_id,flavor_id) VALUES ('%s','%s')" % (tenant_id, uuid) self.logger.debug(cmd) self.cur.execute(cmd) - #inserting new log - #del flavor_dict['uuid'] - #if 'extended' in flavor_dict: del flavor_dict['extended'] #remove two many information - #cmd = "INSERT INTO logs (related,level,uuid, tenant_id, description) VALUES ('flavors','debug','%s','%s',\"new flavor: %s\")" \ + # inserting new log + # del flavor_dict['uuid'] + # if 'extended' in flavor_dict: del flavor_dict['extended'] #remove two many information + # cmd = "INSERT INTO logs (related,level,uuid, tenant_id, description) VALUES + # ('flavors','debug','%s','%s',\"new flavor: %s\")" \ # % (uuid, tenant_id, str(flavor_dict)) - #self.logger.debug(cmd) - #self.cur.execute(cmd) + # self.logger.debug(cmd) + # self.cur.execute(cmd) - #inseted ok + # inseted ok return 1, uuid except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "new_flavor", cmd, "update", tenant_id) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c - + r, c = self.format_error(e, "new_flavor", cmd, "update", tenant_id) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c 
+ def new_image(self, image_dict, tenant_id): - '''Add new image into the database. Create uuid if not provided + """Add new image into the database. Create uuid if not provided Atributes image_dict: image dictionary with the key: value to insert. Must be valid images columns tenant_id: if not 'any', it matches this image/tenant inserting at tenants_images table Return: (result, data) where result can be negative: error at inserting. data contain text 1, inserted, data contain inserted uuid image - ''' - for retry_ in range(0,2): - cmd="" + """ + for retry_ in range(0, 2): + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor() - #create uuid if not provided + # create uuid if not provided if 'uuid' not in image_dict: - uuid = image_dict['uuid'] = str(myUuid.uuid1()) # create_uuid - else: #check uuid is valid + uuid = image_dict['uuid'] = str(myUuid.uuid1()) # create_uuid + else: # check uuid is valid uuid = str(image_dict['uuid']) # result, data = self.check_uuid(uuid) # if (result == 1): # return -1, "UUID '%s' already in use" % uuid - #inserting new uuid + # inserting new uuid cmd = "INSERT INTO uuids (uuid, used_at) VALUES ('%s','images')" % uuid self.logger.debug(cmd) self.cur.execute(cmd) - #insert in table image - keys = ",".join(image_dict.keys()) - values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", image_dict.values() ) ) + # insert in table image + keys = ",".join(image_dict.keys()) + values = ",".join(map(lambda x: "Null" if x is None else "'" + str(x) + "'", image_dict.values())) cmd = "INSERT INTO images (" + keys + ") VALUES (" + values + ")" self.logger.debug(cmd) self.cur.execute(cmd) - #if result != 1: return -1, "Database Error while inserting at images table" + # if result != 1: return -1, "Database Error while inserting at images table" - #insert tenants_images + # insert tenants_images if tenant_id != 'any': cmd = "INSERT INTO tenants_images (tenant_id,image_id) VALUES ('%s','%s')" % (tenant_id, uuid) self.logger.debug(cmd) self.cur.execute(cmd) - ##inserting new log - #cmd = "INSERT INTO logs (related,level,uuid, tenant_id, description) VALUES ('images','debug','%s','%s',\"new image: %s path: %s\")" % (uuid, tenant_id, image_dict['name'], image_dict['path']) - #self.logger.debug(cmd) - #self.cur.execute(cmd) + # #inserting new log + # cmd = "INSERT INTO logs (related,level,uuid, tenant_id, description) VALUES + # ('images','debug','%s','%s',\"new image: %s path: %s\")" % + # (uuid, tenant_id, image_dict['name'], image_dict['path']) + # self.logger.debug(cmd) + # self.cur.execute(cmd) - #inseted ok + # inseted ok return 1, uuid except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "new_image", cmd, "update", tenant_id) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c - + r, c = self.format_error(e, "new_image", cmd, "update", tenant_id) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c + def delete_image_flavor(self, item_type, item_id, tenant_id): - '''deletes an image or flavor from database + """deletes an image or flavor from database item_type must be a 'image' or 'flavor' item_id is the uuid tenant_id is the asociated tenant, can be 'any' with means all @@ -877,14 +960,14 @@ class vim_db(): that only will success if image is private and not used by other tenants If tenant_id is any, it tries to delete from both tables at the same transaction so that image/flavor is completely deleted from all tenants or nothing - ''' - for retry_ in range(0,2): + """ + for retry_ in 
range(0, 2): deleted = -1 deleted_item = -1 result = (-HTTP_Internal_Server_Error, "internal error") - cmd="" + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor() cmd = "DELETE FROM tenants_%ss WHERE %s_id = '%s'" % (item_type, item_type, item_id) if tenant_id != 'any': @@ -892,125 +975,134 @@ class vim_db(): self.logger.debug(cmd) self.cur.execute(cmd) deleted = self.cur.rowcount - if tenant_id == 'any': #delete from images/flavors in the SAME transaction + if tenant_id == 'any': # delete from images/flavors in the SAME transaction cmd = "DELETE FROM %ss WHERE uuid = '%s'" % (item_type, item_id) self.logger.debug(cmd) self.cur.execute(cmd) deleted = self.cur.rowcount - if deleted>=1: - #delete uuid + if deleted >= 1: + # delete uuid cmd = "DELETE FROM uuids WHERE uuid = '%s'" % item_id self.logger.debug(cmd) self.cur.execute(cmd) - ##inserting new log - #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \ + # #inserting new log + # cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \ # VALUES ('%ss','debug','%s','%s','delete %s completely')" % \ # (item_type, item_id, tenant_id, item_type) - #self.logger.debug(cmd) - #self.cur.execute(cmd) + # self.logger.debug(cmd) + # self.cur.execute(cmd) return deleted, "%s '%s' completely deleted" % (item_type, item_id) return 0, "%s '%s' not found" % (item_type, item_id) - + if deleted == 1: - ##inserting new log - #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \ + # #inserting new log + # cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \ # VALUES ('%ss','debug','%s','%s','delete %s reference for this tenant')" % \ # (item_type, item_id, tenant_id, item_type) - #self.logger.debug(cmd) - #self.cur.execute(cmd) + # self.logger.debug(cmd) + # self.cur.execute(cmd) - #commit transaction + # commit transaction self.cur.close() - #if tenant!=any delete from images/flavors in OTHER transaction. If fails is because dependencies so that not return error - if deleted==1: - with self.con: + # if tenant!=any delete from images/flavors in OTHER transaction. 
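# Illustrative sketch, not part of the patch: delete_image_flavor() runs two
# transactions. The first removes the tenant association (and, for tenant
# 'any', the item itself in the same transaction); only when exactly one
# association was removed does a second, independent transaction try to drop a
# private, unused item, tolerating failures caused by remaining references.
# A compact model of the (result, message) outcome logic, with invented names
# and no database involved:
def _delete_outcome(assoc_deleted, item_deleted_in_2nd_tx):
    if assoc_deleted == 1:
        suffix = "completely deleted" if item_deleted_in_2nd_tx == 1 else "deleted"
        return 1, "item reference %s" % suffix
    elif assoc_deleted == 0:
        return 0, "item not found for this tenant"
    return -1, "internal error"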
+ # If fails is because dependencies so that not return error + if deleted == 1: + with self.lock, self.con: self.cur = self.con.cursor() - #delete image/flavor if not public + # delete image/flavor if not public cmd = "DELETE FROM %ss WHERE uuid = '%s' AND public = 'no'" % (item_type, item_id) self.logger.debug(cmd) self.cur.execute(cmd) deleted_item = self.cur.rowcount if deleted_item == 1: - #delete uuid + # delete uuid cmd = "DELETE FROM uuids WHERE uuid = '%s'" % item_id self.logger.debug(cmd) self.cur.execute(cmd) - ##inserting new log - #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \ + # #inserting new log + # cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) \ # VALUES ('%ss','debug','%s','%s','delete %s completely')" % \ # (item_type, item_id, tenant_id, item_type) - #self.logger.debug(cmd) - #self.cur.execute(cmd) + # self.logger.debug(cmd) + # self.cur.execute(cmd) except (mdb.Error, AttributeError) as e: - #print "delete_%s DB Exception %d: %s" % (item_type, e.args[0], e.args[1]) - if deleted <0: - result = self.format_error(e, "delete_"+item_type, cmd, "delete", "servers") + # print "delete_%s DB Exception %d: %s" % (item_type, e.args[0], e.args[1]) + if deleted < 0: + result = self.format_error(e, "delete_" + item_type, cmd, "delete", "servers") finally: - if deleted==1: - return 1, "%s '%s' from tenant '%s' %sdeleted" % \ - (item_type, item_id, tenant_id, "completely " if deleted_item==1 else "") - elif deleted==0: - return 0, "%s '%s' from tenant '%s' not found" % (item_type, item_id, tenant_id) - else: - if result[0]!=-HTTP_Request_Timeout or retry_==1: return result - + if deleted == 1: + return 1, "{} '{}' from tenant '{}' {}deleted".format(item_type, item_id, tenant_id, + "completely " if deleted_item == 1 else "") + elif deleted == 0: + return 0, "{} '{}' from tenant '{}' not found".format(item_type, item_id, tenant_id) + else: + if result[0] != -HTTP_Request_Timeout or retry_ == 1: + return result + def delete_row(self, table, uuid): - for retry_ in range(0,2): - cmd="" + for retry_ in range(0, 2): + cmd = "" try: - with self.con: - #delete host + with self.lock, self.con: + # delete host self.cur = self.con.cursor() cmd = "DELETE FROM %s WHERE uuid = '%s'" % (table, uuid) self.logger.debug(cmd) self.cur.execute(cmd) deleted = self.cur.rowcount if deleted == 1: - #delete uuid - if table == 'tenants': tenant_str=uuid - else: tenant_str='Null' + # delete uuid + # if table == 'tenants': + # tenant_str = uuid + # else: + # tenant_str = 'Null' self.cur = self.con.cursor() cmd = "DELETE FROM uuids WHERE uuid = '%s'" % uuid self.logger.debug(cmd) self.cur.execute(cmd) - ##inserting new log - #cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) VALUES ('%s','debug','%s','%s','delete %s')" % (table, uuid, tenant_str, table[:-1]) - #self.logger.debug(cmd) - #self.cur.execute(cmd) - return deleted, table[:-1] + " '%s' %s" %(uuid, "deleted" if deleted==1 else "not found") + # #inserting new log + # cmd = "INSERT INTO logs (related,level,uuid,tenant_id,description) VALUES + # ('%s','debug','%s','%s','delete %s')" % (table, uuid, tenant_str, table[:-1]) + # self.logger.debug(cmd) + # self.cur.execute(cmd) + return deleted, table[:-1] + " '%s' %s" % (uuid, "deleted" if deleted == 1 else "not found") except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "delete_row", cmd, "delete", 'instances' if table=='hosts' or table=='tenants' else 'dependencies') - if r!=-HTTP_Request_Timeout or retry_==1: return r,c + r, c = 
self.format_error(e, "delete_row", cmd, "delete", + 'instances' if table in ('hosts', 'tenants') else 'dependencies') + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c def delete_row_by_key(self, table, key, value): - for retry_ in range(0,2): - cmd="" + for retry_ in range(0, 2): + cmd = "" try: - with self.con: - #delete host + with self.lock, self.con: + # delete host self.cur = self.con.cursor() cmd = "DELETE FROM %s" % (table) - if key!=None: - if value!=None: + if key: + if value: cmd += " WHERE %s = '%s'" % (key, value) else: cmd += " WHERE %s is null" % (key) - else: #delete all + else: # delete all pass self.logger.debug(cmd) self.cur.execute(cmd) deleted = self.cur.rowcount if deleted < 1: return -1, 'Not found' - #delete uuid + # delete uuid return 0, deleted except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "delete_row_by_key", cmd, "delete", 'instances' if table=='hosts' or table=='tenants' else 'dependencies') - if r!=-HTTP_Request_Timeout or retry_==1: return r,c - + r, c = self.format_error(e, "delete_row_by_key", cmd, "delete", + 'instances' if table in ('hosts', 'tenants') else 'dependencies') + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c + def delete_row_by_dict(self, **sql_dict): - ''' Deletes rows from a table. + """ Deletes rows from a table. Attribute sql_dir: dictionary with the following key: value 'FROM': string of table name (Mandatory) 'WHERE': dict of key:values, translated to key=value AND ... (Optional) @@ -1018,102 +1110,120 @@ class vim_db(): 'WHERE_NOTNULL': (list or tuple of items that must not be null in a where ... (Optional) 'LIMIT': limit of number of rows (Optional) Return: the (number of items deleted, descriptive test) if ok; (negative, descriptive text) if error - ''' - #print sql_dict - from_ = "FROM " + str(sql_dict['FROM']) - #print 'from_', from_ + """ + # print sql_dict + from_ = "FROM " + str(sql_dict['FROM']) + # print 'from_', from_ if 'WHERE' in sql_dict and len(sql_dict['WHERE']) > 0: - w=sql_dict['WHERE'] - where_ = "WHERE " + " AND ".join(map( lambda x: str(x) + (" is Null" if w[x] is None else "='"+str(w[x])+"'"), w.keys()) ) - else: where_ = "" - if 'WHERE_NOT' in sql_dict and len(sql_dict['WHERE_NOT']) > 0: - w=sql_dict['WHERE_NOT'] - where_2 = " AND ".join(map( lambda x: str(x) + (" is not Null" if w[x] is None else "<>'"+str(w[x])+"'"), w.keys()) ) - if len(where_)==0: where_ = "WHERE " + where_2 - else: where_ = where_ + " AND " + where_2 - if 'WHERE_NOTNULL' in sql_dict and len(sql_dict['WHERE_NOTNULL']) > 0: - w=sql_dict['WHERE_NOTNULL'] - where_2 = " AND ".join(map( lambda x: str(x) + " is not Null", w) ) - if len(where_)==0: where_ = "WHERE " + where_2 - else: where_ = where_ + " AND " + where_2 - #print 'where_', where_ + w = sql_dict['WHERE'] + where_ = "WHERE " + " AND ".join(map(lambda x: str(x) + (" is Null" if w[x] is None else "='" + str(w[x]) + + "'"), w.keys())) + else: + where_ = "" + if 'WHERE_NOT' in sql_dict and len(sql_dict['WHERE_NOT']) > 0: + w = sql_dict['WHERE_NOT'] + where_2 = " AND ".join(map(lambda x: str(x) + (" is not Null" if w[x] is None else "<>'" + str(w[x]) + "'"), + w.keys())) + if len(where_) == 0: + where_ = "WHERE " + where_2 + else: + where_ = where_ + " AND " + where_2 + if 'WHERE_NOTNULL' in sql_dict and len(sql_dict['WHERE_NOTNULL']) > 0: + w = sql_dict['WHERE_NOTNULL'] + where_2 = " AND ".join(map(lambda x: str(x) + " is not Null", w)) + if len(where_) == 0: + where_ = "WHERE " + where_2 + else: + where_ = where_ + " AND " + where_2 + # print 
'where_', where_ limit_ = "LIMIT " + str(sql_dict['LIMIT']) if 'LIMIT' in sql_dict else "" - #print 'limit_', limit_ - cmd = " ".join( ("DELETE", from_, where_, limit_) ) + # print 'limit_', limit_ + cmd = " ".join(("DELETE", from_, where_, limit_)) self.logger.debug(cmd) - for retry_ in range(0,2): + for retry_ in range(0, 2): try: - with self.con: - #delete host + with self.lock, self.con: + # delete host self.cur = self.con.cursor() self.cur.execute(cmd) deleted = self.cur.rowcount - return deleted, "%d deleted from %s" % (deleted, sql_dict['FROM'][:-1] ) + return deleted, "%d deleted from %s" % (deleted, sql_dict['FROM'][:-1]) except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "delete_row_by_dict", cmd, "delete", 'dependencies') - if r!=-HTTP_Request_Timeout or retry_==1: return r,c + r, c = self.format_error(e, "delete_row_by_dict", cmd, "delete", 'dependencies') + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c - def get_instance(self, instance_id): - for retry_ in range(0,2): - cmd="" + for retry_ in range(0, 2): + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor(mdb.cursors.DictCursor) - #get INSTANCE - cmd = "SELECT uuid, name, description, progress, host_id, flavor_id, image_id, status, hypervisor, os_image_type, last_error, "\ - "tenant_id, ram, vcpus, created_at FROM instances WHERE uuid='{}'".format(instance_id) #Unikernels extension + # get INSTANCE + cmd = "SELECT uuid, name, description, progress, host_id, flavor_id, image_id, status, " \ + "hypervisor, os_image_type, last_error, tenant_id, ram, vcpus, created_at " \ + "FROM instances WHERE uuid='{}'".format(instance_id) # Unikernels extension self.logger.debug(cmd) self.cur.execute(cmd) - if self.cur.rowcount == 0 : return 0, "instance '" + str(instance_id) +"'not found." + if self.cur.rowcount == 0: + return 0, "instance '" + str(instance_id) + "'not found." 
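# Illustrative sketch, not part of the patch: every public query method in
# vim_db wraps its transaction in "for retry_ in range(0, 2)", converts DB
# exceptions through format_error(), and repeats the attempt only when the
# mapped error is -HTTP_Request_Timeout (e.g. a dropped connection) and this
# was the first try. A stripped-down version of that control flow, with the
# transaction and the error mapper replaced by stand-ins:
HTTP_Request_Timeout = 408  # assumed to match the module's own constant

def _query_with_retry(run_query, map_error):
    for retry_ in range(0, 2):
        try:
            return run_query()                # would be the locked transaction
        except Exception as exc:              # vim_db catches mdb.Error / AttributeError
            result, content = map_error(exc)  # stands in for self.format_error()
            if result != -HTTP_Request_Timeout or retry_ == 1:
                return result, content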
instance = self.cur.fetchone() - #get networks - cmd = "SELECT uuid as iface_id, net_id, mac as mac_address, ip_address, name, Mbps as bandwidth, "\ - "vpci, model FROM ports WHERE (type='instance:bridge' or type='instance:ovs') AND "\ - "instance_id= '{}'".format(instance_id) + # get networks + cmd = "SELECT uuid as iface_id, net_id, mac as mac_address, ip_address, name, Mbps as bandwidth, " \ + "vpci, model FROM ports WHERE (type='instance:bridge' or type='instance:ovs') AND " \ + "instance_id= '{}'".format(instance_id) self.logger.debug(cmd) self.cur.execute(cmd) - if self.cur.rowcount > 0 : + if self.cur.rowcount > 0: instance['networks'] = self.cur.fetchall() - #get extended + # get extended extended = {} - #get devices - cmd = "SELECT type, vpci, image_id, xml, dev, image_size FROM instance_devices WHERE instance_id = '%s' " % str(instance_id) + # get devices + cmd = "SELECT type,vpci,image_id,xml,dev,image_size FROM instance_devices " \ + "WHERE instance_id = '%s' " % str(instance_id) self.logger.debug(cmd) self.cur.execute(cmd) - if self.cur.rowcount > 0 : + if self.cur.rowcount > 0: extended['devices'] = self.cur.fetchall() - #get numas + # get numas numas = [] - cmd = "SELECT id, numa_socket as source FROM numas WHERE host_id = '" + str(instance['host_id']) + "'" + cmd = "SELECT id, numa_socket as source FROM numas WHERE host_id = '{}'".format(instance['host_id']) self.logger.debug(cmd) self.cur.execute(cmd) host_numas = self.cur.fetchall() - #print 'host_numas', host_numas + # print 'host_numas', host_numas for k in host_numas: numa_id = str(k['id']) - numa_dict ={} - #get memory - cmd = "SELECT consumed FROM resources_mem WHERE instance_id = '%s' AND numa_id = '%s'" % ( instance_id, numa_id) + numa_dict = {} + # get memory + cmd = "SELECT consumed FROM resources_mem WHERE instance_id = '{}' AND numa_id = '{}'".foramt( + instance_id, numa_id) self.logger.debug(cmd) self.cur.execute(cmd) if self.cur.rowcount > 0: mem_dict = self.cur.fetchone() numa_dict['memory'] = mem_dict['consumed'] - #get full cores + # get full cores cursor2 = self.con.cursor() - cmd = "SELECT core_id, paired, MIN(v_thread_id) as v1, MAX(v_thread_id) as v2, COUNT(instance_id) as nb, MIN(thread_id) as t1, MAX(thread_id) as t2 FROM resources_core WHERE instance_id = '%s' AND numa_id = '%s' GROUP BY core_id,paired" % ( str(instance_id), numa_id) + cmd = "SELECT core_id, paired, MIN(v_thread_id) as v1, MAX(v_thread_id) as v2, " \ + "COUNT(instance_id) as nb, MIN(thread_id) as t1, MAX(thread_id) as t2 " \ + "FROM resources_core " \ + "WHERE instance_id = '{}' AND numa_id = '{}' GROUP BY core_id,paired".format(instance_id, + numa_id) self.logger.debug(cmd) cursor2.execute(cmd) - core_list = []; core_source = [] - paired_list = []; paired_source = [] - thread_list = []; thread_source = [] - if cursor2.rowcount > 0: + core_list = [] + core_source = [] + paired_list = [] + paired_source = [] + thread_list = [] + thread_source = [] + if cursor2.rowcount > 0: cores = cursor2.fetchall() for core in cores: - if core[4] == 2: #number of used threads from core - if core[3] == core[2]: #only one thread asigned to VM, so completely core + if core[4] == 2: # number of used threads from core + if core[3] == core[2]: # only one thread asigned to VM, so completely core core_list.append(core[2]) core_source.append(core[5]) elif core[1] == 'Y': @@ -1139,31 +1249,36 @@ class vim_db(): numa_dict['threads-id'] = thread_list numa_dict['threads-source'] = thread_source - #get dedicated ports and SRIOV - cmd = "SELECT port_id as 
iface_id, p.vlan as vlan, p.mac as mac_address, net_id, if(model='PF',\ - 'yes',if(model='VF','no','yes:sriov')) as dedicated, p.Mbps as bandwidth, name, vpci, \ - pci as source \ - FROM resources_port as rp join ports as p on port_id=uuid WHERE p.instance_id = '%s' AND numa_id = '%s' and p.type='instance:data'" % (instance_id, numa_id) + # get dedicated ports and SRIOV + cmd = "SELECT port_id as iface_id, p.vlan as vlan, p.mac as mac_address, net_id, " \ + "if(model='PF','yes',if(model='VF','no','yes:sriov')) as dedicated, p.Mbps as bandwidth" \ + ", name, vpci, pci as source " \ + "FROM resources_port as rp join ports as p on port_id=uuid " \ + "WHERE p.instance_id = '{}' AND numa_id = '{}' and " \ + "p.type='instance:data'".format(instance_id, numa_id) self.logger.debug(cmd) self.cur.execute(cmd) - if self.cur.rowcount > 0: + if self.cur.rowcount > 0: numa_dict['interfaces'] = self.cur.fetchall() - #print 'interfaces', numa_dict + # print 'interfaces', numa_dict - if len(numa_dict) > 0 : - numa_dict['source'] = k['source'] #numa socket + if len(numa_dict) > 0: + numa_dict['source'] = k['source'] # numa socket numas.append(numa_dict) - if len(numas) > 0 : extended['numas'] = numas - if len(extended) > 0 : instance['extended'] = extended + if len(numas) > 0: + extended['numas'] = numas + if len(extended) > 0: + instance['extended'] = extended af.DeleteNone(instance) return 1, instance except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "get_instance", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c - + r, c = self.format_error(e, "get_instance", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c + def get_numas(self, requirements, prefered_host_id=None, only_of_ports=True): - '''Obtain a valid NUMA/HOST for deployment a VM + """Obtain a valid NUMA/HOST for deployment a VM requirements: contain requirement regarding: requirements['ram']: Non huge page memory in MB; 0 to skip requirements['vcpus']: Non isolated cpus; 0 to skip @@ -1177,83 +1292,88 @@ class vim_db(): only_of_ports: if True only those ports conected to the openflow (of) are valid, that is, with switch_port information filled; if False, all NIC ports are valid. 
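# Illustrative sketch, not part of the patch: an example of the requirements
# dictionary as get_numas() consumes it (key names taken from the code below;
# the concrete values are invented):
example_requirements = {
    "ram": 4096,             # non hugepage memory in MB; 0 to skip
    "vcpus": 2,              # non isolated cpus; 0 to skip
    "hypervisor": "kvm",     # Unikernels extension; defaults to "kvm" when absent
    "numa": {
        "memory": 8,                         # hugepage memory in GB
        "proc_req_type": "threads",          # or "cores"
        "proc_req_nb": 4,                    # number of threads/cores requested
        "port_list": [{"bandwidth": 10000}],   # dedicated (PF) interfaces, Mbps
        "sriov_list": [{"bandwidth": 1000}],   # SR-IOV (VF) interfaces, Mbps
    },
}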
Return a valid numa and host - ''' - - for retry_ in range(0,2): - cmd="" + """ + + for retry_ in range(0, 2): + cmd = "" try: - with self.con: -# #Find numas of prefered host -# prefered_numas = () -# if prefered_host_id != None: -# self.cur = self.con.cursor() -# self.cur.execute("SELECT id FROM numas WHERE host_id='%s'" + prefered_host_id) -# prefered_numas = self.cur.fetchall() -# self.cur.close() - - #Find valid host for the ram and vcpus + with self.lock, self.con: + # #Find numas of prefered host + # prefered_numas = () + # if prefered_host_id is not None: + # self.cur = self.con.cursor() + # self.cur.execute("SELECT id FROM numas WHERE host_id='%s'" + prefered_host_id) + # prefered_numas = self.cur.fetchall() + # self.cur.close() + + # Find valid host for the ram and vcpus self.cur = self.con.cursor(mdb.cursors.DictCursor) cmd = "CALL GetHostByMemCpu(%s, %s)" % (str(requirements['ram']), str(requirements['vcpus'])) - self.logger.debug(cmd) - self.cur.callproc('GetHostByMemCpu', (str(requirements['ram']), str(requirements['vcpus'])) ) + self.logger.debug(cmd) + self.cur.callproc('GetHostByMemCpu', (str(requirements['ram']), str(requirements['vcpus']))) valid_hosts = self.cur.fetchall() - self.cur.close() + self.cur.close() self.cur = self.con.cursor() match_found = False - if len(valid_hosts)<=0: - error_text = 'No room at data center. Cannot find a host with %s MB memory and %s cpus available' % (str(requirements['ram']), str(requirements['vcpus'])) - #self.logger.debug(error_text) + if len(valid_hosts) <= 0: + error_text = 'No room at data center. Cannot find a host with %s MB memory and %s cpus ' \ + 'available' % (str(requirements['ram']), str(requirements['vcpus'])) + # self.logger.debug(error_text) return -1, error_text - - if not 'hypervisor' in requirements: #Unikernels extension -END- + + if 'hypervisor' not in requirements: # Unikernels extension -END- requirements['hypervisor'] = "kvm" for valid_host in valid_hosts: - if not 'hypervisors' in valid_host: + if 'hypervisors' not in valid_host: valid_host['hypervisors'] = "kvm" - valid_hosts = tuple(valid_host for valid_host in valid_hosts if requirements['hypervisor'] in valid_host['hypervisors'].split(",")) + valid_hosts = tuple(valid_host for valid_host in valid_hosts if + requirements['hypervisor'] in valid_host['hypervisors'].split(",")) - if len(valid_hosts)<=0: - error_text = 'No room at data center. Cannot find a host with %s hypervisor or not have enough resources available' % (str(requirements['hypervisor'])) - #self.logger.debug(error_text) - return -1, error_text #Unikernels extension -END- + if len(valid_hosts) <= 0: + error_text = 'No room at data center. Cannot find a host with %s hypervisor or not have ' \ + 'enough resources available' % (str(requirements['hypervisor'])) + # self.logger.debug(error_text) + return -1, error_text # Unikernels extension -END- - #elif req_numa != None: - #Find valid numa nodes for memory requirements + # elif req_numa is not None: + # Find valid numa nodes for memory requirements self.cur = self.con.cursor(mdb.cursors.DictCursor) cmd = "CALL GetNumaByMemory(%s)" % str(requirements['numa']['memory']) - self.logger.debug(cmd) - self.cur.callproc('GetNumaByMemory', (requirements['numa']['memory'],) ) + self.logger.debug(cmd) + self.cur.callproc('GetNumaByMemory', (requirements['numa']['memory'],)) valid_for_memory = self.cur.fetchall() - self.cur.close() + self.cur.close() self.cur = self.con.cursor() - if len(valid_for_memory)<=0: - error_text = 'No room at data center. 
Cannot find a host with %s GB Hugepages memory available' % str(requirements['numa']['memory']) - #self.logger.debug(error_text) + if len(valid_for_memory) <= 0: + error_text = 'No room at data center. Cannot find a host with %s GB Hugepages memory' \ + ' available' % str(requirements['numa']['memory']) + # self.logger.debug(error_text) return -1, error_text - #Find valid numa nodes for processor requirements + # Find valid numa nodes for processor requirements self.cur = self.con.cursor(mdb.cursors.DictCursor) if requirements['numa']['proc_req_type'] == 'threads': - cpu_requirement_text='cpu-threads' + cpu_requirement_text = 'cpu-threads' cmd = "CALL GetNumaByThread(%s)" % str(requirements['numa']['proc_req_nb']) - self.logger.debug(cmd) - self.cur.callproc('GetNumaByThread', (requirements['numa']['proc_req_nb'],) ) + self.logger.debug(cmd) + self.cur.callproc('GetNumaByThread', (requirements['numa']['proc_req_nb'],)) else: - cpu_requirement_text='cpu-cores' + cpu_requirement_text = 'cpu-cores' cmd = "CALL GetNumaByCore(%s)" % str(requirements['numa']['proc_req_nb']) - self.logger.debug(cmd) - self.cur.callproc('GetNumaByCore', (requirements['numa']['proc_req_nb'],) ) + self.logger.debug(cmd) + self.cur.callproc('GetNumaByCore', (requirements['numa']['proc_req_nb'],)) valid_for_processor = self.cur.fetchall() - self.cur.close() + self.cur.close() self.cur = self.con.cursor() - if len(valid_for_processor)<=0: - error_text = 'No room at data center. Cannot find a host with %s %s available' % (str(requirements['numa']['proc_req_nb']),cpu_requirement_text) - #self.logger.debug(error_text) + if len(valid_for_processor) <= 0: + error_text = 'No room at data center. Cannot find a host with %s %s available' % ( + str(requirements['numa']['proc_req_nb']), cpu_requirement_text) + # self.logger.debug(error_text) return -1, error_text - #Find the numa nodes that comply for memory and processor requirements - #sorting from less to more memory capacity + # Find the numa nodes that comply for memory and processor requirements + # sorting from less to more memory capacity valid_numas = [] for m_numa in valid_for_memory: numa_valid_for_processor = False @@ -1274,171 +1394,176 @@ class vim_db(): valid_numas.insert(0, m_numa['numa_id']) else: valid_numas.append(m_numa['numa_id']) - if len(valid_numas)<=0: - error_text = 'No room at data center. Cannot find a host with %s MB hugepages memory and %s %s available in the same numa' %\ - (requirements['numa']['memory'], str(requirements['numa']['proc_req_nb']),cpu_requirement_text) - #self.logger.debug(error_text) + if len(valid_numas) <= 0: + error_text = "No room at data center. 
Cannot find a host with {} MB hugepages memory and {} " \ + "{} available in the same numa".format(requirements['numa']['memory'], + requirements['numa']['proc_req_nb'], + cpu_requirement_text) + # self.logger.debug(error_text) return -1, error_text - - # print 'Valid numas list: '+str(valid_numas) - #Find valid numa nodes for interfaces requirements - #For each valid numa we will obtain the number of available ports and check if these are valid - match_found = False + # print 'Valid numas list: '+str(valid_numas) + + # Find valid numa nodes for interfaces requirements + # For each valid numa we will obtain the number of available ports and check if these are valid + match_found = False for numa_id in valid_numas: - # print 'Checking '+str(numa_id) + # print 'Checking '+str(numa_id) match_found = False self.cur = self.con.cursor(mdb.cursors.DictCursor) if only_of_ports: - cmd="CALL GetAvailablePorts(%s)" % str(numa_id) + cmd = "CALL GetAvailablePorts(%s)" % str(numa_id) self.logger.debug(cmd) - self.cur.callproc('GetAvailablePorts', (numa_id,) ) + self.cur.callproc('GetAvailablePorts', (numa_id,)) else: - cmd="CALL GetAllAvailablePorts(%s)" % str(numa_id) + cmd = "CALL GetAllAvailablePorts(%s)" % str(numa_id) self.logger.debug(cmd) - self.cur.callproc('GetAllAvailablePorts', (numa_id,) ) + self.cur.callproc('GetAllAvailablePorts', (numa_id,)) available_ports = self.cur.fetchall() - self.cur.close() + self.cur.close() self.cur = self.con.cursor() - #Set/reset reservations + # Set/reset reservations for port in available_ports: port['Mbps_reserved'] = 0 port['SRIOV_reserved'] = 0 - #Try to allocate physical ports + # Try to allocate physical ports physical_ports_found = True for iface in requirements['numa']['port_list']: - # print '\t\tchecking iface: '+str(iface) + # print '\t\tchecking iface: '+str(iface) portFound = False for port in available_ports: - # print '\t\t\tfor port: '+str(port) - #If the port is not empty continue + # print '\t\t\tfor port: '+str(port) + # If the port is not empty continue if port['Mbps_free'] != port['Mbps'] or port['Mbps_reserved'] != 0: - # print '\t\t\t\t Not empty port' - continue; - #If the port speed is not enough continue + # print '\t\t\t\t Not empty port' + continue + # If the port speed is not enough continue if port['Mbps'] < iface['bandwidth']: - # print '\t\t\t\t Not enough speed' - continue; + # print '\t\t\t\t Not enough speed' + continue - #Otherwise this is a valid port + # Otherwise this is a valid port port['Mbps_reserved'] = port['Mbps'] port['SRIOV_reserved'] = 0 iface['port_id'] = port['port_id'] iface['vlan'] = None iface['mac'] = port['mac'] iface['switch_port'] = port['switch_port'] - # print '\t\t\t\t Dedicated port found '+str(port['port_id']) + # print '\t\t\t\t Dedicated port found '+str(port['port_id']) portFound = True - break; + break - #if all ports have been checked and no match has been found - #this is not a valid numa + # if all ports have been checked and no match has been found + # this is not a valid numa if not portFound: - # print '\t\t\t\t\tAll ports have been checked and no match has been found for numa '+str(numa_id)+'\n\n' + # print '\t\t\t\t\tAll ports have been checked and no match has been found for + # numa '+str(numa_id)+'\n\n' physical_ports_found = False break - #if there is no match continue checking the following numa + # if there is no match continue checking the following numa if not physical_ports_found: continue - #Try to allocate SR-IOVs + # Try to allocate SR-IOVs sriov_ports_found = True for iface 
in requirements['numa']['sriov_list']: - # print '\t\tchecking iface: '+str(iface) + # print '\t\tchecking iface: '+str(iface) portFound = False for port in available_ports: - # print '\t\t\tfor port: '+str(port) - #If there are not available SR-IOVs continue + # print '\t\t\tfor port: '+str(port) + # If there are not available SR-IOVs continue if port['availableSRIOV'] - port['SRIOV_reserved'] <= 0: - # print '\t\t\t\t Not enough SR-IOV' - continue; - #If the port free speed is not enough continue + # print '\t\t\t\t Not enough SR-IOV' + continue + # If the port free speed is not enough continue if port['Mbps_free'] - port['Mbps_reserved'] < iface['bandwidth']: - # print '\t\t\t\t Not enough speed' - continue; + # print '\t\t\t\t Not enough speed' + continue - #Otherwise this is a valid port + # Otherwise this is a valid port port['Mbps_reserved'] += iface['bandwidth'] port['SRIOV_reserved'] += 1 - # print '\t\t\t\t SR-IOV found '+str(port['port_id']) + # print '\t\t\t\t SR-IOV found '+str(port['port_id']) iface['port_id'] = port['port_id'] iface['vlan'] = None iface['mac'] = port['mac'] iface['switch_port'] = port['switch_port'] portFound = True - break; + break - #if all ports have been checked and no match has been found - #this is not a valid numa + # if all ports have been checked and no match has been found + # this is not a valid numa if not portFound: - # print '\t\t\t\t\tAll ports have been checked and no match has been found for numa '+str(numa_id)+'\n\n' + # print '\t\t\t\t\tAll ports have been checked and no match has been found for numa + # '+str(numa_id)+'\n\n' sriov_ports_found = False break - #if there is no match continue checking the following numa + # if there is no match continue checking the following numa if not sriov_ports_found: continue - if sriov_ports_found and physical_ports_found: match_found = True break if not match_found: - error_text = 'No room at data center. Cannot find a host with the required hugepages, vcpus and interfaces' - #self.logger.debug(error_text) + error_text = 'No room at data center. 
Cannot find a host with the required hugepages, vcpus ' \ + 'and interfaces' + # self.logger.debug(error_text) return -1, error_text - #self.logger.debug('Full match found in numa %s', str(numa_id)) + # self.logger.debug('Full match found in numa %s', str(numa_id)) for numa in valid_for_processor: - if numa_id==numa['numa_id']: - host_id=numa['host_id'] + if numa_id == numa['numa_id']: + host_id = numa['host_id'] break - return 0, {'numa_id':numa_id, 'host_id': host_id, } + return 0, {'numa_id': numa_id, 'host_id': host_id} except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "get_numas", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c + r, c = self.format_error(e, "get_numas", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c def new_instance(self, instance_dict, nets, ports_to_free): - for retry_ in range(0,2): - cmd="" + for retry_ in range(0, 2): + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor() - #create uuid if not provided + # create uuid if not provided if 'uuid' not in instance_dict: - uuid = instance_dict['uuid'] = str(myUuid.uuid1()) # create_uuid - else: #check uuid is valid + uuid = instance_dict['uuid'] = str(myUuid.uuid1()) # create_uuid + else: # check uuid is valid uuid = str(instance_dict['uuid']) - - #inserting new uuid + # inserting new uuid cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'instances')" % (uuid, uuid) self.logger.debug(cmd) self.cur.execute(cmd) - #insert in table instance - extended = instance_dict.pop('extended', None); - bridgedifaces = instance_dict.pop('bridged-ifaces', () ); + # insert in table instance + extended = instance_dict.pop('extended', None) + bridgedifaces = instance_dict.pop('bridged-ifaces', ()) - keys = ",".join(instance_dict.keys()) - values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", instance_dict.values() ) ) + keys = ",".join(instance_dict.keys()) + values = ",".join( + map(lambda x: "Null" if x is None else "'" + str(x) + "'", instance_dict.values())) cmd = "INSERT INTO instances (" + keys + ") VALUES (" + values + ")" self.logger.debug(cmd) self.cur.execute(cmd) - #if result != 1: return -1, "Database Error while inserting at instances table" + # if result != 1: return -1, "Database Error while inserting at instances table" - #insert resources + # insert resources nb_bridge_ifaces = nb_cores = nb_ifaces = nb_numas = 0 - #insert bridged_ifaces + # insert bridged_ifaces for iface in bridgedifaces: - #generate and insert a iface uuid + # generate and insert a iface uuid if 'enable_dhcp' in iface and iface['enable_dhcp']: dhcp_first_ip = iface["dhcp_first_ip"] del iface["dhcp_first_ip"] @@ -1453,7 +1578,7 @@ class vim_db(): iface["ip_address"] = None else: iface["ip_address"] = self.get_free_ip_from_range(dhcp_first_ip, dhcp_last_ip, - dhcp_cidr, used_dhcp_ips) + dhcp_cidr, used_dhcp_ips) if 'links' in iface: del iface['links'] if 'dns' in iface: @@ -1461,116 +1586,145 @@ class vim_db(): if 'routes' in iface: del iface['routes'] - iface['uuid'] = str(myUuid.uuid1()) # create_uuid - cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'ports')" % (iface['uuid'], uuid) + iface['uuid'] = str(myUuid.uuid1()) # create_uuid + cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'ports')" % ( + iface['uuid'], uuid) self.logger.debug(cmd) self.cur.execute(cmd) - #insert iface + # insert iface iface['instance_id'] = uuid # iface['type'] = 'instance:bridge' - if 'name' not in 
iface: iface['name']="br"+str(nb_bridge_ifaces) - iface['Mbps']=iface.pop('bandwidth', None) + if 'name' not in iface: + iface['name'] = "br" + str(nb_bridge_ifaces) + iface['Mbps'] = iface.pop('bandwidth', None) if 'mac_address' not in iface: iface['mac'] = af.gen_random_mac() else: iface['mac'] = iface['mac_address'] del iface['mac_address'] - #iface['mac']=iface.pop('mac_address', None) #for leaving mac generation to libvirt - keys = ",".join(iface.keys()) - values = ",".join( map(lambda x: "Null" if x is None else "'"+str(x)+"'", iface.values() ) ) + # iface['mac']=iface.pop('mac_address', None) #for leaving mac generation to libvirt + keys = ",".join(iface.keys()) + values = ",".join(map(lambda x: "Null" if x is None else "'" + str(x) + "'", iface.values())) cmd = "INSERT INTO ports (" + keys + ") VALUES (" + values + ")" self.logger.debug(cmd) self.cur.execute(cmd) nb_bridge_ifaces += 1 if extended is not None: - if 'numas' not in extended or extended['numas'] is None: extended['numas'] = () + if 'numas' not in extended or extended['numas'] is None: + extended['numas'] = () for numa in extended['numas']: nb_numas += 1 - #cores - if 'cores' not in numa or numa['cores'] is None: numa['cores'] = () + # cores + if 'cores' not in numa or numa['cores'] is None: + numa['cores'] = () for core in numa['cores']: nb_cores += 1 cmd = "UPDATE resources_core SET instance_id='%s'%s%s WHERE id='%s'" \ - % (uuid, \ - (",v_thread_id='" + str(core['vthread']) + "'") if 'vthread' in core else '', \ - (",paired='" + core['paired'] + "'") if 'paired' in core else '', \ - core['id'] ) + % (uuid, + (",v_thread_id='" + str(core['vthread']) + "'") if 'vthread' in core else '', + (",paired='" + core['paired'] + "'") if 'paired' in core else '', core['id']) self.logger.debug(cmd) self.cur.execute(cmd) - #interfaces - if 'interfaces' not in numa or numa['interfaces'] is None: numa['interfaces'] = () + # interfaces + if 'interfaces' not in numa or numa['interfaces'] is None: + numa['interfaces'] = () for iface in numa['interfaces']: - #generate and insert an uuid; iface[id]=iface_uuid; iface[uuid]= net_id - iface['id'] = str(myUuid.uuid1()) # create_uuid - cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES ('%s','%s', 'ports')" % (iface['id'], uuid) + # generate and insert an uuid; iface[id]=iface_uuid; iface[uuid]= net_id + iface['id'] = str(myUuid.uuid1()) # create_uuid + cmd = "INSERT INTO uuids (uuid, root_uuid, used_at) VALUES " \ + "('{}','{}', 'ports')".format(iface['id'], uuid) self.logger.debug(cmd) self.cur.execute(cmd) nb_ifaces += 1 - mbps_=("'"+str(iface['Mbps_used'])+"'") if 'Mbps_used' in iface and iface['Mbps_used'] is not None else "Mbps" - if iface["dedicated"]=="yes": - iface_model="PF" - elif iface["dedicated"]=="yes:sriov": - iface_model="VFnotShared" - elif iface["dedicated"]=="no": - iface_model="VF" - #else error - INSERT=(iface['mac_address'], iface['switch_port'], iface.get('vlan',None), 'instance:data', iface['Mbps_used'], iface['id'], - uuid, instance_dict['tenant_id'], iface.get('name',None), iface.get('vpci',None), iface.get('uuid',None), iface_model ) - cmd = "INSERT INTO ports (mac,switch_port,vlan,type,Mbps,uuid,instance_id,tenant_id,name,vpci,net_id, model) " + \ - " VALUES (" + ",".join(map(lambda x: 'Null' if x is None else "'"+str(x)+"'", INSERT )) + ")" + mbps_ = ("'" + str(iface['Mbps_used']) + "'") if 'Mbps_used' in iface and \ + iface['Mbps_used'] is not None \ + else "Mbps" + if iface["dedicated"] == "yes": + iface_model = "PF" + elif iface["dedicated"] == 
"yes:sriov": + iface_model = "VFnotShared" + elif iface["dedicated"] == "no": + iface_model = "VF" + # else error + INSERT = (iface['mac_address'], iface['switch_port'], iface.get('vlan'), + 'instance:data', iface['Mbps_used'], iface['id'], uuid, + instance_dict['tenant_id'], iface.get('name'), iface.get('vpci'), + iface.get('uuid'), iface_model) + cmd = "INSERT INTO ports (mac,switch_port,vlan,type,Mbps,uuid,instance_id,tenant_id," \ + "name,vpci,net_id, model) VALUES (" + \ + ",".join(map(lambda x: 'Null' if x is None else "'" + str(x) + "'", INSERT)) + ")" self.logger.debug(cmd) self.cur.execute(cmd) if 'uuid' in iface: nets.append(iface['uuid']) - - #discover if this port is not used by anyone - cmd = "SELECT source_name, mac FROM ( SELECT root_id, count(instance_id) as used FROM resources_port" \ - " WHERE root_id=(SELECT root_id from resources_port WHERE id='%s')"\ - " GROUP BY root_id ) AS A JOIN resources_port as B ON A.root_id=B.id AND A.used=0" % iface['port_id'] + + # discover if this port is not used by anyone + cmd = "SELECT source_name, mac " \ + "FROM ( SELECT root_id, count(instance_id) as used FROM resources_port" \ + " WHERE root_id=(SELECT root_id from resources_port WHERE id='%s')" \ + " GROUP BY root_id ) AS A JOIN resources_port as B ON " \ + "A.root_id=B.id AND A.used=0" % iface['port_id'] self.logger.debug(cmd) self.cur.execute(cmd) ports_to_free += self.cur.fetchall() - cmd = "UPDATE resources_port SET instance_id='%s', port_id='%s',Mbps_used=%s WHERE id='%s'" \ - % (uuid, iface['id'], mbps_, iface['port_id']) - #if Mbps_used not suply, set the same value of 'Mpbs', that is the total + cmd = "UPDATE resources_port SET instance_id='%s', port_id='%s',Mbps_used=%s " \ + "WHERE id='%s'" % (uuid, iface['id'], mbps_, iface['port_id']) + # if Mbps_used not suply, set the same value of 'Mpbs', that is the total self.logger.debug(cmd) self.cur.execute(cmd) - #memory - if 'memory' in numa and numa['memory'] is not None and numa['memory']>0: - cmd = "INSERT INTO resources_mem (numa_id, instance_id, consumed) VALUES ('%s','%s','%s')" % (numa['numa_id'], uuid, numa['memory']) + # memory + if 'memory' in numa and numa['memory'] is not None and numa['memory'] > 0: + cmd = "INSERT INTO resources_mem (numa_id, instance_id, consumed) VALUES " \ + "('%s','%s','%s')" % (numa['numa_id'], uuid, numa['memory']) self.logger.debug(cmd) self.cur.execute(cmd) - if 'devices' not in extended or extended['devices'] is None: extended['devices'] = () + if 'devices' not in extended or extended['devices'] is None: + extended['devices'] = () for device in extended['devices']: - if 'vpci' in device: vpci = "'" + device['vpci'] + "'" - else: vpci = 'Null' - if 'image_id' in device: image_id = "'" + device['image_id'] + "'" - else: image_id = 'Null' - if 'xml' in device: xml = "'" + device['xml'] + "'" - else: xml = 'Null' - if 'dev' in device: dev = "'" + device['dev'] + "'" - else: dev = 'Null' - if 'image_size' in device: size = device['image_size'] - else: size = 0 - cmd = "INSERT INTO instance_devices (type, instance_id, image_id, vpci, xml, dev, image_size) VALUES ('%s','%s', %s, %s, %s, %s, %s)" % \ - (device['type'], uuid, image_id, vpci, xml, dev, str(size)) + if 'vpci' in device: + vpci = "'" + device['vpci'] + "'" + else: + vpci = 'Null' + if 'image_id' in device: + image_id = "'" + device['image_id'] + "'" + else: + image_id = 'Null' + if 'xml' in device: + xml = "'" + device['xml'] + "'" + else: + xml = 'Null' + if 'dev' in device: + dev = "'" + device['dev'] + "'" + else: + dev = 'Null' + 
if 'image_size' in device: + size = device['image_size'] + else: + size = 0 + cmd = "INSERT INTO instance_devices (type,instance_id,image_id,vpci,xml,dev,image_size) " \ + "VALUES ('%s','%s', %s, %s, %s, %s, %s)" % \ + (device['type'], uuid, image_id, vpci, xml, dev, str(size)) self.logger.debug(cmd) self.cur.execute(cmd) - ##inserting new log - #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('instances','debug','%s','new instance: %d numas, %d theads, %d ifaces %d bridge_ifaces')" % (uuid, nb_numas, nb_cores, nb_ifaces, nb_bridge_ifaces) - #self.logger.debug(cmd) - #self.cur.execute(cmd) - - #inseted ok - return 1, uuid + # #inserting new log + # cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('instances','debug','%s', + # 'new instance: %d numas, %d theads, %d ifaces %d bridge_ifaces')" % + # (uuid, nb_numas, nb_cores, nb_ifaces, nb_bridge_ifaces) + # self.logger.debug(cmd) + # self.cur.execute(cmd) + # + # inseted ok + return 1, uuid except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "new_instance", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c + r, c = self.format_error(e, "new_instance", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c - def get_free_ip_from_range(self, first_ip, last_ip, cidr, ip_used_list): + @staticmethod + def get_free_ip_from_range(first_ip, last_ip, cidr, ip_used_list): """ Calculate a free IP from a range given :param first_ip: First dhcp ip range @@ -1599,7 +1753,7 @@ class vim_db(): :param net_id: :return: """ - WHERE={'type': 'instance:ovs', 'net_id': net_id} + WHERE = {'type': 'instance:ovs', 'net_id': net_id} for retry_ in range(0, 2): cmd = "" self.cur = self.con.cursor(mdb.cursors.DictCursor) @@ -1622,23 +1776,26 @@ class vim_db(): return ip_address_list - - def delete_instance(self, instance_id, tenant_id, net_dataplane_list, ports_to_free, net_ovs_list, logcause="requested by http"): - for retry_ in range(0,2): - cmd="" + def delete_instance(self, instance_id, tenant_id, net_dataplane_list, ports_to_free, net_ovs_list, + logcause="requested by http"): + for retry_ in range(0, 2): + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor() - #get INSTANCE + # get INSTANCE cmd = "SELECT uuid FROM instances WHERE uuid='%s' AND tenant_id='%s'" % (instance_id, tenant_id) self.logger.debug(cmd) self.cur.execute(cmd) - if self.cur.rowcount == 0 : return 0, "instance %s not found in tenant %s" % (instance_id, tenant_id) + if self.cur.rowcount == 0: + return 0, "instance %s not found in tenant %s" % (instance_id, tenant_id) - #delete bridged ifaces, instace_devices, resources_mem; done by database: it is automatic by Database; FOREIGN KEY DELETE CASCADE - - #get nets afected - cmd = "SELECT DISTINCT net_id from ports WHERE instance_id = '%s' AND net_id is not Null AND type='instance:data'" % instance_id + # delete bridged ifaces, instace_devices, resources_mem; done by database: it is automatic by + # Database; FOREIGN KEY DELETE CASCADE + + # get nets afected + cmd = "SELECT DISTINCT net_id from ports WHERE instance_id = '%s' AND net_id is not Null AND " \ + "type='instance:data'" % instance_id self.logger.debug(cmd) self.cur.execute(cmd) net_list__ = self.cur.fetchall() @@ -1646,93 +1803,102 @@ class vim_db(): net_dataplane_list.append(net[0]) # get ovs manangement nets - cmd = "SELECT DISTINCT net_id, vlan, ip_address, mac FROM ports WHERE instance_id='{}' AND net_id is not Null AND "\ - "type='instance:ovs'".format(instance_id) + cmd = "SELECT 
DISTINCT net_id, vlan, ip_address, mac FROM ports WHERE instance_id='{}' AND " \ + "net_id is not Null AND type='instance:ovs'".format(instance_id) self.logger.debug(cmd) self.cur.execute(cmd) net_ovs_list += self.cur.fetchall() - #get dataplane interfaces releases by this VM; both PF and VF with no other VF - cmd="SELECT source_name, mac FROM (SELECT root_id, count(instance_id) as used FROM resources_port WHERE instance_id='%s' GROUP BY root_id ) AS A" % instance_id \ - + " JOIN (SELECT root_id, count(instance_id) as used FROM resources_port GROUP BY root_id) AS B ON A.root_id=B.root_id AND A.used=B.used"\ - + " JOIN resources_port as C ON A.root_id=C.id" -# cmd = "SELECT DISTINCT root_id FROM resources_port WHERE instance_id = '%s'" % instance_id + # get dataplane interfaces releases by this VM; both PF and VF with no other VF + cmd = "SELECT source_name, mac FROM (SELECT root_id, count(instance_id) as used " \ + "FROM resources_port WHERE instance_id='%s' GROUP BY root_id ) AS A" % instance_id \ + + " JOIN (SELECT root_id, count(instance_id) as used FROM resources_port GROUP BY root_id) " \ + "AS B ON A.root_id=B.root_id AND A.used=B.used JOIN resources_port as C ON A.root_id=C.id" + # cmd = "SELECT DISTINCT root_id FROM resources_port WHERE instance_id = '%s'" % instance_id self.logger.debug(cmd) self.cur.execute(cmd) ports_to_free += self.cur.fetchall() - #update resources port - cmd = "UPDATE resources_port SET instance_id=Null, port_id=Null, Mbps_used='0' WHERE instance_id = '%s'" % instance_id + # update resources port + cmd = "UPDATE resources_port SET instance_id=Null, port_id=Null, Mbps_used='0' " \ + "WHERE instance_id = '%s'" % instance_id self.logger.debug(cmd) self.cur.execute(cmd) - -# #filter dataplane ports used by this VM that now are free -# for port in ports_list__: -# cmd = "SELECT mac, count(instance_id) FROM resources_port WHERE root_id = '%s'" % port[0] -# self.logger.debug(cmd) -# self.cur.execute(cmd) -# mac_list__ = self.cur.fetchone() -# if mac_list__ and mac_list__[1]==0: -# ports_to_free.append(mac_list__[0]) - - - #update resources core - cmd = "UPDATE resources_core SET instance_id=Null, v_thread_id=Null, paired='N' WHERE instance_id = '%s'" % instance_id + + # #filter dataplane ports used by this VM that now are free + # for port in ports_list__: + # cmd = "SELECT mac, count(instance_id) FROM resources_port WHERE root_id = '%s'" % port[0] + # self.logger.debug(cmd) + # self.cur.execute(cmd) + # mac_list__ = self.cur.fetchone() + # if mac_list__ and mac_list__[1]==0: + # ports_to_free.append(mac_list__[0]) + + # update resources core + cmd = "UPDATE resources_core SET instance_id=Null, v_thread_id=Null, paired='N' " \ + "WHERE instance_id = '%s'" % instance_id self.logger.debug(cmd) self.cur.execute(cmd) - #delete all related uuids + # delete all related uuids cmd = "DELETE FROM uuids WHERE root_uuid='%s'" % instance_id self.logger.debug(cmd) self.cur.execute(cmd) - ##insert log - #cmd = "INSERT INTO logs (related,level,uuid,description) VALUES ('instances','debug','%s','delete instance %s')" % (instance_id, logcause) - #self.logger.debug(cmd) - #self.cur.execute(cmd) + # #insert log + # cmd = "INSERT INTO logs (related,level,uuid,description) VALUES + # ('instances','debug','%s','delete instance %s')" % (instance_id, logcause) + # self.logger.debug(cmd) + # self.cur.execute(cmd) - #delete instance + # delete instance cmd = "DELETE FROM instances WHERE uuid='%s' AND tenant_id='%s'" % (instance_id, tenant_id) self.cur.execute(cmd) return 1, "instance %s from 
tenant %s DELETED" % (instance_id, tenant_id) except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "delete_instance", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c + r, c = self.format_error(e, "delete_instance", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c def get_ports(self, WHERE): - ''' Obtain ports using the WHERE filtering. + """ Obtain ports using the WHERE filtering. Attributes: 'where_': dict of key:values, translated to key=value AND ... (Optional) Return: a list with dictionarys at each row - ''' - for retry_ in range(0,2): - cmd="" + """ + for retry_ in range(0, 2): + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor(mdb.cursors.DictCursor) select_ = "SELECT uuid,'ACTIVE' as status,admin_state_up,name,net_id,\ tenant_id,type,mac,vlan,switch_port,instance_id,Mbps FROM ports " - if WHERE is None or len(WHERE) == 0: where_ = "" + if WHERE is None or len(WHERE) == 0: + where_ = "" else: - where_ = "WHERE " + " AND ".join(map( lambda x: str(x) + (" is Null" if WHERE[x] is None else "='"+str(WHERE[x])+"'"), WHERE.keys()) ) + where_ = "WHERE " + " AND ".join( + map(lambda x: str(x) + (" is Null" if WHERE[x] is None else "='" + str(WHERE[x]) + "'"), + WHERE.keys())) limit_ = "LIMIT 100" - cmd = " ".join( (select_, where_, limit_) ) - # print "SELECT multiple de instance_ifaces, iface_uuid, external_ports" #print cmd + cmd = " ".join((select_, where_, limit_)) + # print "SELECT multiple de instance_ifaces, iface_uuid, external_ports" #print cmd self.logger.debug(cmd) self.cur.execute(cmd) ports = self.cur.fetchall() - if self.cur.rowcount>0: af.DeleteNone(ports) + if self.cur.rowcount > 0: + af.DeleteNone(ports) return self.cur.rowcount, ports - # return self.get_table(FROM=from_, SELECT=select_,WHERE=where_,LIMIT=100) + # return self.get_table(FROM=from_, SELECT=select_,WHERE=where_,LIMIT=100) except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "get_ports", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c - + r, c = self.format_error(e, "get_ports", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c + def check_target_net(self, net_id, tenant_id, port_type): - '''check if valid attachement of a port into a target net + """check if valid attachement of a port into a target net Attributes: net_id: target net uuid tenant_id: client where tenant belongs. Not used in this version @@ -1740,42 +1906,45 @@ class vim_db(): Return: (0,net_dict) if ok, where net_dict contain 'uuid','type','vlan', ... 
(negative,string-error) if error - ''' - for retry_ in range(0,2): - cmd="" + """ + for retry_ in range(0, 2): + cmd = "" try: - with self.con: + with self.lock, self.con: self.cur = self.con.cursor(mdb.cursors.DictCursor) cmd = "SELECT * FROM nets WHERE uuid='%s'" % net_id self.logger.debug(cmd) self.cur.execute(cmd) - if self.cur.rowcount == 0 : return -1, "network_id %s does not match any net" % net_id + if self.cur.rowcount == 0: + return -1, "network_id %s does not match any net" % net_id net = self.cur.fetchone() break except (mdb.Error, AttributeError) as e: - r,c = self.format_error(e, "check_target_net", cmd) - if r!=-HTTP_Request_Timeout or retry_==1: return r,c - #check permissions + r, c = self.format_error(e, "check_target_net", cmd) + if r != -HTTP_Request_Timeout or retry_ == 1: + return r, c + # check permissions if tenant_id is not None and tenant_id is not "admin": - if net['tenant_id']==tenant_id and net['shared']=='false': + if net['tenant_id'] == tenant_id and net['shared'] == 'false': return -1, "needed admin privileges to attach to the net %s" % net_id - #check types - if (net['type'] in ('ptp','data') and port_type not in ('instance:data','external')) or \ - (net['type'] in ('bridge_data','bridge_man') and port_type not in ('instance:bridge', 'instance:ovs')): + # check types + if (net['type'] in ('ptp', 'data') and port_type not in ('instance:data', 'external')) or \ + (net['type'] in ('bridge_data', 'bridge_man') and port_type not in ('instance:bridge', 'instance:ovs')): return -1, "Cannot attach a port of type %s into a net of type %s" % (port_type, net['type']) if net['type'] == 'ptp': - #look how many - nb_ports, data = self.get_ports( {'net_id':net_id} ) - if nb_ports<0: + # look how many + nb_ports, data = self.get_ports({'net_id': net_id}) + if nb_ports < 0: return -1, data else: if net['provider']: - nb_ports +=1 - if nb_ports >=2: + nb_ports += 1 + if nb_ports >= 2: return -1, "net of type p2p already contain two ports attached. No room for another" - + return 0, net + if __name__ == "__main__": - print "Hello World" + print("Hello World") diff --git a/osm_openvim/vim_schema.py b/osm_openvim/vim_schema.py index 87ab3e2..59225ac 100644 --- a/osm_openvim/vim_schema.py +++ b/osm_openvim/vim_schema.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # diff --git a/test/test_openvim.py b/test/test_openvim.py index b209a57..db88105 100755 --- a/test/test_openvim.py +++ b/test/test_openvim.py @@ -3,7 +3,7 @@ ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U. # This file is part of openvim # All Rights Reserved. # -- 2.25.1
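
Note (not part of the patch): the change this diff repeats across vim_db.py is wrapping every database access in "with self.lock, self.con:" so that one threading.Lock serializes all cursor use inside the class, replacing the external db_lock that callers such as dhcp_thread previously had to acquire around each call. The sketch below is illustrative only; the class and method names (vim_db_sketch, get_instance_uuid) and the simplified format_error are hypothetical stand-ins, and it assumes MySQLdb imported under the mdb alias as in the real module.

    # -*- coding: utf-8 -*-
    # Illustrative sketch only: the lock/retry pattern the patch applies to
    # every vim_db method that touches the shared MySQL connection.
    import threading
    import logging
    import MySQLdb as mdb  # same driver/alias used by osm_openvim/vim_db.py

    HTTP_Request_Timeout = 408  # assumed value, mirroring the -HTTP_Request_Timeout checks in the patch


    class vim_db_sketch(object):
        def __init__(self, host, user, passwd, database):
            self.logger = logging.getLogger("openvim.db.sketch")
            self.lock = threading.Lock()  # single lock guarding self.con / self.cur
            self.con = mdb.connect(host=host, user=user, passwd=passwd, db=database)

        def format_error(self, e, func, cmd):
            # simplified stand-in for vim_db.format_error (the real one also handles reconnects)
            self.logger.error("%s DB Exception %s. Command: %s", func, str(e), cmd)
            return -500, str(e)

        def get_instance_uuid(self, instance_id, tenant_id):
            # hypothetical helper showing the pattern; the patched methods follow the same shape
            for retry_ in range(0, 2):
                cmd = ""
                try:
                    with self.lock, self.con:  # serialize threads, then open the implicit transaction
                        self.cur = self.con.cursor()
                        cmd = "SELECT uuid FROM instances WHERE uuid='%s' AND tenant_id='%s'" \
                              % (instance_id, tenant_id)
                        self.logger.debug(cmd)
                        self.cur.execute(cmd)
                        if self.cur.rowcount == 0:
                            return 0, "instance %s not found in tenant %s" % (instance_id, tenant_id)
                        return 1, self.cur.fetchone()[0]
                except (mdb.Error, AttributeError) as e:
                    r, c = self.format_error(e, "get_instance_uuid", cmd)
                    if r != -HTTP_Request_Timeout or retry_ == 1:
                        return r, c

Because the lock is taken before entering the connection context, the commit or rollback performed when "with self.con" exits also happens under the lock, so the threads that share a vim_db instance (host_thread, dhcp_thread, openflow_thread, the HTTP server) no longer need the per-call db_lock that this patch removes from their constructors.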