1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
'''
This is thread that interact with the host and the libvirt to manage VM
One thread will be launched per host
'''
__author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
__date__ = "$10-jul-2014 12:07:15$"
from jsonschema import validate as js_v, exceptions as js_e
from vim_schema import localinfo_schema, hostinfo_schema
45 # from logging import Logger
46 # import auxiliary_functions as af
48 # TODO: insert a logging system
52 # lvirt_module=None #libvirt module is charged only if not in test mode
54 class host_thread(threading
.Thread
):
57 def __init__(self
, name
, host
, user
, db
, db_lock
, test
, image_path
, host_id
, version
, develop_mode
,
58 develop_bridge_iface
):
63 'host','user': host ip or name to manage and user
64 'db', 'db_lock': database class and lock to use it in exclusion
66 threading
.Thread
.__init
__(self
)
71 self
.db_lock
= db_lock
74 if not test
and not host_thread
.lvirt_module
:
76 module_info
= imp
.find_module("libvirt")
77 host_thread
.lvirt_module
= imp
.load_module("libvirt", *module_info
)
78 except (IOError, ImportError) as e
:
79 raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e
))
82 self
.develop_mode
= develop_mode
83 self
.develop_bridge_iface
= develop_bridge_iface
84 self
.image_path
= image_path
85 self
.host_id
= host_id
86 self
.version
= version
91 self
.server_status
= {} #dictionary with pairs server_uuid:server_status
92 self
.pending_terminate_server
=[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
93 self
.next_update_server_status
= 0 #time when must be check servers status
97 self
.queueLock
= threading
.Lock()
98 self
.taskQueue
= Queue
.Queue(2000)
101 def ssh_connect(self
):
104 self
.ssh_conn
= paramiko
.SSHClient()
105 self
.ssh_conn
.set_missing_host_key_policy(paramiko
.AutoAddPolicy())
106 self
.ssh_conn
.load_system_host_keys()
107 self
.ssh_conn
.connect(self
.host
, username
=self
.user
, timeout
=10) #, None)
108 except paramiko
.ssh_exception
.SSHException
as e
:
110 print self
.name
, ": ssh_connect ssh Exception:", text
112 def load_localinfo(self
):
118 command
= 'mkdir -p ' + self
.image_path
119 #print self.name, ': command:', command
120 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
121 content
= stderr
.read()
123 print self
.name
, ': command:', command
, "stderr:", content
125 command
= 'cat ' + self
.image_path
+ '/.openvim.yaml'
126 #print self.name, ': command:', command
127 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
128 content
= stdout
.read()
129 if len(content
) == 0:
130 print self
.name
, ': command:', command
, "stderr:", stderr
.read()
131 raise paramiko
.ssh_exception
.SSHException("Error empty file ")
132 self
.localinfo
= yaml
.load(content
)
133 js_v(self
.localinfo
, localinfo_schema
)
134 self
.localinfo_dirty
=False
135 if 'server_files' not in self
.localinfo
:
136 self
.localinfo
['server_files'] = {}
137 print self
.name
, ': localinfo load from host'
140 except paramiko
.ssh_exception
.SSHException
as e
:
142 print self
.name
, ": load_localinfo ssh Exception:", text
143 except host_thread
.lvirt_module
.libvirtError
as e
:
144 text
= e
.get_error_message()
145 print self
.name
, ": load_localinfo libvirt Exception:", text
146 except yaml
.YAMLError
as exc
:
148 if hasattr(exc
, 'problem_mark'):
149 mark
= exc
.problem_mark
150 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
151 print self
.name
, ": load_localinfo yaml format Exception", text
152 except js_e
.ValidationError
as e
:
154 if len(e
.path
)>0: text
=" at '" + ":".join(map(str, e
.path
))+"'"
155 print self
.name
, ": load_localinfo format Exception:", text
, e
.message
156 except Exception as e
:
158 print self
.name
, ": load_localinfo Exception:", text
160 #not loaded, insert a default data and force saving by activating dirty flag
161 self
.localinfo
= {'files':{}, 'server_files':{} }
162 #self.localinfo_dirty=True
163 self
.localinfo_dirty
=False
165 def load_hostinfo(self
):
173 command
= 'cat ' + self
.image_path
+ '/hostinfo.yaml'
174 #print self.name, ': command:', command
175 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
176 content
= stdout
.read()
177 if len(content
) == 0:
178 print self
.name
, ': command:', command
, "stderr:", stderr
.read()
179 raise paramiko
.ssh_exception
.SSHException("Error empty file ")
180 self
.hostinfo
= yaml
.load(content
)
181 js_v(self
.hostinfo
, hostinfo_schema
)
182 print self
.name
, ': hostlinfo load from host', self
.hostinfo
185 except paramiko
.ssh_exception
.SSHException
as e
:
187 print self
.name
, ": load_hostinfo ssh Exception:", text
188 except host_thread
.lvirt_module
.libvirtError
as e
:
189 text
= e
.get_error_message()
190 print self
.name
, ": load_hostinfo libvirt Exception:", text
191 except yaml
.YAMLError
as exc
:
193 if hasattr(exc
, 'problem_mark'):
194 mark
= exc
.problem_mark
195 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
196 print self
.name
, ": load_hostinfo yaml format Exception", text
197 except js_e
.ValidationError
as e
:
199 if len(e
.path
)>0: text
=" at '" + ":".join(map(str, e
.path
))+"'"
200 print self
.name
, ": load_hostinfo format Exception:", text
, e
.message
201 except Exception as e
:
203 print self
.name
, ": load_hostinfo Exception:", text
205 #not loaded, insert a default data
208 def save_localinfo(self
, tries
=3):
210 self
.localinfo_dirty
= False
217 command
= 'cat > ' + self
.image_path
+ '/.openvim.yaml'
218 print self
.name
, ': command:', command
219 (stdin
, _
, _
) = self
.ssh_conn
.exec_command(command
)
220 yaml
.safe_dump(self
.localinfo
, stdin
, explicit_start
=True, indent
=4, default_flow_style
=False, tags
=False, encoding
='utf-8', allow_unicode
=True)
221 self
.localinfo_dirty
= False
224 except paramiko
.ssh_exception
.SSHException
as e
:
226 print self
.name
, ": save_localinfo ssh Exception:", text
227 if "SSH session not active" in text
:
229 except host_thread
.lvirt_module
.libvirtError
as e
:
230 text
= e
.get_error_message()
231 print self
.name
, ": save_localinfo libvirt Exception:", text
232 except yaml
.YAMLError
as exc
:
234 if hasattr(exc
, 'problem_mark'):
235 mark
= exc
.problem_mark
236 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
237 print self
.name
, ": save_localinfo yaml format Exception", text
238 except Exception as e
:
240 print self
.name
, ": save_localinfo Exception:", text
242 def load_servers_from_db(self
):
243 self
.db_lock
.acquire()
244 r
,c
= self
.db
.get_table(SELECT
=('uuid','status', 'image_id'), FROM
='instances', WHERE
={'host_id': self
.host_id
})
245 self
.db_lock
.release()
247 self
.server_status
= {}
249 print self
.name
, ": Error getting data from database:", c
252 self
.server_status
[ server
['uuid'] ] = server
['status']
254 #convert from old version to new one
255 if 'inc_files' in self
.localinfo
and server
['uuid'] in self
.localinfo
['inc_files']:
256 server_files_dict
= {'source file': self
.localinfo
['inc_files'][ server
['uuid'] ] [0], 'file format':'raw' }
257 if server_files_dict
['source file'][-5:] == 'qcow2':
258 server_files_dict
['file format'] = 'qcow2'
260 self
.localinfo
['server_files'][ server
['uuid'] ] = { server
['image_id'] : server_files_dict
}
261 if 'inc_files' in self
.localinfo
:
262 del self
.localinfo
['inc_files']
263 self
.localinfo_dirty
= True
265 def delete_unused_files(self
):
266 '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
267 Deletes unused entries at self.loacalinfo and the corresponding local files.
268 The only reason for this mismatch is the manual deletion of instances (VM) at database
272 for uuid
,images
in self
.localinfo
['server_files'].items():
273 if uuid
not in self
.server_status
:
274 for localfile
in images
.values():
276 print self
.name
, ": deleting file '%s' of unused server '%s'" %(localfile
['source file'], uuid
)
277 self
.delete_file(localfile
['source file'])
278 except paramiko
.ssh_exception
.SSHException
as e
:
279 print self
.name
, ": Exception deleting file '%s': %s" %(localfile
['source file'], str(e
))
280 del self
.localinfo
['server_files'][uuid
]
281 self
.localinfo_dirty
= True
283 def insert_task(self
, task
, *aditional
):
285 self
.queueLock
.acquire()
286 task
= self
.taskQueue
.put( (task
,) + aditional
, timeout
=5)
287 self
.queueLock
.release()
290 return -1, "timeout inserting a task over host " + self
.name
294 self
.load_localinfo()
296 self
.load_servers_from_db()
297 self
.delete_unused_files()
299 self
.queueLock
.acquire()
300 if not self
.taskQueue
.empty():
301 task
= self
.taskQueue
.get()
304 self
.queueLock
.release()
308 if self
.localinfo_dirty
:
309 self
.save_localinfo()
310 elif self
.next_update_server_status
< now
:
311 self
.update_servers_status()
312 self
.next_update_server_status
= now
+ 5
313 elif len(self
.pending_terminate_server
)>0 and self
.pending_terminate_server
[0][0]<now
:
314 self
.server_forceoff()
319 if task
[0] == 'instance':
320 print self
.name
, ": processing task instance", task
[1]['action']
324 r
=self
.action_on_server(task
[1], retry
==2)
327 elif task
[0] == 'image':
329 elif task
[0] == 'exit':
330 print self
.name
, ": processing task exit"
333 elif task
[0] == 'reload':
334 print self
.name
, ": processing task reload terminating and relaunching"
337 elif task
[0] == 'edit-iface':
338 print self
.name
, ": processing task edit-iface port=%s, old_net=%s, new_net=%s" % (task
[1], task
[2], task
[3])
339 self
.edit_iface(task
[1], task
[2], task
[3])
340 elif task
[0] == 'restore-iface':
341 print self
.name
, ": processing task restore-iface %s mac=%s" % (task
[1], task
[2])
342 self
.restore_iface(task
[1], task
[2])
343 elif task
[0] == 'new-ovsbridge':
344 print self
.name
, ": Creating compute OVS bridge"
345 self
.create_ovs_bridge()
347 elif task
[0] == 'new-vxlan':
348 print self
.name
, ": Creating vxlan tunnel=" + task
[1] + ", remote ip=" + task
[2]
349 self
.create_ovs_vxlan_tunnel(task
[1], task
[2])
351 elif task
[0] == 'del-ovsbridge':
352 print self
.name
, ": Deleting OVS bridge"
353 self
.delete_ovs_bridge()
355 elif task
[0] == 'del-vxlan':
356 print self
.name
, ": Deleting vxlan " + task
[1] + " tunnel"
357 self
.delete_ovs_vxlan_tunnel(task
[1])
359 elif task
[0] == 'create-ovs-bridge-port':
360 print self
.name
, ": Adding port ovim-" + task
[1] + " to OVS bridge"
361 self
.create_ovs_bridge_port(task
[1])
362 elif task
[0] == 'del-ovs-port':
363 self
.delete_bridge_port_attached_to_ovs(task
[1], task
[2])
365 print self
.name
, ": unknown task", task
367 def server_forceoff(self
, wait_until_finished
=False):
368 while len(self
.pending_terminate_server
)>0:
370 if self
.pending_terminate_server
[0][0]>now
:
371 if wait_until_finished
:
376 req
={'uuid':self
.pending_terminate_server
[0][1],
377 'action':{'terminate':'force'},
380 self
.action_on_server(req
)
381 self
.pending_terminate_server
.pop(0)
385 self
.server_forceoff(True)
386 if self
.localinfo_dirty
:
387 self
.save_localinfo()
389 self
.ssh_conn
.close()
390 except Exception as e
:
392 print self
.name
, ": terminate Exception:", text
393 print self
.name
, ": exit from host_thread"
395 def get_local_iface_name(self
, generic_name
):
396 if self
.hostinfo
!= None and "iface_names" in self
.hostinfo
and generic_name
in self
.hostinfo
["iface_names"]:
397 return self
.hostinfo
["iface_names"][generic_name
]
400 def create_xml_server(self
, server
, dev_list
, server_metadata
={}):
401 """Function that implements the generation of the VM XML definition.
402 Additional devices are in dev_list list
403 The main disk is upon dev_list[0]"""
405 #get if operating system is Windows
407 os_type
= server_metadata
.get('os_type', None)
408 if os_type
== None and 'metadata' in dev_list
[0]:
409 os_type
= dev_list
[0]['metadata'].get('os_type', None)
410 if os_type
!= None and os_type
.lower() == "windows":
412 #get type of hard disk bus
413 bus_ide
= True if windows_os
else False
414 bus
= server_metadata
.get('bus', None)
415 if bus
== None and 'metadata' in dev_list
[0]:
416 bus
= dev_list
[0]['metadata'].get('bus', None)
418 bus_ide
= True if bus
=='ide' else False
422 text
= "<domain type='kvm'>"
424 topo
= server_metadata
.get('topology', None)
425 if topo
== None and 'metadata' in dev_list
[0]:
426 topo
= dev_list
[0]['metadata'].get('topology', None)
428 name
= server
.get('name','') + "_" + server
['uuid']
429 name
= name
[:58] #qemu impose a length limit of 59 chars or not start. Using 58
430 text
+= self
.inc_tab() + "<name>" + name
+ "</name>"
432 text
+= self
.tab() + "<uuid>" + server
['uuid'] + "</uuid>"
435 if 'extended' in server
and server
['extended']!=None and 'numas' in server
['extended']:
436 numa
= server
['extended']['numas'][0]
439 memory
= int(numa
.get('memory',0))*1024*1024 #in KiB
441 memory
= int(server
['ram'])*1024;
443 if not self
.develop_mode
:
446 return -1, 'No memory assigned to instance'
448 text
+= self
.tab() + "<memory unit='KiB'>" +memory
+"</memory>"
449 text
+= self
.tab() + "<currentMemory unit='KiB'>" +memory
+ "</currentMemory>"
451 text
+= self
.tab()+'<memoryBacking>'+ \
452 self
.inc_tab() + '<hugepages/>'+ \
453 self
.dec_tab()+ '</memoryBacking>'
456 use_cpu_pinning
=False
457 vcpus
= int(server
.get("vcpus",0))
459 if 'cores-source' in numa
:
461 for index
in range(0, len(numa
['cores-source'])):
462 cpu_pinning
.append( [ numa
['cores-id'][index
], numa
['cores-source'][index
] ] )
464 if 'threads-source' in numa
:
466 for index
in range(0, len(numa
['threads-source'])):
467 cpu_pinning
.append( [ numa
['threads-id'][index
], numa
['threads-source'][index
] ] )
469 if 'paired-threads-source' in numa
:
471 for index
in range(0, len(numa
['paired-threads-source'])):
472 cpu_pinning
.append( [numa
['paired-threads-id'][index
][0], numa
['paired-threads-source'][index
][0] ] )
473 cpu_pinning
.append( [numa
['paired-threads-id'][index
][1], numa
['paired-threads-source'][index
][1] ] )
476 if use_cpu_pinning
and not self
.develop_mode
:
477 text
+= self
.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning
)) +"</vcpu>" + \
478 self
.tab()+'<cputune>'
480 for i
in range(0, len(cpu_pinning
)):
481 text
+= self
.tab() + "<vcpupin vcpu='" +str(cpu_pinning
[i
][0])+ "' cpuset='" +str(cpu_pinning
[i
][1]) +"'/>"
482 text
+= self
.dec_tab()+'</cputune>'+ \
483 self
.tab() + '<numatune>' +\
484 self
.inc_tab() + "<memory mode='strict' nodeset='" +str(numa
['source'])+ "'/>" +\
485 self
.dec_tab() + '</numatune>'
488 return -1, "Instance without number of cpus"
489 text
+= self
.tab()+"<vcpu>" + str(vcpus
) + "</vcpu>"
494 if dev
['type']=='cdrom' :
497 text
+= self
.tab()+ '<os>' + \
498 self
.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
500 text
+= self
.tab() + "<boot dev='cdrom'/>"
501 text
+= self
.tab() + "<boot dev='hd'/>" + \
502 self
.dec_tab()+'</os>'
504 text
+= self
.tab()+'<features>'+\
505 self
.inc_tab()+'<acpi/>' +\
506 self
.tab()+'<apic/>' +\
507 self
.tab()+'<pae/>'+ \
508 self
.dec_tab() +'</features>'
509 if windows_os
or topo
=="oneSocket":
510 text
+= self
.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>"% vcpus
512 text
+= self
.tab() + "<cpu mode='host-model'></cpu>"
513 text
+= self
.tab() + "<clock offset='utc'/>" +\
514 self
.tab() + "<on_poweroff>preserve</on_poweroff>" + \
515 self
.tab() + "<on_reboot>restart</on_reboot>" + \
516 self
.tab() + "<on_crash>restart</on_crash>"
517 text
+= self
.tab() + "<devices>" + \
518 self
.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
519 self
.tab() + "<serial type='pty'>" +\
520 self
.inc_tab() + "<target port='0'/>" + \
521 self
.dec_tab() + "</serial>" +\
522 self
.tab() + "<console type='pty'>" + \
523 self
.inc_tab()+ "<target type='serial' port='0'/>" + \
524 self
.dec_tab()+'</console>'
526 text
+= self
.tab() + "<controller type='usb' index='0'/>" + \
527 self
.tab() + "<controller type='ide' index='0'/>" + \
528 self
.tab() + "<input type='mouse' bus='ps2'/>" + \
529 self
.tab() + "<sound model='ich6'/>" + \
530 self
.tab() + "<video>" + \
531 self
.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
532 self
.dec_tab() + "</video>" + \
533 self
.tab() + "<memballoon model='virtio'/>" + \
534 self
.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
536 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
537 #> self.dec_tab()+'</hostdev>\n' +\
538 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
540 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
542 #If image contains 'GRAPH' include graphics
543 #if 'GRAPH' in image:
544 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
545 self
.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
546 self
.dec_tab() + "</graphics>"
550 bus_ide_dev
= bus_ide
551 if dev
['type']=='cdrom' or dev
['type']=='disk':
552 if dev
['type']=='cdrom':
554 text
+= self
.tab() + "<disk type='file' device='"+dev
['type']+"'>"
555 if 'file format' in dev
:
556 text
+= self
.inc_tab() + "<driver name='qemu' type='" +dev
['file format']+ "' cache='writethrough'/>"
557 if 'source file' in dev
:
558 text
+= self
.tab() + "<source file='" +dev
['source file']+ "'/>"
559 #elif v['type'] == 'block':
560 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
562 # return -1, 'Unknown disk type ' + v['type']
563 vpci
= dev
.get('vpci',None)
565 vpci
= dev
['metadata'].get('vpci',None)
566 text
+= self
.pci2xml(vpci
)
569 text
+= self
.tab() + "<target dev='hd" +vd_index
+ "' bus='ide'/>" #TODO allows several type of disks
571 text
+= self
.tab() + "<target dev='vd" +vd_index
+ "' bus='virtio'/>"
572 text
+= self
.dec_tab() + '</disk>'
573 vd_index
= chr(ord(vd_index
)+1)
574 elif dev
['type']=='xml':
575 dev_text
= dev
['xml']
577 dev_text
= dev_text
.replace('__vpci__', dev
['vpci'])
578 if 'source file' in dev
:
579 dev_text
= dev_text
.replace('__file__', dev
['source file'])
580 if 'file format' in dev
:
581 dev_text
= dev_text
.replace('__format__', dev
['source file'])
582 if '__dev__' in dev_text
:
583 dev_text
= dev_text
.replace('__dev__', vd_index
)
584 vd_index
= chr(ord(vd_index
)+1)
587 return -1, 'Unknown device type ' + dev
['type']
590 bridge_interfaces
= server
.get('networks', [])
591 for v
in bridge_interfaces
:
593 self
.db_lock
.acquire()
594 result
, content
= self
.db
.get_table(FROM
='nets', SELECT
=('provider',),WHERE
={'uuid':v
['net_id']} )
595 self
.db_lock
.release()
597 print "create_xml_server ERROR getting nets",result
, content
599 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
600 #I know it is not secure
601 #for v in sorted(desc['network interfaces'].itervalues()):
602 model
= v
.get("model", None)
603 if content
[0]['provider']=='default':
604 text
+= self
.tab() + "<interface type='network'>" + \
605 self
.inc_tab() + "<source network='" +content
[0]['provider']+ "'/>"
606 elif content
[0]['provider'][0:7]=='macvtap':
607 text
+= self
.tab()+"<interface type='direct'>" + \
608 self
.inc_tab() + "<source dev='" + self
.get_local_iface_name(content
[0]['provider'][8:]) + "' mode='bridge'/>" + \
609 self
.tab() + "<target dev='macvtap0'/>"
611 text
+= self
.tab() + "<alias name='net" + str(net_nb
) + "'/>"
614 elif content
[0]['provider'][0:6]=='bridge':
615 text
+= self
.tab() + "<interface type='bridge'>" + \
616 self
.inc_tab()+"<source bridge='" +self
.get_local_iface_name(content
[0]['provider'][7:])+ "'/>"
618 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
619 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
622 elif content
[0]['provider'][0:3] == "OVS":
623 vlan
= content
[0]['provider'].replace('OVS:', '')
624 text
+= self
.tab() + "<interface type='bridge'>" + \
625 self
.inc_tab() + "<source bridge='ovim-" + vlan
+ "'/>"
627 return -1, 'Unknown Bridge net provider ' + content
[0]['provider']
629 text
+= self
.tab() + "<model type='" +model
+ "'/>"
630 if v
.get('mac_address', None) != None:
631 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
632 text
+= self
.pci2xml(v
.get('vpci',None))
633 text
+= self
.dec_tab()+'</interface>'
637 interfaces
= numa
.get('interfaces', [])
641 if self
.develop_mode
: #map these interfaces to bridges
642 text
+= self
.tab() + "<interface type='bridge'>" + \
643 self
.inc_tab()+"<source bridge='" +self
.develop_bridge_iface
+ "'/>"
645 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
646 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
648 text
+= self
.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
649 if v
.get('mac_address', None) != None:
650 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
651 text
+= self
.pci2xml(v
.get('vpci',None))
652 text
+= self
.dec_tab()+'</interface>'
655 if v
['dedicated'] == 'yes': #passthrought
656 text
+= self
.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
657 self
.inc_tab() + "<source>"
659 text
+= self
.pci2xml(v
['source'])
660 text
+= self
.dec_tab()+'</source>'
661 text
+= self
.pci2xml(v
.get('vpci',None))
663 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
664 text
+= self
.dec_tab()+'</hostdev>'
666 else: #sriov_interfaces
667 #skip not connected interfaces
668 if v
.get("net_id") == None:
670 text
+= self
.tab() + "<interface type='hostdev' managed='yes'>"
672 if v
.get('mac_address', None) != None:
673 text
+= self
.tab() + "<mac address='" +v
['mac_address']+ "'/>"
674 text
+= self
.tab()+'<source>'
676 text
+= self
.pci2xml(v
['source'])
677 text
+= self
.dec_tab()+'</source>'
678 if v
.get('vlan',None) != None:
679 text
+= self
.tab() + "<vlan> <tag id='" + str(v
['vlan']) + "'/> </vlan>"
680 text
+= self
.pci2xml(v
.get('vpci',None))
682 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
683 text
+= self
.dec_tab()+'</interface>'
686 text
+= self
.dec_tab()+'</devices>'+\
687 self
.dec_tab()+'</domain>'
690 def pci2xml(self
, pci
):
691 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
692 alows an empty pci text'''
695 first_part
= pci
.split(':')
696 second_part
= first_part
[2].split('.')
697 return self
.tab() + "<address type='pci' domain='0x" + first_part
[0] + \
698 "' bus='0x" + first_part
[1] + "' slot='0x" + second_part
[0] + \
699 "' function='0x" + second_part
[1] + "'/>"
702 """Return indentation according to xml_level"""
703 return "\n" + (' '*self
.xml_level
)
706 """Increment and return indentation according to xml_level"""
711 """Decrement and return indentation according to xml_level"""
715 def create_ovs_bridge(self
):
717 Create a bridge in compute OVS to allocate VMs
718 :return: True if success
722 command
= 'sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true'
723 print self
.name
, ': command:', command
724 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
725 content
= stdout
.read()
726 if len(content
) == 0:
731 def delete_port_to_ovs_bridge(self
, vlan
, net_uuid
):
733 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
734 :param vlan: vlan port id
735 :param net_uuid: network id
742 port_name
= 'ovim-' + vlan
743 command
= 'sudo ovs-vsctl del-port br-int ' + port_name
744 print self
.name
, ': command:', command
745 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
746 content
= stdout
.read()
747 if len(content
) == 0:
752 def delete_dhcp_server(self
, vlan
, net_uuid
, dhcp_path
):
754 Delete dhcp server process lining in namespace
755 :param vlan: segmentation id
756 :param net_uuid: network uuid
757 :param dhcp_path: conf fiel path that live in namespace side
762 if not self
.is_dhcp_port_free(vlan
, net_uuid
):
765 net_namespace
= 'ovim-' + vlan
766 dhcp_path
= os
.path
.join(dhcp_path
, net_namespace
)
767 pid_file
= os
.path
.join(dhcp_path
, 'dnsmasq.pid')
769 command
= 'sudo ip netns exec ' + net_namespace
+ ' cat ' + pid_file
770 print self
.name
, ': command:', command
771 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
772 content
= stdout
.read()
774 command
= 'sudo ip netns exec ' + net_namespace
+ ' kill -9 ' + content
775 print self
.name
, ': command:', command
776 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
777 content
= stdout
.read()
779 # if len(content) == 0:
784 def is_dhcp_port_free(self
, host_id
, net_uuid
):
786 Check if any port attached to the a net in a vxlan mesh across computes nodes
787 :param host_id: host id
788 :param net_uuid: network id
789 :return: True if is not free
791 self
.db_lock
.acquire()
792 result
, content
= self
.db
.get_table(
794 WHERE
={'p.type': 'instance:ovs', 'p.net_id': net_uuid
}
796 self
.db_lock
.release()
803 def is_port_free(self
, host_id
, net_uuid
):
805 Check if there not ovs ports of a network in a compute host.
806 :param host_id: host id
807 :param net_uuid: network id
808 :return: True if is not free
811 self
.db_lock
.acquire()
812 result
, content
= self
.db
.get_table(
813 FROM
='ports as p join instances as i on p.instance_id=i.uuid',
814 WHERE
={"i.host_id": self
.host_id
, 'p.type': 'instance:ovs', 'p.net_id': net_uuid
}
816 self
.db_lock
.release()
823 def add_port_to_ovs_bridge(self
, vlan
):
825 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
826 :param vlan: vlan port id
827 :return: True if success
833 port_name
= 'ovim-' + vlan
834 command
= 'sudo ovs-vsctl add-port br-int ' + port_name
+ ' tag=' + vlan
835 print self
.name
, ': command:', command
836 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
837 content
= stdout
.read()
838 if len(content
) == 0:
843 def delete_dhcp_port(self
, vlan
, net_uuid
):
845 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
846 :param vlan: segmentation id
847 :param net_uuid: network id
848 :return: True if success
854 if not self
.is_dhcp_port_free(vlan
, net_uuid
):
856 self
.delete_dhcp_interfaces(vlan
)
859 def delete_bridge_port_attached_to_ovs(self
, vlan
, net_uuid
):
861 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
864 :return: True if success
869 if not self
.is_port_free(vlan
, net_uuid
):
871 self
.delete_port_to_ovs_bridge(vlan
, net_uuid
)
872 self
.delete_linux_bridge(vlan
)
875 def delete_linux_bridge(self
, vlan
):
877 Delete a linux bridge in a scpecific compute.
878 :param vlan: vlan port id
879 :return: True if success
885 port_name
= 'ovim-' + vlan
886 command
= 'sudo ip link set dev veth0-' + vlan
+ ' down'
887 print self
.name
, ': command:', command
888 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
889 content
= stdout
.read()
891 # if len(content) != 0:
894 command
= 'sudo ifconfig ' + port_name
+ ' down && sudo brctl delbr ' + port_name
895 print self
.name
, ': command:', command
896 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
897 content
= stdout
.read()
898 if len(content
) == 0:
903 def create_ovs_bridge_port(self
, vlan
):
905 Generate a linux bridge and attache the port to a OVS bridge
906 :param vlan: vlan port id
911 self
.create_linux_bridge(vlan
)
912 self
.add_port_to_ovs_bridge(vlan
)
914 def create_linux_bridge(self
, vlan
):
916 Create a linux bridge with STP active
917 :param vlan: netowrk vlan id
924 port_name
= 'ovim-' + vlan
925 command
= 'sudo brctl show | grep ' + port_name
926 print self
.name
, ': command:', command
927 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
928 content
= stdout
.read()
930 # if exist nothing to create
931 # if len(content) == 0:
934 command
= 'sudo brctl addbr ' + port_name
935 print self
.name
, ': command:', command
936 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
937 content
= stdout
.read()
939 # if len(content) == 0:
944 command
= 'sudo brctl stp ' + port_name
+ ' on'
945 print self
.name
, ': command:', command
946 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
947 content
= stdout
.read()
949 # if len(content) == 0:
953 command
= 'sudo ip link set dev ' + port_name
+ ' up'
954 print self
.name
, ': command:', command
955 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
956 content
= stdout
.read()
958 if len(content
) == 0:
963 def set_mac_dhcp_server(self
, ip
, mac
, vlan
, netmask
, dhcp_path
):
965 Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
966 :param ip: IP address asigned to a VM
967 :param mac: VM vnic mac to be macthed with the IP received
968 :param vlan: Segmentation id
969 :param netmask: netmask value
970 :param path: dhcp conf file path that live in namespace side
971 :return: True if success
977 net_namespace
= 'ovim-' + vlan
978 dhcp_path
= os
.path
.join(dhcp_path
, net_namespace
)
979 dhcp_hostsdir
= os
.path
.join(dhcp_path
, net_namespace
)
984 ip_data
= mac
.upper() + ',' + ip
986 command
= 'sudo ip netns exec ' + net_namespace
+ ' touch ' + dhcp_hostsdir
987 print self
.name
, ': command:', command
988 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
989 content
= stdout
.read()
991 command
= 'sudo ip netns exec ' + net_namespace
+ ' sudo bash -ec "echo ' + ip_data
+ ' >> ' + dhcp_hostsdir
+ '"'
993 print self
.name
, ': command:', command
994 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
995 content
= stdout
.read()
997 if len(content
) == 0:
1002 def delete_mac_dhcp_server(self
, ip
, mac
, vlan
, dhcp_path
):
1004 Delete into dhcp conf file the ip assigned to a specific MAC address
1006 :param ip: IP address asigned to a VM
1007 :param mac: VM vnic mac to be macthed with the IP received
1008 :param vlan: Segmentation id
1009 :param dhcp_path: dhcp conf file path that live in namespace side
1016 net_namespace
= 'ovim-' + vlan
1017 dhcp_path
= os
.path
.join(dhcp_path
, net_namespace
)
1018 dhcp_hostsdir
= os
.path
.join(dhcp_path
, net_namespace
)
1023 ip_data
= mac
.upper() + ',' + ip
1025 command
= 'sudo ip netns exec ' + net_namespace
+ ' sudo sed -i \'/' + ip_data
+ '/d\' ' + dhcp_hostsdir
1026 print self
.name
, ': command:', command
1027 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1028 content
= stdout
.read()
1030 if len(content
) == 0:
def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path):
    """
    Launch a dnsmasq dhcp server inside the per-vlan network namespace,
    bound to the namespace-side tap interface.

    :param vlan: Segmentation id
    :param ip_range: IP dhcp range
    :param netmask: network netmask
    :param dhcp_path: dhcp conf file path that live in namespace side
    :return: True if success
    """
    # NOTE(review): some original lines are elided in this extraction
    # (presumably the test-mode early return and the final return).
    interface = 'tap-' + vlan
    net_namespace = 'ovim-' + vlan
    dhcp_path = os.path.join(dhcp_path, net_namespace)
    leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
    pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

    # dnsmasq --dhcp-range argument: "start,end,netmask"
    dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask

    # make sure the per-namespace config directory exists
    command = 'sudo ip netns exec ' + net_namespace + ' mkdir -p ' + dhcp_path
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # read a previously stored dnsmasq pid, if any
    pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
    command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_path
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()
    # check if pid is runing
    pid_status_path = content
    command = "ps aux | awk '{print $2 }' | grep " + pid_status_path
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()
    # NOTE(review): the result of the pid check above is read but any guard
    # using it is elided in this extraction — TODO confirm against upstream.

    command = 'sudo ip netns exec ' + net_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
              '--interface=' + interface + ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
              ' --dhcp-range ' + dhcp_range + ' --pid-file=' + pid_file + ' --dhcp-leasefile=' + leases_path + \
              ' --listen-address ' + ip_range[0]

    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.readline()

    # an empty first output line is treated as success by the (elided) return
    if len(content) == 0:
def delete_dhcp_interfaces(self, vlan):
    """
    Tear down the veth pair that connects the per-vlan dhcp namespace to the
    OVS bridge (the original docstring said "Create a linux bridge" — the
    code below clearly deletes/downs interfaces).

    :param vlan: netowrk vlan id
    """
    # NOTE(review): the test-mode early return (and any final return) are
    # elided in this extraction.
    net_namespace = 'ovim-' + vlan

    # detach the host-side veth end from br-int
    command = 'sudo ovs-vsctl del-port br-int ovs-tap-' + vlan
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # bring down the namespace-side end
    command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' down'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # bring down the host-side end
    command = 'sudo ip link set dev ovs-tap-' + vlan + ' down'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()
def create_dhcp_interfaces(self, vlan, ip, netmask):
    """
    Create the per-vlan dhcp network namespace and wire it to the OVS
    bridge with a veth pair: tap-<vlan> lives inside the namespace,
    ovs-tap-<vlan> is plugged into br-int with the vlan tag.

    :param vlan: segmentation id
    :param ip: Ip included in the dhcp range for the tap interface living in namesapce side
    :param netmask: dhcp net CIDR
    :return: True if success
    """
    # NOTE(review): the test-mode early return and the final return are
    # elided in this extraction.
    net_namespace = 'ovim-' + vlan
    namespace_interface = 'tap-' + vlan

    command = 'sudo ip netns add ' + net_namespace
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # create the veth pair tap-<vlan> <-> ovs-tap-<vlan>
    command = 'sudo ip link add tap-' + vlan + ' type veth peer name ovs-tap-' + vlan
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # plug the host-side end into br-int, tagged with the vlan
    command = 'sudo ovs-vsctl add-port br-int ovs-tap-' + vlan + ' tag=' + vlan
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # move the other end into the namespace
    command = 'sudo ip link set tap-' + vlan + ' netns ' + net_namespace
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' up'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    command = 'sudo ip link set dev ovs-tap-' + vlan + ' up'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # give the namespace-side interface its address inside the dhcp range
    command = 'sudo ip netns exec ' + net_namespace + ' ' + ' ifconfig ' + namespace_interface \
              + ' ' + ip + ' netmask ' + netmask
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # empty stdout of the last command is treated as success by the
    # (elided) return logic
    if len(content) == 0:
def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
    """
    Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level

    :param vxlan_interface: vlxan inteface name.
    :param remote_ip: tunnel endpoint remote compute ip.
    :return:
    """
    # NOTE(review): the test-mode early return and the final return are
    # elided in this extraction.
    # One ovs-vsctl invocation: add the port, set its type/remote endpoint,
    # and raise its STP path cost so inter-host links are less preferred.
    command = 'sudo ovs-vsctl add-port br-int ' + vxlan_interface + \
              ' -- set Interface ' + vxlan_interface + ' type=vxlan options:remote_ip=' + remote_ip + \
              ' -- set Port ' + vxlan_interface + ' other_config:stp-path-cost=10'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # empty stdout is treated as success by the (elided) return logic
    if len(content) == 0:
def delete_ovs_vxlan_tunnel(self, vxlan_interface):
    """
    Delete a vlxan tunnel port from a OVS brdige.

    :param vxlan_interface: vlxan name to be delete it.
    :return: True if success.
    """
    # NOTE(review): the test-mode early return and the final return are
    # elided in this extraction.
    command = 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # empty stdout is treated as success by the (elided) return logic
    if len(content) == 0:
def delete_ovs_bridge(self):
    """
    Delete a OVS bridge from a compute.
    :return: True if success
    """
    # NOTE(review): the test-mode early return and the final return are
    # elided in this extraction. Note the bridge name 'br-int' is hard-coded,
    # matching the other OVS helpers in this class.
    command = 'sudo ovs-vsctl del-br br-int'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()
    # empty stdout is treated as success by the (elided) return logic
    if len(content) == 0:
1225 def get_file_info(self
, path
):
1226 command
= 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1227 print self
.name
, ': command:', command
1228 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1229 content
= stdout
.read()
1230 if len(content
) == 0:
1231 return None # file does not exist
1233 return content
.split(" ") #(permission, 1, owner, group, size, date, file)
def qemu_get_info(self, path):
    """Run 'qemu-img info' on a remote image and return the parsed result.

    path: image file path on the managed host.
    Returns the yaml-parsed output (qemu-img's "key: value" lines parse as
    a mapping).
    Raises paramiko SSHException if qemu-img produced no output or its
    output cannot be parsed.
    """
    command = 'qemu-img info ' + path
    print self.name, ': command:', command
    (_, stdout, stderr) = self.ssh_conn.exec_command(command)
    content = stdout.read()
    if len(content) == 0:
        error = stderr.read()
        print self.name, ": get_qemu_info error ", error
        raise paramiko.ssh_exception.SSHException("Error getting qemu_info: " + error)
    # NOTE(review): the else/try lines wrapping the parse below are elided
    # in this extraction.
            # HACK: yaml.load without SafeLoader — tolerable only because the
            # input is locally produced qemu-img output, not untrusted data.
            return yaml.load(content)
        except yaml.YAMLError as exc:
            # 'text' is presumably initialized to "" in an elided line
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
            print self.name, ": get_qemu_info yaml format Exception", text
            raise paramiko.ssh_exception.SSHException("Error getting qemu_info yaml format" + text)
def qemu_change_backing(self, inc_file, new_backing_file):
    """Repoint the backing file of an incremental image.

    Uses 'qemu-img rebase -u' (unsafe mode: only the recorded backing-file
    name is rewritten, no data is copied).
    inc_file: incremental image to modify, on the managed host.
    new_backing_file: path of the new backing image.
    """
    command = 'qemu-img rebase -u -b ' + new_backing_file + ' ' + inc_file
    print self.name, ': command:', command
    (_, _, stderr) = self.ssh_conn.exec_command(command)
    content = stderr.read()
    # empty stderr means success; the success/else return lines are elided
    # in this extraction
    if len(content) == 0:
    print self.name, ": qemu_change_backing error: ", content
def get_notused_filename(self, proposed_name, suffix=''):
    '''Look for a non existing file_name in the host
    proposed_name: proposed file name, includes path
    suffix: suffix to be added to the name, before the extention
    '''
    extension = proposed_name.rfind(".")
    slash = proposed_name.rfind("/")
    # a dot before the last slash belongs to a directory, not an extension
    if extension < 0 or extension < slash: # no extension
        extension = len(proposed_name)
    # first candidate: insert the suffix just before the extension
    target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
    info = self.get_file_info(target_name)
    # NOTE(review): the early return for the no-collision case and the
    # initialization/increment of 'index' are elided in this extraction —
    # presumably a counter appended as "-<index>" until the name is free.
    while info is not None:
        # collision: try "<name><suffix>-<index><ext>"
        target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
        info = self.get_file_info(target_name)
    # NOTE(review): final 'return target_name' elided in this extraction
def get_notused_path(self, proposed_path, suffix=''):
    '''Look for a non existing path at database for images
    proposed_path: proposed file name, includes path
    suffix: suffix to be added to the name, before the extention
    '''
    extension = proposed_path.rfind(".")
    # NOTE(review): the guard for the "no extension" case is elided in this
    # extraction; the assignment below is presumably conditional on it.
        extension = len(proposed_path)
    # first candidate: insert the suffix just before the extension
    target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
    # NOTE(review): the loop header and the initialization/increment of
    # 'index' are elided; the DB probe below presumably repeats until no
    # 'images' row matches the candidate path — TODO confirm.
    r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
    # collision: try "<path><suffix>-<index><ext>"
    target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
1306 def delete_file(self
, file_name
):
1307 command
= 'rm -f '+file_name
1308 print self
.name
, ': command:', command
1309 (_
, _
, stderr
) = self
.ssh_conn
.exec_command(command
)
1310 error_msg
= stderr
.read()
1311 if len(error_msg
) > 0:
1312 raise paramiko
.ssh_exception
.SSHException("Error deleting file: " + error_msg
)
def copy_file(self, source, destination, perserve_time=True):
    """Copy an image file on the managed host.

    source: a path on the host, or an http(s) URL (fetched with wget).
    destination: target path on the host.
    perserve_time: when plain-copying, keep the source timestamps
    (presumably guarded by an elided 'if perserve_time:' — see note below).
    Raises paramiko SSHException if the remote command reports an error.
    """
    if source[0:4]=="http":
        # download: wget's report is redirected to <dst>.result so a failure
        # can be echoed to stderr; the report file is removed on success
        command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
            dst=destination, src=source, dst_result=destination + ".result" )
    # NOTE(review): the 'else:' header and the 'if perserve_time:' guard are
    # elided in this extraction; the three statements below belong to that
    # plain-copy branch.
        command = 'cp --no-preserve=mode'
        command += ' --preserve=timestamps'
        command += " '{}' '{}'".format(source, destination)
    print self.name, ': command:', command
    (_, _, stderr) = self.ssh_conn.exec_command(command)
    error_msg = stderr.read()
    if len(error_msg) > 0:
        raise paramiko.ssh_exception.SSHException("Error copying image to local host: " + error_msg)
def copy_remote_file(self, remote_file, use_incremental):
    ''' Copy a file from the repository to local folder and recursively
    copy the backing files in case the remote file is incremental
    Read and/or modified self.localinfo['files'] that contain the
    unmodified copies of images in the local path
    params:
        remote_file: path of remote file
        use_incremental: None (leave the decision to this function), True, False
    return:
        local_file: name of local file
        qemu_info: dict with quemu information of local file
        use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
    '''
    # NOTE(review): a handful of original lines are elided in this
    # extraction (marked inline); 'local_file' is presumably initialized to
    # None near the top — the "copy the file" branch below tests it.
    use_incremental_out = use_incremental
    new_backing_file = None
    file_from_local = True

    #in case incremental use is not decided, take the decision depending on the image
    #avoid the use of incremental if this image is already incremental
    if remote_file[0:4] == "http":
        file_from_local = False
    # NOTE(review): guard elided — qemu_get_info is presumably only run for
    # local (non-http) files, matching the file_from_local checks below.
        qemu_remote_info = self.qemu_get_info(remote_file)
    if use_incremental_out==None:
        # use an incremental overlay unless the source is itself incremental
        use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)

    #copy recursivelly the backing files
    if file_from_local and 'backing file' in qemu_remote_info:
        new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

    #check if remote file is present locally
    if use_incremental_out and remote_file in self.localinfo['files']:
        local_file = self.localinfo['files'][remote_file]
        local_file_info = self.get_file_info(local_file)
        remote_file_info = self.get_file_info(remote_file)
        if local_file_info == None:
            # NOTE(review): handling elided (cached copy vanished on disk)
        elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
            # indexes [4]/[5] are the size/date fields of get_file_info()
            #local copy of file not valid because date or size are different.
            #TODO DELETE local file if this file is not used by any active virtual machine
            self.delete_file(local_file)
            del self.localinfo['files'][remote_file]
        else: #check that the local file has the same backing file, or there are not backing at all
            qemu_info = self.qemu_get_info(local_file)
            if new_backing_file != qemu_info.get('backing file'):
                # NOTE(review): cache invalidation elided in this extraction

    if local_file == None: #copy the file
        img_name= remote_file.split('/') [-1]
        img_local = self.image_path + '/' + img_name
        local_file = self.get_notused_filename(img_local)
        self.copy_file(remote_file, local_file, use_incremental_out)

    if use_incremental_out:
        # remember the pristine local copy for future launches
        self.localinfo['files'][remote_file] = local_file
    if new_backing_file:
        self.qemu_change_backing(local_file, new_backing_file)
    qemu_info = self.qemu_get_info(local_file)

    return local_file, qemu_info, use_incremental_out
def launch_server(self, conn, server, rebuild=False, domain=None):
    """Create (or rebuild) a libvirt domain for 'server' on this host.

    conn: open libvirt connection to the host.
    server: instance dict; at least 'uuid', 'image_id', optional 'paused',
            'metadata'.
    rebuild: True to discard previous incremental files and re-launch.
    domain: existing libvirt domain object, if already defined.
    Returns (0, 'Success') on success or (negative, error_text) — the
    success-return lines themselves are elided in this extraction.
    NOTE(review): the surrounding try: header, several if/else/for headers
    and return lines are elided; inline notes mark the gaps.
    """
    # test mode: just pretend it took a while
        time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real

    server_id = server['uuid']
    paused = server.get('paused','no')

    # already defined and not rebuilding: nothing to do
    if domain!=None and rebuild==False:
        #self.server_status[server_id] = 'ACTIVE'

    self.db_lock.acquire()
    result, server_data = self.db.get_instance(server_id)
    self.db_lock.release()
    # NOTE(review): error guard elided; the two lines below are its body
        print self.name, ": launch_server ERROR getting server from DB",result, server_data
        return result, server_data

    #0: get image metadata
    server_metadata = server.get('metadata', {})
    use_incremental = None
    if "use_incremental" in server_metadata:
        use_incremental = False if server_metadata["use_incremental"]=="no" else True

    server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
    # NOTE(review): 'if rebuild:' guard elided around the cleanup below
        #delete previous incremental files
        for file_ in server_host_files.values():
            self.delete_file(file_['source file'] )
        server_host_files={}

    #1: obtain aditional devices (disks)
    #Put as first device the main disk
    devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
    if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
        devices += server_data['extended']['devices']

    # NOTE(review): 'for dev in devices:' loop header elided; the body runs
    # once per device dict below.
        if dev['image_id'] == None:
            # devices without an image need no file copy (continue elided)

        self.db_lock.acquire()
        result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
                                            WHERE={'uuid': dev['image_id']})
        self.db_lock.release()
        # NOTE(review): error guard elided; the three lines below are its body
            error_text = "ERROR", result, content, "when getting image", dev['image_id']
            print self.name, ": launch_server", error_text
            return -1, error_text
        if content[0]['metadata'] is not None:
            dev['metadata'] = json.loads(content[0]['metadata'])
        # else branch header elided
            dev['metadata'] = {}

        if dev['image_id'] in server_host_files:
            # image already copied for this server: reuse the local file
            dev['source file'] = server_host_files[ dev['image_id'] ] ['source file'] #local path
            dev['file format'] = server_host_files[ dev['image_id'] ] ['file format'] # raw or qcow2

        #2: copy image to host
        remote_file = content[0]['path']
        use_incremental_image = use_incremental
        if dev['metadata'].get("use_incremental") == "no":
            use_incremental_image = False
        local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)

        #create incremental image
        if use_incremental_image:
            local_file_inc = self.get_notused_filename(local_file, '.inc')
            command = 'qemu-img create -f qcow2 '+local_file_inc+ ' -o backing_file='+ local_file
            print 'command:', command
            (_, _, stderr) = self.ssh_conn.exec_command(command)
            error_msg = stderr.read()
            if len(error_msg) > 0:
                raise paramiko.ssh_exception.SSHException("Error creating incremental file: " + error_msg)
            local_file = local_file_inc
            qemu_info = {'file format':'qcow2'}

        server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}

        dev['source file'] = local_file
        dev['file format'] = qemu_info['file format']

    # persist which files this server uses so a later delete can clean up
    self.localinfo['server_files'][ server['uuid'] ] = server_host_files
    self.localinfo_dirty = True

    # create the domain XML and start it
    result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
    # NOTE(review): error guard and its return elided
        print self.name, ": create xml server error:", xml

    print self.name, ": create xml:", xml
    atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
    if not rebuild: #ensures that any pending destroying server is done
        self.server_forceoff(True)
    #print self.name, ": launching instance" #, xml
    conn.createXML(xml, atribute)
    #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'

    # NOTE(review): the try: matching these handlers is elided above, as is
    # the binding of 'text' in the ssh handler.
    except paramiko.ssh_exception.SSHException as e:
        print self.name, ": launch_server(%s) ssh Exception: %s" %(server_id, text)
        if "SSH session not active" in text:
            # reconnect handling elided
    except host_thread.lvirt_module.libvirtError as e:
        text = e.get_error_message()
        print self.name, ": launch_server(%s) libvirt Exception: %s" %(server_id, text)
    except Exception as e:
        print self.name, ": launch_server(%s) Exception: %s" %(server_id, text)
def update_servers_status(self):
    """Poll libvirt for every known domain and reconcile self.server_status
    and the 'instances' DB table with the observed states.

    NOTE(review): the try: header, a few else/continue lines and early
    returns are elided in this extraction; inline notes mark the gaps.
    """
    # libvirt domain state codes, for reference:
    # VIR_DOMAIN_NOSTATE = 0
    # VIR_DOMAIN_RUNNING = 1
    # VIR_DOMAIN_BLOCKED = 2
    # VIR_DOMAIN_PAUSED = 3
    # VIR_DOMAIN_SHUTDOWN = 4
    # VIR_DOMAIN_SHUTOFF = 5
    # VIR_DOMAIN_CRASHED = 6
    # VIR_DOMAIN_PMSUSPENDED = 7   #TODO suspended

    # nothing to reconcile in test mode or with no tracked servers
    if self.test or len(self.server_status)==0:

    # NOTE(review): try: header and the initialization of domain_dict are
    # elided here.
        conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
        domains = conn.listAllDomains()
        for domain in domains:
            uuid = domain.UUIDString() ;
            libvirt_status = domain.state()
            #print libvirt_status
            # map libvirt states onto openvim instance states; SHUTDOWN (in
            # progress) still counts as ACTIVE
            if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
                new_status = "ACTIVE"
            elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
                new_status = "PAUSED"
            elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
                new_status = "INACTIVE"
            elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
                new_status = "ERROR"
            # NOTE(review): fall-through else elided
            domain_dict[uuid] = new_status
    except host_thread.lvirt_module.libvirtError as e:
        print self.name, ": get_state() Exception '", e.get_error_message()

    for server_id, current_status in self.server_status.iteritems():
        # a tracked server missing from libvirt is considered INACTIVE
        if server_id in domain_dict:
            new_status = domain_dict[server_id]
        # else branch header elided
            new_status = "INACTIVE"

        # skip when unknown or unchanged (continue elided)
        if new_status == None or new_status == current_status:
        if new_status == 'INACTIVE' and current_status == 'ERROR':
            continue #keep ERROR status, because obviously this machine is not running

        print self.name, ": server ", server_id, "status change from ", current_status, "to", new_status
        STATUS={'progress':100, 'status':new_status}
        if new_status == 'ERROR':
            STATUS['last_error'] = 'machine has crashed'
        self.db_lock.acquire()
        r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
        self.db_lock.release()
        # cache the new state only after the DB update
        self.server_status[server_id] = new_status
def action_on_server(self, req, last_retry=True):
    '''Perform an action on a req
    req: dictionary that contain:
        server properties: 'uuid','name','tenant_id','status'
        action key: 'action' (terminate/shutoff/shutdown/forceOff/start/
                    resume/pause/reboot/rebuild/createImage)
        host properties: 'user', 'ip_name'
    last_retry: when False and the action errors, the DB is not updated so
                the caller can retry.
    return (error, text)
        0: No error. VM is updated to new state,
        -1: Invalid action, as trying to pause a PAUSED VM
        -2: Error accessing host
        -4: Error at DB access
        -5: Error while trying to perform action. VM is updated to ERROR
    NOTE(review): this extraction is missing many original lines (the test
    mode 'if', the try: header, most else branches, the libvirt dom.destroy/
    shutdown/resume/suspend/reboot calls and several returns). The visible
    statements are transcribed below with inline notes at the gaps.
    '''
    server_id = req['uuid']
    old_status = req['status']

    # --- test mode: fake the state transitions without touching libvirt ---
    # ('if self.test:' header elided)
        if 'terminate' in req['action']:
            new_status = 'deleted'
        elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
            if req['status']!='ERROR':
                new_status = 'INACTIVE'
        elif 'start' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE'
        elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE' : new_status = 'ACTIVE'
        elif 'pause' in req['action'] and req['status']!='ERROR': new_status = 'PAUSED'
        elif 'reboot' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE'
        elif 'rebuild' in req['action']:
            time.sleep(random.randint(20,150))
            new_status = 'ACTIVE'
        elif 'createImage' in req['action']:
            self.create_image(None, req)

    # --- real mode ('else: try:' headers elided) ---
        conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
        # a missing domain is not an error for every action: dom stays None
            dom = conn.lookupByUUIDString(server_id)
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                # dom = None (elided)
                print self.name, ": action_on_server(",server_id,") libvirt exception:", text

        if 'forceOff' in req['action']:
            # ('if dom == None:' guard elided)
                print self.name, ": action_on_server(",server_id,") domain not running"
            # else: destroy the domain (dom.destroy() call elided)
                    print self.name, ": sending DESTROY to server", server_id
                except Exception as e:
                    # destroying an already-stopped domain is not an error
                    if "domain is not running" not in e.get_error_message():
                        print self.name, ": action_on_server(",server_id,") Exception while sending force off:", e.get_error_message()
                        last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                        new_status = 'ERROR'

        elif 'terminate' in req['action']:
            # ('if dom == None:' guard elided)
                print self.name, ": action_on_server(",server_id,") domain not running"
                new_status = 'deleted'
            # else: force-destroy or graceful shutdown depending on the request
                    if req['action']['terminate'] == 'force':
                        print self.name, ": sending DESTROY to server", server_id
                        # dom.destroy() elided
                        new_status = 'deleted'
                    # else branch header elided
                        print self.name, ": sending SHUTDOWN to server", server_id
                        # dom.shutdown() elided; re-check in ~10s
                        self.pending_terminate_server.append( (time.time()+10,server_id) )
                except Exception as e:
                    print self.name, ": action_on_server(",server_id,") Exception while destroy:", e.get_error_message()
                    last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                    new_status = 'ERROR'
                    if "domain is not running" in e.get_error_message():
                        # undefine attempt elided
                            new_status = 'deleted'
                            print self.name, ": action_on_server(",server_id,") Exception while undefine:", e.get_error_message()
                            # NOTE(review): tuple instead of string concatenation —
                            # last_error becomes a tuple here; looks like a bug upstream
                            last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
                            #Exception: 'virDomainDetachDevice() failed'
            if new_status=='deleted':
                if server_id in self.server_status:
                    del self.server_status[server_id]
                if req['uuid'] in self.localinfo['server_files']:
                    for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
                        # per-file try/except elided: best-effort cleanup
                            self.delete_file(file_['source file'])
                    del self.localinfo['server_files'][ req['uuid'] ]
                    self.localinfo_dirty = True

        elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
            # graceful shutdown (try/dom.shutdown() lines elided)
                    print self.name, ": action_on_server(",server_id,") domain not running"
#                new_status = 'INACTIVE'
                #TODO: check status for changing at database
            except Exception as e:
                new_status = 'ERROR'
                print self.name, ": action_on_server(",server_id,") Exception while shutdown:", e.get_error_message()
                last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()

        elif 'rebuild' in req['action']:
            # (dom.destroy() for a running domain elided)
            r = self.launch_server(conn, req, True, None)
            # error check on r elided
                new_status = 'ERROR'
                new_status = 'ACTIVE'
        elif 'start' in req['action']:
            # The instance is only create in DB but not yet at libvirt domain, needs to be create
            rebuild = True if req['action']['start'] == 'rebuild' else False
            r = self.launch_server(conn, req, rebuild, dom)
            # error check on r elided
                new_status = 'ERROR'
                new_status = 'ACTIVE'

        elif 'resume' in req['action']:
            # (try/dom.resume() lines elided)
#                new_status = 'ACTIVE'
            except Exception as e:
                print self.name, ": action_on_server(",server_id,") Exception while resume:", e.get_error_message()

        elif 'pause' in req['action']:
            # (try/dom.suspend() lines elided)
#                new_status = 'PAUSED'
            except Exception as e:
                print self.name, ": action_on_server(",server_id,") Exception while pause:", e.get_error_message()

        elif 'reboot' in req['action']:
            # (try/dom.reboot() lines elided)
                print self.name, ": action_on_server(",server_id,") reboot:"
                #new_status = 'ACTIVE'
            except Exception as e:
                print self.name, ": action_on_server(",server_id,") Exception while reboot:", e.get_error_message()
        elif 'createImage' in req['action']:
            self.create_image(dom, req)

    except host_thread.lvirt_module.libvirtError as e:
        if conn is not None: conn.close()
        text = e.get_error_message()
        new_status = "ERROR"
        print self.name, ": action_on_server(",server_id,") Exception '", text
        if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
            print self.name, ": action_on_server(",server_id,") Exception removed from host"
    #end of if self.test

    # --- persist the state change ---
    if new_status == None:
        # nothing happened (return elided)

    print self.name, ": action_on_server(",server_id,") new status", new_status, last_error
    UPDATE = {'progress':100, 'status':new_status}
    if new_status=='ERROR':
        if not last_retry: #if there will be another retry do not update database
            # (return elided)
        elif 'terminate' in req['action']:
            #PUT a log in the database
            print self.name, ": PANIC deleting server", server_id, last_error
            self.db_lock.acquire()
            self.db.new_row('logs',
                        {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
                         'description':'PANIC deleting server from host '+self.name+': '+last_error}
                )
            self.db_lock.release()
            if server_id in self.server_status:
                del self.server_status[server_id]
            # (return elided)
        # else: record the error on the instance row
            UPDATE['last_error'] = last_error

    if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
        self.db_lock.acquire()
        self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
        self.server_status[server_id] = new_status
        self.db_lock.release()
    if new_status == 'ERROR':
        # (error return elided)
def restore_iface(self, name, mac, lib_conn=None):
    ''' make an ifdown, ifup to restore default parameter of na interface
        name: interface name (used only for log messages here)
        mac: mac address of the interface
        lib_conn: connection to the libvirt, if None a new connection is created
        Return 0,None if ok, -1,text if fails
    '''
    # NOTE(review): the test-mode guard, the try: header and the initial
    # ret/error_text bindings are elided in this extraction.
        print self.name, ": restore_iface '%s' %s" % (name, mac)

        # open a connection only when the caller did not supply one
        conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")

        #wait to the pending VM deletion
        #TODO.Revise self.server_forceoff(True)

        # locate the interface by MAC; the destroy/create (ifdown/ifup)
        # calls on it are elided in this extraction
        iface = conn.interfaceLookupByMACString(mac)
        print self.name, ": restore_iface '%s' %s" % (name, mac)
    except host_thread.lvirt_module.libvirtError as e:
        error_text = e.get_error_message()
        print self.name, ": restore_iface '%s' '%s' libvirt exception: %s" %(name, mac, error_text)
    # finally header elided: close only a connection we opened ourselves
        if lib_conn is None and conn is not None:
            # conn.close() elided
    return ret, error_text
def create_image(self,dom, req):
    """Snapshot a server disk into a new image and record it in the DB.

    dom: libvirt domain (None in test mode).
    req: request dict with req['action']['createImage'] describing the
         source image and optional target 'path'/'name', and
         req['new_image']['uuid'] for the DB row to update.
    NOTE(review): the test-mode 'if', try: header and several else branches
    are elided in this extraction; inline notes mark the gaps.
    """
    # --- test mode: only compute the destination path ---
        if 'path' in req['action']['createImage']:
            file_dst = req['action']['createImage']['path']
        # else: derive "<dir>/<name>.qcow2" next to the source, unused in DB
            createImage=req['action']['createImage']
            img_name= createImage['source']['path']
            index=img_name.rfind('/')
            file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
        image_status='ACTIVE'

    # --- real mode ('else: try:' elided) ---
        server_id = req['uuid']
        createImage=req['action']['createImage']
        # local file currently backing the chosen device of this server
        file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
        if 'path' in req['action']['createImage']:
            file_dst = req['action']['createImage']['path']
        # else branch header elided
            img_name= createImage['source']['path']
            index=img_name.rfind('/')
            file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')

        self.copy_file(file_orig, file_dst)
        qemu_info = self.qemu_get_info(file_orig)
        if 'backing file' in qemu_info:
            # the copy still points at the per-host backing file; repoint it
            # to the original repository image it was copied from
            for k,v in self.localinfo['files'].items():
                if v==qemu_info['backing file']:
                    self.qemu_change_backing(file_dst, k)
        image_status='ACTIVE'

    except paramiko.ssh_exception.SSHException as e:
        image_status='ERROR'
        error_text = e.args[0]
        print self.name, "': create_image(",server_id,") ssh Exception:", error_text
        # NOTE(review): 'retry' is not visibly defined in this extraction —
        # presumably a parameter or earlier binding; TODO confirm
        if "SSH session not active" in error_text and retry==0:
            # reconnect handling elided
    except Exception as e:
        image_status='ERROR'
        print self.name, "': create_image(",server_id,") Exception:", error_text

    #TODO insert a last_error at database
    self.db_lock.acquire()
    self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
            {'uuid':req['new_image']['uuid']}, log=True)
    self.db_lock.release()
def edit_iface(self, port_id, old_net, new_net):
    """Re-plug an SR-IOV (VF) interface of a running instance.

    port_id: uuid of the port row (joined with resources_port).
    old_net: truthy to detach the current hostdev interface.
    new_net: truthy to attach it again with the new vlan tag.
    #This action imply remove and insert interface to put proper parameters
    NOTE(review): the test-mode guard, error-return branches, xml list
    initialization and the dom.detach/attach guards are partly elided in
    this extraction; inline notes mark the gaps.
    """
    #get iface details from DB
    self.db_lock.acquire()
    r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
                            WHERE={'port_id': port_id})
    self.db_lock.release()
    # DB error / not-found guards elided; these are their bodies
        print self.name, ": edit_iface(",port_id,") DDBB error:", c
        print self.name, ": edit_iface(",port_id,") por not found"

    # only VF (SR-IOV passthrough) ports can be re-plugged this way
    if port["model"]!="VF":
        print self.name, ": edit_iface(",port_id,") ERROR model must be VF"

    #create xml detach file ('xml' list initialization elided)
    xml.append("<interface type='hostdev' managed='yes'>")
    xml.append(" <mac address='" +port['mac']+ "'/>")
    xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
    xml.append('</interface>')

    # try: header and text = "\n".join(xml) bindings elided below
        conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
        dom = conn.lookupByUUIDString(port["instance_id"])
        # detach the old hostdev definition live
            print self.name, ": edit_iface detaching SRIOV interface", text
            dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
        # re-attach with the vlan tag appended to the definition
            xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
            xml.append(self.pci2xml(port.get('vpci',None)) )
            xml.append('</interface>')
            print self.name, ": edit_iface attaching SRIOV interface", text
            dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
    except host_thread.lvirt_module.libvirtError as e:
        text = e.get_error_message()
        print self.name, ": edit_iface(",port["instance_id"],") libvirt exception:", text
    # finally header elided
        if conn is not None: conn.close()
1925 def create_server(server
, db
, db_lock
, only_of_ports
):
1932 # host_id = server.get('host_id', None)
1933 extended
= server
.get('extended', None)
1935 # print '----------------------'
1936 # print json.dumps(extended, indent=4)
1939 requirements
['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
1940 requirements
['ram'] = server
['flavor'].get('ram', 0)
1941 if requirements
['ram']== None:
1942 requirements
['ram'] = 0
1943 requirements
['vcpus'] = server
['flavor'].get('vcpus', 0)
1944 if requirements
['vcpus']== None:
1945 requirements
['vcpus'] = 0
1946 #If extended is not defined get requirements from flavor
1947 if extended
is None:
1948 #If extended is defined in flavor convert to dictionary and use it
1949 if 'extended' in server
['flavor'] and server
['flavor']['extended'] != None:
1950 json_acceptable_string
= server
['flavor']['extended'].replace("'", "\"")
1951 extended
= json
.loads(json_acceptable_string
)
1954 #print json.dumps(extended, indent=4)
1956 #For simplicity only one numa VM are supported in the initial implementation
1957 if extended
!= None:
1958 numas
= extended
.get('numas', [])
1960 return (-2, "Multi-NUMA VMs are not supported yet")
1962 # return (-1, "At least one numa must be specified")
1964 #a for loop is used in order to be ready to multi-NUMA VMs
1968 numa_req
['memory'] = numa
.get('memory', 0)
1970 numa_req
['proc_req_nb'] = numa
['cores'] #number of cores or threads to be reserved
1971 numa_req
['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
1972 numa_req
['proc_req_list'] = numa
.get('cores-id', None) #list of ids to be assigned to the cores or threads
1973 elif 'paired-threads' in numa
:
1974 numa_req
['proc_req_nb'] = numa
['paired-threads']
1975 numa_req
['proc_req_type'] = 'paired-threads'
1976 numa_req
['proc_req_list'] = numa
.get('paired-threads-id', None)
1977 elif 'threads' in numa
:
1978 numa_req
['proc_req_nb'] = numa
['threads']
1979 numa_req
['proc_req_type'] = 'threads'
1980 numa_req
['proc_req_list'] = numa
.get('threads-id', None)
1982 numa_req
['proc_req_nb'] = 0 # by default
1983 numa_req
['proc_req_type'] = 'threads'
1987 #Generate a list of sriov and another for physical interfaces
1988 interfaces
= numa
.get('interfaces', [])
1991 for iface
in interfaces
:
1992 iface
['bandwidth'] = int(iface
['bandwidth'])
1993 if iface
['dedicated'][:3]=='yes':
1994 port_list
.append(iface
)
1996 sriov_list
.append(iface
)
1998 #Save lists ordered from more restrictive to less bw requirements
1999 numa_req
['sriov_list'] = sorted(sriov_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2000 numa_req
['port_list'] = sorted(port_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2003 request
.append(numa_req
)
2005 # print "----------\n"+json.dumps(request[0], indent=4)
2006 # print '----------\n\n'
2008 #Search in db for an appropriate numa for each requested numa
2009 #at the moment multi-NUMA VMs are not supported
2011 requirements
['numa'].update(request
[0])
2012 if requirements
['numa']['memory']>0:
2013 requirements
['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2014 elif requirements
['ram']==0:
2015 return (-1, "Memory information not set neither at extended field not at ram")
2016 if requirements
['numa']['proc_req_nb']>0:
2017 requirements
['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2018 elif requirements
['vcpus']==0:
2019 return (-1, "Processor information not set neither at extended field not at vcpus")
2023 result
, content
= db
.get_numas(requirements
, server
.get('host_id', None), only_of_ports
)
2027 return (-1, content
)
2029 numa_id
= content
['numa_id']
2030 host_id
= content
['host_id']
2032 #obtain threads_id and calculate pinning
2035 if requirements
['numa']['proc_req_nb']>0:
2037 result
, content
= db
.get_table(FROM
='resources_core',
2038 SELECT
=('id','core_id','thread_id'),
2039 WHERE
={'numa_id':numa_id
,'instance_id': None, 'status':'ok'} )
2045 #convert rows to a dictionary indexed by core_id
2048 if not row
['core_id'] in cores_dict
:
2049 cores_dict
[row
['core_id']] = []
2050 cores_dict
[row
['core_id']].append([row
['thread_id'],row
['id']])
2052 #In case full cores are requested
2054 if requirements
['numa']['proc_req_type'] == 'cores':
2055 #Get/create the list of the vcpu_ids
2056 vcpu_id_list
= requirements
['numa']['proc_req_list']
2057 if vcpu_id_list
== None:
2058 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2060 for threads
in cores_dict
.itervalues():
2062 if len(threads
) != 2:
2065 #set pinning for the first thread
2066 cpu_pinning
.append( [ vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1] ] )
2068 #reserve so it is not used the second thread
2069 reserved_threads
.append(threads
[1][1])
2071 if len(vcpu_id_list
) == 0:
2074 #In case paired threads are requested
2075 elif requirements
['numa']['proc_req_type'] == 'paired-threads':
2077 #Get/create the list of the vcpu_ids
2078 if requirements
['numa']['proc_req_list'] != None:
2080 for pair
in requirements
['numa']['proc_req_list']:
2082 return -1, "Field paired-threads-id not properly specified"
2084 vcpu_id_list
.append(pair
[0])
2085 vcpu_id_list
.append(pair
[1])
2087 vcpu_id_list
= range(0,2*int(requirements
['numa']['proc_req_nb']))
2089 for threads
in cores_dict
.itervalues():
2091 if len(threads
) != 2:
2093 #set pinning for the first thread
2094 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2096 #set pinning for the second thread
2097 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2099 if len(vcpu_id_list
) == 0:
2102 #In case normal threads are requested
2103 elif requirements
['numa']['proc_req_type'] == 'threads':
2104 #Get/create the list of the vcpu_ids
2105 vcpu_id_list
= requirements
['numa']['proc_req_list']
2106 if vcpu_id_list
== None:
2107 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2109 for threads_index
in sorted(cores_dict
, key
=lambda k
: len(cores_dict
[k
])):
2110 threads
= cores_dict
[threads_index
]
2111 #set pinning for the first thread
2112 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2114 #if exists, set pinning for the second thread
2115 if len(threads
) == 2 and len(vcpu_id_list
) != 0:
2116 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2118 if len(vcpu_id_list
) == 0:
2121 #Get the source pci addresses for the selected numa
2122 used_sriov_ports
= []
2123 for port
in requirements
['numa']['sriov_list']:
2125 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2131 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2133 port
['pci'] = row
['pci']
2134 if 'mac_address' not in port
:
2135 port
['mac_address'] = row
['mac']
2137 port
['port_id']=row
['id']
2138 port
['Mbps_used'] = port
['bandwidth']
2139 used_sriov_ports
.append(row
['id'])
2142 for port
in requirements
['numa']['port_list']:
2143 port
['Mbps_used'] = None
2144 if port
['dedicated'] != "yes:sriov":
2145 port
['mac_address'] = port
['mac']
2149 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac', 'Mbps'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2154 port
['Mbps_used'] = content
[0]['Mbps']
2156 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2158 port
['pci'] = row
['pci']
2159 if 'mac_address' not in port
:
2160 port
['mac_address'] = row
['mac'] # mac cannot be set to passthrough ports
2162 port
['port_id']=row
['id']
2163 used_sriov_ports
.append(row
['id'])
2166 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2167 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2169 server
['host_id'] = host_id
2172 #Generate dictionary for saving in db the instance resources
2174 resources
['bridged-ifaces'] = []
2177 numa_dict
['interfaces'] = []
2179 numa_dict
['interfaces'] += requirements
['numa']['port_list']
2180 numa_dict
['interfaces'] += requirements
['numa']['sriov_list']
2182 #Check bridge information
2183 unified_dataplane_iface
=[]
2184 unified_dataplane_iface
+= requirements
['numa']['port_list']
2185 unified_dataplane_iface
+= requirements
['numa']['sriov_list']
2187 for control_iface
in server
.get('networks', []):
2188 control_iface
['net_id']=control_iface
.pop('uuid')
2189 #Get the brifge name
2191 result
, content
= db
.get_table(FROM
='nets',
2192 SELECT
=('name', 'type', 'vlan', 'provider', 'enable_dhcp',
2193 'dhcp_first_ip', 'dhcp_last_ip', 'cidr'),
2194 WHERE
={'uuid': control_iface
['net_id']})
2199 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface
['net_id']
2202 if control_iface
.get("type", 'virtual') == 'virtual':
2203 if network
['type']!='bridge_data' and network
['type']!='bridge_man':
2204 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface
['net_id']
2205 resources
['bridged-ifaces'].append(control_iface
)
2206 if network
.get("provider") and network
["provider"][0:3] == "OVS":
2207 control_iface
["type"] = "instance:ovs"
2209 control_iface
["type"] = "instance:bridge"
2210 if network
.get("vlan"):
2211 control_iface
["vlan"] = network
["vlan"]
2213 if network
.get("enable_dhcp") == 'true':
2214 control_iface
["enable_dhcp"] = network
.get("enable_dhcp")
2215 control_iface
["dhcp_first_ip"] = network
["dhcp_first_ip"]
2216 control_iface
["dhcp_last_ip"] = network
["dhcp_last_ip"]
2217 control_iface
["cidr"] = network
["cidr"]
2219 if network
['type']!='data' and network
['type']!='ptp':
2220 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface
['net_id']
2221 #dataplane interface, look for it in the numa tree and asign this network
2223 for dataplane_iface
in numa_dict
['interfaces']:
2224 if dataplane_iface
['name'] == control_iface
.get("name"):
2225 if (dataplane_iface
['dedicated'] == "yes" and control_iface
["type"] != "PF") or \
2226 (dataplane_iface
['dedicated'] == "no" and control_iface
["type"] != "VF") or \
2227 (dataplane_iface
['dedicated'] == "yes:sriov" and control_iface
["type"] != "VFnotShared") :
2228 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2229 (control_iface
.get("name"), dataplane_iface
['dedicated'], control_iface
["type"])
2230 dataplane_iface
['uuid'] = control_iface
['net_id']
2231 if dataplane_iface
['dedicated'] == "no":
2232 dataplane_iface
['vlan'] = network
['vlan']
2233 if dataplane_iface
['dedicated'] != "yes" and control_iface
.get("mac_address"):
2234 dataplane_iface
['mac_address'] = control_iface
.get("mac_address")
2235 if control_iface
.get("vpci"):
2236 dataplane_iface
['vpci'] = control_iface
.get("vpci")
2240 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface
.get("name")
2242 resources
['host_id'] = host_id
2243 resources
['image_id'] = server
['image_id']
2244 resources
['flavor_id'] = server
['flavor_id']
2245 resources
['tenant_id'] = server
['tenant_id']
2246 resources
['ram'] = requirements
['ram']
2247 resources
['vcpus'] = requirements
['vcpus']
2248 resources
['status'] = 'CREATING'
2250 if 'description' in server
: resources
['description'] = server
['description']
2251 if 'name' in server
: resources
['name'] = server
['name']
2253 resources
['extended'] = {} #optional
2254 resources
['extended']['numas'] = []
2255 numa_dict
['numa_id'] = numa_id
2256 numa_dict
['memory'] = requirements
['numa']['memory']
2257 numa_dict
['cores'] = []
2259 for core
in cpu_pinning
:
2260 numa_dict
['cores'].append({'id': core
[2], 'vthread': core
[0], 'paired': paired
})
2261 for core
in reserved_threads
:
2262 numa_dict
['cores'].append({'id': core
})
2263 resources
['extended']['numas'].append(numa_dict
)
2264 if extended
!=None and 'devices' in extended
: #TODO allow extra devices without numa
2265 resources
['extended']['devices'] = extended
['devices']
2268 print '===================================={'
2269 print json
.dumps(resources
, indent
=4)
2270 print '====================================}'