1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
This is a thread that interacts with the host and libvirt to manage VMs.
One thread will be launched per host.
28 __author__
= "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__
= "$10-jul-2014 12:07:15$"
37 from jsonschema
import validate
as js_v
, exceptions
as js_e
40 from vim_schema
import localinfo_schema
, hostinfo_schema
44 #TODO: insert a logging system
46 # from logging import Logger
47 # import auxiliary_functions as af
49 # TODO: insert a logging system
52 class host_thread(threading
.Thread
):
    def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
                 develop_bridge_iface):
        '''Init a thread that manages one compute host via ssh and libvirt.
        'host','user': host ip or name to manage and user
        'db', 'db_lock': database class and lock to use it in exclusion
        '''
        threading.Thread.__init__(self)
        # NOTE(review): the assignments below were lost in the extraction of this
        # chunk; restored because the rest of the class reads self.name, self.host,
        # self.user, self.db and self.test -- confirm against the original file.
        self.name = name
        self.host = host
        self.user = user
        self.db = db
        self.test = test
        self.db_lock = db_lock
        # import libvirt lazily and only once per class, and only when really
        # managing a host (test mode never talks to libvirt)
        if not test and not host_thread.lvirt_module:
            try:
                module_info = imp.find_module("libvirt")
                host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
            except (IOError, ImportError) as e:
                raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))
        self.develop_mode = develop_mode
        self.develop_bridge_iface = develop_bridge_iface
        self.image_path = image_path
        self.host_id = host_id
        self.version = version

        self.server_status = {}                 #dictionary with pairs server_uuid:server_status
        self.pending_terminate_server = []      #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
        self.next_update_server_status = 0      #time when must be check servers status

        # task queue consumed by run(); the lock serializes producers against the consumer
        self.queueLock = threading.Lock()
        self.taskQueue = Queue.Queue(2000)
    def ssh_connect(self):
        '''(Re)establish the ssh session against self.host as self.user.
        Errors are printed, not raised; callers check the connection on use.
        '''
        try:
            # the visible except clause implies this try header, lost in extraction
            self.ssh_conn = paramiko.SSHClient()
            self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self.ssh_conn.load_system_host_keys()
            self.ssh_conn.connect(self.host, username=self.user, timeout=10) #, None)
        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]  # NOTE(review): restored, lost in extraction -- confirm
            print self.name, ": ssh_connect ssh Exception:", text
    def load_localinfo(self):
        '''Load the .openvim.yaml state file from the host into self.localinfo.

        Validates it against localinfo_schema; on any failure falls back to a
        default empty structure. NOTE(review): some lines of this method were
        lost in extraction (guards, 'text' assignments); restored best-effort,
        confirm against the original file.
        '''
        try:
            command = 'mkdir -p ' + self.image_path
            #print self.name, ': command:', command
            (_, stdout, stderr) = self.ssh_conn.exec_command(command)
            content = stderr.read()
            if len(content) > 0:  # NOTE(review): condition restored -- confirm
                print self.name, ': command:', command, "stderr:", content

            command = 'cat ' + self.image_path + '/.openvim.yaml'
            #print self.name, ': command:', command
            (_, stdout, stderr) = self.ssh_conn.exec_command(command)
            content = stdout.read()
            if len(content) == 0:
                print self.name, ': command:', command, "stderr:", stderr.read()
                raise paramiko.ssh_exception.SSHException("Error empty file ")
            self.localinfo = yaml.load(content)
            js_v(self.localinfo, localinfo_schema)
            self.localinfo_dirty = False
            if 'server_files' not in self.localinfo:
                self.localinfo['server_files'] = {}
            print self.name, ': localinfo load from host'
            return
        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            print self.name, ": load_localinfo ssh Exception:", text
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            print self.name, ": load_localinfo libvirt Exception:", text
        except yaml.YAMLError as exc:
            text = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
            print self.name, ": load_localinfo yaml format Exception", text
        except js_e.ValidationError as e:
            text = ""
            if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
            print self.name, ": load_localinfo format Exception:", text, e.message
        except Exception as e:
            text = str(e)
            print self.name, ": load_localinfo Exception:", text

        #not loaded, insert a default data and force saving by activating dirty flag
        self.localinfo = {'files':{}, 'server_files':{} }
        #self.localinfo_dirty=True
        self.localinfo_dirty = False
    def load_hostinfo(self):
        '''Load hostinfo.yaml from the host into self.hostinfo.

        Validates it against hostinfo_schema; on any failure self.hostinfo is
        left as None. NOTE(review): some lines were lost in extraction ('text'
        assignments, final default); restored best-effort -- confirm.
        '''
        try:
            command = 'cat ' + self.image_path + '/hostinfo.yaml'
            #print self.name, ': command:', command
            (_, stdout, stderr) = self.ssh_conn.exec_command(command)
            content = stdout.read()
            if len(content) == 0:
                print self.name, ': command:', command, "stderr:", stderr.read()
                raise paramiko.ssh_exception.SSHException("Error empty file ")
            self.hostinfo = yaml.load(content)
            js_v(self.hostinfo, hostinfo_schema)
            print self.name, ': hostlinfo load from host', self.hostinfo
            return
        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            print self.name, ": load_hostinfo ssh Exception:", text
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            print self.name, ": load_hostinfo libvirt Exception:", text
        except yaml.YAMLError as exc:
            text = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
            print self.name, ": load_hostinfo yaml format Exception", text
        except js_e.ValidationError as e:
            text = ""
            if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
            print self.name, ": load_hostinfo format Exception:", text, e.message
        except Exception as e:
            text = str(e)
            print self.name, ": load_hostinfo Exception:", text

        #not loaded, insert a default data
        self.hostinfo = None
    def save_localinfo(self, tries=3):
        '''Dump self.localinfo as yaml into .openvim.yaml on the host.

        Clears self.localinfo_dirty on success; reconnects and retries when the
        ssh session went inactive. NOTE(review): the retry-loop scaffolding was
        lost in extraction; restored best-effort -- confirm.
        '''
        if self.test:
            self.localinfo_dirty = False
            return
        while tries >= 0:
            tries -= 1
            try:
                command = 'cat > ' + self.image_path + '/.openvim.yaml'
                print self.name, ': command:', command
                (stdin, _, _) = self.ssh_conn.exec_command(command)
                yaml.safe_dump(self.localinfo, stdin, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
                self.localinfo_dirty = False
                break  # saved, stop retrying
            except paramiko.ssh_exception.SSHException as e:
                text = e.args[0]
                print self.name, ": save_localinfo ssh Exception:", text
                if "SSH session not active" in text:
                    self.ssh_connect()
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                print self.name, ": save_localinfo libvirt Exception:", text
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                print self.name, ": save_localinfo yaml format Exception", text
            except Exception as e:
                text = str(e)
                print self.name, ": save_localinfo Exception:", text
    def load_servers_from_db(self):
        '''Load the servers (VM instances) assigned to this host from the database.

        Fills self.server_status with uuid->status pairs and migrates the old
        localinfo 'inc_files' layout into 'server_files'. NOTE(review): the
        error-branch and loop header were lost in extraction; restored
        best-effort -- confirm against the original file.
        '''
        self.db_lock.acquire()
        r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
        self.db_lock.release()

        self.server_status = {}
        if r < 0:
            print self.name, ": Error getting data from database:", c
            return
        for server in c:
            self.server_status[ server['uuid'] ] = server['status']

            #convert from old version to new one
            if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
                server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
                if server_files_dict['source file'][-5:] == 'qcow2':
                    server_files_dict['file format'] = 'qcow2'

                self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
        if 'inc_files' in self.localinfo:
            del self.localinfo['inc_files']
            self.localinfo_dirty = True
    def delete_unused_files(self):
        '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
        Deletes unused entries at self.loacalinfo and the corresponding local files.
        The only reason for this mismatch is the manual deletion of instances (VM) at database
        '''
        # NOTE(review): the surrounding guard and try header were lost in
        # extraction; restored best-effort -- confirm.
        if self.test:
            return
        for uuid, images in self.localinfo['server_files'].items():
            if uuid not in self.server_status:
                for localfile in images.values():
                    try:
                        print self.name, ": deleting file '%s' of unused server '%s'" %(localfile['source file'], uuid)
                        self.delete_file(localfile['source file'])
                    except paramiko.ssh_exception.SSHException as e:
                        print self.name, ": Exception deleting file '%s': %s" %(localfile['source file'], str(e))
                del self.localinfo['server_files'][uuid]
                self.localinfo_dirty = True
    def insert_task(self, task, *aditional):
        '''Enqueue a task tuple for the worker loop.

        Returns (1, None) on success, or (-1, msg) when the queue stays full for
        the 5 second timeout. NOTE(review): success/except lines restored, lost
        in extraction -- confirm.
        '''
        try:
            self.queueLock.acquire()
            task = self.taskQueue.put( (task,) + aditional, timeout=5)
            self.queueLock.release()
            return 1, None
        except Queue.Full:
            return -1, "timeout inserting a task over host " + self.name
292 self
.load_localinfo()
294 self
.load_servers_from_db()
295 self
.delete_unused_files()
297 self
.queueLock
.acquire()
298 if not self
.taskQueue
.empty():
299 task
= self
.taskQueue
.get()
302 self
.queueLock
.release()
306 if self
.localinfo_dirty
:
307 self
.save_localinfo()
308 elif self
.next_update_server_status
< now
:
309 self
.update_servers_status()
310 self
.next_update_server_status
= now
+ 5
311 elif len(self
.pending_terminate_server
)>0 and self
.pending_terminate_server
[0][0]<now
:
312 self
.server_forceoff()
317 if task
[0] == 'instance':
318 print self
.name
, ": processing task instance", task
[1]['action']
322 r
=self
.action_on_server(task
[1], retry
==2)
325 elif task
[0] == 'image':
327 elif task
[0] == 'exit':
328 print self
.name
, ": processing task exit"
331 elif task
[0] == 'reload':
332 print self
.name
, ": processing task reload terminating and relaunching"
335 elif task
[0] == 'edit-iface':
336 print self
.name
, ": processing task edit-iface port=%s, old_net=%s, new_net=%s" % (task
[1], task
[2], task
[3])
337 self
.edit_iface(task
[1], task
[2], task
[3])
338 elif task
[0] == 'restore-iface':
339 print self
.name
, ": processing task restore-iface %s mac=%s" % (task
[1], task
[2])
340 self
.restore_iface(task
[1], task
[2])
341 elif task
[0] == 'new-ovsbridge':
342 print self
.name
, ": Creating compute OVS bridge"
343 self
.create_ovs_bridge()
344 elif task
[0] == 'new-vxlan':
345 print self
.name
, ": Creating vxlan tunnel=" + task
[1] + ", remote ip=" + task
[2]
346 self
.create_ovs_vxlan_tunnel(task
[1], task
[2])
347 elif task
[0] == 'del-ovsbridge':
348 print self
.name
, ": Deleting OVS bridge"
349 self
.delete_ovs_bridge()
350 elif task
[0] == 'del-vxlan':
351 print self
.name
, ": Deleting vxlan " + task
[1] + " tunnel"
352 self
.delete_ovs_vxlan_tunnel(task
[1])
353 elif task
[0] == 'create-ovs-bridge-port':
354 print self
.name
, ": Adding port ovim-" + task
[1] + " to OVS bridge"
355 self
.create_ovs_bridge_port(task
[1])
356 elif task
[0] == 'del-ovs-port':
357 print self
.name
, ": Delete bridge attached to ovs port vlan {} net {}".format(task
[1], task
[2])
358 self
.delete_bridge_port_attached_to_ovs(task
[1], task
[2])
360 print self
.name
, ": unknown task", task
    def server_forceoff(self, wait_until_finished=False):
        '''Walk self.pending_terminate_server sending a force 'terminate' when each
        entry's scheduled time arrives.

        With wait_until_finished True it sleeps until every entry is processed;
        otherwise it returns as soon as the head entry is not yet due.
        NOTE(review): the time handling lines were lost in extraction; restored
        best-effort -- confirm against the original file.
        '''
        while len(self.pending_terminate_server)>0:
            now = time.time()
            if self.pending_terminate_server[0][0]>now:
                if wait_until_finished:
                    time.sleep(1)
                    continue
                else:
                    return
            req={'uuid':self.pending_terminate_server[0][1],
                 'action':{'terminate':'force'},
                 'status': None
            }
            self.action_on_server(req)
            self.pending_terminate_server.pop(0)
380 self
.server_forceoff(True)
381 if self
.localinfo_dirty
:
382 self
.save_localinfo()
384 self
.ssh_conn
.close()
385 except Exception as e
:
387 print self
.name
, ": terminate Exception:", text
388 print self
.name
, ": exit from host_thread"
    def get_local_iface_name(self, generic_name):
        '''Translate a generic interface name into this host's real interface name
        using the hostinfo 'iface_names' map; fall back to the generic name.
        '''
        if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
            return self.hostinfo["iface_names"][generic_name]
        return generic_name  # NOTE(review): fallback restored, lost in extraction -- confirm
    def create_xml_server(self, server, dev_list, server_metadata={}):
        """Function that implements the generation of the VM XML definition.
        Additional devices are in dev_list list
        The main disk is upon dev_list[0]

        Returns (0, xml_text) on success or (-1, error_message) on failure.
        NOTE(review): many connective lines of this method were lost in the
        extraction of this chunk (flag initialisations, else branches, counters);
        they are restored best-effort and should be confirmed against the
        original file.
        """

        #get if operating system is Windows
        windows_os = False
        os_type = server_metadata.get('os_type', None)
        if os_type == None and 'metadata' in dev_list[0]:
            os_type = dev_list[0]['metadata'].get('os_type', None)
        if os_type != None and os_type.lower() == "windows":
            windows_os = True
        #get type of hard disk bus
        bus_ide = True if windows_os else False
        bus = server_metadata.get('bus', None)
        if bus == None and 'metadata' in dev_list[0]:
            bus = dev_list[0]['metadata'].get('bus', None)
        if bus != None:
            bus_ide = True if bus=='ide' else False

        self.xml_level = 0

        text = "<domain type='kvm'>"
        #get topology
        topo = server_metadata.get('topology', None)
        if topo == None and 'metadata' in dev_list[0]:
            topo = dev_list[0]['metadata'].get('topology', None)
        #name
        name = server.get('name','') + "_" + server['uuid']
        name = name[:58]  #qemu impose a length limit of 59 chars or not start. Using 58
        text += self.inc_tab() + "<name>" + name + "</name>"
        #uuid
        text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>"

        numa = {}
        if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
            numa = server['extended']['numas'][0]
        #memory
        use_huge = False
        memory = int(numa.get('memory',0))*1024*1024 #in KiB
        if memory == 0:
            memory = int(server['ram'])*1024;
        else:
            if not self.develop_mode:
                use_huge = True
        if memory == 0:
            return -1, 'No memory assigned to instance'
        memory = str(memory)  # NOTE(review): restored; the concatenations below need a string
        text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>"
        text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
        if use_huge:
            text += self.tab()+'<memoryBacking>'+ \
                self.inc_tab() + '<hugepages/>'+ \
                self.dec_tab()+ '</memoryBacking>'

        #cpu
        use_cpu_pinning = False
        vcpus = int(server.get("vcpus",0))
        cpu_pinning = []
        # each entry is [guest vcpu id, host cpu id]
        if 'cores-source' in numa:
            use_cpu_pinning = True
            for index in range(0, len(numa['cores-source'])):
                cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
                vcpus += 1
        if 'threads-source' in numa:
            use_cpu_pinning = True
            for index in range(0, len(numa['threads-source'])):
                cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
                vcpus += 1
        if 'paired-threads-source' in numa:
            use_cpu_pinning = True
            for index in range(0, len(numa['paired-threads-source'])):
                cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
                cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
                vcpus += 2

        if use_cpu_pinning and not self.develop_mode:
            text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
                self.tab()+'<cputune>'
            self.xml_level += 1
            for i in range(0, len(cpu_pinning)):
                text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
            text += self.dec_tab()+'</cputune>'+ \
                self.tab() + '<numatune>' +\
                self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
                self.dec_tab() + '</numatune>'
        else:
            if vcpus == 0:
                return -1, "Instance without number of cpus"
            text += self.tab()+"<vcpu>" + str(vcpus) + "</vcpu>"

        #boot: boot from cdrom when any cdrom device is present
        boot_cdrom = False
        for dev in dev_list:
            if dev['type']=='cdrom' :
                boot_cdrom = True
                break
        text += self.tab()+ '<os>' + \
            self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
        if boot_cdrom:
            text += self.tab() + "<boot dev='cdrom'/>"
        text += self.tab() + "<boot dev='hd'/>" + \
            self.dec_tab()+'</os>'
        #features
        text += self.tab()+'<features>'+\
            self.inc_tab()+'<acpi/>' +\
            self.tab()+'<apic/>' +\
            self.tab()+'<pae/>'+ \
            self.dec_tab() +'</features>'
        if windows_os or topo=="oneSocket":
            text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>"% vcpus
        else:
            text += self.tab() + "<cpu mode='host-model'></cpu>"
        text += self.tab() + "<clock offset='utc'/>" +\
            self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
            self.tab() + "<on_reboot>restart</on_reboot>" + \
            self.tab() + "<on_crash>restart</on_crash>"
        text += self.tab() + "<devices>" + \
            self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
            self.tab() + "<serial type='pty'>" +\
            self.inc_tab() + "<target port='0'/>" + \
            self.dec_tab() + "</serial>" +\
            self.tab() + "<console type='pty'>" + \
            self.inc_tab()+ "<target type='serial' port='0'/>" + \
            self.dec_tab()+'</console>'
        if windows_os:
            text += self.tab() + "<controller type='usb' index='0'/>" + \
                self.tab() + "<controller type='ide' index='0'/>" + \
                self.tab() + "<input type='mouse' bus='ps2'/>" + \
                self.tab() + "<sound model='ich6'/>" + \
                self.tab() + "<video>" + \
                self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
                self.dec_tab() + "</video>" + \
                self.tab() + "<memballoon model='virtio'/>" + \
                self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar

        #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
        #> self.dec_tab()+'</hostdev>\n' +\
        #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'

        if windows_os:
            text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
        else:
            #If image contains 'GRAPH' include graphics
            #if 'GRAPH' in image:
            text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
                self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
                self.dec_tab() + "</graphics>"

        #disks and xml passthrough devices; vd_index names hda/vda, hdb/vdb, ...
        vd_index = 'a'
        for dev in dev_list:
            bus_ide_dev = bus_ide
            if dev['type']=='cdrom' or dev['type']=='disk':
                if dev['type']=='cdrom':
                    bus_ide_dev = True  # cdrom always goes on the ide bus
                text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
                if 'file format' in dev:
                    text += self.inc_tab() + "<driver name='qemu' type='" +dev['file format']+ "' cache='writethrough'/>"
                if 'source file' in dev:
                    text += self.tab() + "<source file='" +dev['source file']+ "'/>"
                #elif v['type'] == 'block':
                #    text += self.tab() + "<source dev='" + v['source'] + "'/>"
                #else:
                #    return -1, 'Unknown disk type ' + v['type']
                vpci = dev.get('vpci',None)
                if vpci == None and 'metadata' in dev:
                    vpci = dev['metadata'].get('vpci',None)
                text += self.pci2xml(vpci)

                if bus_ide_dev:
                    text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>"   #TODO allows several type of disks
                else:
                    text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>"
                text += self.dec_tab() + '</disk>'
                vd_index = chr(ord(vd_index)+1)
            elif dev['type']=='xml':
                dev_text = dev['xml']
                if 'vpci' in dev:
                    dev_text = dev_text.replace('__vpci__', dev['vpci'])
                if 'source file' in dev:
                    dev_text = dev_text.replace('__file__', dev['source file'])
                if 'file format' in dev:
                    # NOTE(review): the source substitutes dev['source file'] for
                    # __format__; looks like it should be dev['file format'] --
                    # possible upstream bug, preserved as found.
                    dev_text = dev_text.replace('__format__', dev['source file'])
                if '__dev__' in dev_text:
                    dev_text = dev_text.replace('__dev__', vd_index)
                    vd_index = chr(ord(vd_index)+1)
                text += dev_text
            else:
                return -1, 'Unknown device type ' + dev['type']

        #bridge interfaces (management/bridged networks)
        net_nb = 0
        bridge_interfaces = server.get('networks', [])
        for v in bridge_interfaces:
            #Get the bridge name
            self.db_lock.acquire()
            result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
            self.db_lock.release()
            if result <= 0:
                print "create_xml_server ERROR getting nets",result, content
                return -1, content
            #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
            #I know it is not secure
            #for v in sorted(desc['network interfaces'].itervalues()):
            model = v.get("model", None)
            if content[0]['provider']=='default':
                text += self.tab() + "<interface type='network'>" + \
                    self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
            elif content[0]['provider'][0:7]=='macvtap':
                text += self.tab()+"<interface type='direct'>" + \
                    self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
                    self.tab() + "<target dev='macvtap0'/>"
                if windows_os:
                    text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
                elif model == None:
                    model = "virtio"
            elif content[0]['provider'][0:6]=='bridge':
                text += self.tab() + "<interface type='bridge'>" + \
                    self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
                if windows_os:
                    text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
                        self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
                elif model == None:
                    model = "virtio"
            elif content[0]['provider'][0:3] == "OVS":
                vlan = content[0]['provider'].replace('OVS:', '')
                text += self.tab() + "<interface type='bridge'>" + \
                    self.inc_tab() + "<source bridge='ovim-" + vlan + "'/>"
            else:
                return -1, 'Unknown Bridge net provider ' + content[0]['provider']
            if model != None:
                text += self.tab() + "<model type='" +model+ "'/>"
            if v.get('mac_address', None) != None:
                text += self.tab() +"<mac address='" +v['mac_address']+ "'/>"
            text += self.pci2xml(v.get('vpci',None))
            text += self.dec_tab()+'</interface>'

            net_nb += 1

        #numa (dataplane) interfaces: passthrough or SR-IOV
        interfaces = numa.get('interfaces', [])

        net_nb = 0
        for v in interfaces:
            if self.develop_mode: #map these interfaces to bridges
                text += self.tab() + "<interface type='bridge'>" + \
                    self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
                if windows_os:
                    text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
                        self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
                else:
                    text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
                if v.get('mac_address', None) != None:
                    text += self.tab() +"<mac address='" +v['mac_address']+ "'/>"
                text += self.pci2xml(v.get('vpci',None))
                text += self.dec_tab()+'</interface>'
                continue

            if v['dedicated'] == 'yes':     #passthrought
                text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
                    self.inc_tab() + "<source>"
                self.inc_tab()
                text += self.pci2xml(v['source'])
                text += self.dec_tab()+'</source>'
                text += self.pci2xml(v.get('vpci',None))
                if windows_os:
                    text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
                text += self.dec_tab()+'</hostdev>'
                net_nb += 1
            else:        #sriov_interfaces
                #skip not connected interfaces
                if v.get("net_id") == None:
                    continue
                text += self.tab() + "<interface type='hostdev' managed='yes'>"
                self.inc_tab()
                if v.get('mac_address', None) != None:
                    text += self.tab() + "<mac address='" +v['mac_address']+ "'/>"
                text += self.tab()+'<source>'
                self.inc_tab()
                text += self.pci2xml(v['source'])
                text += self.dec_tab()+'</source>'
                if v.get('vlan',None) != None:
                    text += self.tab() + "<vlan> <tag id='" + str(v['vlan']) + "'/> </vlan>"
                text += self.pci2xml(v.get('vpci',None))
                if windows_os:
                    text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
                text += self.dec_tab()+'</interface>'

                net_nb += 1

        text += self.dec_tab()+'</devices>'+\
            self.dec_tab()+'</domain>'
        return 0, text
    def pci2xml(self, pci):
        '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
        alows an empty pci text'''
        if pci is None:  # NOTE(review): empty-pci guard restored, lost in extraction -- confirm
            return ""
        first_part = pci.split(':')
        second_part = first_part[2].split('.')
        return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
            "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
            "' function='0x" + second_part[1] + "'/>"
697 """Return indentation according to xml_level"""
698 return "\n" + (' '*self
.xml_level
)
701 """Increment and return indentation according to xml_level"""
706 """Decrement and return indentation according to xml_level"""
    def create_ovs_bridge(self):
        '''
        Create a bridge in compute OVS to allocate VMs
        :return: True if success
        '''
        # NOTE(review): the test-mode guard and the final returns were lost in
        # extraction; restored best-effort -- confirm.
        if self.test:
            return True
        command = 'sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
        # ovs-vsctl prints nothing on success
        if len(content) == 0:
            return True
        else:
            return False
    def delete_port_to_ovs_bridge(self, vlan, net_uuid):
        '''
        Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
        :param vlan: vlan port id
        :param net_uuid: network id
        :return: True if success
        '''
        # NOTE(review): test-mode guard and final returns restored, lost in
        # extraction -- confirm.
        if self.test:
            return True
        port_name = 'ovim-' + vlan
        command = 'sudo ovs-vsctl del-port br-int ' + port_name
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
        # ovs-vsctl prints nothing on success
        if len(content) == 0:
            return True
        else:
            return False
    def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
        '''
        Delete dhcp server process lining in namespace
        :param vlan: segmentation id
        :param net_uuid: network uuid
        :param dhcp_path: conf fiel path that live in namespace side
        :return:
        '''
        # NOTE(review): guards restored, lost in extraction -- confirm.
        if self.test:
            return True
        if not self.is_dhcp_port_free(vlan, net_uuid):
            return True

        net_namespace = 'ovim-' + vlan
        dhcp_path = os.path.join(dhcp_path, net_namespace)
        pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

        # read the dnsmasq pid stored inside the namespace...
        command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_file
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # ...and kill it
        command = 'sudo ip netns exec ' + net_namespace + ' kill -9 ' + content
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # if len(content) == 0:
        #     return True
        # else:
        #     return False
    def is_dhcp_port_free(self, host_id, net_uuid):
        '''
        Check if any port attached to the a net in a vxlan mesh across computes nodes
        :param host_id: host id
        :param net_uuid: network id
        :return: True if is not free
        '''
        self.db_lock.acquire()
        result, content = self.db.get_table(
            # NOTE(review): FROM clause lost in extraction; restored to match the
            # 'p.' aliases used in WHERE and the sibling is_port_free -- confirm.
            FROM='ports as p join instances as i on p.instance_id=i.uuid',
            WHERE={'p.type': 'instance:ovs', 'p.net_id': net_uuid}
        )
        self.db_lock.release()

        # NOTE(review): return logic restored, lost in extraction -- confirm.
        if len(content) > 0:
            return False
        else:
            return True
    def is_port_free(self, host_id, net_uuid):
        '''
        Check if there not ovs ports of a network in a compute host.
        :param host_id: host id
        :param net_uuid: network id
        :return: True if is not free
        '''
        self.db_lock.acquire()
        result, content = self.db.get_table(
            FROM='ports as p join instances as i on p.instance_id=i.uuid',
            WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
        )
        self.db_lock.release()

        # NOTE(review): return logic restored, lost in extraction -- confirm.
        if len(content) > 0:
            return False
        else:
            return True
    def add_port_to_ovs_bridge(self, vlan):
        '''
        Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
        :param vlan: vlan port id
        :return: True if success
        '''
        # NOTE(review): test-mode guard and final returns restored, lost in
        # extraction -- confirm.
        if self.test:
            return True
        port_name = 'ovim-' + vlan
        command = 'sudo ovs-vsctl add-port br-int ' + port_name + ' tag=' + vlan
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
        # ovs-vsctl prints nothing on success
        if len(content) == 0:
            return True
        else:
            return False
    def delete_dhcp_port(self, vlan, net_uuid):
        '''
        Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
        :param vlan: segmentation id
        :param net_uuid: network id
        :return: True if success
        '''
        # NOTE(review): guards and final return restored, lost in extraction -- confirm.
        if self.test:
            return True
        if not self.is_dhcp_port_free(vlan, net_uuid):
            return True
        self.delete_dhcp_interfaces(vlan)
        return True
    def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
        '''
        Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
        :param vlan: segmentation id
        :param net_uuid: network id
        :return: True if success
        '''
        # NOTE(review): guards and final return restored, lost in extraction -- confirm.
        if self.test:
            return True
        if not self.is_port_free(vlan, net_uuid):
            return True
        self.delete_port_to_ovs_bridge(vlan, net_uuid)
        self.delete_linux_bridge(vlan)
        return True
    def delete_linux_bridge(self, vlan):
        '''
        Delete a linux bridge in a scpecific compute.
        :param vlan: vlan port id
        :return: True if success
        '''
        # NOTE(review): test-mode guard and final returns restored, lost in
        # extraction -- confirm.
        if self.test:
            return True
        port_name = 'ovim-' + vlan
        # bring the veth leg down first
        command = 'sudo ip link set dev veth0-' + vlan + ' down'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
        # if len(content) != 0:
        #     return False
        # then take the bridge down and remove it
        command = 'sudo ifconfig ' + port_name + ' down && sudo brctl delbr ' + port_name
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
        if len(content) == 0:
            return True
        else:
            return False
    def create_ovs_bridge_port(self, vlan):
        '''
        Generate a linux bridge and attache the port to a OVS bridge
        :param vlan: vlan port id
        :return:
        '''
        self.create_linux_bridge(vlan)
        self.add_port_to_ovs_bridge(vlan)
    def create_linux_bridge(self, vlan):
        '''
        Create a linux bridge with STP active
        :param vlan: netowrk vlan id
        :return: True if success
        '''
        # NOTE(review): test-mode guard and final returns restored, lost in
        # extraction -- confirm.
        if self.test:
            return True
        port_name = 'ovim-' + vlan
        # skip nothing here: existence is probed but creation proceeds anyway
        command = 'sudo brctl show | grep ' + port_name
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # if exist nothing to create
        # if len(content) == 0:
        #     return False

        command = 'sudo brctl addbr ' + port_name
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # if len(content) == 0:
        #     return True
        # else:
        #     return False

        command = 'sudo brctl stp ' + port_name + ' on'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # if len(content) == 0:
        #     return True
        # else:
        #     return False

        command = 'sudo ip link set dev ' + port_name + ' up'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        if len(content) == 0:
            return True
        else:
            return False
    def set_mac_dhcp_server(self, ip, mac, vlan, netmask, dhcp_path):
        '''
        Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
        :param ip: IP address asigned to a VM
        :param mac: VM vnic mac to be macthed with the IP received
        :param vlan: Segmentation id
        :param netmask: netmask value
        :param path: dhcp conf file path that live in namespace side
        :return: True if success
        '''
        # NOTE(review): test-mode guard and final returns restored, lost in
        # extraction -- confirm.
        if self.test:
            return True
        net_namespace = 'ovim-' + vlan
        dhcp_path = os.path.join(dhcp_path, net_namespace)
        dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)

        # dnsmasq dhcp-hostsdir entry format: MAC,IP
        ip_data = mac.upper() + ',' + ip

        command = 'sudo ip netns exec ' + net_namespace + ' touch ' + dhcp_hostsdir
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        command = 'sudo ip netns exec ' + net_namespace + ' sudo bash -ec "echo ' + ip_data + ' >> ' + dhcp_hostsdir + '"'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        if len(content) == 0:
            return True
        else:
            return False
    def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
        '''
        Delete into dhcp conf file the ip assigned to a specific MAC address
        :param ip: IP address asigned to a VM
        :param mac: VM vnic mac to be macthed with the IP received
        :param vlan: Segmentation id
        :param dhcp_path: dhcp conf file path that live in namespace side
        :return:
        '''
        # NOTE(review): test-mode guard and final returns restored, lost in
        # extraction -- confirm.
        if self.test:
            return True
        net_namespace = 'ovim-' + vlan
        dhcp_path = os.path.join(dhcp_path, net_namespace)
        dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)

        # same MAC,IP format written by set_mac_dhcp_server; sed removes the line
        ip_data = mac.upper() + ',' + ip

        command = 'sudo ip netns exec ' + net_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        if len(content) == 0:
            return True
        else:
            return False
    def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway):
        '''
        Generate a linux bridge and attache the port to a OVS bridge
        :param vlan: Segmentation id
        :param ip_range: IP dhcp range
        :param netmask: network netmask
        :param dhcp_path: dhcp conf file path that live in namespace side
        :param gateway: Gateway address for dhcp net
        :return: True if success
        '''
        # NOTE(review): the test-mode guard and the conditional around launching
        # dnsmasq were lost in extraction; restored best-effort -- confirm.
        if self.test:
            return True
        interface = 'tap-' + vlan
        net_namespace = 'ovim-' + vlan
        dhcp_path = os.path.join(dhcp_path, net_namespace)
        leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
        pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

        dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask

        command = 'sudo ip netns exec ' + net_namespace + ' mkdir -p ' + dhcp_path
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
        command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_path
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
        # check if pid is runing
        pid_status_path = content
        if content:
            command = "ps aux | awk '{print $2 }' | grep " + pid_status_path
            print self.name, ': command:', command
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()
        if not content:
            # no dnsmasq running in the namespace yet: launch it
            command = 'sudo ip netns exec ' + net_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
                      '--interface=' + interface + ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
                      ' --dhcp-range ' + dhcp_range + ' --pid-file=' + pid_file + ' --dhcp-leasefile=' + leases_path + \
                      ' --listen-address ' + gateway

            print self.name, ': command:', command
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.readline()

        if len(content) == 0:
            return True
        else:
            return False
    def delete_dhcp_interfaces(self, vlan):
        """
        Tear down the dhcp namespace plumbing for a network: remove the OVS port
        and bring both ends of the veth pair down.
        (Original docstring said "Create a linux bridge with STP active" — copy-paste error.)

        :param vlan: network vlan id (string)
        """
        if self.test:
            return True

        net_namespace = 'ovim-' + vlan

        # remove the OVS side of the veth pair from the integration bridge
        command = 'sudo ovs-vsctl del-port br-int ovs-tap-' + vlan
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # bring down the namespace end of the veth pair
        command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' down'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # bring down the host end of the veth pair
        command = 'sudo ip link set dev ovs-tap-' + vlan + ' down'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
    def create_dhcp_interfaces(self, vlan, ip, netmask):
        """
        Create the dhcp namespace plumbing for a network: a network namespace, a
        veth pair with one end attached (tagged) to the OVS integration bridge and
        the other moved into the namespace and configured with the dhcp-server IP.
        (Original docstring said "Create a linux bridge with STP active" — copy-paste error.)

        :param vlan:    segmentation id (string)
        :param ip:      IP in the dhcp range for the tap interface living in namespace side
        :param netmask: dhcp net netmask
        :return: True if success
        """
        if self.test:
            return True

        net_namespace = 'ovim-' + vlan
        namespace_interface = 'tap-' + vlan

        command = 'sudo ip netns add ' + net_namespace
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # veth pair: tap-<vlan> (namespace side) <-> ovs-tap-<vlan> (bridge side)
        command = 'sudo ip link add tap-' + vlan + ' type veth peer name ovs-tap-' + vlan
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # attach bridge side to br-int with the network's vlan tag
        command = 'sudo ovs-vsctl add-port br-int ovs-tap-' + vlan + ' tag=' + vlan
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # move the other end into the namespace
        command = 'sudo ip link set tap-' + vlan + ' netns ' + net_namespace
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' up'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        command = 'sudo ip link set dev ovs-tap-' + vlan + ' up'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # give the namespace-side interface the dhcp server address
        command = 'sudo ip netns exec ' + net_namespace + ' ' + ' ifconfig ' + namespace_interface \
                  + ' ' + ip + ' netmask ' + netmask
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # ifconfig is silent on success
        if len(content) == 0:
            return True
        else:
            return False
    def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
        """
        Create a vxlan tunnel between two computes with an OVS installed.
        STP path cost is also set at port level.

        :param vxlan_interface: vxlan interface name
        :param remote_ip: tunnel endpoint remote compute ip
        :return: True if the command produced no output (assumed success)
        """
        if self.test:
            return True

        command = 'sudo ovs-vsctl add-port br-int ' + vxlan_interface + \
                  ' -- set Interface ' + vxlan_interface + ' type=vxlan options:remote_ip=' + remote_ip + \
                  ' -- set Port ' + vxlan_interface + ' other_config:stp-path-cost=10'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        if len(content) == 0:
            return True
        else:
            return False
    def delete_ovs_vxlan_tunnel(self, vxlan_interface):
        """
        Delete a vxlan tunnel port from the OVS integration bridge.

        :param vxlan_interface: vxlan port name to be deleted
        :return: True if success
        """
        if self.test:
            return True

        command = 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        if len(content) == 0:
            return True
        else:
            return False
    def delete_ovs_bridge(self):
        """
        Delete the OVS integration bridge (br-int) from this compute.

        :return: True if success
        """
        if self.test:
            return True

        command = 'sudo ovs-vsctl del-br br-int'
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
        if len(content) == 0:
            return True
        else:
            return False
    def get_file_info(self, path):
        """
        Stat a remote file over ssh using 'ls -lL'.

        :param path: remote file path
        :return: None if the file does not exist, otherwise the 'ls' output split
                 on single spaces: (permission, 1, owner, group, size, date, file)
        """
        command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
        if len(content) == 0:
            return None  # file does not exist
        else:
            # NOTE(review): naive split — multiple consecutive spaces (e.g. in the
            # size column padding) would shift fields; callers index [4] and [5].
            return content.split(" ")  # (permission, 1, owner, group, size, date, file)
    def qemu_get_info(self, path):
        """
        Run 'qemu-img info' on a remote image and parse its output as YAML.

        :param path: remote image path
        :return: dict with qemu information of the file (e.g. 'file format',
                 'backing file' keys)
        :raises paramiko.ssh_exception.SSHException: if qemu-img produced no
                 output or the output is not parseable YAML
        """
        command = 'qemu-img info ' + path
        print self.name, ': command:', command
        (_, stdout, stderr) = self.ssh_conn.exec_command(command)
        content = stdout.read()
        if len(content) == 0:
            # empty stdout means qemu-img failed; surface its stderr
            error = stderr.read()
            print self.name, ": get_qemu_info error ", error
            raise paramiko.ssh_exception.SSHException("Error getting qemu_info: " + error)
        else:
            try:
                # qemu-img's "key: value" output happens to be valid YAML
                return yaml.load(content)
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                print self.name, ": get_qemu_info yaml format Exception", text
                raise paramiko.ssh_exception.SSHException("Error getting qemu_info yaml format" + text)
    def qemu_change_backing(self, inc_file, new_backing_file):
        """
        Rebase an incremental qcow2 image onto a new backing file (unsafe mode,
        i.e. only the header is rewritten, data is not copied).

        :param inc_file: incremental image to modify
        :param new_backing_file: path to the new backing image
        :return: 0 on success, -1 on error (error text is printed)
        """
        command = 'qemu-img rebase -u -b ' + new_backing_file + ' ' + inc_file
        print self.name, ': command:', command
        (_, _, stderr) = self.ssh_conn.exec_command(command)
        content = stderr.read()
        # qemu-img rebase is silent on success; any stderr output is an error
        if len(content) == 0:
            return 0
        else:
            print self.name, ": qemu_change_backing error: ", content
            return -1
    def get_notused_filename(self, proposed_name, suffix=''):
        """
        Look for a non existing file name in the host.

        :param proposed_name: proposed file name, includes path
        :param suffix: suffix to be added to the name, before the extension
        :return: the first name of the form <base><suffix>[-<index>]<ext> for
                 which get_file_info() reports no existing file
        """
        extension = proposed_name.rfind(".")
        slash = proposed_name.rfind("/")
        if extension < 0 or extension < slash:  # no extension (dot belongs to a directory)
            extension = len(proposed_name)
        target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
        info = self.get_file_info(target_name)
        if info is None:
            return target_name

        # collision: append an increasing numeric index until the name is free
        index = 0
        while info is not None:
            target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
            index += 1
            info = self.get_file_info(target_name)
        return target_name
    def get_notused_path(self, proposed_path, suffix=''):
        """
        Look for a non existing path at database for images.

        :param proposed_path: proposed file name, includes path
        :param suffix: suffix to be added to the name, before the extension
        :return: the first candidate path not present in the 'images' DB table
        """
        extension = proposed_path.rfind(".")
        if extension < 0:
            extension = len(proposed_path)
        target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
        index = 0
        while True:
            # NOTE(review): unlike sibling methods this DB access is not guarded
            # by self.db_lock — confirm whether that is intentional.
            r, _ = self.db.get_table(FROM="images", WHERE={"path": target_path})
            if r <= 0:
                return target_path
            # collision in DB: append an increasing numeric index
            target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
            index += 1
    def delete_file(self, file_name):
        """
        Delete a file on the remote host.

        :param file_name: remote path to remove ('rm -f', so a missing file is not an error)
        :raises paramiko.ssh_exception.SSHException: if rm wrote to stderr
        """
        command = 'rm -f ' + file_name
        print self.name, ': command:', command
        (_, _, stderr) = self.ssh_conn.exec_command(command)
        error_msg = stderr.read()
        if len(error_msg) > 0:
            raise paramiko.ssh_exception.SSHException("Error deleting file: " + error_msg)
    def copy_file(self, source, destination, perserve_time=True):
        """
        Copy a file on the remote host: either download it with wget when the
        source is an http(s) URL, or 'cp' it locally on that host.

        :param source: source path, or an http URL
        :param destination: destination path on the remote host
        :param perserve_time: keep source timestamps on local copies
                              (parameter name typo kept for interface compatibility)
        :raises paramiko.ssh_exception.SSHException: if the command wrote to stderr
        """
        if source[0:4] == "http":
            # wget's progress goes to stderr; stash it in a .result file and only
            # replay it to stderr when wget fails, so success looks silent
            command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
                dst=destination, src=source, dst_result=destination + ".result" )
        else:
            command = 'cp --no-preserve=mode'
            if perserve_time:
                command += ' --preserve=timestamps'
            command += " '{}' '{}'".format(source, destination)
        print self.name, ': command:', command
        (_, _, stderr) = self.ssh_conn.exec_command(command)
        error_msg = stderr.read()
        if len(error_msg) > 0:
            raise paramiko.ssh_exception.SSHException("Error copying image to local host: " + error_msg)
    def copy_remote_file(self, remote_file, use_incremental):
        """
        Copy a file from the repository to local folder and recursively copy the
        backing files in case the remote file is incremental.
        Reads and/or modifies self.localinfo['files'], which contains the
        unmodified copies of images in the local path.

        :param remote_file: path of remote file (or http URL)
        :param use_incremental: None (leave the decision to this function), True, False
        :return: (local_file, qemu_info, use_incremental_out)
                 local_file: name of local file
                 qemu_info: dict with qemu information of local file
                 use_incremental_out: True/False; same as use_incremental, but if
                 None a decision is taken
        """
        use_incremental_out = use_incremental
        new_backing_file = None
        local_file = None
        file_from_local = True

        # in case incremental use is not decided, take the decision depending on the image
        # avoid the use of incremental if this image is already incremental
        if remote_file[0:4] == "http":
            file_from_local = False
        if file_from_local:
            qemu_remote_info = self.qemu_get_info(remote_file)
        if use_incremental_out == None:
            use_incremental_out = not (file_from_local and 'backing file' in qemu_remote_info)
        # copy recursivelly the backing files
        if file_from_local and 'backing file' in qemu_remote_info:
            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

        # check if remote file is present locally
        if use_incremental_out and remote_file in self.localinfo['files']:
            local_file = self.localinfo['files'][remote_file]
            local_file_info = self.get_file_info(local_file)
            if file_from_local:
                remote_file_info = self.get_file_info(remote_file)
            if local_file_info == None:
                local_file = None
            elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
                # local copy of file not valid because date or size are different.
                # TODO DELETE local file if this file is not used by any active virtual machine
                try:
                    self.delete_file(local_file)
                    del self.localinfo['files'][remote_file]
                except Exception:
                    pass  # best effort; a stale cache entry will be recreated below
                local_file = None
            else:  # check that the local file has the same backing file, or there are not backing at all
                qemu_info = self.qemu_get_info(local_file)
                if new_backing_file != qemu_info.get('backing file'):
                    local_file = None

        if local_file == None:  # copy the file
            img_name = remote_file.split('/')[-1]
            img_local = self.image_path + '/' + img_name
            local_file = self.get_notused_filename(img_local)
            self.copy_file(remote_file, local_file, use_incremental_out)

            if use_incremental_out:
                self.localinfo['files'][remote_file] = local_file
            if new_backing_file:
                self.qemu_change_backing(local_file, new_backing_file)
            qemu_info = self.qemu_get_info(local_file)

        return local_file, qemu_info, use_incremental_out
    def launch_server(self, conn, server, rebuild=False, domain=None):
        """
        Launch (define and start) a server on this host through libvirt.

        :param conn: open libvirt connection to the host
        :param server: dict with server properties ('uuid', 'image_id', optional
                       'metadata', 'paused'...)
        :param rebuild: True to discard previous incremental files and recreate
        :param domain: existing libvirt domain; if given and not rebuilding, it is
                       simply resumed
        :return: (0, 'Success') on success, (negative, error_text) on failure
        """
        if self.test:
            time.sleep(random.randint(20,150))  # sleep random timeto be make it a bit more real
            return 0, 'Success'

        server_id = server['uuid']
        paused = server.get('paused','no')
        try:
            # resume path: domain already exists and no rebuild requested
            if domain != None and rebuild == False:
                domain.resume()
                #self.server_status[server_id] = 'ACTIVE'
                return 0, 'Success'

            self.db_lock.acquire()
            result, server_data = self.db.get_instance(server_id)
            self.db_lock.release()
            if result <= 0:
                print self.name, ": launch_server ERROR getting server from DB", result, server_data
                return result, server_data

            #0: get image metadata
            server_metadata = server.get('metadata', {})
            use_incremental = None

            if "use_incremental" in server_metadata:
                use_incremental = False if server_metadata["use_incremental"]=="no" else True

            server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
            if rebuild:
                #delete previous incremental files
                for file_ in server_host_files.values():
                    self.delete_file(file_['source file'] )
                server_host_files={}

            #1: obtain aditional devices (disks)
            #Put as first device the main disk
            devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
            if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
                devices += server_data['extended']['devices']

            for dev in devices:
                if dev['image_id'] == None:
                    continue

                self.db_lock.acquire()
                result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
                                                    WHERE={'uuid': dev['image_id']})
                self.db_lock.release()
                if result <= 0:
                    error_text = "ERROR", result, content, "when getting image", dev['image_id']
                    print self.name, ": launch_server", error_text
                    return -1, error_text
                if content[0]['metadata'] is not None:
                    dev['metadata'] = json.loads(content[0]['metadata'])
                else:
                    dev['metadata'] = {}

                # image already cached for this server: reuse it
                if dev['image_id'] in server_host_files:
                    dev['source file'] = server_host_files[ dev['image_id'] ] ['source file'] #local path
                    dev['file format'] = server_host_files[ dev['image_id'] ] ['file format'] # raw or qcow2
                    continue

                #2: copy image to host
                remote_file = content[0]['path']
                use_incremental_image = use_incremental
                if dev['metadata'].get("use_incremental") == "no":
                    use_incremental_image = False
                local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)

                #create incremental image
                if use_incremental_image:
                    local_file_inc = self.get_notused_filename(local_file, '.inc')
                    command = 'qemu-img create -f qcow2 '+local_file_inc+ ' -o backing_file='+ local_file
                    print 'command:', command
                    (_, _, stderr) = self.ssh_conn.exec_command(command)
                    error_msg = stderr.read()
                    if len(error_msg) > 0:
                        raise paramiko.ssh_exception.SSHException("Error creating incremental file: " + error_msg)
                    local_file = local_file_inc
                    qemu_info = {'file format':'qcow2'}

                server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}

                dev['source file'] = local_file
                dev['file format'] = qemu_info['file format']

            self.localinfo['server_files'][ server['uuid'] ] = server_host_files
            self.localinfo_dirty = True

            #3: create XML and start the domain
            result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
            if result < 0:
                print self.name, ": create xml server error:", xml
                return -2, xml
            print self.name, ": create xml:", xml
            atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
            if not rebuild: #ensures that any pending destroying server is done
                self.server_forceoff(True)
            #print self.name, ": launching instance" #, xml
            conn.createXML(xml, atribute)
            #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'

            return 0, 'Success'

        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            print self.name, ": launch_server(%s) ssh Exception: %s" %(server_id, text)
            if "SSH session not active" in text:
                self.ssh_connect()
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            print self.name, ": launch_server(%s) libvirt Exception: %s" %(server_id, text)
        except Exception as e:
            text = str(e)
            print self.name, ": launch_server(%s) Exception: %s" %(server_id, text)
        return -1, text
    def update_servers_status(self):
        """
        Poll libvirt for the state of every domain on this host and reconcile
        self.server_status and the 'instances' DB table with what is running.
        Does nothing in test mode or when no server is tracked.
        """
        # virDomainState values, for reference:
        # VIR_DOMAIN_NOSTATE = 0
        # VIR_DOMAIN_RUNNING = 1
        # VIR_DOMAIN_BLOCKED = 2
        # VIR_DOMAIN_PAUSED = 3
        # VIR_DOMAIN_SHUTDOWN = 4
        # VIR_DOMAIN_SHUTOFF = 5
        # VIR_DOMAIN_CRASHED = 6
        # VIR_DOMAIN_PMSUSPENDED = 7   #TODO suspended

        if self.test or len(self.server_status) == 0:
            return

        try:
            conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
            domains = conn.listAllDomains()
            domain_dict = {}
            for domain in domains:
                uuid = domain.UUIDString() ;
                libvirt_status = domain.state()
                #print libvirt_status
                # map libvirt domain state to openvim instance status
                if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
                    new_status = "ACTIVE"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
                    new_status = "PAUSED"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
                    new_status = "INACTIVE"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
                    new_status = "ERROR"
                else:
                    new_status = None
                domain_dict[uuid] = new_status
            conn.close()
        except host_thread.lvirt_module.libvirtError as e:
            print self.name, ": get_state() Exception '", e.get_error_message()
            return

        for server_id, current_status in self.server_status.iteritems():
            new_status = None
            if server_id in domain_dict:
                new_status = domain_dict[server_id]
            else:
                # tracked server with no libvirt domain: consider it stopped
                new_status = "INACTIVE"

            if new_status == None or new_status == current_status:
                continue
            if new_status == 'INACTIVE' and current_status == 'ERROR':
                continue  #keep ERROR status, because obviously this machine is not running
            # persist the status change
            print self.name, ": server ", server_id, "status change from ", current_status, "to", new_status
            STATUS = {'progress':100, 'status':new_status}
            if new_status == 'ERROR':
                STATUS['last_error'] = 'machine has crashed'
            self.db_lock.acquire()
            r, _ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
            self.db_lock.release()
            if r >= 0:
                self.server_status[server_id] = new_status
    def action_on_server(self, req, last_retry=True):
        """
        Perform an action on a req.
        NOTE(review): several short lines of this method were unreadable in the
        reviewed copy and have been reconstructed; confirm against upstream.

        :param req: dictionary that contains:
                server properties: 'uuid','name','tenant_id','status'
                action: 'action' dict (terminate/shutoff/start/resume/pause/reboot/
                        rebuild/forceOff/createImage)
                host properties: 'user', 'ip_name'
        :param last_retry: when False, an ERROR outcome is not written to the DB
                so that a later retry can still succeed
        :return: (error, text)
             0: No error. VM is updated to new state,
            -1: Invalid action, as trying to pause a PAUSED VM
            -2: Error accessing host
            -4: Error at DB access
            -5: Error while trying to perform action. VM is updated to ERROR
        """
        server_id = req['uuid']
        conn = None
        new_status = None
        old_status = req['status']
        last_error = None

        if self.test:
            # test mode: fake the state transitions without touching libvirt
            if 'terminate' in req['action']:
                new_status = 'deleted'
            elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
                if req['status']!='ERROR':
                    new_status = 'INACTIVE'
            elif 'start' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE'
            elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE' : new_status = 'ACTIVE'
            elif 'pause' in req['action'] and req['status']!='ERROR': new_status = 'PAUSED'
            elif 'reboot' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE'
            elif 'rebuild' in req['action']:
                time.sleep(random.randint(20,150))
                new_status = 'ACTIVE'
            elif 'createImage' in req['action']:
                self.create_image(None, req)
        else:
            try:
                conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
                try:
                    dom = conn.lookupByUUIDString(server_id)
                except host_thread.lvirt_module.libvirtError as e:
                    text = e.get_error_message()
                    if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                        dom = None
                    else:
                        print self.name, ": action_on_server(",server_id,") libvirt exception:", text
                        raise e

                if 'forceOff' in req['action']:
                    if dom == None:
                        print self.name, ": action_on_server(",server_id,") domain not running"
                    else:
                        try:
                            print self.name, ": sending DESTROY to server", server_id
                            dom.destroy()
                        except Exception as e:
                            # destroying an already-stopped domain is not an error
                            if "domain is not running" not in e.get_error_message():
                                print self.name, ": action_on_server(",server_id,") Exception while sending force off:", e.get_error_message()
                                last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                                new_status = 'ERROR'

                elif 'terminate' in req['action']:
                    if dom == None:
                        print self.name, ": action_on_server(",server_id,") domain not running"
                        new_status = 'deleted'
                    else:
                        try:
                            if req['action']['terminate'] == 'force':
                                print self.name, ": sending DESTROY to server", server_id
                                dom.destroy()
                                new_status = 'deleted'
                            else:
                                # graceful shutdown; schedule a forced kill in 10s
                                print self.name, ": sending SHUTDOWN to server", server_id
                                dom.shutdown()
                                self.pending_terminate_server.append( (time.time()+10,server_id) )
                        except Exception as e:
                            print self.name, ": action_on_server(",server_id,") Exception while destroy:", e.get_error_message()
                            last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                            new_status = 'ERROR'
                            if "domain is not running" in e.get_error_message():
                                try:
                                    dom.undefine()
                                    new_status = 'deleted'
                                except Exception:
                                    print self.name, ": action_on_server(",server_id,") Exception while undefine:", e.get_error_message()
                                    last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
                                #Exception: 'virDomainDetachDevice() failed'
                    if new_status=='deleted':
                        # forget the server and remove its cached image files
                        if server_id in self.server_status:
                            del self.server_status[server_id]
                        if req['uuid'] in self.localinfo['server_files']:
                            for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
                                try:
                                    self.delete_file(file_['source file'])
                                except Exception:
                                    pass  # best effort clean-up
                            del self.localinfo['server_files'][ req['uuid'] ]
                            self.localinfo_dirty = True

                elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
                    try:
                        if dom == None:
                            print self.name, ": action_on_server(",server_id,") domain not running"
                        else:
                            dom.shutdown()
                        # new_status = 'INACTIVE'
                        #TODO: check status for changing at database
                    except Exception as e:
                        new_status = 'ERROR'
                        print self.name, ": action_on_server(",server_id,") Exception while shutdown:", e.get_error_message()
                        last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()

                elif 'rebuild' in req['action']:
                    if dom != None:
                        dom.destroy()
                    r = self.launch_server(conn, req, True, None)
                    if r[0] < 0:
                        new_status = 'ERROR'
                        last_error = r[1]
                    else:
                        new_status = 'ACTIVE'
                elif 'start' in req['action']:
                    # The instance is only create in DB but not yet at libvirt domain, needs to be create
                    rebuild = True if req['action']['start'] == 'rebuild' else False
                    r = self.launch_server(conn, req, rebuild, dom)
                    if r[0] < 0:
                        new_status = 'ERROR'
                        last_error = r[1]
                    else:
                        new_status = 'ACTIVE'

                elif 'resume' in req['action']:
                    try:
                        if dom != None:
                            dom.resume()
                        # new_status = 'ACTIVE'
                    except Exception as e:
                        print self.name, ": action_on_server(",server_id,") Exception while resume:", e.get_error_message()

                elif 'pause' in req['action']:
                    try:
                        if dom != None:
                            dom.suspend()
                        # new_status = 'PAUSED'
                    except Exception as e:
                        print self.name, ": action_on_server(",server_id,") Exception while pause:", e.get_error_message()

                elif 'reboot' in req['action']:
                    try:
                        if dom != None:
                            dom.reboot()
                        print self.name, ": action_on_server(",server_id,") reboot:"
                        #new_status = 'ACTIVE'
                    except Exception as e:
                        print self.name, ": action_on_server(",server_id,") Exception while reboot:", e.get_error_message()
                elif 'createImage' in req['action']:
                    self.create_image(dom, req)

                conn.close()
            except host_thread.lvirt_module.libvirtError as e:
                if conn is not None: conn.close()
                text = e.get_error_message()
                new_status = "ERROR"
                last_error = text
                print self.name, ": action_on_server(",server_id,") Exception '", text
                if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                    print self.name, ": action_on_server(",server_id,") Exception removed from host"
        #end of if self.test
        if new_status == None:
            return 1

        print self.name, ": action_on_server(",server_id,") new status", new_status, last_error
        UPDATE = {'progress':100, 'status':new_status}

        if new_status=='ERROR':
            if not last_retry:  #if there will be another retry do not update database
                return -1
            elif 'terminate' in req['action']:
                #PUT a log in the database
                print self.name, ": PANIC deleting server", server_id, last_error
                self.db_lock.acquire()
                self.db.new_row('logs',
                            {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
                             'description':'PANIC deleting server from host '+self.name+': '+last_error}
                        )
                self.db_lock.release()
                if server_id in self.server_status:
                    del self.server_status[server_id]
                return -1
            else:
                UPDATE['last_error'] = last_error
        if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
            self.db_lock.acquire()
            self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
            self.server_status[server_id] = new_status
            self.db_lock.release()
        if new_status == 'ERROR':
            return -1
        return 1
    def restore_iface(self, name, mac, lib_conn=None):
        """
        Make an ifdown, ifup to restore default parameter of an interface.

        :param name: interface name (used only for logging)
        :param mac: mac address of the interface
        :param lib_conn: connection to the libvirt; if None a new connection is
                         created (and closed before returning)
        :return: (0, None) if ok, (-1, text) if fails
        """
        conn = None
        ret = 0
        error_text = None
        if self.test:
            print self.name, ": restore_iface '%s' %s" % (name, mac)
            return 0, None
        try:
            if not lib_conn:
                conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
            else:
                conn = lib_conn

            #wait to the pending VM deletion
            #TODO.Revise self.server_forceoff(True)

            iface = conn.interfaceLookupByMACString(mac)
            # destroy/create cycle == ifdown; ifup, resetting the interface defaults
            iface.destroy()
            iface.create()
            print self.name, ": restore_iface '%s' %s" % (name, mac)
        except host_thread.lvirt_module.libvirtError as e:
            error_text = e.get_error_message()
            print self.name, ": restore_iface '%s' '%s' libvirt exception: %s" %(name, mac, error_text)
            ret = -1
        finally:
            # only close connections we opened ourselves
            if lib_conn is None and conn is not None:
                conn.close()
        return ret, error_text
    def create_image(self, dom, req):
        """
        Create an image snapshot from a server disk: copy the server's local disk
        file to a destination path and, if it was incremental, re-point its
        backing file to the repository copy. Finally record the result in the
        'images' DB table.

        :param dom: libvirt domain of the server (unused in test mode)
        :param req: request dict; uses req['uuid'], req['action']['createImage']
                    ('path' or 'source'/'name') and req['new_image']['uuid']
        """
        if self.test:
            # test mode: just compute the destination path, no copy is done
            if 'path' in req['action']['createImage']:
                file_dst = req['action']['createImage']['path']
            else:
                createImage = req['action']['createImage']
                img_name = createImage['source']['path']
                index = img_name.rfind('/')
                file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
            image_status = 'ACTIVE'
        else:
            # one retry, to survive a dropped ssh session
            for retry in (0, 1):
                try:
                    server_id = req['uuid']
                    createImage = req['action']['createImage']
                    file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
                    if 'path' in req['action']['createImage']:
                        file_dst = req['action']['createImage']['path']
                    else:
                        img_name = createImage['source']['path']
                        index = img_name.rfind('/')
                        file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')

                    self.copy_file(file_orig, file_dst)
                    qemu_info = self.qemu_get_info(file_orig)
                    if 'backing file' in qemu_info:
                        # re-point the copy's backing file from the local cached
                        # image back to its repository path
                        for k, v in self.localinfo['files'].items():
                            if v == qemu_info['backing file']:
                                self.qemu_change_backing(file_dst, k)
                                break
                    image_status = 'ACTIVE'
                    break
                except paramiko.ssh_exception.SSHException as e:
                    image_status = 'ERROR'
                    error_text = e.args[0]
                    print self.name, "': create_image(",server_id,") ssh Exception:", error_text
                    if "SSH session not active" in error_text and retry==0:
                        self.ssh_connect()
                except Exception as e:
                    image_status = 'ERROR'
                    error_text = str(e)
                    print self.name, "': create_image(",server_id,") Exception:", error_text

        #TODO insert a last_error at database
        self.db_lock.acquire()
        self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
                {'uuid':req['new_image']['uuid']}, log=True)
        self.db_lock.release()
    def edit_iface(self, port_id, old_net, new_net):
        """
        Move a SR-IOV (VF) interface of a running instance between networks.
        This action implies removing and inserting the interface to put proper
        parameters (vlan tag).

        :param port_id: uuid of the port in the 'ports' DB table
        :param old_net: truthy if the iface must be detached from its current net
        :param new_net: truthy if the iface must be attached to a new net
        """
        # get iface details from DB
        self.db_lock.acquire()
        r, c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
                                 WHERE={'port_id': port_id})
        self.db_lock.release()
        if r < 0:
            print self.name, ": edit_iface(",port_id,") DDBB error:", c
            return
        elif r == 0:
            print self.name, ": edit_iface(",port_id,") por not found"
            return
        port = c[0]
        if port["model"]!="VF":
            print self.name, ": edit_iface(",port_id,") ERROR model must be VF"
            return

        #create xml detach file
        xml = []
        xml.append("<interface type='hostdev' managed='yes'>")
        xml.append(" <mac address='" +port['mac']+ "'/>")
        xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
        xml.append('</interface>')

        try:
            conn = None
            conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
            dom = conn.lookupByUUIDString(port["instance_id"])
            if old_net:
                text = "\n".join(xml)
                print self.name, ": edit_iface detaching SRIOV interface", text
                dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
            if new_net:
                # reuse the xml: replace the closing tag with vlan info and re-close
                xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
                xml.append(self.pci2xml(port.get('vpci',None)) )
                xml.append('</interface>')
                text = "\n".join(xml)
                print self.name, ": edit_iface attaching SRIOV interface", text
                dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            print self.name, ": edit_iface(",port["instance_id"],") libvirt exception:", text
        finally:
            if conn is not None: conn.close()
1922 def create_server(server
, db
, db_lock
, only_of_ports
):
1929 # host_id = server.get('host_id', None)
1930 extended
= server
.get('extended', None)
1932 # print '----------------------'
1933 # print json.dumps(extended, indent=4)
1936 requirements
['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
1937 requirements
['ram'] = server
['flavor'].get('ram', 0)
1938 if requirements
['ram']== None:
1939 requirements
['ram'] = 0
1940 requirements
['vcpus'] = server
['flavor'].get('vcpus', 0)
1941 if requirements
['vcpus']== None:
1942 requirements
['vcpus'] = 0
1943 #If extended is not defined get requirements from flavor
1944 if extended
is None:
1945 #If extended is defined in flavor convert to dictionary and use it
1946 if 'extended' in server
['flavor'] and server
['flavor']['extended'] != None:
1947 json_acceptable_string
= server
['flavor']['extended'].replace("'", "\"")
1948 extended
= json
.loads(json_acceptable_string
)
1951 #print json.dumps(extended, indent=4)
1953 #For simplicity only one numa VM are supported in the initial implementation
1954 if extended
!= None:
1955 numas
= extended
.get('numas', [])
1957 return (-2, "Multi-NUMA VMs are not supported yet")
1959 # return (-1, "At least one numa must be specified")
1961 #a for loop is used in order to be ready to multi-NUMA VMs
1965 numa_req
['memory'] = numa
.get('memory', 0)
1967 numa_req
['proc_req_nb'] = numa
['cores'] #number of cores or threads to be reserved
1968 numa_req
['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
1969 numa_req
['proc_req_list'] = numa
.get('cores-id', None) #list of ids to be assigned to the cores or threads
1970 elif 'paired-threads' in numa
:
1971 numa_req
['proc_req_nb'] = numa
['paired-threads']
1972 numa_req
['proc_req_type'] = 'paired-threads'
1973 numa_req
['proc_req_list'] = numa
.get('paired-threads-id', None)
1974 elif 'threads' in numa
:
1975 numa_req
['proc_req_nb'] = numa
['threads']
1976 numa_req
['proc_req_type'] = 'threads'
1977 numa_req
['proc_req_list'] = numa
.get('threads-id', None)
1979 numa_req
['proc_req_nb'] = 0 # by default
1980 numa_req
['proc_req_type'] = 'threads'
1984 #Generate a list of sriov and another for physical interfaces
1985 interfaces
= numa
.get('interfaces', [])
1988 for iface
in interfaces
:
1989 iface
['bandwidth'] = int(iface
['bandwidth'])
1990 if iface
['dedicated'][:3]=='yes':
1991 port_list
.append(iface
)
1993 sriov_list
.append(iface
)
1995 #Save lists ordered from more restrictive to less bw requirements
1996 numa_req
['sriov_list'] = sorted(sriov_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
1997 numa_req
['port_list'] = sorted(port_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2000 request
.append(numa_req
)
2002 # print "----------\n"+json.dumps(request[0], indent=4)
2003 # print '----------\n\n'
2005 #Search in db for an appropriate numa for each requested numa
2006 #at the moment multi-NUMA VMs are not supported
2008 requirements
['numa'].update(request
[0])
2009 if requirements
['numa']['memory']>0:
2010 requirements
['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2011 elif requirements
['ram']==0:
2012 return (-1, "Memory information not set neither at extended field not at ram")
2013 if requirements
['numa']['proc_req_nb']>0:
2014 requirements
['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2015 elif requirements
['vcpus']==0:
2016 return (-1, "Processor information not set neither at extended field not at vcpus")
2020 result
, content
= db
.get_numas(requirements
, server
.get('host_id', None), only_of_ports
)
2024 return (-1, content
)
2026 numa_id
= content
['numa_id']
2027 host_id
= content
['host_id']
2029 #obtain threads_id and calculate pinning
2032 if requirements
['numa']['proc_req_nb']>0:
2034 result
, content
= db
.get_table(FROM
='resources_core',
2035 SELECT
=('id','core_id','thread_id'),
2036 WHERE
={'numa_id':numa_id
,'instance_id': None, 'status':'ok'} )
2042 #convert rows to a dictionary indexed by core_id
2045 if not row
['core_id'] in cores_dict
:
2046 cores_dict
[row
['core_id']] = []
2047 cores_dict
[row
['core_id']].append([row
['thread_id'],row
['id']])
2049 #In case full cores are requested
2051 if requirements
['numa']['proc_req_type'] == 'cores':
2052 #Get/create the list of the vcpu_ids
2053 vcpu_id_list
= requirements
['numa']['proc_req_list']
2054 if vcpu_id_list
== None:
2055 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2057 for threads
in cores_dict
.itervalues():
2059 if len(threads
) != 2:
2062 #set pinning for the first thread
2063 cpu_pinning
.append( [ vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1] ] )
2065 #reserve so it is not used the second thread
2066 reserved_threads
.append(threads
[1][1])
2068 if len(vcpu_id_list
) == 0:
2071 #In case paired threads are requested
2072 elif requirements
['numa']['proc_req_type'] == 'paired-threads':
2074 #Get/create the list of the vcpu_ids
2075 if requirements
['numa']['proc_req_list'] != None:
2077 for pair
in requirements
['numa']['proc_req_list']:
2079 return -1, "Field paired-threads-id not properly specified"
2081 vcpu_id_list
.append(pair
[0])
2082 vcpu_id_list
.append(pair
[1])
2084 vcpu_id_list
= range(0,2*int(requirements
['numa']['proc_req_nb']))
2086 for threads
in cores_dict
.itervalues():
2088 if len(threads
) != 2:
2090 #set pinning for the first thread
2091 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2093 #set pinning for the second thread
2094 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2096 if len(vcpu_id_list
) == 0:
2099 #In case normal threads are requested
2100 elif requirements
['numa']['proc_req_type'] == 'threads':
2101 #Get/create the list of the vcpu_ids
2102 vcpu_id_list
= requirements
['numa']['proc_req_list']
2103 if vcpu_id_list
== None:
2104 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2106 for threads_index
in sorted(cores_dict
, key
=lambda k
: len(cores_dict
[k
])):
2107 threads
= cores_dict
[threads_index
]
2108 #set pinning for the first thread
2109 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2111 #if exists, set pinning for the second thread
2112 if len(threads
) == 2 and len(vcpu_id_list
) != 0:
2113 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2115 if len(vcpu_id_list
) == 0:
2118 #Get the source pci addresses for the selected numa
2119 used_sriov_ports
= []
2120 for port
in requirements
['numa']['sriov_list']:
2122 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2128 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2130 port
['pci'] = row
['pci']
2131 if 'mac_address' not in port
:
2132 port
['mac_address'] = row
['mac']
2134 port
['port_id']=row
['id']
2135 port
['Mbps_used'] = port
['bandwidth']
2136 used_sriov_ports
.append(row
['id'])
2139 for port
in requirements
['numa']['port_list']:
2140 port
['Mbps_used'] = None
2141 if port
['dedicated'] != "yes:sriov":
2142 port
['mac_address'] = port
['mac']
2146 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac', 'Mbps'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2151 port
['Mbps_used'] = content
[0]['Mbps']
2153 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2155 port
['pci'] = row
['pci']
2156 if 'mac_address' not in port
:
2157 port
['mac_address'] = row
['mac'] # mac cannot be set to passthrough ports
2159 port
['port_id']=row
['id']
2160 used_sriov_ports
.append(row
['id'])
2163 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2164 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2166 server
['host_id'] = host_id
2169 #Generate dictionary for saving in db the instance resources
2171 resources
['bridged-ifaces'] = []
2174 numa_dict
['interfaces'] = []
2176 numa_dict
['interfaces'] += requirements
['numa']['port_list']
2177 numa_dict
['interfaces'] += requirements
['numa']['sriov_list']
2179 #Check bridge information
2180 unified_dataplane_iface
=[]
2181 unified_dataplane_iface
+= requirements
['numa']['port_list']
2182 unified_dataplane_iface
+= requirements
['numa']['sriov_list']
2184 for control_iface
in server
.get('networks', []):
2185 control_iface
['net_id']=control_iface
.pop('uuid')
2186 #Get the brifge name
2188 result
, content
= db
.get_table(FROM
='nets',
2189 SELECT
=('name', 'type', 'vlan', 'provider', 'enable_dhcp',
2190 'dhcp_first_ip', 'dhcp_last_ip', 'cidr'),
2191 WHERE
={'uuid': control_iface
['net_id']})
2196 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface
['net_id']
2199 if control_iface
.get("type", 'virtual') == 'virtual':
2200 if network
['type']!='bridge_data' and network
['type']!='bridge_man':
2201 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface
['net_id']
2202 resources
['bridged-ifaces'].append(control_iface
)
2203 if network
.get("provider") and network
["provider"][0:3] == "OVS":
2204 control_iface
["type"] = "instance:ovs"
2206 control_iface
["type"] = "instance:bridge"
2207 if network
.get("vlan"):
2208 control_iface
["vlan"] = network
["vlan"]
2210 if network
.get("enable_dhcp") == 'true':
2211 control_iface
["enable_dhcp"] = network
.get("enable_dhcp")
2212 control_iface
["dhcp_first_ip"] = network
["dhcp_first_ip"]
2213 control_iface
["dhcp_last_ip"] = network
["dhcp_last_ip"]
2214 control_iface
["cidr"] = network
["cidr"]
2216 if network
['type']!='data' and network
['type']!='ptp':
2217 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface
['net_id']
2218 #dataplane interface, look for it in the numa tree and asign this network
2220 for dataplane_iface
in numa_dict
['interfaces']:
2221 if dataplane_iface
['name'] == control_iface
.get("name"):
2222 if (dataplane_iface
['dedicated'] == "yes" and control_iface
["type"] != "PF") or \
2223 (dataplane_iface
['dedicated'] == "no" and control_iface
["type"] != "VF") or \
2224 (dataplane_iface
['dedicated'] == "yes:sriov" and control_iface
["type"] != "VFnotShared") :
2225 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2226 (control_iface
.get("name"), dataplane_iface
['dedicated'], control_iface
["type"])
2227 dataplane_iface
['uuid'] = control_iface
['net_id']
2228 if dataplane_iface
['dedicated'] == "no":
2229 dataplane_iface
['vlan'] = network
['vlan']
2230 if dataplane_iface
['dedicated'] != "yes" and control_iface
.get("mac_address"):
2231 dataplane_iface
['mac_address'] = control_iface
.get("mac_address")
2232 if control_iface
.get("vpci"):
2233 dataplane_iface
['vpci'] = control_iface
.get("vpci")
2237 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface
.get("name")
2239 resources
['host_id'] = host_id
2240 resources
['image_id'] = server
['image_id']
2241 resources
['flavor_id'] = server
['flavor_id']
2242 resources
['tenant_id'] = server
['tenant_id']
2243 resources
['ram'] = requirements
['ram']
2244 resources
['vcpus'] = requirements
['vcpus']
2245 resources
['status'] = 'CREATING'
2247 if 'description' in server
: resources
['description'] = server
['description']
2248 if 'name' in server
: resources
['name'] = server
['name']
2250 resources
['extended'] = {} #optional
2251 resources
['extended']['numas'] = []
2252 numa_dict
['numa_id'] = numa_id
2253 numa_dict
['memory'] = requirements
['numa']['memory']
2254 numa_dict
['cores'] = []
2256 for core
in cpu_pinning
:
2257 numa_dict
['cores'].append({'id': core
[2], 'vthread': core
[0], 'paired': paired
})
2258 for core
in reserved_threads
:
2259 numa_dict
['cores'].append({'id': core
})
2260 resources
['extended']['numas'].append(numa_dict
)
2261 if extended
!=None and 'devices' in extended
: #TODO allow extra devices without numa
2262 resources
['extended']['devices'] = extended
['devices']
2265 print '===================================={'
2266 print json
.dumps(resources
, indent
=4)
2267 print '====================================}'