1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
This is a thread that interacts with the host and with libvirt to manage VMs.
One thread will be launched per host.
28 __author__
= "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__
= "$10-jul-2014 12:07:15$"
37 from jsonschema
import validate
as js_v
, exceptions
as js_e
40 from vim_schema
import localinfo_schema
, hostinfo_schema
44 #TODO: insert a logging system
46 # from logging import Logger
47 # import auxiliary_functions as af
49 # TODO: insert a logging system
52 class host_thread(threading
.Thread
):
55 def __init__(self
, name
, host
, user
, db
, db_lock
, test
, image_path
, host_id
, version
, develop_mode
,
56 develop_bridge_iface
):
61 'host','user': host ip or name to manage and user
62 'db', 'db_lock': database class and lock to use it in exclusion
64 threading
.Thread
.__init
__(self
)
69 self
.db_lock
= db_lock
72 if not test
and not host_thread
.lvirt_module
:
74 module_info
= imp
.find_module("libvirt")
75 host_thread
.lvirt_module
= imp
.load_module("libvirt", *module_info
)
76 except (IOError, ImportError) as e
:
77 raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e
))
80 self
.develop_mode
= develop_mode
81 self
.develop_bridge_iface
= develop_bridge_iface
82 self
.image_path
= image_path
83 self
.host_id
= host_id
84 self
.version
= version
89 self
.server_status
= {} #dictionary with pairs server_uuid:server_status
90 self
.pending_terminate_server
=[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
91 self
.next_update_server_status
= 0 #time when must be check servers status
95 self
.queueLock
= threading
.Lock()
96 self
.taskQueue
= Queue
.Queue(2000)
99 def ssh_connect(self
):
102 self
.ssh_conn
= paramiko
.SSHClient()
103 self
.ssh_conn
.set_missing_host_key_policy(paramiko
.AutoAddPolicy())
104 self
.ssh_conn
.load_system_host_keys()
105 self
.ssh_conn
.connect(self
.host
, username
=self
.user
, timeout
=10) #, None)
106 except paramiko
.ssh_exception
.SSHException
as e
:
108 print self
.name
, ": ssh_connect ssh Exception:", text
110 def load_localinfo(self
):
116 command
= 'mkdir -p ' + self
.image_path
117 #print self.name, ': command:', command
118 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
119 content
= stderr
.read()
121 print self
.name
, ': command:', command
, "stderr:", content
123 command
= 'cat ' + self
.image_path
+ '/.openvim.yaml'
124 #print self.name, ': command:', command
125 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
126 content
= stdout
.read()
127 if len(content
) == 0:
128 print self
.name
, ': command:', command
, "stderr:", stderr
.read()
129 raise paramiko
.ssh_exception
.SSHException("Error empty file ")
130 self
.localinfo
= yaml
.load(content
)
131 js_v(self
.localinfo
, localinfo_schema
)
132 self
.localinfo_dirty
=False
133 if 'server_files' not in self
.localinfo
:
134 self
.localinfo
['server_files'] = {}
135 print self
.name
, ': localinfo load from host'
138 except paramiko
.ssh_exception
.SSHException
as e
:
140 print self
.name
, ": load_localinfo ssh Exception:", text
141 except host_thread
.lvirt_module
.libvirtError
as e
:
142 text
= e
.get_error_message()
143 print self
.name
, ": load_localinfo libvirt Exception:", text
144 except yaml
.YAMLError
as exc
:
146 if hasattr(exc
, 'problem_mark'):
147 mark
= exc
.problem_mark
148 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
149 print self
.name
, ": load_localinfo yaml format Exception", text
150 except js_e
.ValidationError
as e
:
152 if len(e
.path
)>0: text
=" at '" + ":".join(map(str, e
.path
))+"'"
153 print self
.name
, ": load_localinfo format Exception:", text
, e
.message
154 except Exception as e
:
156 print self
.name
, ": load_localinfo Exception:", text
158 #not loaded, insert a default data and force saving by activating dirty flag
159 self
.localinfo
= {'files':{}, 'server_files':{} }
160 #self.localinfo_dirty=True
161 self
.localinfo_dirty
=False
163 def load_hostinfo(self
):
171 command
= 'cat ' + self
.image_path
+ '/hostinfo.yaml'
172 #print self.name, ': command:', command
173 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
174 content
= stdout
.read()
175 if len(content
) == 0:
176 print self
.name
, ': command:', command
, "stderr:", stderr
.read()
177 raise paramiko
.ssh_exception
.SSHException("Error empty file ")
178 self
.hostinfo
= yaml
.load(content
)
179 js_v(self
.hostinfo
, hostinfo_schema
)
180 print self
.name
, ': hostlinfo load from host', self
.hostinfo
183 except paramiko
.ssh_exception
.SSHException
as e
:
185 print self
.name
, ": load_hostinfo ssh Exception:", text
186 except host_thread
.lvirt_module
.libvirtError
as e
:
187 text
= e
.get_error_message()
188 print self
.name
, ": load_hostinfo libvirt Exception:", text
189 except yaml
.YAMLError
as exc
:
191 if hasattr(exc
, 'problem_mark'):
192 mark
= exc
.problem_mark
193 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
194 print self
.name
, ": load_hostinfo yaml format Exception", text
195 except js_e
.ValidationError
as e
:
197 if len(e
.path
)>0: text
=" at '" + ":".join(map(str, e
.path
))+"'"
198 print self
.name
, ": load_hostinfo format Exception:", text
, e
.message
199 except Exception as e
:
201 print self
.name
, ": load_hostinfo Exception:", text
203 #not loaded, insert a default data
206 def save_localinfo(self
, tries
=3):
208 self
.localinfo_dirty
= False
215 command
= 'cat > ' + self
.image_path
+ '/.openvim.yaml'
216 print self
.name
, ': command:', command
217 (stdin
, _
, _
) = self
.ssh_conn
.exec_command(command
)
218 yaml
.safe_dump(self
.localinfo
, stdin
, explicit_start
=True, indent
=4, default_flow_style
=False, tags
=False, encoding
='utf-8', allow_unicode
=True)
219 self
.localinfo_dirty
= False
222 except paramiko
.ssh_exception
.SSHException
as e
:
224 print self
.name
, ": save_localinfo ssh Exception:", text
225 if "SSH session not active" in text
:
227 except host_thread
.lvirt_module
.libvirtError
as e
:
228 text
= e
.get_error_message()
229 print self
.name
, ": save_localinfo libvirt Exception:", text
230 except yaml
.YAMLError
as exc
:
232 if hasattr(exc
, 'problem_mark'):
233 mark
= exc
.problem_mark
234 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
235 print self
.name
, ": save_localinfo yaml format Exception", text
236 except Exception as e
:
238 print self
.name
, ": save_localinfo Exception:", text
240 def load_servers_from_db(self
):
241 self
.db_lock
.acquire()
242 r
,c
= self
.db
.get_table(SELECT
=('uuid','status', 'image_id'), FROM
='instances', WHERE
={'host_id': self
.host_id
})
243 self
.db_lock
.release()
245 self
.server_status
= {}
247 print self
.name
, ": Error getting data from database:", c
250 self
.server_status
[ server
['uuid'] ] = server
['status']
252 #convert from old version to new one
253 if 'inc_files' in self
.localinfo
and server
['uuid'] in self
.localinfo
['inc_files']:
254 server_files_dict
= {'source file': self
.localinfo
['inc_files'][ server
['uuid'] ] [0], 'file format':'raw' }
255 if server_files_dict
['source file'][-5:] == 'qcow2':
256 server_files_dict
['file format'] = 'qcow2'
258 self
.localinfo
['server_files'][ server
['uuid'] ] = { server
['image_id'] : server_files_dict
}
259 if 'inc_files' in self
.localinfo
:
260 del self
.localinfo
['inc_files']
261 self
.localinfo_dirty
= True
263 def delete_unused_files(self
):
264 '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
265 Deletes unused entries at self.loacalinfo and the corresponding local files.
266 The only reason for this mismatch is the manual deletion of instances (VM) at database
270 for uuid
,images
in self
.localinfo
['server_files'].items():
271 if uuid
not in self
.server_status
:
272 for localfile
in images
.values():
274 print self
.name
, ": deleting file '%s' of unused server '%s'" %(localfile
['source file'], uuid
)
275 self
.delete_file(localfile
['source file'])
276 except paramiko
.ssh_exception
.SSHException
as e
:
277 print self
.name
, ": Exception deleting file '%s': %s" %(localfile
['source file'], str(e
))
278 del self
.localinfo
['server_files'][uuid
]
279 self
.localinfo_dirty
= True
281 def insert_task(self
, task
, *aditional
):
283 self
.queueLock
.acquire()
284 task
= self
.taskQueue
.put( (task
,) + aditional
, timeout
=5)
285 self
.queueLock
.release()
288 return -1, "timeout inserting a task over host " + self
.name
292 self
.load_localinfo()
294 self
.load_servers_from_db()
295 self
.delete_unused_files()
297 self
.queueLock
.acquire()
298 if not self
.taskQueue
.empty():
299 task
= self
.taskQueue
.get()
302 self
.queueLock
.release()
306 if self
.localinfo_dirty
:
307 self
.save_localinfo()
308 elif self
.next_update_server_status
< now
:
309 self
.update_servers_status()
310 self
.next_update_server_status
= now
+ 5
311 elif len(self
.pending_terminate_server
)>0 and self
.pending_terminate_server
[0][0]<now
:
312 self
.server_forceoff()
317 if task
[0] == 'instance':
318 print self
.name
, ": processing task instance", task
[1]['action']
322 r
=self
.action_on_server(task
[1], retry
==2)
325 elif task
[0] == 'image':
327 elif task
[0] == 'exit':
328 print self
.name
, ": processing task exit"
331 elif task
[0] == 'reload':
332 print self
.name
, ": processing task reload terminating and relaunching"
335 elif task
[0] == 'edit-iface':
336 print self
.name
, ": processing task edit-iface port=%s, old_net=%s, new_net=%s" % (task
[1], task
[2], task
[3])
337 self
.edit_iface(task
[1], task
[2], task
[3])
338 elif task
[0] == 'restore-iface':
339 print self
.name
, ": processing task restore-iface %s mac=%s" % (task
[1], task
[2])
340 self
.restore_iface(task
[1], task
[2])
341 elif task
[0] == 'new-ovsbridge':
342 print self
.name
, ": Creating compute OVS bridge"
343 self
.create_ovs_bridge()
344 elif task
[0] == 'new-vxlan':
345 print self
.name
, ": Creating vxlan tunnel=" + task
[1] + ", remote ip=" + task
[2]
346 self
.create_ovs_vxlan_tunnel(task
[1], task
[2])
347 elif task
[0] == 'del-ovsbridge':
348 print self
.name
, ": Deleting OVS bridge"
349 self
.delete_ovs_bridge()
350 elif task
[0] == 'del-vxlan':
351 print self
.name
, ": Deleting vxlan " + task
[1] + " tunnel"
352 self
.delete_ovs_vxlan_tunnel(task
[1])
353 elif task
[0] == 'create-ovs-bridge-port':
354 print self
.name
, ": Adding port ovim-" + task
[1] + " to OVS bridge"
355 self
.create_ovs_bridge_port(task
[1])
356 elif task
[0] == 'del-ovs-port':
357 print self
.name
, ": Delete bridge attached to ovs port vlan {} net {}".format(task
[1], task
[2])
358 self
.delete_bridge_port_attached_to_ovs(task
[1], task
[2])
360 print self
.name
, ": unknown task", task
362 def server_forceoff(self
, wait_until_finished
=False):
363 while len(self
.pending_terminate_server
)>0:
365 if self
.pending_terminate_server
[0][0]>now
:
366 if wait_until_finished
:
371 req
={'uuid':self
.pending_terminate_server
[0][1],
372 'action':{'terminate':'force'},
375 self
.action_on_server(req
)
376 self
.pending_terminate_server
.pop(0)
380 self
.server_forceoff(True)
381 if self
.localinfo_dirty
:
382 self
.save_localinfo()
384 self
.ssh_conn
.close()
385 except Exception as e
:
387 print self
.name
, ": terminate Exception:", text
388 print self
.name
, ": exit from host_thread"
390 def get_local_iface_name(self
, generic_name
):
391 if self
.hostinfo
!= None and "iface_names" in self
.hostinfo
and generic_name
in self
.hostinfo
["iface_names"]:
392 return self
.hostinfo
["iface_names"][generic_name
]
395 def create_xml_server(self
, server
, dev_list
, server_metadata
={}):
396 """Function that implements the generation of the VM XML definition.
397 Additional devices are in dev_list list
398 The main disk is upon dev_list[0]"""
400 #get if operating system is Windows
402 os_type
= server_metadata
.get('os_type', None)
403 if os_type
== None and 'metadata' in dev_list
[0]:
404 os_type
= dev_list
[0]['metadata'].get('os_type', None)
405 if os_type
!= None and os_type
.lower() == "windows":
407 #get type of hard disk bus
408 bus_ide
= True if windows_os
else False
409 bus
= server_metadata
.get('bus', None)
410 if bus
== None and 'metadata' in dev_list
[0]:
411 bus
= dev_list
[0]['metadata'].get('bus', None)
413 bus_ide
= True if bus
=='ide' else False
417 text
= "<domain type='kvm'>"
419 topo
= server_metadata
.get('topology', None)
420 if topo
== None and 'metadata' in dev_list
[0]:
421 topo
= dev_list
[0]['metadata'].get('topology', None)
423 name
= server
.get('name','') + "_" + server
['uuid']
424 name
= name
[:58] #qemu impose a length limit of 59 chars or not start. Using 58
425 text
+= self
.inc_tab() + "<name>" + name
+ "</name>"
427 text
+= self
.tab() + "<uuid>" + server
['uuid'] + "</uuid>"
430 if 'extended' in server
and server
['extended']!=None and 'numas' in server
['extended']:
431 numa
= server
['extended']['numas'][0]
434 memory
= int(numa
.get('memory',0))*1024*1024 #in KiB
436 memory
= int(server
['ram'])*1024;
438 if not self
.develop_mode
:
441 return -1, 'No memory assigned to instance'
443 text
+= self
.tab() + "<memory unit='KiB'>" +memory
+"</memory>"
444 text
+= self
.tab() + "<currentMemory unit='KiB'>" +memory
+ "</currentMemory>"
446 text
+= self
.tab()+'<memoryBacking>'+ \
447 self
.inc_tab() + '<hugepages/>'+ \
448 self
.dec_tab()+ '</memoryBacking>'
451 use_cpu_pinning
=False
452 vcpus
= int(server
.get("vcpus",0))
454 if 'cores-source' in numa
:
456 for index
in range(0, len(numa
['cores-source'])):
457 cpu_pinning
.append( [ numa
['cores-id'][index
], numa
['cores-source'][index
] ] )
459 if 'threads-source' in numa
:
461 for index
in range(0, len(numa
['threads-source'])):
462 cpu_pinning
.append( [ numa
['threads-id'][index
], numa
['threads-source'][index
] ] )
464 if 'paired-threads-source' in numa
:
466 for index
in range(0, len(numa
['paired-threads-source'])):
467 cpu_pinning
.append( [numa
['paired-threads-id'][index
][0], numa
['paired-threads-source'][index
][0] ] )
468 cpu_pinning
.append( [numa
['paired-threads-id'][index
][1], numa
['paired-threads-source'][index
][1] ] )
471 if use_cpu_pinning
and not self
.develop_mode
:
472 text
+= self
.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning
)) +"</vcpu>" + \
473 self
.tab()+'<cputune>'
475 for i
in range(0, len(cpu_pinning
)):
476 text
+= self
.tab() + "<vcpupin vcpu='" +str(cpu_pinning
[i
][0])+ "' cpuset='" +str(cpu_pinning
[i
][1]) +"'/>"
477 text
+= self
.dec_tab()+'</cputune>'+ \
478 self
.tab() + '<numatune>' +\
479 self
.inc_tab() + "<memory mode='strict' nodeset='" +str(numa
['source'])+ "'/>" +\
480 self
.dec_tab() + '</numatune>'
483 return -1, "Instance without number of cpus"
484 text
+= self
.tab()+"<vcpu>" + str(vcpus
) + "</vcpu>"
489 if dev
['type']=='cdrom' :
492 text
+= self
.tab()+ '<os>' + \
493 self
.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
495 text
+= self
.tab() + "<boot dev='cdrom'/>"
496 text
+= self
.tab() + "<boot dev='hd'/>" + \
497 self
.dec_tab()+'</os>'
499 text
+= self
.tab()+'<features>'+\
500 self
.inc_tab()+'<acpi/>' +\
501 self
.tab()+'<apic/>' +\
502 self
.tab()+'<pae/>'+ \
503 self
.dec_tab() +'</features>'
504 if windows_os
or topo
=="oneSocket":
505 text
+= self
.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>"% vcpus
507 text
+= self
.tab() + "<cpu mode='host-model'></cpu>"
508 text
+= self
.tab() + "<clock offset='utc'/>" +\
509 self
.tab() + "<on_poweroff>preserve</on_poweroff>" + \
510 self
.tab() + "<on_reboot>restart</on_reboot>" + \
511 self
.tab() + "<on_crash>restart</on_crash>"
512 text
+= self
.tab() + "<devices>" + \
513 self
.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
514 self
.tab() + "<serial type='pty'>" +\
515 self
.inc_tab() + "<target port='0'/>" + \
516 self
.dec_tab() + "</serial>" +\
517 self
.tab() + "<console type='pty'>" + \
518 self
.inc_tab()+ "<target type='serial' port='0'/>" + \
519 self
.dec_tab()+'</console>'
521 text
+= self
.tab() + "<controller type='usb' index='0'/>" + \
522 self
.tab() + "<controller type='ide' index='0'/>" + \
523 self
.tab() + "<input type='mouse' bus='ps2'/>" + \
524 self
.tab() + "<sound model='ich6'/>" + \
525 self
.tab() + "<video>" + \
526 self
.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
527 self
.dec_tab() + "</video>" + \
528 self
.tab() + "<memballoon model='virtio'/>" + \
529 self
.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
531 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
532 #> self.dec_tab()+'</hostdev>\n' +\
533 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
535 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
537 #If image contains 'GRAPH' include graphics
538 #if 'GRAPH' in image:
539 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
540 self
.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
541 self
.dec_tab() + "</graphics>"
545 bus_ide_dev
= bus_ide
546 if dev
['type']=='cdrom' or dev
['type']=='disk':
547 if dev
['type']=='cdrom':
549 text
+= self
.tab() + "<disk type='file' device='"+dev
['type']+"'>"
550 if 'file format' in dev
:
551 text
+= self
.inc_tab() + "<driver name='qemu' type='" +dev
['file format']+ "' cache='writethrough'/>"
552 if 'source file' in dev
:
553 text
+= self
.tab() + "<source file='" +dev
['source file']+ "'/>"
554 #elif v['type'] == 'block':
555 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
557 # return -1, 'Unknown disk type ' + v['type']
558 vpci
= dev
.get('vpci',None)
560 vpci
= dev
['metadata'].get('vpci',None)
561 text
+= self
.pci2xml(vpci
)
564 text
+= self
.tab() + "<target dev='hd" +vd_index
+ "' bus='ide'/>" #TODO allows several type of disks
566 text
+= self
.tab() + "<target dev='vd" +vd_index
+ "' bus='virtio'/>"
567 text
+= self
.dec_tab() + '</disk>'
568 vd_index
= chr(ord(vd_index
)+1)
569 elif dev
['type']=='xml':
570 dev_text
= dev
['xml']
572 dev_text
= dev_text
.replace('__vpci__', dev
['vpci'])
573 if 'source file' in dev
:
574 dev_text
= dev_text
.replace('__file__', dev
['source file'])
575 if 'file format' in dev
:
576 dev_text
= dev_text
.replace('__format__', dev
['source file'])
577 if '__dev__' in dev_text
:
578 dev_text
= dev_text
.replace('__dev__', vd_index
)
579 vd_index
= chr(ord(vd_index
)+1)
582 return -1, 'Unknown device type ' + dev
['type']
585 bridge_interfaces
= server
.get('networks', [])
586 for v
in bridge_interfaces
:
588 self
.db_lock
.acquire()
589 result
, content
= self
.db
.get_table(FROM
='nets', SELECT
=('provider',),WHERE
={'uuid':v
['net_id']} )
590 self
.db_lock
.release()
592 print "create_xml_server ERROR getting nets",result
, content
594 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
595 #I know it is not secure
596 #for v in sorted(desc['network interfaces'].itervalues()):
597 model
= v
.get("model", None)
598 if content
[0]['provider']=='default':
599 text
+= self
.tab() + "<interface type='network'>" + \
600 self
.inc_tab() + "<source network='" +content
[0]['provider']+ "'/>"
601 elif content
[0]['provider'][0:7]=='macvtap':
602 text
+= self
.tab()+"<interface type='direct'>" + \
603 self
.inc_tab() + "<source dev='" + self
.get_local_iface_name(content
[0]['provider'][8:]) + "' mode='bridge'/>" + \
604 self
.tab() + "<target dev='macvtap0'/>"
606 text
+= self
.tab() + "<alias name='net" + str(net_nb
) + "'/>"
609 elif content
[0]['provider'][0:6]=='bridge':
610 text
+= self
.tab() + "<interface type='bridge'>" + \
611 self
.inc_tab()+"<source bridge='" +self
.get_local_iface_name(content
[0]['provider'][7:])+ "'/>"
613 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
614 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
617 elif content
[0]['provider'][0:3] == "OVS":
618 vlan
= content
[0]['provider'].replace('OVS:', '')
619 text
+= self
.tab() + "<interface type='bridge'>" + \
620 self
.inc_tab() + "<source bridge='ovim-" + vlan
+ "'/>"
622 return -1, 'Unknown Bridge net provider ' + content
[0]['provider']
624 text
+= self
.tab() + "<model type='" +model
+ "'/>"
625 if v
.get('mac_address', None) != None:
626 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
627 text
+= self
.pci2xml(v
.get('vpci',None))
628 text
+= self
.dec_tab()+'</interface>'
632 interfaces
= numa
.get('interfaces', [])
636 if self
.develop_mode
: #map these interfaces to bridges
637 text
+= self
.tab() + "<interface type='bridge'>" + \
638 self
.inc_tab()+"<source bridge='" +self
.develop_bridge_iface
+ "'/>"
640 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
641 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
643 text
+= self
.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
644 if v
.get('mac_address', None) != None:
645 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
646 text
+= self
.pci2xml(v
.get('vpci',None))
647 text
+= self
.dec_tab()+'</interface>'
650 if v
['dedicated'] == 'yes': #passthrought
651 text
+= self
.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
652 self
.inc_tab() + "<source>"
654 text
+= self
.pci2xml(v
['source'])
655 text
+= self
.dec_tab()+'</source>'
656 text
+= self
.pci2xml(v
.get('vpci',None))
658 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
659 text
+= self
.dec_tab()+'</hostdev>'
661 else: #sriov_interfaces
662 #skip not connected interfaces
663 if v
.get("net_id") == None:
665 text
+= self
.tab() + "<interface type='hostdev' managed='yes'>"
667 if v
.get('mac_address', None) != None:
668 text
+= self
.tab() + "<mac address='" +v
['mac_address']+ "'/>"
669 text
+= self
.tab()+'<source>'
671 text
+= self
.pci2xml(v
['source'])
672 text
+= self
.dec_tab()+'</source>'
673 if v
.get('vlan',None) != None:
674 text
+= self
.tab() + "<vlan> <tag id='" + str(v
['vlan']) + "'/> </vlan>"
675 text
+= self
.pci2xml(v
.get('vpci',None))
677 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
678 text
+= self
.dec_tab()+'</interface>'
681 text
+= self
.dec_tab()+'</devices>'+\
682 self
.dec_tab()+'</domain>'
685 def pci2xml(self
, pci
):
686 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
687 alows an empty pci text'''
690 first_part
= pci
.split(':')
691 second_part
= first_part
[2].split('.')
692 return self
.tab() + "<address type='pci' domain='0x" + first_part
[0] + \
693 "' bus='0x" + first_part
[1] + "' slot='0x" + second_part
[0] + \
694 "' function='0x" + second_part
[1] + "'/>"
697 """Return indentation according to xml_level"""
698 return "\n" + (' '*self
.xml_level
)
701 """Increment and return indentation according to xml_level"""
706 """Decrement and return indentation according to xml_level"""
710 def create_ovs_bridge(self
):
712 Create a bridge in compute OVS to allocate VMs
713 :return: True if success
717 command
= 'sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true'
718 print self
.name
, ': command:', command
719 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
720 content
= stdout
.read()
721 if len(content
) == 0:
726 def delete_port_to_ovs_bridge(self
, vlan
, net_uuid
):
728 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
729 :param vlan: vlan port id
730 :param net_uuid: network id
737 port_name
= 'ovim-' + vlan
738 command
= 'sudo ovs-vsctl del-port br-int ' + port_name
739 print self
.name
, ': command:', command
740 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
741 content
= stdout
.read()
742 if len(content
) == 0:
747 def delete_dhcp_server(self
, vlan
, net_uuid
, dhcp_path
):
749 Delete dhcp server process lining in namespace
750 :param vlan: segmentation id
751 :param net_uuid: network uuid
752 :param dhcp_path: conf fiel path that live in namespace side
757 if not self
.is_dhcp_port_free(vlan
, net_uuid
):
760 net_namespace
= 'ovim-' + vlan
761 dhcp_path
= os
.path
.join(dhcp_path
, net_namespace
)
762 pid_file
= os
.path
.join(dhcp_path
, 'dnsmasq.pid')
764 command
= 'sudo ip netns exec ' + net_namespace
+ ' cat ' + pid_file
765 print self
.name
, ': command:', command
766 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
767 content
= stdout
.read()
769 command
= 'sudo ip netns exec ' + net_namespace
+ ' kill -9 ' + content
770 print self
.name
, ': command:', command
771 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
772 content
= stdout
.read()
774 # if len(content) == 0:
779 def is_dhcp_port_free(self
, host_id
, net_uuid
):
781 Check if any port attached to the a net in a vxlan mesh across computes nodes
782 :param host_id: host id
783 :param net_uuid: network id
784 :return: True if is not free
786 self
.db_lock
.acquire()
787 result
, content
= self
.db
.get_table(
789 WHERE
={'p.type': 'instance:ovs', 'p.net_id': net_uuid
}
791 self
.db_lock
.release()
798 def is_port_free(self
, host_id
, net_uuid
):
800 Check if there not ovs ports of a network in a compute host.
801 :param host_id: host id
802 :param net_uuid: network id
803 :return: True if is not free
806 self
.db_lock
.acquire()
807 result
, content
= self
.db
.get_table(
808 FROM
='ports as p join instances as i on p.instance_id=i.uuid',
809 WHERE
={"i.host_id": self
.host_id
, 'p.type': 'instance:ovs', 'p.net_id': net_uuid
}
811 self
.db_lock
.release()
818 def add_port_to_ovs_bridge(self
, vlan
):
820 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
821 :param vlan: vlan port id
822 :return: True if success
828 port_name
= 'ovim-' + vlan
829 command
= 'sudo ovs-vsctl add-port br-int ' + port_name
+ ' tag=' + vlan
830 print self
.name
, ': command:', command
831 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
832 content
= stdout
.read()
833 if len(content
) == 0:
838 def delete_dhcp_port(self
, vlan
, net_uuid
):
840 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
841 :param vlan: segmentation id
842 :param net_uuid: network id
843 :return: True if success
849 if not self
.is_dhcp_port_free(vlan
, net_uuid
):
851 self
.delete_dhcp_interfaces(vlan
)
854 def delete_bridge_port_attached_to_ovs(self
, vlan
, net_uuid
):
856 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
859 :return: True if success
864 if not self
.is_port_free(vlan
, net_uuid
):
866 self
.delete_port_to_ovs_bridge(vlan
, net_uuid
)
867 self
.delete_linux_bridge(vlan
)
870 def delete_linux_bridge(self
, vlan
):
872 Delete a linux bridge in a scpecific compute.
873 :param vlan: vlan port id
874 :return: True if success
880 port_name
= 'ovim-' + vlan
881 command
= 'sudo ip link set dev veth0-' + vlan
+ ' down'
882 print self
.name
, ': command:', command
883 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
884 content
= stdout
.read()
886 # if len(content) != 0:
889 command
= 'sudo ifconfig ' + port_name
+ ' down && sudo brctl delbr ' + port_name
890 print self
.name
, ': command:', command
891 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
892 content
= stdout
.read()
893 if len(content
) == 0:
898 def create_ovs_bridge_port(self
, vlan
):
900 Generate a linux bridge and attache the port to a OVS bridge
901 :param vlan: vlan port id
906 self
.create_linux_bridge(vlan
)
907 self
.add_port_to_ovs_bridge(vlan
)
909 def create_linux_bridge(self
, vlan
):
911 Create a linux bridge with STP active
912 :param vlan: netowrk vlan id
919 port_name
= 'ovim-' + vlan
920 command
= 'sudo brctl show | grep ' + port_name
921 print self
.name
, ': command:', command
922 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
923 content
= stdout
.read()
925 # if exist nothing to create
926 # if len(content) == 0:
929 command
= 'sudo brctl addbr ' + port_name
930 print self
.name
, ': command:', command
931 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
932 content
= stdout
.read()
934 # if len(content) == 0:
939 command
= 'sudo brctl stp ' + port_name
+ ' on'
940 print self
.name
, ': command:', command
941 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
942 content
= stdout
.read()
944 # if len(content) == 0:
948 command
= 'sudo ip link set dev ' + port_name
+ ' up'
949 print self
.name
, ': command:', command
950 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
951 content
= stdout
.read()
953 if len(content
) == 0:
958 def set_mac_dhcp_server(self
, ip
, mac
, vlan
, netmask
, dhcp_path
):
960 Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
961 :param ip: IP address asigned to a VM
962 :param mac: VM vnic mac to be macthed with the IP received
963 :param vlan: Segmentation id
964 :param netmask: netmask value
965 :param path: dhcp conf file path that live in namespace side
966 :return: True if success
972 net_namespace
= 'ovim-' + vlan
973 dhcp_path
= os
.path
.join(dhcp_path
, net_namespace
)
974 dhcp_hostsdir
= os
.path
.join(dhcp_path
, net_namespace
)
979 ip_data
= mac
.upper() + ',' + ip
981 command
= 'sudo ip netns exec ' + net_namespace
+ ' touch ' + dhcp_hostsdir
982 print self
.name
, ': command:', command
983 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
984 content
= stdout
.read()
986 command
= 'sudo ip netns exec ' + net_namespace
+ ' sudo bash -ec "echo ' + ip_data
+ ' >> ' + dhcp_hostsdir
+ '"'
988 print self
.name
, ': command:', command
989 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
990 content
= stdout
.read()
992 if len(content
) == 0:
997 def delete_mac_dhcp_server(self
, ip
, mac
, vlan
, dhcp_path
):
999 Delete into dhcp conf file the ip assigned to a specific MAC address
1001 :param ip: IP address asigned to a VM
1002 :param mac: VM vnic mac to be macthed with the IP received
1003 :param vlan: Segmentation id
1004 :param dhcp_path: dhcp conf file path that live in namespace side
1011 net_namespace
= 'ovim-' + vlan
1012 dhcp_path
= os
.path
.join(dhcp_path
, net_namespace
)
1013 dhcp_hostsdir
= os
.path
.join(dhcp_path
, net_namespace
)
1018 ip_data
= mac
.upper() + ',' + ip
1020 command
= 'sudo ip netns exec ' + net_namespace
+ ' sudo sed -i \'/' + ip_data
+ '/d\' ' + dhcp_hostsdir
1021 print self
.name
, ': command:', command
1022 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1023 content
= stdout
.read()
1025 if len(content
) == 0:
def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path):
    """
    Launch a dnsmasq dhcp server inside the 'ovim-<vlan>' network namespace
    of the remote compute, unless one is already running there.

    :param vlan: Segmentation id
    :param ip_range: IP dhcp range (two-element sequence: first and last IP)
    :param netmask: network netmask
    :param dhcp_path: dhcp conf file path that live in namespace side
    :return: True if success
    """
    interface = 'tap-' + vlan
    net_namespace = 'ovim-' + vlan
    dhcp_path = os.path.join(dhcp_path, net_namespace)
    leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
    pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

    # dnsmasq --dhcp-range argument: "first,last,netmask"
    dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask

    command = 'sudo ip netns exec ' + net_namespace + ' mkdir -p ' + dhcp_path
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # read the pid of a previously launched dnsmasq, if any
    pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
    command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_path
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()
    # check if pid is runing
    pid_status_path = content
    command = "ps aux | awk '{print $2 }' | grep " + pid_status_path
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # NOTE(review): lines elided in this extract — presumably dnsmasq is only
    # launched when the pid check above found no running process; confirm
    command = 'sudo ip netns exec ' + net_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
              '--interface=' + interface + ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
              ' --dhcp-range ' + dhcp_range + ' --pid-file=' + pid_file + ' --dhcp-leasefile=' + leases_path + \
              ' --listen-address ' + ip_range[0]

    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.readline()

    # empty first output line is treated as success
    # NOTE(review): trailing return reconstructed — lines elided in this extract
    if len(content) == 0:
        return True
    else:
        return False
def delete_dhcp_interfaces(self, vlan):
    """
    Detach and bring down the veth pair used by the dhcp namespace of a network.
    (Original docstring said "Create a linux bridge with STP active" — copy-paste error.)

    :param vlan: netowrk vlan id
    """
    net_namespace = 'ovim-' + vlan

    # remove the OVS side of the veth pair from the integration bridge
    command = 'sudo ovs-vsctl del-port br-int ovs-tap-' + vlan
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # bring down the namespace side of the veth pair
    command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' down'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # bring down the host side of the veth pair; errors are not checked
    command = 'sudo ip link set dev ovs-tap-' + vlan + ' down'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()
def create_dhcp_interfaces(self, vlan, ip, netmask):
    """
    Create the network namespace and veth pair that connect a network's dhcp
    server to the OVS integration bridge, and configure its IP.

    :param vlan: segmentation id
    :param ip: Ip included in the dhcp range for the tap interface living in namesapce side
    :param netmask: dhcp net CIDR
    :return: True if success
    """
    net_namespace = 'ovim-' + vlan
    namespace_interface = 'tap-' + vlan

    command = 'sudo ip netns add ' + net_namespace
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # veth pair: tap-<vlan> (namespace side) <-> ovs-tap-<vlan> (bridge side)
    command = 'sudo ip link add tap-' + vlan + ' type veth peer name ovs-tap-' + vlan
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # attach the bridge side to br-int tagged with the vlan
    command = 'sudo ovs-vsctl add-port br-int ovs-tap-' + vlan + ' tag=' + vlan
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # move the other end inside the namespace
    command = 'sudo ip link set tap-' + vlan + ' netns ' + net_namespace
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' up'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    command = 'sudo ip link set dev ovs-tap-' + vlan + ' up'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # assign the dhcp server address to the namespace interface
    command = 'sudo ip netns exec ' + net_namespace + ' ' + ' ifconfig ' + namespace_interface \
              + ' ' + ip + ' netmask ' + netmask
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # only the last command's output decides success/failure
    # NOTE(review): trailing return reconstructed — lines elided in this extract
    if len(content) == 0:
        return True
    else:
        return False
def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
    """
    Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level.

    :param vxlan_interface: vlxan inteface name.
    :param remote_ip: tunnel endpoint remote compute ip.
    :return: True if the ovs-vsctl command produced no stdout output, False otherwise
    """
    command = 'sudo ovs-vsctl add-port br-int ' + vxlan_interface + \
              ' -- set Interface ' + vxlan_interface + ' type=vxlan options:remote_ip=' + remote_ip + \
              ' -- set Port ' + vxlan_interface + ' other_config:stp-path-cost=10'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # NOTE(review): trailing return reconstructed — lines elided in this extract
    if len(content) == 0:
        return True
    else:
        return False
def delete_ovs_vxlan_tunnel(self, vxlan_interface):
    """
    Delete a vlxan tunnel port from a OVS brdige.

    :param vxlan_interface: vlxan name to be delete it.
    :return: True if success.
    """
    command = 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()

    # empty stdout is treated as success (stderr is not checked)
    # NOTE(review): trailing return reconstructed — lines elided in this extract
    if len(content) == 0:
        return True
    else:
        return False
def delete_ovs_bridge(self):
    """
    Delete a OVS bridge from a compute.

    :return: True if success
    """
    command = 'sudo ovs-vsctl del-br br-int'
    print self.name, ': command:', command
    (_, stdout, _) = self.ssh_conn.exec_command(command)
    content = stdout.read()
    # empty stdout is treated as success (stderr is not checked)
    # NOTE(review): trailing return reconstructed — lines elided in this extract
    if len(content) == 0:
        return True
    else:
        return False
1220 def get_file_info(self
, path
):
1221 command
= 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1222 print self
.name
, ': command:', command
1223 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1224 content
= stdout
.read()
1225 if len(content
) == 0:
1226 return None # file does not exist
1228 return content
.split(" ") #(permission, 1, owner, group, size, date, file)
1230 def qemu_get_info(self
, path
):
1231 command
= 'qemu-img info ' + path
1232 print self
.name
, ': command:', command
1233 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
1234 content
= stdout
.read()
1235 if len(content
) == 0:
1236 error
= stderr
.read()
1237 print self
.name
, ": get_qemu_info error ", error
1238 raise paramiko
.ssh_exception
.SSHException("Error getting qemu_info: " + error
)
1241 return yaml
.load(content
)
1242 except yaml
.YAMLError
as exc
:
1244 if hasattr(exc
, 'problem_mark'):
1245 mark
= exc
.problem_mark
1246 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
1247 print self
.name
, ": get_qemu_info yaml format Exception", text
1248 raise paramiko
.ssh_exception
.SSHException("Error getting qemu_info yaml format" + text
)
def qemu_change_backing(self, inc_file, new_backing_file):
    """Rebase (unsafe mode, -u) an incremental qcow2 onto a new backing file.

    :param inc_file: incremental (overlay) image path on the remote host
    :param new_backing_file: path of the new backing image
    :return: 0 on success, -1 if qemu-img wrote anything to stderr
    """
    command = 'qemu-img rebase -u -b ' + new_backing_file + ' ' + inc_file
    print self.name, ': command:', command
    (_, _, stderr) = self.ssh_conn.exec_command(command)
    content = stderr.read()
    # NOTE(review): returns reconstructed — lines elided in this extract
    if len(content) == 0:
        return 0
    else:
        print self.name, ": qemu_change_backing error: ", content
        return -1
def get_notused_filename(self, proposed_name, suffix=''):
    '''Look for a non existing file_name in the host
    proposed_name: proposed file name, includes path
    suffix: suffix to be added to the name, before the extention
    '''
    # split the name at the extension dot (only if it comes after the last slash)
    extension = proposed_name.rfind(".")
    slash = proposed_name.rfind("/")
    if extension < 0 or extension < slash:  # no extension
        extension = len(proposed_name)
    target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
    info = self.get_file_info(target_name)
    # NOTE(review): loop bookkeeping partially reconstructed — lines elided in this extract
    if info is None:
        return target_name
    # file exists: append "-0", "-1", ... before the extension until unused
    index = 0
    while info is not None:
        target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
        index += 1
        info = self.get_file_info(target_name)
    return target_name
def get_notused_path(self, proposed_path, suffix=''):
    '''Look for a non existing path at database for images
    proposed_path: proposed file name, includes path
    suffix: suffix to be added to the name, before the extention
    '''
    extension = proposed_path.rfind(".")
    # NOTE(review): control flow partially reconstructed — lines elided in this extract
    if extension < 0:
        extension = len(proposed_path)
    target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
    index = 0
    while True:
        # the path is free when no 'images' row references it
        r, _ = self.db.get_table(FROM="images", WHERE={"path": target_path})
        if r <= 0:
            return target_path
        # taken: try "-0", "-1", ... before the extension
        target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
        index += 1
1301 def delete_file(self
, file_name
):
1302 command
= 'rm -f '+file_name
1303 print self
.name
, ': command:', command
1304 (_
, _
, stderr
) = self
.ssh_conn
.exec_command(command
)
1305 error_msg
= stderr
.read()
1306 if len(error_msg
) > 0:
1307 raise paramiko
.ssh_exception
.SSHException("Error deleting file: " + error_msg
)
def copy_file(self, source, destination, perserve_time=True):
    """Copy *source* to *destination* on the remote host.

    :param source: http(s) URL (fetched with wget) or a remote-local path (cp)
    :param destination: target path on the remote host
    :param perserve_time: keep source timestamps when copying locally
        (parameter name typo kept for interface compatibility)
    :raises paramiko.ssh_exception.SSHException: if the command writes to stderr
    """
    if source[0:4] == "http":
        # wget's own messages go to <dst>.result; on failure they are replayed
        # to stderr so the error check below fires, then the temp file is removed
        command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
            dst=destination, src=source, dst_result=destination + ".result" )
    else:
        # NOTE(review): branch lines reconstructed — elided in this extract
        command = 'cp --no-preserve=mode'
        if perserve_time:
            command += ' --preserve=timestamps'
        command += " '{}' '{}'".format(source, destination)
    print self.name, ': command:', command
    (_, _, stderr) = self.ssh_conn.exec_command(command)
    error_msg = stderr.read()
    if len(error_msg) > 0:
        raise paramiko.ssh_exception.SSHException("Error copying image to local host: " + error_msg)
def copy_remote_file(self, remote_file, use_incremental):
    ''' Copy a file from the repository to local folder and recursively
    copy the backing files in case the remote file is incremental
    Read and/or modified self.localinfo['files'] that contain the
    unmodified copies of images in the local path
    params:
        remote_file: path of remote file
        use_incremental: None (leave the decision to this function), True, False
    return:
        local_file: name of local file
        qemu_info: dict with quemu information of local file
        use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
    '''
    # NOTE(review): several one-line statements reconstructed — elided in this extract
    use_incremental_out = use_incremental
    new_backing_file = None
    local_file = None
    file_from_local = True

    #in case incremental use is not decided, take the decision depending on the image
    #avoid the use of incremental if this image is already incremental
    if remote_file[0:4] == "http":
        file_from_local = False
    else:
        qemu_remote_info = self.qemu_get_info(remote_file)
    if use_incremental_out == None:
        use_incremental_out = not (file_from_local and 'backing file' in qemu_remote_info)
    #copy recursivelly the backing files
    if file_from_local and 'backing file' in qemu_remote_info:
        new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

    #check if remote file is present locally
    if use_incremental_out and remote_file in self.localinfo['files']:
        local_file = self.localinfo['files'][remote_file]
        local_file_info = self.get_file_info(local_file)
        if file_from_local:
            remote_file_info = self.get_file_info(remote_file)
        if local_file_info == None:
            local_file = None
        elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
            #local copy of file not valid because date or size are different.
            #TODO DELETE local file if this file is not used by any active virtual machine
            try:
                self.delete_file(local_file)
                del self.localinfo['files'][remote_file]
            except Exception:
                pass
            local_file = None
        else: #check that the local file has the same backing file, or there are not backing at all
            qemu_info = self.qemu_get_info(local_file)
            if new_backing_file != qemu_info.get('backing file'):
                local_file = None

    if local_file == None: #copy the file
        img_name = remote_file.split('/') [-1]
        img_local = self.image_path + '/' + img_name
        local_file = self.get_notused_filename(img_local)
        self.copy_file(remote_file, local_file, use_incremental_out)

        if use_incremental_out:
            self.localinfo['files'][remote_file] = local_file
        if new_backing_file:
            self.qemu_change_backing(local_file, new_backing_file)
        qemu_info = self.qemu_get_info(local_file)

    return local_file, qemu_info, use_incremental_out
def launch_server(self, conn, server, rebuild=False, domain=None):
    """Create (or resume/rebuild) a libvirt domain for *server* on this host.

    :param conn: open libvirt connection to the host
    :param server: instance dict ('uuid', 'image_id', optional 'metadata', 'paused')
    :param rebuild: True to discard previous incremental files and recreate
    :param domain: existing libvirt domain object, if any
    :return: (0, 'Success') or (negative error code, error text)

    NOTE(review): several elided one-liners (returns, try:, guards) were
    reconstructed from the surrounding control flow in this extract.
    """
    if self.test:
        time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real
        return 0, 'Success'

    server_id = server['uuid']
    paused = server.get('paused','no')
    try:
        # an already-defined, non-rebuild domain only needs to be resumed
        if domain!=None and rebuild==False:
            domain.resume()
            #self.server_status[server_id] = 'ACTIVE'
            return 0, 'Success'

        self.db_lock.acquire()
        result, server_data = self.db.get_instance(server_id)
        self.db_lock.release()
        if result <= 0:
            print self.name, ": launch_server ERROR getting server from DB",result, server_data
            return result, server_data

        #0: get image metadata
        server_metadata = server.get('metadata', {})
        use_incremental = None
        if "use_incremental" in server_metadata:
            use_incremental = False if server_metadata["use_incremental"]=="no" else True

        server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
        if rebuild:
            #delete previous incremental files
            for file_ in server_host_files.values():
                self.delete_file(file_['source file'] )
            server_host_files={}

        #1: obtain aditional devices (disks)
        #Put as first device the main disk
        devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
        if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
            devices += server_data['extended']['devices']

        for dev in devices:
            if dev['image_id'] == None:
                continue

            self.db_lock.acquire()
            result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
                                                WHERE={'uuid': dev['image_id']})
            self.db_lock.release()
            if result <= 0:
                error_text = "ERROR", result, content, "when getting image", dev['image_id']
                print self.name, ": launch_server", error_text
                return -1, error_text
            if content[0]['metadata'] is not None:
                dev['metadata'] = json.loads(content[0]['metadata'])
            else:
                dev['metadata'] = {}

            # reuse a file already copied for this server
            if dev['image_id'] in server_host_files:
                dev['source file'] = server_host_files[ dev['image_id'] ] ['source file'] #local path
                dev['file format'] = server_host_files[ dev['image_id'] ] ['file format'] # raw or qcow2
                continue

            #2: copy image to host
            remote_file = content[0]['path']
            use_incremental_image = use_incremental
            if dev['metadata'].get("use_incremental") == "no":
                use_incremental_image = False
            local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)

            #create incremental image
            if use_incremental_image:
                local_file_inc = self.get_notused_filename(local_file, '.inc')
                command = 'qemu-img create -f qcow2 '+local_file_inc+ ' -o backing_file='+ local_file
                print 'command:', command
                (_, _, stderr) = self.ssh_conn.exec_command(command)
                error_msg = stderr.read()
                if len(error_msg) > 0:
                    raise paramiko.ssh_exception.SSHException("Error creating incremental file: " + error_msg)
                local_file = local_file_inc
                qemu_info = {'file format':'qcow2'}

            server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}

            dev['source file'] = local_file
            dev['file format'] = qemu_info['file format']

        self.localinfo['server_files'][ server['uuid'] ] = server_host_files
        self.localinfo_dirty = True

        #3: generate the libvirt XML and start the domain
        result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
        if result < 0:
            print self.name, ": create xml server error:", xml
            return -2, xml
        print self.name, ": create xml:", xml
        atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
        if not rebuild: #ensures that any pending destroying server is done
            self.server_forceoff(True)
        #print self.name, ": launching instance" #, xml
        conn.createXML(xml, atribute)
        #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'

        return 0, 'Success'

    except paramiko.ssh_exception.SSHException as e:
        text = e.args[0]
        print self.name, ": launch_server(%s) ssh Exception: %s" %(server_id, text)
        if "SSH session not active" in text:
            self.ssh_connect()
    except host_thread.lvirt_module.libvirtError as e:
        text = e.get_error_message()
        print self.name, ": launch_server(%s) libvirt Exception: %s" %(server_id, text)
    except Exception as e:
        text = str(e)
        print self.name, ": launch_server(%s) Exception: %s" %(server_id, text)
    return -1, text
def update_servers_status(self):
    """Poll libvirt for all domains on this host and push status changes
    of tracked servers to the 'instances' table.

    libvirt state mapping used below:
    # VIR_DOMAIN_NOSTATE = 0
    # VIR_DOMAIN_RUNNING = 1
    # VIR_DOMAIN_BLOCKED = 2
    # VIR_DOMAIN_PAUSED = 3
    # VIR_DOMAIN_SHUTDOWN = 4
    # VIR_DOMAIN_SHUTOFF = 5
    # VIR_DOMAIN_CRASHED = 6
    # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended

    NOTE(review): elided one-liners reconstructed in this extract.
    """
    if self.test or len(self.server_status) == 0:
        return

    try:
        conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
        domains = conn.listAllDomains()
        domain_dict = {}
        for domain in domains:
            uuid = domain.UUIDString() ;
            libvirt_status = domain.state()
            #print libvirt_status
            if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
                new_status = "ACTIVE"
            elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
                new_status = "PAUSED"
            elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
                new_status = "INACTIVE"
            elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
                new_status = "ERROR"
            else:
                new_status = None
            domain_dict[uuid] = new_status
        conn.close()
    except host_thread.lvirt_module.libvirtError as e:
        print self.name, ": get_state() Exception '", e.get_error_message()
        return

    for server_id, current_status in self.server_status.iteritems():
        new_status = None
        if server_id in domain_dict:
            new_status = domain_dict[server_id]
        else:
            # domain no longer defined in libvirt
            new_status = "INACTIVE"

        if new_status == None or new_status == current_status:
            continue
        if new_status == 'INACTIVE' and current_status == 'ERROR':
            continue #keep ERROR status, because obviously this machine is not running

        print self.name, ": server ", server_id, "status change from ", current_status, "to", new_status
        STATUS = {'progress':100, 'status':new_status}
        if new_status == 'ERROR':
            STATUS['last_error'] = 'machine has crashed'
        self.db_lock.acquire()
        r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
        self.db_lock.release()
        if r >= 0:
            self.server_status[server_id] = new_status
def action_on_server(self, req, last_retry=True):
    '''Perform an action on a req
    Attributes:
        req: dictionary that contain:
            server properties: 'uuid','name','tenant_id','status'
            action: 'action' (terminate/shutoff/shutdown/forceOff/start/resume/pause/reboot/rebuild/createImage)
            host properties: 'user', 'ip_name'
    return (error, text)
        0: No error. VM is updated to new state,
        -1: Invalid action, as trying to pause a PAUSED VM
        -2: Error accessing host
        -4: Error at DB access
        -5: Error while trying to perform action. VM is updated to ERROR

    NOTE(review): this extract elided many one-line statements (try:/else:,
    dom.* calls, returns); they were reconstructed from the surrounding
    control flow and should be diffed against the original file.
    '''
    server_id = req['uuid']
    conn = None
    new_status = None
    old_status = req['status']
    last_error = None

    if self.test:
        # test mode: only simulate the state transitions
        if 'terminate' in req['action']:
            new_status = 'deleted'
        elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
            if req['status']!='ERROR':
                new_status = 'INACTIVE'
        elif 'start' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE'
        elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE' : new_status = 'ACTIVE'
        elif 'pause' in req['action'] and req['status']!='ERROR': new_status = 'PAUSED'
        elif 'reboot' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE'
        elif 'rebuild' in req['action']:
            time.sleep(random.randint(20,150))
            new_status = 'ACTIVE'
        elif 'createImage' in req['action']:
            self.create_image(None, req)
    else:
        try:
            conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
            try:
                dom = conn.lookupByUUIDString(server_id)
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                    dom = None
                else:
                    print self.name, ": action_on_server(",server_id,") libvirt exception:", text
                    raise e

            if 'forceOff' in req['action']:
                if dom == None:
                    print self.name, ": action_on_server(",server_id,") domain not running"
                else:
                    try:
                        print self.name, ": sending DESTROY to server", server_id
                        dom.destroy()
                    except Exception as e:
                        # a domain already stopped is not an error for forceOff
                        if "domain is not running" not in e.get_error_message():
                            print self.name, ": action_on_server(",server_id,") Exception while sending force off:", e.get_error_message()
                            last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                            new_status = 'ERROR'

            elif 'terminate' in req['action']:
                if dom == None:
                    print self.name, ": action_on_server(",server_id,") domain not running"
                    new_status = 'deleted'
                else:
                    try:
                        if req['action']['terminate'] == 'force':
                            print self.name, ": sending DESTROY to server", server_id
                            dom.destroy()
                            new_status = 'deleted'
                        else:
                            print self.name, ": sending SHUTDOWN to server", server_id
                            dom.shutdown()
                            # re-check later whether the graceful shutdown finished
                            self.pending_terminate_server.append( (time.time()+10,server_id) )
                    except Exception as e:
                        print self.name, ": action_on_server(",server_id,") Exception while destroy:", e.get_error_message()
                        last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                        new_status = 'ERROR'
                        if "domain is not running" in e.get_error_message():
                            try:
                                dom.undefine()
                                new_status = 'deleted'
                            except Exception:
                                print self.name, ": action_on_server(",server_id,") Exception while undefine:", e.get_error_message()
                                last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
                            #Exception: 'virDomainDetachDevice() failed'
                if new_status=='deleted':
                    if server_id in self.server_status:
                        del self.server_status[server_id]
                    if req['uuid'] in self.localinfo['server_files']:
                        for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
                            try:
                                self.delete_file(file_['source file'])
                            except Exception:
                                pass
                        del self.localinfo['server_files'][ req['uuid'] ]
                        self.localinfo_dirty = True

            elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
                try:
                    if dom == None:
                        print self.name, ": action_on_server(",server_id,") domain not running"
                    else:
                        dom.shutdown()
                    # new_status = 'INACTIVE'
                    #TODO: check status for changing at database
                except Exception as e:
                    new_status = 'ERROR'
                    print self.name, ": action_on_server(",server_id,") Exception while shutdown:", e.get_error_message()
                    last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()

            elif 'rebuild' in req['action']:
                if dom != None:
                    dom.destroy()
                r = self.launch_server(conn, req, True, None)
                if r[0] < 0:
                    new_status = 'ERROR'
                    last_error = r[1]
                else:
                    new_status = 'ACTIVE'
            elif 'start' in req['action']:
                # The instance is only create in DB but not yet at libvirt domain, needs to be create
                rebuild = True if req['action']['start'] == 'rebuild' else False
                r = self.launch_server(conn, req, rebuild, dom)
                if r[0] < 0:
                    new_status = 'ERROR'
                    last_error = r[1]
                else:
                    new_status = 'ACTIVE'

            elif 'resume' in req['action']:
                try:
                    if dom != None:
                        dom.resume()
                    # new_status = 'ACTIVE'
                except Exception as e:
                    print self.name, ": action_on_server(",server_id,") Exception while resume:", e.get_error_message()

            elif 'pause' in req['action']:
                try:
                    if dom != None:
                        dom.suspend()
                    # new_status = 'PAUSED'
                except Exception as e:
                    print self.name, ": action_on_server(",server_id,") Exception while pause:", e.get_error_message()

            elif 'reboot' in req['action']:
                try:
                    if dom != None:
                        dom.reboot()
                    print self.name, ": action_on_server(",server_id,") reboot:"
                    #new_status = 'ACTIVE'
                except Exception as e:
                    print self.name, ": action_on_server(",server_id,") Exception while reboot:", e.get_error_message()
            elif 'createImage' in req['action']:
                self.create_image(dom, req)

            conn.close()
        except host_thread.lvirt_module.libvirtError as e:
            if conn is not None: conn.close()
            text = e.get_error_message()
            new_status = "ERROR"
            last_error = text
            print self.name, ": action_on_server(",server_id,") Exception '", text
            if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                print self.name, ": action_on_server(",server_id,") Exception removed from host"
    #end of if self.test
    if new_status == None:
        return 1

    print self.name, ": action_on_server(",server_id,") new status", new_status, last_error
    UPDATE = {'progress':100, 'status':new_status}

    if new_status=='ERROR':
        if not last_retry: #if there will be another retry do not update database
            return -1
        elif 'terminate' in req['action']:
            #PUT a log in the database
            print self.name, ": PANIC deleting server", server_id, last_error
            self.db_lock.acquire()
            self.db.new_row('logs',
                {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
                 'description':'PANIC deleting server from host '+self.name+': '+last_error}
            )
            self.db_lock.release()
            if server_id in self.server_status:
                del self.server_status[server_id]
            return -1
        else:
            UPDATE['last_error'] = last_error
    if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
        self.db_lock.acquire()
        self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
        self.server_status[server_id] = new_status
        self.db_lock.release()
    if new_status == 'ERROR':
        return -1
    return 1
def restore_iface(self, name, mac, lib_conn=None):
    ''' make an ifdown, ifup to restore default parameter of na interface
        Params:
            mac: mac address of the interface
            lib_conn: connection to the libvirt, if None a new connection is created
        Return 0,None if ok, -1,text if fails

    NOTE(review): elided one-liners reconstructed in this extract.
    '''
    conn = None
    ret = 0
    error_text = None
    if self.test:
        print self.name, ": restore_iface '%s' %s" % (name, mac)
        return 0, None
    try:
        if not lib_conn:
            conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
        else:
            conn = lib_conn

        #wait to the pending VM deletion
        #TODO.Revise self.server_forceoff(True)

        iface = conn.interfaceLookupByMACString(mac)
        # destroy/create cycle restores the interface defaults
        iface.destroy()
        iface.create()
        print self.name, ": restore_iface '%s' %s" % (name, mac)
    except host_thread.lvirt_module.libvirtError as e:
        error_text = e.get_error_message()
        print self.name, ": restore_iface '%s' '%s' libvirt exception: %s" %(name, mac, error_text)
        ret = -1
    finally:
        # only close connections we opened ourselves
        if lib_conn is None and conn is not None:
            conn.close()
    return ret, error_text
def create_image(self, dom, req):
    """Snapshot a server disk into a new image file and record it in the DB.

    :param dom: libvirt domain of the server (unused in test mode)
    :param req: request dict with 'uuid', 'action'['createImage'] and 'new_image'

    NOTE(review): elided one-liners (else:/try:/break/retry loop) reconstructed
    in this extract.
    """
    if self.test:
        if 'path' in req['action']['createImage']:
            file_dst = req['action']['createImage']['path']
        else:
            createImage = req['action']['createImage']
            img_name = createImage['source']['path']
            index = img_name.rfind('/')
            file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
        image_status = 'ACTIVE'
    else:
        # retry once after reconnecting when the ssh session dropped
        for retry in (0, 1):
            try:
                server_id = req['uuid']
                createImage = req['action']['createImage']
                file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
                if 'path' in req['action']['createImage']:
                    file_dst = req['action']['createImage']['path']
                else:
                    img_name = createImage['source']['path']
                    index = img_name.rfind('/')
                    file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')

                self.copy_file(file_orig, file_dst)
                qemu_info = self.qemu_get_info(file_orig)
                # re-point the copy at the original repository backing file
                if 'backing file' in qemu_info:
                    for k, v in self.localinfo['files'].items():
                        if v == qemu_info['backing file']:
                            self.qemu_change_backing(file_dst, k)
                image_status = 'ACTIVE'
                break
            except paramiko.ssh_exception.SSHException as e:
                image_status = 'ERROR'
                error_text = e.args[0]
                print self.name, "': create_image(",server_id,") ssh Exception:", error_text
                if "SSH session not active" in error_text and retry == 0:
                    self.ssh_connect()
            except Exception as e:
                image_status = 'ERROR'
                error_text = str(e)
                print self.name, "': create_image(",server_id,") Exception:", error_text

    #TODO insert a last_error at database
    self.db_lock.acquire()
    self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
        {'uuid':req['new_image']['uuid']}, log=True)
    self.db_lock.release()
def edit_iface(self, port_id, old_net, new_net):
    #This action imply remove and insert interface to put proper parameters
    """Move a SR-IOV (VF) port between networks by detaching and re-attaching
    the hostdev interface on the live domain.

    :param port_id: uuid of the port in the DB
    :param old_net: truthy when an existing attachment must be detached
    :param new_net: truthy when the interface must be re-attached (with vlan tag)

    NOTE(review): elided one-liners reconstructed in this extract.
    """
    #get iface details
    self.db_lock.acquire()
    r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
                            WHERE={'port_id': port_id})
    self.db_lock.release()
    if r < 0:
        print self.name, ": edit_iface(",port_id,") DDBB error:", c
        return
    elif r == 0:
        print self.name, ": edit_iface(",port_id,") por not found"
        return
    port = c[0]
    # only SR-IOV virtual functions can be live-moved this way
    if port["model"]!="VF":
        print self.name, ": edit_iface(",port_id,") ERROR model must be VF"
        return

    #create xml detach file
    xml = []
    xml.append("<interface type='hostdev' managed='yes'>")
    xml.append(" <mac address='" +port['mac']+ "'/>")
    xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
    xml.append('</interface>')

    try:
        conn = None
        conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
        dom = conn.lookupByUUIDString(port["instance_id"])
        if old_net:
            text = "\n".join(xml)
            print self.name, ": edit_iface detaching SRIOV interface", text
            dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
        if new_net:
            # replace the closing tag with the vlan element and re-close
            xml[-1] = " <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
            xml.append(self.pci2xml(port.get('vpci',None)) )
            xml.append('</interface>')
            text = "\n".join(xml)
            print self.name, ": edit_iface attaching SRIOV interface", text
            dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)

    except host_thread.lvirt_module.libvirtError as e:
        text = e.get_error_message()
        print self.name, ": edit_iface(",port["instance_id"],") libvirt exception:", text

    finally:
        if conn is not None: conn.close()
1920 def create_server(server
, db
, db_lock
, only_of_ports
):
1927 # host_id = server.get('host_id', None)
1928 extended
= server
.get('extended', None)
1930 # print '----------------------'
1931 # print json.dumps(extended, indent=4)
1934 requirements
['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
1935 requirements
['ram'] = server
['flavor'].get('ram', 0)
1936 if requirements
['ram']== None:
1937 requirements
['ram'] = 0
1938 requirements
['vcpus'] = server
['flavor'].get('vcpus', 0)
1939 if requirements
['vcpus']== None:
1940 requirements
['vcpus'] = 0
1941 #If extended is not defined get requirements from flavor
1942 if extended
is None:
1943 #If extended is defined in flavor convert to dictionary and use it
1944 if 'extended' in server
['flavor'] and server
['flavor']['extended'] != None:
1945 json_acceptable_string
= server
['flavor']['extended'].replace("'", "\"")
1946 extended
= json
.loads(json_acceptable_string
)
1949 #print json.dumps(extended, indent=4)
1951 #For simplicity only one numa VM are supported in the initial implementation
1952 if extended
!= None:
1953 numas
= extended
.get('numas', [])
1955 return (-2, "Multi-NUMA VMs are not supported yet")
1957 # return (-1, "At least one numa must be specified")
1959 #a for loop is used in order to be ready to multi-NUMA VMs
1963 numa_req
['memory'] = numa
.get('memory', 0)
1965 numa_req
['proc_req_nb'] = numa
['cores'] #number of cores or threads to be reserved
1966 numa_req
['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
1967 numa_req
['proc_req_list'] = numa
.get('cores-id', None) #list of ids to be assigned to the cores or threads
1968 elif 'paired-threads' in numa
:
1969 numa_req
['proc_req_nb'] = numa
['paired-threads']
1970 numa_req
['proc_req_type'] = 'paired-threads'
1971 numa_req
['proc_req_list'] = numa
.get('paired-threads-id', None)
1972 elif 'threads' in numa
:
1973 numa_req
['proc_req_nb'] = numa
['threads']
1974 numa_req
['proc_req_type'] = 'threads'
1975 numa_req
['proc_req_list'] = numa
.get('threads-id', None)
1977 numa_req
['proc_req_nb'] = 0 # by default
1978 numa_req
['proc_req_type'] = 'threads'
1982 #Generate a list of sriov and another for physical interfaces
1983 interfaces
= numa
.get('interfaces', [])
1986 for iface
in interfaces
:
1987 iface
['bandwidth'] = int(iface
['bandwidth'])
1988 if iface
['dedicated'][:3]=='yes':
1989 port_list
.append(iface
)
1991 sriov_list
.append(iface
)
1993 #Save lists ordered from more restrictive to less bw requirements
1994 numa_req
['sriov_list'] = sorted(sriov_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
1995 numa_req
['port_list'] = sorted(port_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
1998 request
.append(numa_req
)
2000 # print "----------\n"+json.dumps(request[0], indent=4)
2001 # print '----------\n\n'
2003 #Search in db for an appropriate numa for each requested numa
2004 #at the moment multi-NUMA VMs are not supported
2006 requirements
['numa'].update(request
[0])
2007 if requirements
['numa']['memory']>0:
2008 requirements
['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2009 elif requirements
['ram']==0:
2010 return (-1, "Memory information not set neither at extended field not at ram")
2011 if requirements
['numa']['proc_req_nb']>0:
2012 requirements
['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2013 elif requirements
['vcpus']==0:
2014 return (-1, "Processor information not set neither at extended field not at vcpus")
2018 result
, content
= db
.get_numas(requirements
, server
.get('host_id', None), only_of_ports
)
2022 return (-1, content
)
2024 numa_id
= content
['numa_id']
2025 host_id
= content
['host_id']
2027 #obtain threads_id and calculate pinning
2030 if requirements
['numa']['proc_req_nb']>0:
2032 result
, content
= db
.get_table(FROM
='resources_core',
2033 SELECT
=('id','core_id','thread_id'),
2034 WHERE
={'numa_id':numa_id
,'instance_id': None, 'status':'ok'} )
2040 #convert rows to a dictionary indexed by core_id
2043 if not row
['core_id'] in cores_dict
:
2044 cores_dict
[row
['core_id']] = []
2045 cores_dict
[row
['core_id']].append([row
['thread_id'],row
['id']])
2047 #In case full cores are requested
2049 if requirements
['numa']['proc_req_type'] == 'cores':
2050 #Get/create the list of the vcpu_ids
2051 vcpu_id_list
= requirements
['numa']['proc_req_list']
2052 if vcpu_id_list
== None:
2053 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2055 for threads
in cores_dict
.itervalues():
2057 if len(threads
) != 2:
2060 #set pinning for the first thread
2061 cpu_pinning
.append( [ vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1] ] )
2063 #reserve so it is not used the second thread
2064 reserved_threads
.append(threads
[1][1])
2066 if len(vcpu_id_list
) == 0:
2069 #In case paired threads are requested
2070 elif requirements
['numa']['proc_req_type'] == 'paired-threads':
2072 #Get/create the list of the vcpu_ids
2073 if requirements
['numa']['proc_req_list'] != None:
2075 for pair
in requirements
['numa']['proc_req_list']:
2077 return -1, "Field paired-threads-id not properly specified"
2079 vcpu_id_list
.append(pair
[0])
2080 vcpu_id_list
.append(pair
[1])
2082 vcpu_id_list
= range(0,2*int(requirements
['numa']['proc_req_nb']))
2084 for threads
in cores_dict
.itervalues():
2086 if len(threads
) != 2:
2088 #set pinning for the first thread
2089 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2091 #set pinning for the second thread
2092 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2094 if len(vcpu_id_list
) == 0:
2097 #In case normal threads are requested
2098 elif requirements
['numa']['proc_req_type'] == 'threads':
2099 #Get/create the list of the vcpu_ids
2100 vcpu_id_list
= requirements
['numa']['proc_req_list']
2101 if vcpu_id_list
== None:
2102 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2104 for threads_index
in sorted(cores_dict
, key
=lambda k
: len(cores_dict
[k
])):
2105 threads
= cores_dict
[threads_index
]
2106 #set pinning for the first thread
2107 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2109 #if exists, set pinning for the second thread
2110 if len(threads
) == 2 and len(vcpu_id_list
) != 0:
2111 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2113 if len(vcpu_id_list
) == 0:
2116 #Get the source pci addresses for the selected numa
2117 used_sriov_ports
= []
2118 for port
in requirements
['numa']['sriov_list']:
2120 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2126 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2128 port
['pci'] = row
['pci']
2129 if 'mac_address' not in port
:
2130 port
['mac_address'] = row
['mac']
2132 port
['port_id']=row
['id']
2133 port
['Mbps_used'] = port
['bandwidth']
2134 used_sriov_ports
.append(row
['id'])
2137 for port
in requirements
['numa']['port_list']:
2138 port
['Mbps_used'] = None
2139 if port
['dedicated'] != "yes:sriov":
2140 port
['mac_address'] = port
['mac']
2144 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac', 'Mbps'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2149 port
['Mbps_used'] = content
[0]['Mbps']
2151 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2153 port
['pci'] = row
['pci']
2154 if 'mac_address' not in port
:
2155 port
['mac_address'] = row
['mac'] # mac cannot be set to passthrough ports
2157 port
['port_id']=row
['id']
2158 used_sriov_ports
.append(row
['id'])
2161 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2162 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2164 server
['host_id'] = host_id
2167 #Generate dictionary for saving in db the instance resources
2169 resources
['bridged-ifaces'] = []
2172 numa_dict
['interfaces'] = []
2174 numa_dict
['interfaces'] += requirements
['numa']['port_list']
2175 numa_dict
['interfaces'] += requirements
['numa']['sriov_list']
2177 #Check bridge information
2178 unified_dataplane_iface
=[]
2179 unified_dataplane_iface
+= requirements
['numa']['port_list']
2180 unified_dataplane_iface
+= requirements
['numa']['sriov_list']
2182 for control_iface
in server
.get('networks', []):
2183 control_iface
['net_id']=control_iface
.pop('uuid')
2184 #Get the bridge name
2186 result
, content
= db
.get_table(FROM
='nets',
2187 SELECT
=('name', 'type', 'vlan', 'provider', 'enable_dhcp',
2188 'dhcp_first_ip', 'dhcp_last_ip', 'cidr'),
2189 WHERE
={'uuid': control_iface
['net_id']})
2194 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface
['net_id']
2197 if control_iface
.get("type", 'virtual') == 'virtual':
2198 if network
['type']!='bridge_data' and network
['type']!='bridge_man':
2199 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface
['net_id']
2200 resources
['bridged-ifaces'].append(control_iface
)
2201 if network
.get("provider") and network
["provider"][0:3] == "OVS":
2202 control_iface
["type"] = "instance:ovs"
2204 control_iface
["type"] = "instance:bridge"
2205 if network
.get("vlan"):
2206 control_iface
["vlan"] = network
["vlan"]
2208 if network
.get("enable_dhcp") == 'true':
2209 control_iface
["enable_dhcp"] = network
.get("enable_dhcp")
2210 control_iface
["dhcp_first_ip"] = network
["dhcp_first_ip"]
2211 control_iface
["dhcp_last_ip"] = network
["dhcp_last_ip"]
2212 control_iface
["cidr"] = network
["cidr"]
2214 if network
['type']!='data' and network
['type']!='ptp':
2215 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface
['net_id']
2216 #dataplane interface, look for it in the numa tree and assign this network
2218 for dataplane_iface
in numa_dict
['interfaces']:
2219 if dataplane_iface
['name'] == control_iface
.get("name"):
2220 if (dataplane_iface
['dedicated'] == "yes" and control_iface
["type"] != "PF") or \
2221 (dataplane_iface
['dedicated'] == "no" and control_iface
["type"] != "VF") or \
2222 (dataplane_iface
['dedicated'] == "yes:sriov" and control_iface
["type"] != "VFnotShared") :
2223 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2224 (control_iface
.get("name"), dataplane_iface
['dedicated'], control_iface
["type"])
2225 dataplane_iface
['uuid'] = control_iface
['net_id']
2226 if dataplane_iface
['dedicated'] == "no":
2227 dataplane_iface
['vlan'] = network
['vlan']
2228 if dataplane_iface
['dedicated'] != "yes" and control_iface
.get("mac_address"):
2229 dataplane_iface
['mac_address'] = control_iface
.get("mac_address")
2230 if control_iface
.get("vpci"):
2231 dataplane_iface
['vpci'] = control_iface
.get("vpci")
2235 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface
.get("name")
2237 resources
['host_id'] = host_id
2238 resources
['image_id'] = server
['image_id']
2239 resources
['flavor_id'] = server
['flavor_id']
2240 resources
['tenant_id'] = server
['tenant_id']
2241 resources
['ram'] = requirements
['ram']
2242 resources
['vcpus'] = requirements
['vcpus']
2243 resources
['status'] = 'CREATING'
2245 if 'description' in server
: resources
['description'] = server
['description']
2246 if 'name' in server
: resources
['name'] = server
['name']
2248 resources
['extended'] = {} #optional
2249 resources
['extended']['numas'] = []
2250 numa_dict
['numa_id'] = numa_id
2251 numa_dict
['memory'] = requirements
['numa']['memory']
2252 numa_dict
['cores'] = []
2254 for core
in cpu_pinning
:
2255 numa_dict
['cores'].append({'id': core
[2], 'vthread': core
[0], 'paired': paired
})
2256 for core
in reserved_threads
:
2257 numa_dict
['cores'].append({'id': core
})
2258 resources
['extended']['numas'].append(numa_dict
)
2259 if extended
!=None and 'devices' in extended
: #TODO allow extra devices without numa
2260 resources
['extended']['devices'] = extended
['devices']
2263 print '===================================={'
2264 print json
.dumps(resources
, indent
=4)
2265 print '====================================}'