Create dhcp thread at openvim startup
[osm/openvim.git] / host_thread.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
'''
This is a thread that interacts with the host and libvirt to manage VMs.
One thread will be launched per host.
'''
28 __author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__ = "$10-jul-2014 12:07:15$"
30
31 import json
32 import yaml
33 import threading
34 import time
35 import Queue
36 import paramiko
37 from jsonschema import validate as js_v, exceptions as js_e
38 #import libvirt
39 import imp
40 from vim_schema import localinfo_schema, hostinfo_schema
41 import random
42 import os
43
# TODO: insert a logging system

# from logging import Logger
# import auxiliary_functions as af
50
51
52 class host_thread(threading.Thread):
53 lvirt_module = None
54
    def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
                 develop_bridge_iface):
        '''Init a thread.
        Arguments:
            'id' number of thead
            'name' name of thread
            'host','user': host ip or name to manage and user
            'db', 'db_lock': database class and lock to use it in exclusion
            'test': when True no SSH/libvirt access is performed (fake mode)
            'image_path': remote directory on the host where VM image files live
            'host_id': database uuid of the managed host
            'version': openvim version string
            'develop_mode': when True hugepages/cpu-pinning constraints are relaxed
            'develop_bridge_iface': bridge that dataplane interfaces are mapped to in develop mode
        '''
        threading.Thread.__init__(self)
        self.name = name
        self.host = host
        self.user = user
        self.db = db
        self.db_lock = db_lock
        self.test = test

        # Import python-libvirt lazily and cache it at class level so that
        # test mode works on machines without libvirt installed.
        if not test and not host_thread.lvirt_module:
            try:
                module_info = imp.find_module("libvirt")
                host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
            except (IOError, ImportError) as e:
                raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))

        self.develop_mode = develop_mode
        self.develop_bridge_iface = develop_bridge_iface
        self.image_path = image_path
        self.host_id = host_id
        self.version = version

        self.xml_level = 0   # current indentation depth used by tab()/inc_tab()/dec_tab()
        #self.pending ={}

        self.server_status = {}   #dictionary with pairs server_uuid:server_status
        self.pending_terminate_server = []   #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
        self.next_update_server_status = 0   #time when must be check servers status

        self.hostinfo = None   # interface-name mapping loaded by load_hostinfo()

        self.queueLock = threading.Lock()    # protects taskQueue between API threads and this thread
        self.taskQueue = Queue.Queue(2000)   # tasks inserted via insert_task(), consumed by run()
        self.ssh_conn = None                 # paramiko client, created lazily by ssh_connect()
98
    def ssh_connect(self):
        """(Re)open the paramiko SSH connection to the managed host.

        On failure only logs the error text; callers simply retry by
        calling this method again.
        """
        try:
            #Connect SSH
            self.ssh_conn = paramiko.SSHClient()
            self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self.ssh_conn.load_system_host_keys()
            self.ssh_conn.connect(self.host, username=self.user, timeout=10) #, None)
        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            print self.name, ": ssh_connect ssh Exception:", text
109
    def load_localinfo(self):
        """Load the .openvim.yaml local info file from the host over SSH.

        Ensures the image directory exists, then reads and schema-validates
        the YAML. On any failure (or in test mode) a default empty structure
        is installed; the dirty flag is deliberately left False so that a
        transient read error does not overwrite the file on the host.
        """
        if not self.test:
            try:
                #Connect SSH
                self.ssh_connect()

                # make sure the remote image directory exists
                command = 'mkdir -p ' + self.image_path
                #print self.name, ': command:', command
                (_, stdout, stderr) = self.ssh_conn.exec_command(command)
                content = stderr.read()
                if len(content) > 0:
                    print self.name, ': command:', command, "stderr:", content

                command = 'cat ' + self.image_path + '/.openvim.yaml'
                #print self.name, ': command:', command
                (_, stdout, stderr) = self.ssh_conn.exec_command(command)
                content = stdout.read()
                if len(content) == 0:
                    print self.name, ': command:', command, "stderr:", stderr.read()
                    raise paramiko.ssh_exception.SSHException("Error empty file ")
                # NOTE(review): yaml.load on host-provided data; yaml.safe_load would be safer — confirm file is trusted
                self.localinfo = yaml.load(content)
                js_v(self.localinfo, localinfo_schema)
                self.localinfo_dirty=False
                if 'server_files' not in self.localinfo:
                    self.localinfo['server_files'] = {}
                print self.name, ': localinfo load from host'
                return

            except paramiko.ssh_exception.SSHException as e:
                text = e.args[0]
                print self.name, ": load_localinfo ssh Exception:", text
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                print self.name, ": load_localinfo libvirt Exception:", text
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                print self.name, ": load_localinfo yaml format Exception", text
            except js_e.ValidationError as e:
                text = ""
                if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
                print self.name, ": load_localinfo format Exception:", text, e.message
            except Exception as e:
                text = str(e)
                print self.name, ": load_localinfo Exception:", text

        #not loaded, insert a default data and force saving by activating dirty flag
        self.localinfo = {'files':{}, 'server_files':{} }
        #self.localinfo_dirty=True
        self.localinfo_dirty=False
162
    def load_hostinfo(self):
        """Load hostinfo.yaml (host interface-name mapping) over SSH.

        On any failure self.hostinfo is left as None, in which case
        get_local_iface_name() falls back to the generic names. No-op in
        test mode.
        """
        if self.test:
            return;
        try:
            #Connect SSH
            self.ssh_connect()


            command = 'cat ' + self.image_path + '/hostinfo.yaml'
            #print self.name, ': command:', command
            (_, stdout, stderr) = self.ssh_conn.exec_command(command)
            content = stdout.read()
            if len(content) == 0:
                print self.name, ': command:', command, "stderr:", stderr.read()
                raise paramiko.ssh_exception.SSHException("Error empty file ")
            # NOTE(review): yaml.load on host-provided data; yaml.safe_load would be safer — confirm file is trusted
            self.hostinfo = yaml.load(content)
            js_v(self.hostinfo, hostinfo_schema)
            print self.name, ': hostlinfo load from host', self.hostinfo
            return

        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            print self.name, ": load_hostinfo ssh Exception:", text
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            print self.name, ": load_hostinfo libvirt Exception:", text
        except yaml.YAMLError as exc:
            text = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
            print self.name, ": load_hostinfo yaml format Exception", text
        except js_e.ValidationError as e:
            text = ""
            if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
            print self.name, ": load_hostinfo format Exception:", text, e.message
        except Exception as e:
            text = str(e)
            print self.name, ": load_hostinfo Exception:", text

        #not loaded, insert a default data
        self.hostinfo = None
205
    def save_localinfo(self, tries=3):
        """Write self.localinfo as YAML to .openvim.yaml on the host.

        Retries up to 'tries' times, reconnecting SSH when the session has
        died. In test mode it just clears the dirty flag.
        :param tries: number of attempts before giving up
        """
        if self.test:
            self.localinfo_dirty = False
            return

        while tries>=0:
            tries-=1

            try:
                command = 'cat > ' + self.image_path + '/.openvim.yaml'
                print self.name, ': command:', command
                (stdin, _, _) = self.ssh_conn.exec_command(command)
                # stream the YAML directly into the remote cat's stdin
                yaml.safe_dump(self.localinfo, stdin, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
                self.localinfo_dirty = False
                break #while tries

            except paramiko.ssh_exception.SSHException as e:
                text = e.args[0]
                print self.name, ": save_localinfo ssh Exception:", text
                if "SSH session not active" in text:
                    self.ssh_connect()
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                print self.name, ": save_localinfo libvirt Exception:", text
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                print self.name, ": save_localinfo yaml format Exception", text
            except Exception as e:
                text = str(e)
                print self.name, ": save_localinfo Exception:", text
239
    def load_servers_from_db(self):
        """Fill self.server_status with uuid:status of the instances assigned
        to this host, migrating the legacy 'inc_files' localinfo layout into
        the current 'server_files' layout when present.
        """
        self.db_lock.acquire()
        r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
        self.db_lock.release()

        self.server_status = {}
        if r<0:
            print self.name, ": Error getting data from database:", c
            return
        for server in c:
            self.server_status[ server['uuid'] ] = server['status']

            #convert from old version to new one
            if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
                server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
                # heuristic: filenames ending in 'qcow2' are qcow2 images
                if server_files_dict['source file'][-5:] == 'qcow2':
                    server_files_dict['file format'] = 'qcow2'

                self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
        # once every server is converted, drop the legacy key and persist
        if 'inc_files' in self.localinfo:
            del self.localinfo['inc_files']
            self.localinfo_dirty = True
262
    def delete_unused_files(self):
        '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
        Deletes unused entries at self.loacalinfo and the corresponding local files.
        The only reason for this mismatch is the manual deletion of instances (VM) at database
        '''
        if self.test:
            return
        # .items() returns a list copy in Python 2, so deleting dict entries
        # while iterating here is safe
        for uuid,images in self.localinfo['server_files'].items():
            if uuid not in self.server_status:
                for localfile in images.values():
                    try:
                        print self.name, ": deleting file '%s' of unused server '%s'" %(localfile['source file'], uuid)
                        self.delete_file(localfile['source file'])
                    except paramiko.ssh_exception.SSHException as e:
                        # best-effort: log and keep going; the entry is removed anyway
                        print self.name, ": Exception deleting file '%s': %s" %(localfile['source file'], str(e))
                del self.localinfo['server_files'][uuid]
                self.localinfo_dirty = True
280
281 def insert_task(self, task, *aditional):
282 try:
283 self.queueLock.acquire()
284 task = self.taskQueue.put( (task,) + aditional, timeout=5)
285 self.queueLock.release()
286 return 1, None
287 except Queue.Full:
288 return -1, "timeout inserting a task over host " + self.name
289
    def run(self):
        """Main loop of the host thread.

        Loads host/local info and the instance list, then consumes tasks
        from taskQueue. When idle it performs housekeeping: saves dirty
        localinfo, refreshes server status every 5 seconds and fires pending
        forced power-offs. A 'reload' task restarts the outer loop; an
        'exit' task terminates the thread.
        """
        while True:
            self.load_localinfo()
            self.load_hostinfo()
            self.load_servers_from_db()
            self.delete_unused_files()
            while True:
                # pop the next task, if any (lock shared with insert_task callers)
                self.queueLock.acquire()
                if not self.taskQueue.empty():
                    task = self.taskQueue.get()
                else:
                    task = None
                self.queueLock.release()

                if task is None:
                    # idle housekeeping; only one action per iteration
                    now=time.time()
                    if self.localinfo_dirty:
                        self.save_localinfo()
                    elif self.next_update_server_status < now:
                        self.update_servers_status()
                        self.next_update_server_status = now + 5
                    elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
                        self.server_forceoff()
                    else:
                        time.sleep(1)
                    continue

                # task dispatch: task[0] is the task name, task[1:] its payload
                if task[0] == 'instance':
                    print self.name, ": processing task instance", task[1]['action']
                    retry=0
                    while retry <2:
                        retry += 1
                        # last attempt is flagged so action_on_server can react differently
                        r=self.action_on_server(task[1], retry==2)
                        if r>=0:
                            break
                elif task[0] == 'image':
                    pass
                elif task[0] == 'exit':
                    print self.name, ": processing task exit"
                    self.terminate()
                    return 0
                elif task[0] == 'reload':
                    print self.name, ": processing task reload terminating and relaunching"
                    self.terminate()
                    break
                elif task[0] == 'edit-iface':
                    print self.name, ": processing task edit-iface port=%s, old_net=%s, new_net=%s" % (task[1], task[2], task[3])
                    self.edit_iface(task[1], task[2], task[3])
                elif task[0] == 'restore-iface':
                    print self.name, ": processing task restore-iface %s mac=%s" % (task[1], task[2])
                    self.restore_iface(task[1], task[2])
                elif task[0] == 'new-ovsbridge':
                    print self.name, ": Creating compute OVS bridge"
                    self.create_ovs_bridge()
                elif task[0] == 'new-vxlan':
                    print self.name, ": Creating vxlan tunnel=" + task[1] + ", remote ip=" + task[2]
                    self.create_ovs_vxlan_tunnel(task[1], task[2])
                elif task[0] == 'del-ovsbridge':
                    print self.name, ": Deleting OVS bridge"
                    self.delete_ovs_bridge()
                elif task[0] == 'del-vxlan':
                    print self.name, ": Deleting vxlan " + task[1] + " tunnel"
                    self.delete_ovs_vxlan_tunnel(task[1])
                elif task[0] == 'create-ovs-bridge-port':
                    print self.name, ": Adding port ovim-" + task[1] + " to OVS bridge"
                    self.create_ovs_bridge_port(task[1])
                elif task[0] == 'del-ovs-port':
                    print self.name, ": Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2])
                    self.delete_bridge_port_attached_to_ovs(task[1], task[2])
                else:
                    print self.name, ": unknown task", task
361
    def server_forceoff(self, wait_until_finished=False):
        """Send a forced terminate to every server whose grace deadline expired.

        pending_terminate_server holds (deadline, server_uuid) pairs in
        deadline order.
        :param wait_until_finished: when True, sleep until each pending
            deadline is reached instead of returning early.
        """
        while len(self.pending_terminate_server)>0:
            now = time.time()
            if self.pending_terminate_server[0][0]>now:
                # earliest deadline not reached yet
                if wait_until_finished:
                    time.sleep(1)
                    continue
                else:
                    return
            req={'uuid':self.pending_terminate_server[0][1],
                'action':{'terminate':'force'},
                'status': None
            }
            self.action_on_server(req)
            self.pending_terminate_server.pop(0)
377
    def terminate(self):
        """Flush pending work before the thread stops: force-off pending
        servers, persist dirty localinfo and close the SSH connection.
        Any exception is logged and swallowed so shutdown always completes.
        """
        try:
            self.server_forceoff(True)
            if self.localinfo_dirty:
                self.save_localinfo()
            if not self.test:
                self.ssh_conn.close()
        except Exception as e:
            text = str(e)
            print self.name, ": terminate Exception:", text
        print self.name, ": exit from host_thread"
389
390 def get_local_iface_name(self, generic_name):
391 if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
392 return self.hostinfo["iface_names"][generic_name]
393 return generic_name
394
395 def create_xml_server(self, server, dev_list, server_metadata={}):
396 """Function that implements the generation of the VM XML definition.
397 Additional devices are in dev_list list
398 The main disk is upon dev_list[0]"""
399
400 #get if operating system is Windows
401 windows_os = False
402 os_type = server_metadata.get('os_type', None)
403 if os_type == None and 'metadata' in dev_list[0]:
404 os_type = dev_list[0]['metadata'].get('os_type', None)
405 if os_type != None and os_type.lower() == "windows":
406 windows_os = True
407 #get type of hard disk bus
408 bus_ide = True if windows_os else False
409 bus = server_metadata.get('bus', None)
410 if bus == None and 'metadata' in dev_list[0]:
411 bus = dev_list[0]['metadata'].get('bus', None)
412 if bus != None:
413 bus_ide = True if bus=='ide' else False
414
415 self.xml_level = 0
416
417 text = "<domain type='kvm'>"
418 #get topology
419 topo = server_metadata.get('topology', None)
420 if topo == None and 'metadata' in dev_list[0]:
421 topo = dev_list[0]['metadata'].get('topology', None)
422 #name
423 name = server.get('name','') + "_" + server['uuid']
424 name = name[:58] #qemu impose a length limit of 59 chars or not start. Using 58
425 text += self.inc_tab() + "<name>" + name+ "</name>"
426 #uuid
427 text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>"
428
429 numa={}
430 if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
431 numa = server['extended']['numas'][0]
432 #memory
433 use_huge = False
434 memory = int(numa.get('memory',0))*1024*1024 #in KiB
435 if memory==0:
436 memory = int(server['ram'])*1024;
437 else:
438 if not self.develop_mode:
439 use_huge = True
440 if memory==0:
441 return -1, 'No memory assigned to instance'
442 memory = str(memory)
443 text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>"
444 text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
445 if use_huge:
446 text += self.tab()+'<memoryBacking>'+ \
447 self.inc_tab() + '<hugepages/>'+ \
448 self.dec_tab()+ '</memoryBacking>'
449
450 #cpu
451 use_cpu_pinning=False
452 vcpus = int(server.get("vcpus",0))
453 cpu_pinning = []
454 if 'cores-source' in numa:
455 use_cpu_pinning=True
456 for index in range(0, len(numa['cores-source'])):
457 cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
458 vcpus += 1
459 if 'threads-source' in numa:
460 use_cpu_pinning=True
461 for index in range(0, len(numa['threads-source'])):
462 cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
463 vcpus += 1
464 if 'paired-threads-source' in numa:
465 use_cpu_pinning=True
466 for index in range(0, len(numa['paired-threads-source'])):
467 cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
468 cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
469 vcpus += 2
470
471 if use_cpu_pinning and not self.develop_mode:
472 text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
473 self.tab()+'<cputune>'
474 self.xml_level += 1
475 for i in range(0, len(cpu_pinning)):
476 text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
477 text += self.dec_tab()+'</cputune>'+ \
478 self.tab() + '<numatune>' +\
479 self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
480 self.dec_tab() + '</numatune>'
481 else:
482 if vcpus==0:
483 return -1, "Instance without number of cpus"
484 text += self.tab()+"<vcpu>" + str(vcpus) + "</vcpu>"
485
486 #boot
487 boot_cdrom = False
488 for dev in dev_list:
489 if dev['type']=='cdrom' :
490 boot_cdrom = True
491 break
492 text += self.tab()+ '<os>' + \
493 self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
494 if boot_cdrom:
495 text += self.tab() + "<boot dev='cdrom'/>"
496 text += self.tab() + "<boot dev='hd'/>" + \
497 self.dec_tab()+'</os>'
498 #features
499 text += self.tab()+'<features>'+\
500 self.inc_tab()+'<acpi/>' +\
501 self.tab()+'<apic/>' +\
502 self.tab()+'<pae/>'+ \
503 self.dec_tab() +'</features>'
504 if windows_os or topo=="oneSocket":
505 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>"% vcpus
506 else:
507 text += self.tab() + "<cpu mode='host-model'></cpu>"
508 text += self.tab() + "<clock offset='utc'/>" +\
509 self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
510 self.tab() + "<on_reboot>restart</on_reboot>" + \
511 self.tab() + "<on_crash>restart</on_crash>"
512 text += self.tab() + "<devices>" + \
513 self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
514 self.tab() + "<serial type='pty'>" +\
515 self.inc_tab() + "<target port='0'/>" + \
516 self.dec_tab() + "</serial>" +\
517 self.tab() + "<console type='pty'>" + \
518 self.inc_tab()+ "<target type='serial' port='0'/>" + \
519 self.dec_tab()+'</console>'
520 if windows_os:
521 text += self.tab() + "<controller type='usb' index='0'/>" + \
522 self.tab() + "<controller type='ide' index='0'/>" + \
523 self.tab() + "<input type='mouse' bus='ps2'/>" + \
524 self.tab() + "<sound model='ich6'/>" + \
525 self.tab() + "<video>" + \
526 self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
527 self.dec_tab() + "</video>" + \
528 self.tab() + "<memballoon model='virtio'/>" + \
529 self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
530
531 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
532 #> self.dec_tab()+'</hostdev>\n' +\
533 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
534 if windows_os:
535 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
536 else:
537 #If image contains 'GRAPH' include graphics
538 #if 'GRAPH' in image:
539 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
540 self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
541 self.dec_tab() + "</graphics>"
542
543 vd_index = 'a'
544 for dev in dev_list:
545 bus_ide_dev = bus_ide
546 if dev['type']=='cdrom' or dev['type']=='disk':
547 if dev['type']=='cdrom':
548 bus_ide_dev = True
549 text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
550 if 'file format' in dev:
551 text += self.inc_tab() + "<driver name='qemu' type='" +dev['file format']+ "' cache='writethrough'/>"
552 if 'source file' in dev:
553 text += self.tab() + "<source file='" +dev['source file']+ "'/>"
554 #elif v['type'] == 'block':
555 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
556 #else:
557 # return -1, 'Unknown disk type ' + v['type']
558 vpci = dev.get('vpci',None)
559 if vpci == None:
560 vpci = dev['metadata'].get('vpci',None)
561 text += self.pci2xml(vpci)
562
563 if bus_ide_dev:
564 text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>" #TODO allows several type of disks
565 else:
566 text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>"
567 text += self.dec_tab() + '</disk>'
568 vd_index = chr(ord(vd_index)+1)
569 elif dev['type']=='xml':
570 dev_text = dev['xml']
571 if 'vpci' in dev:
572 dev_text = dev_text.replace('__vpci__', dev['vpci'])
573 if 'source file' in dev:
574 dev_text = dev_text.replace('__file__', dev['source file'])
575 if 'file format' in dev:
576 dev_text = dev_text.replace('__format__', dev['source file'])
577 if '__dev__' in dev_text:
578 dev_text = dev_text.replace('__dev__', vd_index)
579 vd_index = chr(ord(vd_index)+1)
580 text += dev_text
581 else:
582 return -1, 'Unknown device type ' + dev['type']
583
584 net_nb=0
585 bridge_interfaces = server.get('networks', [])
586 for v in bridge_interfaces:
587 #Get the brifge name
588 self.db_lock.acquire()
589 result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
590 self.db_lock.release()
591 if result <= 0:
592 print "create_xml_server ERROR getting nets",result, content
593 return -1, content
594 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
595 #I know it is not secure
596 #for v in sorted(desc['network interfaces'].itervalues()):
597 model = v.get("model", None)
598 if content[0]['provider']=='default':
599 text += self.tab() + "<interface type='network'>" + \
600 self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
601 elif content[0]['provider'][0:7]=='macvtap':
602 text += self.tab()+"<interface type='direct'>" + \
603 self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
604 self.tab() + "<target dev='macvtap0'/>"
605 if windows_os:
606 text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
607 elif model==None:
608 model = "virtio"
609 elif content[0]['provider'][0:6]=='bridge':
610 text += self.tab() + "<interface type='bridge'>" + \
611 self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
612 if windows_os:
613 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
614 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
615 elif model==None:
616 model = "virtio"
617 elif content[0]['provider'][0:3] == "OVS":
618 vlan = content[0]['provider'].replace('OVS:', '')
619 text += self.tab() + "<interface type='bridge'>" + \
620 self.inc_tab() + "<source bridge='ovim-" + vlan + "'/>"
621 else:
622 return -1, 'Unknown Bridge net provider ' + content[0]['provider']
623 if model!=None:
624 text += self.tab() + "<model type='" +model+ "'/>"
625 if v.get('mac_address', None) != None:
626 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
627 text += self.pci2xml(v.get('vpci',None))
628 text += self.dec_tab()+'</interface>'
629
630 net_nb += 1
631
632 interfaces = numa.get('interfaces', [])
633
634 net_nb=0
635 for v in interfaces:
636 if self.develop_mode: #map these interfaces to bridges
637 text += self.tab() + "<interface type='bridge'>" + \
638 self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
639 if windows_os:
640 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
641 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
642 else:
643 text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
644 if v.get('mac_address', None) != None:
645 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
646 text += self.pci2xml(v.get('vpci',None))
647 text += self.dec_tab()+'</interface>'
648 continue
649
650 if v['dedicated'] == 'yes': #passthrought
651 text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
652 self.inc_tab() + "<source>"
653 self.inc_tab()
654 text += self.pci2xml(v['source'])
655 text += self.dec_tab()+'</source>'
656 text += self.pci2xml(v.get('vpci',None))
657 if windows_os:
658 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
659 text += self.dec_tab()+'</hostdev>'
660 net_nb += 1
661 else: #sriov_interfaces
662 #skip not connected interfaces
663 if v.get("net_id") == None:
664 continue
665 text += self.tab() + "<interface type='hostdev' managed='yes'>"
666 self.inc_tab()
667 if v.get('mac_address', None) != None:
668 text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
669 text+= self.tab()+'<source>'
670 self.inc_tab()
671 text += self.pci2xml(v['source'])
672 text += self.dec_tab()+'</source>'
673 if v.get('vlan',None) != None:
674 text += self.tab() + "<vlan> <tag id='" + str(v['vlan']) + "'/> </vlan>"
675 text += self.pci2xml(v.get('vpci',None))
676 if windows_os:
677 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
678 text += self.dec_tab()+'</interface>'
679
680
681 text += self.dec_tab()+'</devices>'+\
682 self.dec_tab()+'</domain>'
683 return 0, text
684
685 def pci2xml(self, pci):
686 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
687 alows an empty pci text'''
688 if pci is None:
689 return ""
690 first_part = pci.split(':')
691 second_part = first_part[2].split('.')
692 return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
693 "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
694 "' function='0x" + second_part[1] + "'/>"
695
696 def tab(self):
697 """Return indentation according to xml_level"""
698 return "\n" + (' '*self.xml_level)
699
700 def inc_tab(self):
701 """Increment and return indentation according to xml_level"""
702 self.xml_level += 1
703 return self.tab()
704
705 def dec_tab(self):
706 """Decrement and return indentation according to xml_level"""
707 self.xml_level -= 1
708 return self.tab()
709
710 def create_ovs_bridge(self):
711 """
712 Create a bridge in compute OVS to allocate VMs
713 :return: True if success
714 """
715 if self.test:
716 return
717 command = 'sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true'
718 print self.name, ': command:', command
719 (_, stdout, _) = self.ssh_conn.exec_command(command)
720 content = stdout.read()
721 if len(content) == 0:
722 return True
723 else:
724 return False
725
726 def delete_port_to_ovs_bridge(self, vlan, net_uuid):
727 """
728 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
729 :param vlan: vlan port id
730 :param net_uuid: network id
731 :return:
732 """
733
734 if self.test:
735 return
736
737 port_name = 'ovim-' + vlan
738 command = 'sudo ovs-vsctl del-port br-int ' + port_name
739 print self.name, ': command:', command
740 (_, stdout, _) = self.ssh_conn.exec_command(command)
741 content = stdout.read()
742 if len(content) == 0:
743 return True
744 else:
745 return False
746
    def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
        """
        Kill the dnsmasq dhcp server living in the net namespace, reading its
        pid from the per-namespace dnsmasq.pid file.
        :param vlan: segmentation id
        :param net_uuid: network uuid
        :param dhcp_path: base conf path; the namespace directory lives under it
        :return: True when the net still has attached ports (nothing killed);
                 None otherwise (the status check below is disabled)
        """
        if self.test:
            return
        # keep the dhcp server alive while other ports still use the net
        if not self.is_dhcp_port_free(vlan, net_uuid):
            return True

        net_namespace = 'ovim-' + vlan
        dhcp_path = os.path.join(dhcp_path, net_namespace)
        pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

        # read the dnsmasq pid from inside the namespace
        command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_file
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # kill it; best-effort, result is not checked
        command = 'sudo ip netns exec ' + net_namespace + ' kill -9 ' + content
        print self.name, ': command:', command
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # if len(content) == 0:
        #     return True
        # else:
        #     return False
778
779 def is_dhcp_port_free(self, host_id, net_uuid):
780 """
781 Check if any port attached to the a net in a vxlan mesh across computes nodes
782 :param host_id: host id
783 :param net_uuid: network id
784 :return: True if is not free
785 """
786 self.db_lock.acquire()
787 result, content = self.db.get_table(
788 FROM='ports',
789 WHERE={'p.type': 'instance:ovs', 'p.net_id': net_uuid}
790 )
791 self.db_lock.release()
792
793 if len(content) > 0:
794 return False
795 else:
796 return True
797
798 def is_port_free(self, host_id, net_uuid):
799 """
800 Check if there not ovs ports of a network in a compute host.
801 :param host_id: host id
802 :param net_uuid: network id
803 :return: True if is not free
804 """
805
806 self.db_lock.acquire()
807 result, content = self.db.get_table(
808 FROM='ports as p join instances as i on p.instance_id=i.uuid',
809 WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
810 )
811 self.db_lock.release()
812
813 if len(content) > 0:
814 return False
815 else:
816 return True
817
818 def add_port_to_ovs_bridge(self, vlan):
819 """
820 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
821 :param vlan: vlan port id
822 :return: True if success
823 """
824
825 if self.test:
826 return
827
828 port_name = 'ovim-' + vlan
829 command = 'sudo ovs-vsctl add-port br-int ' + port_name + ' tag=' + vlan
830 print self.name, ': command:', command
831 (_, stdout, _) = self.ssh_conn.exec_command(command)
832 content = stdout.read()
833 if len(content) == 0:
834 return True
835 else:
836 return False
837
838 def delete_dhcp_port(self, vlan, net_uuid):
839 """
840 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
841 :param vlan: segmentation id
842 :param net_uuid: network id
843 :return: True if success
844 """
845
846 if self.test:
847 return
848
849 if not self.is_dhcp_port_free(vlan, net_uuid):
850 return True
851 self.delete_dhcp_interfaces(vlan)
852 return True
853
854 def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
855 """
856 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
857 :param vlan:
858 :param net_uuid:
859 :return: True if success
860 """
861 if self.test:
862 return
863
864 if not self.is_port_free(vlan, net_uuid):
865 return True
866 self.delete_port_to_ovs_bridge(vlan, net_uuid)
867 self.delete_linux_bridge(vlan)
868 return True
869
870 def delete_linux_bridge(self, vlan):
871 """
872 Delete a linux bridge in a scpecific compute.
873 :param vlan: vlan port id
874 :return: True if success
875 """
876
877 if self.test:
878 return
879
880 port_name = 'ovim-' + vlan
881 command = 'sudo ip link set dev veth0-' + vlan + ' down'
882 print self.name, ': command:', command
883 (_, stdout, _) = self.ssh_conn.exec_command(command)
884 content = stdout.read()
885 #
886 # if len(content) != 0:
887 # return False
888
889 command = 'sudo ifconfig ' + port_name + ' down && sudo brctl delbr ' + port_name
890 print self.name, ': command:', command
891 (_, stdout, _) = self.ssh_conn.exec_command(command)
892 content = stdout.read()
893 if len(content) == 0:
894 return True
895 else:
896 return False
897
    def create_ovs_bridge_port(self, vlan):
        """
        Generate a linux bridge and attach the port to a OVS bridge
        :param vlan: vlan port id
        :return:
        """
        # no-op in test mode: there is no real host behind this thread
        if self.test:
            return
        self.create_linux_bridge(vlan)
        self.add_port_to_ovs_bridge(vlan)
908
909 def create_linux_bridge(self, vlan):
910 """
911 Create a linux bridge with STP active
912 :param vlan: netowrk vlan id
913 :return:
914 """
915
916 if self.test:
917 return
918
919 port_name = 'ovim-' + vlan
920 command = 'sudo brctl show | grep ' + port_name
921 print self.name, ': command:', command
922 (_, stdout, _) = self.ssh_conn.exec_command(command)
923 content = stdout.read()
924
925 # if exist nothing to create
926 # if len(content) == 0:
927 # return False
928
929 command = 'sudo brctl addbr ' + port_name
930 print self.name, ': command:', command
931 (_, stdout, _) = self.ssh_conn.exec_command(command)
932 content = stdout.read()
933
934 # if len(content) == 0:
935 # return True
936 # else:
937 # return False
938
939 command = 'sudo brctl stp ' + port_name + ' on'
940 print self.name, ': command:', command
941 (_, stdout, _) = self.ssh_conn.exec_command(command)
942 content = stdout.read()
943
944 # if len(content) == 0:
945 # return True
946 # else:
947 # return False
948 command = 'sudo ip link set dev ' + port_name + ' up'
949 print self.name, ': command:', command
950 (_, stdout, _) = self.ssh_conn.exec_command(command)
951 content = stdout.read()
952
953 if len(content) == 0:
954 return True
955 else:
956 return False
957
958 def set_mac_dhcp_server(self, ip, mac, vlan, netmask, dhcp_path):
959 """
960 Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
961 :param ip: IP address asigned to a VM
962 :param mac: VM vnic mac to be macthed with the IP received
963 :param vlan: Segmentation id
964 :param netmask: netmask value
965 :param path: dhcp conf file path that live in namespace side
966 :return: True if success
967 """
968
969 if self.test:
970 return
971
972 net_namespace = 'ovim-' + vlan
973 dhcp_path = os.path.join(dhcp_path, net_namespace)
974 dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)
975
976 if not ip:
977 return False
978
979 ip_data = mac.upper() + ',' + ip
980
981 command = 'sudo ip netns exec ' + net_namespace + ' touch ' + dhcp_hostsdir
982 print self.name, ': command:', command
983 (_, stdout, _) = self.ssh_conn.exec_command(command)
984 content = stdout.read()
985
986 command = 'sudo ip netns exec ' + net_namespace + ' sudo bash -ec "echo ' + ip_data + ' >> ' + dhcp_hostsdir + '"'
987
988 print self.name, ': command:', command
989 (_, stdout, _) = self.ssh_conn.exec_command(command)
990 content = stdout.read()
991
992 if len(content) == 0:
993 return True
994 else:
995 return False
996
997 def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
998 """
999 Delete into dhcp conf file the ip assigned to a specific MAC address
1000
1001 :param ip: IP address asigned to a VM
1002 :param mac: VM vnic mac to be macthed with the IP received
1003 :param vlan: Segmentation id
1004 :param dhcp_path: dhcp conf file path that live in namespace side
1005 :return:
1006 """
1007
1008 if self.test:
1009 return
1010
1011 net_namespace = 'ovim-' + vlan
1012 dhcp_path = os.path.join(dhcp_path, net_namespace)
1013 dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)
1014
1015 if not ip:
1016 return False
1017
1018 ip_data = mac.upper() + ',' + ip
1019
1020 command = 'sudo ip netns exec ' + net_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
1021 print self.name, ': command:', command
1022 (_, stdout, _) = self.ssh_conn.exec_command(command)
1023 content = stdout.read()
1024
1025 if len(content) == 0:
1026 return True
1027 else:
1028 return False
1029
1030 def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path):
1031 """
1032 Generate a linux bridge and attache the port to a OVS bridge
1033 :param self:
1034 :param vlan: Segmentation id
1035 :param ip_range: IP dhcp range
1036 :param netmask: network netmask
1037 :param dhcp_path: dhcp conf file path that live in namespace side
1038 :return: True if success
1039 """
1040
1041 if self.test:
1042 return
1043
1044 interface = 'tap-' + vlan
1045 net_namespace = 'ovim-' + vlan
1046 dhcp_path = os.path.join(dhcp_path, net_namespace)
1047 leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
1048 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
1049
1050 dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask
1051
1052 command = 'sudo ip netns exec ' + net_namespace + ' mkdir -p ' + dhcp_path
1053 print self.name, ': command:', command
1054 (_, stdout, _) = self.ssh_conn.exec_command(command)
1055 content = stdout.read()
1056
1057 pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
1058 command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_path
1059 print self.name, ': command:', command
1060 (_, stdout, _) = self.ssh_conn.exec_command(command)
1061 content = stdout.read()
1062 # check if pid is runing
1063 pid_status_path = content
1064 if content:
1065 command = "ps aux | awk '{print $2 }' | grep " + pid_status_path
1066 print self.name, ': command:', command
1067 (_, stdout, _) = self.ssh_conn.exec_command(command)
1068 content = stdout.read()
1069 if not content:
1070 command = 'sudo ip netns exec ' + net_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
1071 '--interface=' + interface + ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
1072 ' --dhcp-range ' + dhcp_range + ' --pid-file=' + pid_file + ' --dhcp-leasefile=' + leases_path + ' --listen-address ' + ip_range[0]
1073
1074 print self.name, ': command:', command
1075 (_, stdout, _) = self.ssh_conn.exec_command(command)
1076 content = stdout.readline()
1077
1078 if len(content) == 0:
1079 return True
1080 else:
1081 return False
1082
1083 def delete_dhcp_interfaces(self, vlan):
1084 """
1085 Create a linux bridge with STP active
1086 :param vlan: netowrk vlan id
1087 :return:
1088 """
1089
1090 if self.test:
1091 return
1092
1093 net_namespace = 'ovim-' + vlan
1094 command = 'sudo ovs-vsctl del-port br-int ovs-tap-' + vlan
1095 print self.name, ': command:', command
1096 (_, stdout, _) = self.ssh_conn.exec_command(command)
1097 content = stdout.read()
1098
1099 command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' down'
1100 print self.name, ': command:', command
1101 (_, stdout, _) = self.ssh_conn.exec_command(command)
1102 content = stdout.read()
1103
1104 command = 'sudo ip link set dev ovs-tap-' + vlan + ' down'
1105 print self.name, ': command:', command
1106 (_, stdout, _) = self.ssh_conn.exec_command(command)
1107 content = stdout.read()
1108
1109 def create_dhcp_interfaces(self, vlan, ip, netmask):
1110 """
1111 Create a linux bridge with STP active
1112 :param vlan: segmentation id
1113 :param ip: Ip included in the dhcp range for the tap interface living in namesapce side
1114 :param netmask: dhcp net CIDR
1115 :return: True if success
1116 """
1117
1118 if self.test:
1119 return
1120
1121 net_namespace = 'ovim-' + vlan
1122 namespace_interface = 'tap-' + vlan
1123
1124 command = 'sudo ip netns add ' + net_namespace
1125 print self.name, ': command:', command
1126 (_, stdout, _) = self.ssh_conn.exec_command(command)
1127 content = stdout.read()
1128
1129 command = 'sudo ip link add tap-' + vlan + ' type veth peer name ovs-tap-' + vlan
1130 print self.name, ': command:', command
1131 (_, stdout, _) = self.ssh_conn.exec_command(command)
1132 content = stdout.read()
1133
1134 command = 'sudo ovs-vsctl add-port br-int ovs-tap-' + vlan + ' tag=' + vlan
1135 print self.name, ': command:', command
1136 (_, stdout, _) = self.ssh_conn.exec_command(command)
1137 content = stdout.read()
1138
1139 command = 'sudo ip link set tap-' + vlan + ' netns ' + net_namespace
1140 print self.name, ': command:', command
1141 (_, stdout, _) = self.ssh_conn.exec_command(command)
1142 content = stdout.read()
1143
1144 command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' up'
1145 print self.name, ': command:', command
1146 (_, stdout, _) = self.ssh_conn.exec_command(command)
1147 content = stdout.read()
1148
1149 command = 'sudo ip link set dev ovs-tap-' + vlan + ' up'
1150 print self.name, ': command:', command
1151 (_, stdout, _) = self.ssh_conn.exec_command(command)
1152 content = stdout.read()
1153
1154 command = 'sudo ip netns exec ' + net_namespace + ' ' + ' ifconfig ' + namespace_interface \
1155 + ' ' + ip + ' netmask ' + netmask
1156 print self.name, ': command:', command
1157 (_, stdout, _) = self.ssh_conn.exec_command(command)
1158 content = stdout.read()
1159
1160 if len(content) == 0:
1161 return True
1162 else:
1163 return False
1164
1165 def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
1166 """
1167 Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
1168 :param vxlan_interface: vlxan inteface name.
1169 :param remote_ip: tunnel endpoint remote compute ip.
1170 :return:
1171 """
1172 if self.test:
1173 return
1174 command = 'sudo ovs-vsctl add-port br-int ' + vxlan_interface + \
1175 ' -- set Interface ' + vxlan_interface + ' type=vxlan options:remote_ip=' + remote_ip + \
1176 ' -- set Port ' + vxlan_interface + ' other_config:stp-path-cost=10'
1177 print self.name, ': command:', command
1178 (_, stdout, _) = self.ssh_conn.exec_command(command)
1179 content = stdout.read()
1180 print content
1181 if len(content) == 0:
1182 return True
1183 else:
1184 return False
1185
1186 def delete_ovs_vxlan_tunnel(self, vxlan_interface):
1187 """
1188 Delete a vlxan tunnel port from a OVS brdige.
1189 :param vxlan_interface: vlxan name to be delete it.
1190 :return: True if success.
1191 """
1192 if self.test:
1193 return
1194 command = 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
1195 print self.name, ': command:', command
1196 (_, stdout, _) = self.ssh_conn.exec_command(command)
1197 content = stdout.read()
1198 print content
1199 if len(content) == 0:
1200 return True
1201 else:
1202 return False
1203
1204 def delete_ovs_bridge(self):
1205 """
1206 Delete a OVS bridge from a compute.
1207 :return: True if success
1208 """
1209 if self.test:
1210 return
1211 command = 'sudo ovs-vsctl del-br br-int'
1212 print self.name, ': command:', command
1213 (_, stdout, _) = self.ssh_conn.exec_command(command)
1214 content = stdout.read()
1215 if len(content) == 0:
1216 return True
1217 else:
1218 return False
1219
1220 def get_file_info(self, path):
1221 command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1222 print self.name, ': command:', command
1223 (_, stdout, _) = self.ssh_conn.exec_command(command)
1224 content = stdout.read()
1225 if len(content) == 0:
1226 return None # file does not exist
1227 else:
1228 return content.split(" ") #(permission, 1, owner, group, size, date, file)
1229
1230 def qemu_get_info(self, path):
1231 command = 'qemu-img info ' + path
1232 print self.name, ': command:', command
1233 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
1234 content = stdout.read()
1235 if len(content) == 0:
1236 error = stderr.read()
1237 print self.name, ": get_qemu_info error ", error
1238 raise paramiko.ssh_exception.SSHException("Error getting qemu_info: " + error)
1239 else:
1240 try:
1241 return yaml.load(content)
1242 except yaml.YAMLError as exc:
1243 text = ""
1244 if hasattr(exc, 'problem_mark'):
1245 mark = exc.problem_mark
1246 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
1247 print self.name, ": get_qemu_info yaml format Exception", text
1248 raise paramiko.ssh_exception.SSHException("Error getting qemu_info yaml format" + text)
1249
1250 def qemu_change_backing(self, inc_file, new_backing_file):
1251 command = 'qemu-img rebase -u -b ' + new_backing_file + ' ' + inc_file
1252 print self.name, ': command:', command
1253 (_, _, stderr) = self.ssh_conn.exec_command(command)
1254 content = stderr.read()
1255 if len(content) == 0:
1256 return 0
1257 else:
1258 print self.name, ": qemu_change_backing error: ", content
1259 return -1
1260
1261 def get_notused_filename(self, proposed_name, suffix=''):
1262 '''Look for a non existing file_name in the host
1263 proposed_name: proposed file name, includes path
1264 suffix: suffix to be added to the name, before the extention
1265 '''
1266 extension = proposed_name.rfind(".")
1267 slash = proposed_name.rfind("/")
1268 if extension < 0 or extension < slash: # no extension
1269 extension = len(proposed_name)
1270 target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
1271 info = self.get_file_info(target_name)
1272 if info is None:
1273 return target_name
1274
1275 index=0
1276 while info is not None:
1277 target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
1278 index+=1
1279 info = self.get_file_info(target_name)
1280 return target_name
1281
1282 def get_notused_path(self, proposed_path, suffix=''):
1283 '''Look for a non existing path at database for images
1284 proposed_path: proposed file name, includes path
1285 suffix: suffix to be added to the name, before the extention
1286 '''
1287 extension = proposed_path.rfind(".")
1288 if extension < 0:
1289 extension = len(proposed_path)
1290 if suffix != None:
1291 target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
1292 index=0
1293 while True:
1294 r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
1295 if r<=0:
1296 return target_path
1297 target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
1298 index+=1
1299
1300
1301 def delete_file(self, file_name):
1302 command = 'rm -f '+file_name
1303 print self.name, ': command:', command
1304 (_, _, stderr) = self.ssh_conn.exec_command(command)
1305 error_msg = stderr.read()
1306 if len(error_msg) > 0:
1307 raise paramiko.ssh_exception.SSHException("Error deleting file: " + error_msg)
1308
1309 def copy_file(self, source, destination, perserve_time=True):
1310 if source[0:4]=="http":
1311 command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1312 dst=destination, src=source, dst_result=destination + ".result" )
1313 else:
1314 command = 'cp --no-preserve=mode'
1315 if perserve_time:
1316 command += ' --preserve=timestamps'
1317 command += " '{}' '{}'".format(source, destination)
1318 print self.name, ': command:', command
1319 (_, _, stderr) = self.ssh_conn.exec_command(command)
1320 error_msg = stderr.read()
1321 if len(error_msg) > 0:
1322 raise paramiko.ssh_exception.SSHException("Error copying image to local host: " + error_msg)
1323
    def copy_remote_file(self, remote_file, use_incremental):
        ''' Copy a file from the repository to local folder and recursively
        copy the backing files in case the remote file is incremental
        Read and/or modified self.localinfo['files'] that contain the
        unmodified copies of images in the local path
        params:
            remote_file: path of remote file
            use_incremental: None (leave the decision to this function), True, False
        return:
            local_file: name of local file
            qemu_info: dict with quemu information of local file
            use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
        '''

        use_incremental_out = use_incremental
        new_backing_file = None
        local_file = None
        file_from_local = True

        #in case incremental use is not decided, take the decision depending on the image
        #avoid the use of incremental if this image is already incremental
        # http sources cannot be inspected with qemu-img, so they are treated as plain files
        if remote_file[0:4] == "http":
            file_from_local = False
        if file_from_local:
            qemu_remote_info = self.qemu_get_info(remote_file)
        if use_incremental_out == None:
            use_incremental_out = not (file_from_local and 'backing file' in qemu_remote_info)
        #copy recursivelly the backing files
        # each backing file is itself cached locally (use_incremental=True)
        if file_from_local and 'backing file' in qemu_remote_info:
            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

        #check if remote file is present locally
        if use_incremental_out and remote_file in self.localinfo['files']:
            local_file = self.localinfo['files'][remote_file]
            local_file_info = self.get_file_info(local_file)
            if file_from_local:
                remote_file_info = self.get_file_info(remote_file)
            if local_file_info == None:
                local_file = None
            # indexes 4 and 5 are the size and date columns of get_file_info()
            elif file_from_local and (local_file_info[4] != remote_file_info[4] or local_file_info[5] != remote_file_info[5]):
                #local copy of file not valid because date or size are different.
                #TODO DELETE local file if this file is not used by any active virtual machine
                try:
                    self.delete_file(local_file)
                    del self.localinfo['files'][remote_file]
                except Exception:
                    pass
                local_file = None
            else: #check that the local file has the same backing file, or there are not backing at all
                qemu_info = self.qemu_get_info(local_file)
                if new_backing_file != qemu_info.get('backing file'):
                    local_file = None

        if local_file == None: #copy the file
            img_name = remote_file.split('/')[-1]
            img_local = self.image_path + '/' + img_name
            local_file = self.get_notused_filename(img_local)
            self.copy_file(remote_file, local_file, use_incremental_out)

            # register the pristine copy so future launches can reuse it
            if use_incremental_out:
                self.localinfo['files'][remote_file] = local_file
            if new_backing_file:
                self.qemu_change_backing(local_file, new_backing_file)
            qemu_info = self.qemu_get_info(local_file)

        return local_file, qemu_info, use_incremental_out
1391
    def launch_server(self, conn, server, rebuild=False, domain=None):
        """Create (or resume/rebuild) the libvirt domain of 'server' at this host.
        Copies the needed images locally, optionally creates incremental qcow2
        files, builds the domain XML and starts it.
        :param conn: open libvirt connection to this host
        :param server: instance dict; uses 'uuid', 'image_id', 'metadata', 'paused'
        :param rebuild: when True, previous incremental files are deleted first
        :param domain: existing libvirt domain; if given and not rebuilding, it is resumed
        :return: (0, 'Success') or (negative_code, error_text)
        """
        if self.test:
            time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real
            return 0, 'Success'

        server_id = server['uuid']
        paused = server.get('paused','no')
        try:
            # a live domain that is not being rebuilt only needs a resume
            if domain!=None and rebuild==False:
                domain.resume()
                #self.server_status[server_id] = 'ACTIVE'
                return 0, 'Success'

            self.db_lock.acquire()
            result, server_data = self.db.get_instance(server_id)
            self.db_lock.release()
            if result <= 0:
                print self.name, ": launch_server ERROR getting server from DB",result, server_data
                return result, server_data

            #0: get image metadata
            server_metadata = server.get('metadata', {})
            use_incremental = None

            if "use_incremental" in server_metadata:
                use_incremental = False if server_metadata["use_incremental"]=="no" else True

            server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
            if rebuild:
                #delete previous incremental files
                for file_ in server_host_files.values():
                    self.delete_file(file_['source file'] )
                server_host_files={}

            #1: obtain aditional devices (disks)
            #Put as first device the main disk
            devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
            if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
                devices += server_data['extended']['devices']

            for dev in devices:
                # devices without an image (e.g. empty disks) need no copy
                if dev['image_id'] == None:
                    continue

                self.db_lock.acquire()
                result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
                                                    WHERE={'uuid': dev['image_id']})
                self.db_lock.release()
                if result <= 0:
                    error_text = "ERROR", result, content, "when getting image", dev['image_id']
                    print self.name, ": launch_server", error_text
                    return -1, error_text
                if content[0]['metadata'] is not None:
                    dev['metadata'] = json.loads(content[0]['metadata'])
                else:
                    dev['metadata'] = {}

                # reuse a file already copied for this server, if any
                if dev['image_id'] in server_host_files:
                    dev['source file'] = server_host_files[ dev['image_id'] ] ['source file'] #local path
                    dev['file format'] = server_host_files[ dev['image_id'] ] ['file format'] # raw or qcow2
                    continue

                #2: copy image to host
                remote_file = content[0]['path']
                use_incremental_image = use_incremental
                if dev['metadata'].get("use_incremental") == "no":
                    use_incremental_image = False
                local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)

                #create incremental image
                if use_incremental_image:
                    local_file_inc = self.get_notused_filename(local_file, '.inc')
                    command = 'qemu-img create -f qcow2 '+local_file_inc+ ' -o backing_file='+ local_file
                    print 'command:', command
                    (_, _, stderr) = self.ssh_conn.exec_command(command)
                    error_msg = stderr.read()
                    if len(error_msg) > 0:
                        raise paramiko.ssh_exception.SSHException("Error creating incremental file: " + error_msg)
                    local_file = local_file_inc
                    qemu_info = {'file format':'qcow2'}

                server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}

                dev['source file'] = local_file
                dev['file format'] = qemu_info['file format']

            # remember which local files belong to this server (persisted later)
            self.localinfo['server_files'][ server['uuid'] ] = server_host_files
            self.localinfo_dirty = True

            #3 Create XML
            result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
            if result <0:
                print self.name, ": create xml server error:", xml
                return -2, xml
            print self.name, ": create xml:", xml
            atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
            #4 Start the domain
            if not rebuild: #ensures that any pending destroying server is done
                self.server_forceoff(True)
            #print self.name, ": launching instance" #, xml
            conn.createXML(xml, atribute)
            #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'

            return 0, 'Success'

        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            print self.name, ": launch_server(%s) ssh Exception: %s" %(server_id, text)
            if "SSH session not active" in text:
                self.ssh_connect()
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            print self.name, ": launch_server(%s) libvirt Exception: %s" %(server_id, text)
        except Exception as e:
            text = str(e)
            print self.name, ": launch_server(%s) Exception: %s" %(server_id, text)
        return -1, text
1509
1510 def update_servers_status(self):
1511 # # virDomainState
1512 # VIR_DOMAIN_NOSTATE = 0
1513 # VIR_DOMAIN_RUNNING = 1
1514 # VIR_DOMAIN_BLOCKED = 2
1515 # VIR_DOMAIN_PAUSED = 3
1516 # VIR_DOMAIN_SHUTDOWN = 4
1517 # VIR_DOMAIN_SHUTOFF = 5
1518 # VIR_DOMAIN_CRASHED = 6
1519 # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended
1520
1521 if self.test or len(self.server_status)==0:
1522 return
1523
1524 try:
1525 conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
1526 domains= conn.listAllDomains()
1527 domain_dict={}
1528 for domain in domains:
1529 uuid = domain.UUIDString() ;
1530 libvirt_status = domain.state()
1531 #print libvirt_status
1532 if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
1533 new_status = "ACTIVE"
1534 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
1535 new_status = "PAUSED"
1536 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
1537 new_status = "INACTIVE"
1538 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
1539 new_status = "ERROR"
1540 else:
1541 new_status = None
1542 domain_dict[uuid] = new_status
1543 conn.close()
1544 except host_thread.lvirt_module.libvirtError as e:
1545 print self.name, ": get_state() Exception '", e.get_error_message()
1546 return
1547
1548 for server_id, current_status in self.server_status.iteritems():
1549 new_status = None
1550 if server_id in domain_dict:
1551 new_status = domain_dict[server_id]
1552 else:
1553 new_status = "INACTIVE"
1554
1555 if new_status == None or new_status == current_status:
1556 continue
1557 if new_status == 'INACTIVE' and current_status == 'ERROR':
1558 continue #keep ERROR status, because obviously this machine is not running
1559 #change status
1560 print self.name, ": server ", server_id, "status change from ", current_status, "to", new_status
1561 STATUS={'progress':100, 'status':new_status}
1562 if new_status == 'ERROR':
1563 STATUS['last_error'] = 'machine has crashed'
1564 self.db_lock.acquire()
1565 r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
1566 self.db_lock.release()
1567 if r>=0:
1568 self.server_status[server_id] = new_status
1569
    def action_on_server(self, req, last_retry=True):
        '''Perform an action on a req
        Attributes:
            req: dictionary that contain:
                server properties: 'uuid','name','tenant_id','status'
                action: 'action'
                host properties: 'user', 'ip_name'
            last_retry: when False, an ERROR outcome is not written to the
                database (another retry will follow)
        return (error, text)
             0: No error. VM is updated to new state,
            -1: Invalid action, as trying to pause a PAUSED VM
            -2: Error accessing host
            -3: VM nor present
            -4: Error at DB access
            -5: Error while trying to perform action. VM is updated to ERROR
        '''
        server_id = req['uuid']
        conn = None
        new_status = None
        old_status = req['status']
        last_error = None

        if self.test:
            # test mode: simulate the action result without touching libvirt
            if 'terminate' in req['action']:
                new_status = 'deleted'
            elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
                if req['status']!='ERROR':
                    time.sleep(5)
                    new_status = 'INACTIVE'
            elif 'start' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE'
            elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE' : new_status = 'ACTIVE'
            elif 'pause' in req['action'] and req['status']!='ERROR': new_status = 'PAUSED'
            elif 'reboot' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE'
            elif 'rebuild' in req['action']:
                time.sleep(random.randint(20,150))
                new_status = 'ACTIVE'
            elif 'createImage' in req['action']:
                time.sleep(5)
                self.create_image(None, req)
        else:
            try:
                conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
                try:
                    dom = conn.lookupByUUIDString(server_id)
                except host_thread.lvirt_module.libvirtError as e:
                    text = e.get_error_message()
                    # a missing domain is not an error here: dom=None is handled per action
                    if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                        dom = None
                    else:
                        print self.name, ": action_on_server(",server_id,") libvirt exception:", text
                        raise e

                if 'forceOff' in req['action']:
                    if dom == None:
                        print self.name, ": action_on_server(",server_id,") domain not running"
                    else:
                        try:
                            print self.name, ": sending DESTROY to server", server_id
                            dom.destroy()
                        except Exception as e:
                            # destroying an already stopped domain is not an error
                            if "domain is not running" not in e.get_error_message():
                                print self.name, ": action_on_server(",server_id,") Exception while sending force off:", e.get_error_message()
                                last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                                new_status = 'ERROR'

                elif 'terminate' in req['action']:
                    if dom == None:
                        print self.name, ": action_on_server(",server_id,") domain not running"
                        new_status = 'deleted'
                    else:
                        try:
                            if req['action']['terminate'] == 'force':
                                print self.name, ": sending DESTROY to server", server_id
                                dom.destroy()
                                new_status = 'deleted'
                            else:
                                # graceful shutdown: re-check later and destroy if still alive
                                print self.name, ": sending SHUTDOWN to server", server_id
                                dom.shutdown()
                                self.pending_terminate_server.append( (time.time()+10,server_id) )
                        except Exception as e:
                            print self.name, ": action_on_server(",server_id,") Exception while destroy:", e.get_error_message()
                            last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                            new_status = 'ERROR'
                            if "domain is not running" in e.get_error_message():
                                try:
                                    dom.undefine()
                                    new_status = 'deleted'
                                except Exception:
                                    print self.name, ": action_on_server(",server_id,") Exception while undefine:", e.get_error_message()
                                    last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
                                #Exception: 'virDomainDetachDevice() failed'
                    if new_status=='deleted':
                        # drop tracked status and delete the local image copies of the server
                        if server_id in self.server_status:
                            del self.server_status[server_id]
                        if req['uuid'] in self.localinfo['server_files']:
                            for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
                                try:
                                    self.delete_file(file_['source file'])
                                except Exception:
                                    pass
                            del self.localinfo['server_files'][ req['uuid'] ]
                            self.localinfo_dirty = True

                elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
                    try:
                        if dom == None:
                            print self.name, ": action_on_server(",server_id,") domain not running"
                        else:
                            dom.shutdown()
#                        new_status = 'INACTIVE'
                        #TODO: check status for changing at database
                    except Exception as e:
                        new_status = 'ERROR'
                        print self.name, ": action_on_server(",server_id,") Exception while shutdown:", e.get_error_message()
                        last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()

                elif 'rebuild' in req['action']:
                    if dom != None:
                        dom.destroy()
                    r = self.launch_server(conn, req, True, None)
                    if r[0] <0:
                        new_status = 'ERROR'
                        last_error = r[1]
                    else:
                        new_status = 'ACTIVE'
                elif 'start' in req['action']:
                    # The instance is only create in DB but not yet at libvirt domain, needs to be create
                    rebuild = True if req['action']['start'] == 'rebuild' else False
                    r = self.launch_server(conn, req, rebuild, dom)
                    if r[0] <0:
                        new_status = 'ERROR'
                        last_error = r[1]
                    else:
                        new_status = 'ACTIVE'

                elif 'resume' in req['action']:
                    try:
                        if dom == None:
                            pass
                        else:
                            dom.resume()
#                            new_status = 'ACTIVE'
                    except Exception as e:
                        print self.name, ": action_on_server(",server_id,") Exception while resume:", e.get_error_message()

                elif 'pause' in req['action']:
                    try:
                        if dom == None:
                            pass
                        else:
                            dom.suspend()
#                            new_status = 'PAUSED'
                    except Exception as e:
                        print self.name, ": action_on_server(",server_id,") Exception while pause:", e.get_error_message()

                elif 'reboot' in req['action']:
                    try:
                        if dom == None:
                            pass
                        else:
                            dom.reboot()
                        print self.name, ": action_on_server(",server_id,") reboot:"
                        #new_status = 'ACTIVE'
                    except Exception as e:
                        print self.name, ": action_on_server(",server_id,") Exception while reboot:", e.get_error_message()
                elif 'createImage' in req['action']:
                    self.create_image(dom, req)


                conn.close()
            except host_thread.lvirt_module.libvirtError as e:
                if conn is not None: conn.close()
                text = e.get_error_message()
                new_status = "ERROR"
                last_error = text
                print self.name, ": action_on_server(",server_id,") Exception '", text
                if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                    print self.name, ": action_on_server(",server_id,") Exception removed from host"
        #end of if self.test
        if new_status == None:
            return 1

        print self.name, ": action_on_server(",server_id,") new status", new_status, last_error
        UPDATE = {'progress':100, 'status':new_status}

        if new_status=='ERROR':
            if not last_retry:  #if there will be another retry do not update database
                return -1
            elif 'terminate' in req['action']:
                #PUT a log in the database
                print self.name, ": PANIC deleting server", server_id, last_error
                self.db_lock.acquire()
                self.db.new_row('logs',
                            {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
                             'description':'PANIC deleting server from host '+self.name+': '+last_error}
                        )
                self.db_lock.release()
                if server_id in self.server_status:
                    del self.server_status[server_id]
                return -1
            else:
                UPDATE['last_error'] = last_error
        if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
            self.db_lock.acquire()
            self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
            self.server_status[server_id] = new_status
            self.db_lock.release()
        if new_status == 'ERROR':
            return -1
        return 1
1779
1780
1781 def restore_iface(self, name, mac, lib_conn=None):
1782 ''' make an ifdown, ifup to restore default parameter of na interface
1783 Params:
1784 mac: mac address of the interface
1785 lib_conn: connection to the libvirt, if None a new connection is created
1786 Return 0,None if ok, -1,text if fails
1787 '''
1788 conn=None
1789 ret = 0
1790 error_text=None
1791 if self.test:
1792 print self.name, ": restore_iface '%s' %s" % (name, mac)
1793 return 0, None
1794 try:
1795 if not lib_conn:
1796 conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
1797 else:
1798 conn = lib_conn
1799
1800 #wait to the pending VM deletion
1801 #TODO.Revise self.server_forceoff(True)
1802
1803 iface = conn.interfaceLookupByMACString(mac)
1804 iface.destroy()
1805 iface.create()
1806 print self.name, ": restore_iface '%s' %s" % (name, mac)
1807 except host_thread.lvirt_module.libvirtError as e:
1808 error_text = e.get_error_message()
1809 print self.name, ": restore_iface '%s' '%s' libvirt exception: %s" %(name, mac, error_text)
1810 ret=-1
1811 finally:
1812 if lib_conn is None and conn is not None:
1813 conn.close()
1814 return ret, error_text
1815
1816
1817 def create_image(self,dom, req):
1818 if self.test:
1819 if 'path' in req['action']['createImage']:
1820 file_dst = req['action']['createImage']['path']
1821 else:
1822 createImage=req['action']['createImage']
1823 img_name= createImage['source']['path']
1824 index=img_name.rfind('/')
1825 file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
1826 image_status='ACTIVE'
1827 else:
1828 for retry in (0,1):
1829 try:
1830 server_id = req['uuid']
1831 createImage=req['action']['createImage']
1832 file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
1833 if 'path' in req['action']['createImage']:
1834 file_dst = req['action']['createImage']['path']
1835 else:
1836 img_name= createImage['source']['path']
1837 index=img_name.rfind('/')
1838 file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')
1839
1840 self.copy_file(file_orig, file_dst)
1841 qemu_info = self.qemu_get_info(file_orig)
1842 if 'backing file' in qemu_info:
1843 for k,v in self.localinfo['files'].items():
1844 if v==qemu_info['backing file']:
1845 self.qemu_change_backing(file_dst, k)
1846 break
1847 image_status='ACTIVE'
1848 break
1849 except paramiko.ssh_exception.SSHException as e:
1850 image_status='ERROR'
1851 error_text = e.args[0]
1852 print self.name, "': create_image(",server_id,") ssh Exception:", error_text
1853 if "SSH session not active" in error_text and retry==0:
1854 self.ssh_connect()
1855 except Exception as e:
1856 image_status='ERROR'
1857 error_text = str(e)
1858 print self.name, "': create_image(",server_id,") Exception:", error_text
1859
1860 #TODO insert a last_error at database
1861 self.db_lock.acquire()
1862 self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
1863 {'uuid':req['new_image']['uuid']}, log=True)
1864 self.db_lock.release()
1865
1866 def edit_iface(self, port_id, old_net, new_net):
1867 #This action imply remove and insert interface to put proper parameters
1868 if self.test:
1869 time.sleep(1)
1870 else:
1871 #get iface details
1872 self.db_lock.acquire()
1873 r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
1874 WHERE={'port_id': port_id})
1875 self.db_lock.release()
1876 if r<0:
1877 print self.name, ": edit_iface(",port_id,") DDBB error:", c
1878 return
1879 elif r==0:
1880 print self.name, ": edit_iface(",port_id,") por not found"
1881 return
1882 port=c[0]
1883 if port["model"]!="VF":
1884 print self.name, ": edit_iface(",port_id,") ERROR model must be VF"
1885 return
1886 #create xml detach file
1887 xml=[]
1888 self.xml_level = 2
1889 xml.append("<interface type='hostdev' managed='yes'>")
1890 xml.append(" <mac address='" +port['mac']+ "'/>")
1891 xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
1892 xml.append('</interface>')
1893
1894
1895 try:
1896 conn=None
1897 conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
1898 dom = conn.lookupByUUIDString(port["instance_id"])
1899 if old_net:
1900 text="\n".join(xml)
1901 print self.name, ": edit_iface detaching SRIOV interface", text
1902 dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
1903 if new_net:
1904 xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
1905 self.xml_level = 1
1906 xml.append(self.pci2xml(port.get('vpci',None)) )
1907 xml.append('</interface>')
1908 text="\n".join(xml)
1909 print self.name, ": edit_iface attaching SRIOV interface", text
1910 dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
1911
1912 except host_thread.lvirt_module.libvirtError as e:
1913 text = e.get_error_message()
1914 print self.name, ": edit_iface(",port["instance_id"],") libvirt exception:", text
1915
1916 finally:
1917 if conn is not None: conn.close()
1918
1919
def create_server(server, db, db_lock, only_of_ports):
    """Compute and reserve the host resources needed to deploy a server (VM).

    Builds the requirements (ram, vcpus, NUMA topology, dataplane interfaces)
    from the server descriptor and its flavor, asks the database for a
    suitable host/numa, performs the cpu pinning and the SR-IOV/passthrough
    port assignation, and links control interfaces with their bridge networks.

    Params:
        server: server descriptor ('flavor', 'networks', optional 'extended',
            optional 'host_id', ...). 'host_id' is written into it on success.
        db, db_lock: database class and lock to use it in exclusion
        only_of_ports: True to use only openflow-connected dataplane ports
    Returns:
        (0, resources) on success; 'resources' is the dictionary to insert at
            the instances related tables
        (negative, error_text) on failure
    """
    extended = server.get('extended', None)

    requirements = {}
    requirements['numa'] = {'memory': 0, 'proc_req_type': 'threads', 'proc_req_nb': 0, 'port_list': [],
                            'sriov_list': []}
    requirements['ram'] = server['flavor'].get('ram', 0)
    if requirements['ram'] == None:
        requirements['ram'] = 0
    requirements['vcpus'] = server['flavor'].get('vcpus', 0)
    if requirements['vcpus'] == None:
        requirements['vcpus'] = 0
    # If extended is not defined get requirements from flavor
    if extended is None:
        # If extended is defined in flavor convert to dictionary and use it
        if 'extended' in server['flavor'] and server['flavor']['extended'] != None:
            json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
            extended = json.loads(json_acceptable_string)
        else:
            extended = None

    # For simplicity only one-numa VMs are supported in the initial implementation
    if extended != None:
        numas = extended.get('numas', [])
        if len(numas) > 1:
            return (-2, "Multi-NUMA VMs are not supported yet")
        # elif len(numas)<1:
        #     return (-1, "At least one numa must be specified")

        # a for loop is used in order to be ready to multi-NUMA VMs
        request = []
        for numa in numas:
            numa_req = {}
            numa_req['memory'] = numa.get('memory', 0)
            if 'cores' in numa:
                numa_req['proc_req_nb'] = numa['cores']  # number of cores or threads to be reserved
                numa_req['proc_req_type'] = 'cores'  # indicates whether cores or threads must be reserved
                numa_req['proc_req_list'] = numa.get('cores-id', None)  # list of ids to be assigned to the cores or threads
            elif 'paired-threads' in numa:
                numa_req['proc_req_nb'] = numa['paired-threads']
                numa_req['proc_req_type'] = 'paired-threads'
                numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
            elif 'threads' in numa:
                numa_req['proc_req_nb'] = numa['threads']
                numa_req['proc_req_type'] = 'threads'
                numa_req['proc_req_list'] = numa.get('threads-id', None)
            else:
                numa_req['proc_req_nb'] = 0  # by default
                numa_req['proc_req_type'] = 'threads'

            # Generate a list of sriov and another for physical interfaces
            interfaces = numa.get('interfaces', [])
            sriov_list = []
            port_list = []
            for iface in interfaces:
                iface['bandwidth'] = int(iface['bandwidth'])
                if iface['dedicated'][:3] == 'yes':
                    port_list.append(iface)
                else:
                    sriov_list.append(iface)

            # Save lists ordered from more restrictive to less bw requirements
            numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
            numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)

            request.append(numa_req)

        # Search in db for an appropriate numa for each requested numa
        # at the moment multi-NUMA VMs are not supported
        if len(request) > 0:
            requirements['numa'].update(request[0])
        if requirements['numa']['memory'] > 0:
            requirements['ram'] = 0  # by the moment asking for both Huge and non huge pages memory is incompatible
        elif requirements['ram'] == 0:
            return (-1, "Memory information not set neither at extended field nor at ram")
        if requirements['numa']['proc_req_nb'] > 0:
            requirements['vcpus'] = 0  # by the moment asking for both isolated and non isolated cpus is incompatible
        elif requirements['vcpus'] == 0:
            return (-1, "Processor information not set neither at extended field nor at vcpus")

    db_lock.acquire()
    result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
    db_lock.release()

    if result == -1:
        return (-1, content)

    numa_id = content['numa_id']
    host_id = content['host_id']

    # obtain threads_id and calculate pinning
    cpu_pinning = []
    reserved_threads = []
    if requirements['numa']['proc_req_nb'] > 0:
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_core',
                                       SELECT=('id', 'core_id', 'thread_id'),
                                       WHERE={'numa_id': numa_id, 'instance_id': None, 'status': 'ok'})
        db_lock.release()
        if result <= 0:
            print(content)
            return -1, content

        # convert rows to a dictionary indexed by core_id
        cores_dict = {}
        for row in content:
            if not row['core_id'] in cores_dict:
                cores_dict[row['core_id']] = []
            cores_dict[row['core_id']].append([row['thread_id'], row['id']])

        # In case full cores are requested
        paired = 'N'
        if requirements['numa']['proc_req_type'] == 'cores':
            # Get/create the list of the vcpu_ids
            vcpu_id_list = requirements['numa']['proc_req_list']
            if vcpu_id_list == None:
                vcpu_id_list = list(range(0, int(requirements['numa']['proc_req_nb'])))

            for threads in cores_dict.values():
                # we need full cores
                if len(threads) != 2:
                    continue

                # set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                # reserve the second thread so it is not used
                reserved_threads.append(threads[1][1])

                if len(vcpu_id_list) == 0:
                    break

        # In case paired threads are requested
        elif requirements['numa']['proc_req_type'] == 'paired-threads':
            paired = 'Y'
            # Get/create the list of the vcpu_ids
            if requirements['numa']['proc_req_list'] != None:
                vcpu_id_list = []
                for pair in requirements['numa']['proc_req_list']:
                    if len(pair) != 2:
                        # removed an unreachable bare 'return' that followed this statement
                        return -1, "Field paired-threads-id not properly specified"
                    vcpu_id_list.append(pair[0])
                    vcpu_id_list.append(pair[1])
            else:
                vcpu_id_list = list(range(0, 2 * int(requirements['numa']['proc_req_nb'])))

            for threads in cores_dict.values():
                # we need full cores
                if len(threads) != 2:
                    continue
                # set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                # set pinning for the second thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])

                if len(vcpu_id_list) == 0:
                    break

        # In case normal threads are requested
        elif requirements['numa']['proc_req_type'] == 'threads':
            # Get/create the list of the vcpu_ids
            vcpu_id_list = requirements['numa']['proc_req_list']
            if vcpu_id_list == None:
                vcpu_id_list = list(range(0, int(requirements['numa']['proc_req_nb'])))

            # visit half-used cores first (fewer free threads) to keep full cores available
            for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
                threads = cores_dict[threads_index]
                # set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                # if exists, set pinning for the second thread
                if len(threads) == 2 and len(vcpu_id_list) != 0:
                    cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])

                if len(vcpu_id_list) == 0:
                    break

    # Get the source pci addresses for the selected numa
    used_sriov_ports = []
    for port in requirements['numa']['sriov_list']:
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),
                                       WHERE={'numa_id': numa_id, 'root_id': port['port_id'], 'port_id': None,
                                              'Mbps_used': 0})
        db_lock.release()
        if result <= 0:
            print(content)
            return -1, content
        for row in content:
            if row['id'] in used_sriov_ports or row['id'] == port['port_id']:
                continue
            port['pci'] = row['pci']
            if 'mac_address' not in port:
                port['mac_address'] = row['mac']
            del port['mac']
            port['port_id'] = row['id']
            port['Mbps_used'] = port['bandwidth']
            used_sriov_ports.append(row['id'])
            break

    for port in requirements['numa']['port_list']:
        port['Mbps_used'] = None
        if port['dedicated'] != "yes:sriov":
            port['mac_address'] = port['mac']
            del port['mac']
            continue
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),
                                       WHERE={'numa_id': numa_id, 'root_id': port['port_id'], 'port_id': None,
                                              'Mbps_used': 0})
        db_lock.release()
        if result <= 0:
            print(content)
            return -1, content
        # NOTE(review): Mbps is taken from the first row even though the loop
        # below may pick a different row — presumably all rows of a root port
        # share the same Mbps; confirm against the resources_port schema
        port['Mbps_used'] = content[0]['Mbps']
        for row in content:
            if row['id'] in used_sriov_ports or row['id'] == port['port_id']:
                continue
            port['pci'] = row['pci']
            if 'mac_address' not in port:
                port['mac_address'] = row['mac']  # mac cannot be set to passthrough ports
            del port['mac']
            port['port_id'] = row['id']
            used_sriov_ports.append(row['id'])
            break

    server['host_id'] = host_id

    # Generate dictionary for saving in db the instance resources
    resources = {}
    resources['bridged-ifaces'] = []

    numa_dict = {}
    numa_dict['interfaces'] = []

    numa_dict['interfaces'] += requirements['numa']['port_list']
    numa_dict['interfaces'] += requirements['numa']['sriov_list']

    # Check bridge information
    unified_dataplane_iface = []
    unified_dataplane_iface += requirements['numa']['port_list']
    unified_dataplane_iface += requirements['numa']['sriov_list']

    for control_iface in server.get('networks', []):
        control_iface['net_id'] = control_iface.pop('uuid')
        # Get the bridge name
        db_lock.acquire()
        result, content = db.get_table(FROM='nets',
                                       SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp',
                                               'dhcp_first_ip', 'dhcp_last_ip', 'cidr'),
                                       WHERE={'uuid': control_iface['net_id']})
        db_lock.release()
        if result < 0:
            # TODO: database error is silently ignored; the interface is left unconfigured
            pass
        elif result == 0:
            return -1, "Error at field networks: Not found any network with uuid %s" % control_iface['net_id']
        else:
            network = content[0]
            if control_iface.get("type", 'virtual') == 'virtual':
                if network['type'] != 'bridge_data' and network['type'] != 'bridge_man':
                    return -1, "Error at field networks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
                resources['bridged-ifaces'].append(control_iface)
                if network.get("provider") and network["provider"][0:3] == "OVS":
                    control_iface["type"] = "instance:ovs"
                else:
                    control_iface["type"] = "instance:bridge"
                if network.get("vlan"):
                    control_iface["vlan"] = network["vlan"]

                if network.get("enable_dhcp") == 'true':
                    control_iface["enable_dhcp"] = network.get("enable_dhcp")
                    control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
                    control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
                    control_iface["cidr"] = network["cidr"]
            else:
                if network['type'] != 'data' and network['type'] != 'ptp':
                    return -1, "Error at field networks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
                # dataplane interface, look for it in the numa tree and assign this network
                iface_found = False
                for dataplane_iface in numa_dict['interfaces']:
                    if dataplane_iface['name'] == control_iface.get("name"):
                        if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
                           (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
                           (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared"):
                            return -1, "Error at field networks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
                                       (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
                        dataplane_iface['uuid'] = control_iface['net_id']
                        if dataplane_iface['dedicated'] == "no":
                            dataplane_iface['vlan'] = network['vlan']
                        if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
                            dataplane_iface['mac_address'] = control_iface.get("mac_address")
                        if control_iface.get("vpci"):
                            dataplane_iface['vpci'] = control_iface.get("vpci")
                        iface_found = True
                        break
                if not iface_found:
                    return -1, "Error at field networks: interface name %s from network not found at flavor" % control_iface.get("name")

    resources['host_id'] = host_id
    resources['image_id'] = server['image_id']
    resources['flavor_id'] = server['flavor_id']
    resources['tenant_id'] = server['tenant_id']
    resources['ram'] = requirements['ram']
    resources['vcpus'] = requirements['vcpus']
    resources['status'] = 'CREATING'

    if 'description' in server:
        resources['description'] = server['description']
    if 'name' in server:
        resources['name'] = server['name']

    resources['extended'] = {}  # optional
    resources['extended']['numas'] = []
    numa_dict['numa_id'] = numa_id
    numa_dict['memory'] = requirements['numa']['memory']
    numa_dict['cores'] = []

    for core in cpu_pinning:
        numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
    for core in reserved_threads:
        numa_dict['cores'].append({'id': core})
    resources['extended']['numas'].append(numa_dict)
    if extended != None and 'devices' in extended:  # TODO allow extra devices without numa
        resources['extended']['devices'] = extended['devices']

    print('===================================={')
    print(json.dumps(resources, indent=4))
    print('====================================}')

    return 0, resources
2268