# get_of_rules corrected
# [osm/openvim.git] / host_thread.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
24 '''
25 This is thread that interact with the host and the libvirt to manage VM
26 One thread will be launched per host
27 '''
28 __author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__ = "$10-jul-2014 12:07:15$"
30
31 import json
32 import yaml
33 import threading
34 import time
35 import Queue
36 import paramiko
37 from jsonschema import validate as js_v, exceptions as js_e
38 #import libvirt
39 import imp
40 from vim_schema import localinfo_schema, hostinfo_schema
41 import random
42 import os
43
44 #TODO: insert a logging system
45
46 # from logging import Logger
47 # import auxiliary_functions as af
48
49 # TODO: insert a logging system
50
51
52 class host_thread(threading.Thread):
53 lvirt_module = None
54
55 def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
56 develop_bridge_iface):
57 '''Init a thread.
58 Arguments:
59 'id' number of thead
60 'name' name of thread
61 'host','user': host ip or name to manage and user
62 'db', 'db_lock': database class and lock to use it in exclusion
63 '''
64 threading.Thread.__init__(self)
65 self.name = name
66 self.host = host
67 self.user = user
68 self.db = db
69 self.db_lock = db_lock
70 self.test = test
71
72 if not test and not host_thread.lvirt_module:
73 try:
74 module_info = imp.find_module("libvirt")
75 host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
76 except (IOError, ImportError) as e:
77 raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))
78
79
80 self.develop_mode = develop_mode
81 self.develop_bridge_iface = develop_bridge_iface
82 self.image_path = image_path
83 self.host_id = host_id
84 self.version = version
85
86 self.xml_level = 0
87 #self.pending ={}
88
89 self.server_status = {} #dictionary with pairs server_uuid:server_status
90 self.pending_terminate_server =[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
91 self.next_update_server_status = 0 #time when must be check servers status
92
93 self.hostinfo = None
94
95 self.queueLock = threading.Lock()
96 self.taskQueue = Queue.Queue(2000)
97 self.ssh_conn = None
98
99 def ssh_connect(self):
100 try:
101 #Connect SSH
102 self.ssh_conn = paramiko.SSHClient()
103 self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
104 self.ssh_conn.load_system_host_keys()
105 self.ssh_conn.connect(self.host, username=self.user, timeout=10) #, None)
106 except paramiko.ssh_exception.SSHException as e:
107 text = e.args[0]
108 print self.name, ": ssh_connect ssh Exception:", text
109
110 def load_localinfo(self):
111 if not self.test:
112 try:
113 #Connect SSH
114 self.ssh_connect()
115
116 command = 'mkdir -p ' + self.image_path
117 #print self.name, ': command:', command
118 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
119 content = stderr.read()
120 if len(content) > 0:
121 print self.name, ': command:', command, "stderr:", content
122
123 command = 'cat ' + self.image_path + '/.openvim.yaml'
124 #print self.name, ': command:', command
125 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
126 content = stdout.read()
127 if len(content) == 0:
128 print self.name, ': command:', command, "stderr:", stderr.read()
129 raise paramiko.ssh_exception.SSHException("Error empty file ")
130 self.localinfo = yaml.load(content)
131 js_v(self.localinfo, localinfo_schema)
132 self.localinfo_dirty=False
133 if 'server_files' not in self.localinfo:
134 self.localinfo['server_files'] = {}
135 print self.name, ': localinfo load from host'
136 return
137
138 except paramiko.ssh_exception.SSHException as e:
139 text = e.args[0]
140 print self.name, ": load_localinfo ssh Exception:", text
141 except host_thread.lvirt_module.libvirtError as e:
142 text = e.get_error_message()
143 print self.name, ": load_localinfo libvirt Exception:", text
144 except yaml.YAMLError as exc:
145 text = ""
146 if hasattr(exc, 'problem_mark'):
147 mark = exc.problem_mark
148 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
149 print self.name, ": load_localinfo yaml format Exception", text
150 except js_e.ValidationError as e:
151 text = ""
152 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
153 print self.name, ": load_localinfo format Exception:", text, e.message
154 except Exception as e:
155 text = str(e)
156 print self.name, ": load_localinfo Exception:", text
157
158 #not loaded, insert a default data and force saving by activating dirty flag
159 self.localinfo = {'files':{}, 'server_files':{} }
160 #self.localinfo_dirty=True
161 self.localinfo_dirty=False
162
163 def load_hostinfo(self):
164 if self.test:
165 return;
166 try:
167 #Connect SSH
168 self.ssh_connect()
169
170
171 command = 'cat ' + self.image_path + '/hostinfo.yaml'
172 #print self.name, ': command:', command
173 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
174 content = stdout.read()
175 if len(content) == 0:
176 print self.name, ': command:', command, "stderr:", stderr.read()
177 raise paramiko.ssh_exception.SSHException("Error empty file ")
178 self.hostinfo = yaml.load(content)
179 js_v(self.hostinfo, hostinfo_schema)
180 print self.name, ': hostlinfo load from host', self.hostinfo
181 return
182
183 except paramiko.ssh_exception.SSHException as e:
184 text = e.args[0]
185 print self.name, ": load_hostinfo ssh Exception:", text
186 except host_thread.lvirt_module.libvirtError as e:
187 text = e.get_error_message()
188 print self.name, ": load_hostinfo libvirt Exception:", text
189 except yaml.YAMLError as exc:
190 text = ""
191 if hasattr(exc, 'problem_mark'):
192 mark = exc.problem_mark
193 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
194 print self.name, ": load_hostinfo yaml format Exception", text
195 except js_e.ValidationError as e:
196 text = ""
197 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
198 print self.name, ": load_hostinfo format Exception:", text, e.message
199 except Exception as e:
200 text = str(e)
201 print self.name, ": load_hostinfo Exception:", text
202
203 #not loaded, insert a default data
204 self.hostinfo = None
205
206 def save_localinfo(self, tries=3):
207 if self.test:
208 self.localinfo_dirty = False
209 return
210
211 while tries>=0:
212 tries-=1
213
214 try:
215 command = 'cat > ' + self.image_path + '/.openvim.yaml'
216 print self.name, ': command:', command
217 (stdin, _, _) = self.ssh_conn.exec_command(command)
218 yaml.safe_dump(self.localinfo, stdin, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
219 self.localinfo_dirty = False
220 break #while tries
221
222 except paramiko.ssh_exception.SSHException as e:
223 text = e.args[0]
224 print self.name, ": save_localinfo ssh Exception:", text
225 if "SSH session not active" in text:
226 self.ssh_connect()
227 except host_thread.lvirt_module.libvirtError as e:
228 text = e.get_error_message()
229 print self.name, ": save_localinfo libvirt Exception:", text
230 except yaml.YAMLError as exc:
231 text = ""
232 if hasattr(exc, 'problem_mark'):
233 mark = exc.problem_mark
234 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
235 print self.name, ": save_localinfo yaml format Exception", text
236 except Exception as e:
237 text = str(e)
238 print self.name, ": save_localinfo Exception:", text
239
240 def load_servers_from_db(self):
241 self.db_lock.acquire()
242 r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
243 self.db_lock.release()
244
245 self.server_status = {}
246 if r<0:
247 print self.name, ": Error getting data from database:", c
248 return
249 for server in c:
250 self.server_status[ server['uuid'] ] = server['status']
251
252 #convert from old version to new one
253 if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
254 server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
255 if server_files_dict['source file'][-5:] == 'qcow2':
256 server_files_dict['file format'] = 'qcow2'
257
258 self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
259 if 'inc_files' in self.localinfo:
260 del self.localinfo['inc_files']
261 self.localinfo_dirty = True
262
263 def delete_unused_files(self):
264 '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
265 Deletes unused entries at self.loacalinfo and the corresponding local files.
266 The only reason for this mismatch is the manual deletion of instances (VM) at database
267 '''
268 if self.test:
269 return
270 for uuid,images in self.localinfo['server_files'].items():
271 if uuid not in self.server_status:
272 for localfile in images.values():
273 try:
274 print self.name, ": deleting file '%s' of unused server '%s'" %(localfile['source file'], uuid)
275 self.delete_file(localfile['source file'])
276 except paramiko.ssh_exception.SSHException as e:
277 print self.name, ": Exception deleting file '%s': %s" %(localfile['source file'], str(e))
278 del self.localinfo['server_files'][uuid]
279 self.localinfo_dirty = True
280
281 def insert_task(self, task, *aditional):
282 try:
283 self.queueLock.acquire()
284 task = self.taskQueue.put( (task,) + aditional, timeout=5)
285 self.queueLock.release()
286 return 1, None
287 except Queue.Full:
288 return -1, "timeout inserting a task over host " + self.name
289
290 def run(self):
291 while True:
292 self.load_localinfo()
293 self.load_hostinfo()
294 self.load_servers_from_db()
295 self.delete_unused_files()
296 while True:
297 self.queueLock.acquire()
298 if not self.taskQueue.empty():
299 task = self.taskQueue.get()
300 else:
301 task = None
302 self.queueLock.release()
303
304 if task is None:
305 now=time.time()
306 if self.localinfo_dirty:
307 self.save_localinfo()
308 elif self.next_update_server_status < now:
309 self.update_servers_status()
310 self.next_update_server_status = now + 5
311 elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
312 self.server_forceoff()
313 else:
314 time.sleep(1)
315 continue
316
317 if task[0] == 'instance':
318 print self.name, ": processing task instance", task[1]['action']
319 retry=0
320 while retry <2:
321 retry += 1
322 r=self.action_on_server(task[1], retry==2)
323 if r>=0:
324 break
325 elif task[0] == 'image':
326 pass
327 elif task[0] == 'exit':
328 print self.name, ": processing task exit"
329 self.terminate()
330 return 0
331 elif task[0] == 'reload':
332 print self.name, ": processing task reload terminating and relaunching"
333 self.terminate()
334 break
335 elif task[0] == 'edit-iface':
336 print self.name, ": processing task edit-iface port=%s, old_net=%s, new_net=%s" % (task[1], task[2], task[3])
337 self.edit_iface(task[1], task[2], task[3])
338 elif task[0] == 'restore-iface':
339 print self.name, ": processing task restore-iface %s mac=%s" % (task[1], task[2])
340 self.restore_iface(task[1], task[2])
341 elif task[0] == 'new-ovsbridge':
342 print self.name, ": Creating compute OVS bridge"
343 self.create_ovs_bridge()
344 elif task[0] == 'new-vxlan':
345 print self.name, ": Creating vxlan tunnel=" + task[1] + ", remote ip=" + task[2]
346 self.create_ovs_vxlan_tunnel(task[1], task[2])
347 elif task[0] == 'del-ovsbridge':
348 print self.name, ": Deleting OVS bridge"
349 self.delete_ovs_bridge()
350 elif task[0] == 'del-vxlan':
351 print self.name, ": Deleting vxlan " + task[1] + " tunnel"
352 self.delete_ovs_vxlan_tunnel(task[1])
353 elif task[0] == 'create-ovs-bridge-port':
354 print self.name, ": Adding port ovim-" + task[1] + " to OVS bridge"
355 self.create_ovs_bridge_port(task[1])
356 elif task[0] == 'del-ovs-port':
357 print self.name, ": Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2])
358 self.delete_bridge_port_attached_to_ovs(task[1], task[2])
359 else:
360 print self.name, ": unknown task", task
361
362 def server_forceoff(self, wait_until_finished=False):
363 while len(self.pending_terminate_server)>0:
364 now = time.time()
365 if self.pending_terminate_server[0][0]>now:
366 if wait_until_finished:
367 time.sleep(1)
368 continue
369 else:
370 return
371 req={'uuid':self.pending_terminate_server[0][1],
372 'action':{'terminate':'force'},
373 'status': None
374 }
375 self.action_on_server(req)
376 self.pending_terminate_server.pop(0)
377
378 def terminate(self):
379 try:
380 self.server_forceoff(True)
381 if self.localinfo_dirty:
382 self.save_localinfo()
383 if not self.test:
384 self.ssh_conn.close()
385 except Exception as e:
386 text = str(e)
387 print self.name, ": terminate Exception:", text
388 print self.name, ": exit from host_thread"
389
390 def get_local_iface_name(self, generic_name):
391 if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
392 return self.hostinfo["iface_names"][generic_name]
393 return generic_name
394
395 def create_xml_server(self, server, dev_list, server_metadata={}):
396 """Function that implements the generation of the VM XML definition.
397 Additional devices are in dev_list list
398 The main disk is upon dev_list[0]"""
399
400 #get if operating system is Windows
401 windows_os = False
402 os_type = server_metadata.get('os_type', None)
403 if os_type == None and 'metadata' in dev_list[0]:
404 os_type = dev_list[0]['metadata'].get('os_type', None)
405 if os_type != None and os_type.lower() == "windows":
406 windows_os = True
407 #get type of hard disk bus
408 bus_ide = True if windows_os else False
409 bus = server_metadata.get('bus', None)
410 if bus == None and 'metadata' in dev_list[0]:
411 bus = dev_list[0]['metadata'].get('bus', None)
412 if bus != None:
413 bus_ide = True if bus=='ide' else False
414
415 self.xml_level = 0
416
417 text = "<domain type='kvm'>"
418 #get topology
419 topo = server_metadata.get('topology', None)
420 if topo == None and 'metadata' in dev_list[0]:
421 topo = dev_list[0]['metadata'].get('topology', None)
422 #name
423 name = server.get('name','') + "_" + server['uuid']
424 name = name[:58] #qemu impose a length limit of 59 chars or not start. Using 58
425 text += self.inc_tab() + "<name>" + name+ "</name>"
426 #uuid
427 text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>"
428
429 numa={}
430 if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
431 numa = server['extended']['numas'][0]
432 #memory
433 use_huge = False
434 memory = int(numa.get('memory',0))*1024*1024 #in KiB
435 if memory==0:
436 memory = int(server['ram'])*1024;
437 else:
438 if not self.develop_mode:
439 use_huge = True
440 if memory==0:
441 return -1, 'No memory assigned to instance'
442 memory = str(memory)
443 text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>"
444 text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
445 if use_huge:
446 text += self.tab()+'<memoryBacking>'+ \
447 self.inc_tab() + '<hugepages/>'+ \
448 self.dec_tab()+ '</memoryBacking>'
449
450 #cpu
451 use_cpu_pinning=False
452 vcpus = int(server.get("vcpus",0))
453 cpu_pinning = []
454 if 'cores-source' in numa:
455 use_cpu_pinning=True
456 for index in range(0, len(numa['cores-source'])):
457 cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
458 vcpus += 1
459 if 'threads-source' in numa:
460 use_cpu_pinning=True
461 for index in range(0, len(numa['threads-source'])):
462 cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
463 vcpus += 1
464 if 'paired-threads-source' in numa:
465 use_cpu_pinning=True
466 for index in range(0, len(numa['paired-threads-source'])):
467 cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
468 cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
469 vcpus += 2
470
471 if use_cpu_pinning and not self.develop_mode:
472 text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
473 self.tab()+'<cputune>'
474 self.xml_level += 1
475 for i in range(0, len(cpu_pinning)):
476 text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
477 text += self.dec_tab()+'</cputune>'+ \
478 self.tab() + '<numatune>' +\
479 self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
480 self.dec_tab() + '</numatune>'
481 else:
482 if vcpus==0:
483 return -1, "Instance without number of cpus"
484 text += self.tab()+"<vcpu>" + str(vcpus) + "</vcpu>"
485
486 #boot
487 boot_cdrom = False
488 for dev in dev_list:
489 if dev['type']=='cdrom' :
490 boot_cdrom = True
491 break
492 text += self.tab()+ '<os>' + \
493 self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
494 if boot_cdrom:
495 text += self.tab() + "<boot dev='cdrom'/>"
496 text += self.tab() + "<boot dev='hd'/>" + \
497 self.dec_tab()+'</os>'
498 #features
499 text += self.tab()+'<features>'+\
500 self.inc_tab()+'<acpi/>' +\
501 self.tab()+'<apic/>' +\
502 self.tab()+'<pae/>'+ \
503 self.dec_tab() +'</features>'
504 if windows_os or topo=="oneSocket":
505 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>"% vcpus
506 else:
507 text += self.tab() + "<cpu mode='host-model'></cpu>"
508 text += self.tab() + "<clock offset='utc'/>" +\
509 self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
510 self.tab() + "<on_reboot>restart</on_reboot>" + \
511 self.tab() + "<on_crash>restart</on_crash>"
512 text += self.tab() + "<devices>" + \
513 self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
514 self.tab() + "<serial type='pty'>" +\
515 self.inc_tab() + "<target port='0'/>" + \
516 self.dec_tab() + "</serial>" +\
517 self.tab() + "<console type='pty'>" + \
518 self.inc_tab()+ "<target type='serial' port='0'/>" + \
519 self.dec_tab()+'</console>'
520 if windows_os:
521 text += self.tab() + "<controller type='usb' index='0'/>" + \
522 self.tab() + "<controller type='ide' index='0'/>" + \
523 self.tab() + "<input type='mouse' bus='ps2'/>" + \
524 self.tab() + "<sound model='ich6'/>" + \
525 self.tab() + "<video>" + \
526 self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
527 self.dec_tab() + "</video>" + \
528 self.tab() + "<memballoon model='virtio'/>" + \
529 self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
530
531 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
532 #> self.dec_tab()+'</hostdev>\n' +\
533 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
534 if windows_os:
535 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
536 else:
537 #If image contains 'GRAPH' include graphics
538 #if 'GRAPH' in image:
539 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
540 self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
541 self.dec_tab() + "</graphics>"
542
543 vd_index = 'a'
544 for dev in dev_list:
545 bus_ide_dev = bus_ide
546 if dev['type']=='cdrom' or dev['type']=='disk':
547 if dev['type']=='cdrom':
548 bus_ide_dev = True
549 text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
550 if 'file format' in dev:
551 text += self.inc_tab() + "<driver name='qemu' type='" +dev['file format']+ "' cache='writethrough'/>"
552 if 'source file' in dev:
553 text += self.tab() + "<source file='" +dev['source file']+ "'/>"
554 #elif v['type'] == 'block':
555 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
556 #else:
557 # return -1, 'Unknown disk type ' + v['type']
558 vpci = dev.get('vpci',None)
559 if vpci == None:
560 vpci = dev['metadata'].get('vpci',None)
561 text += self.pci2xml(vpci)
562
563 if bus_ide_dev:
564 text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>" #TODO allows several type of disks
565 else:
566 text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>"
567 text += self.dec_tab() + '</disk>'
568 vd_index = chr(ord(vd_index)+1)
569 elif dev['type']=='xml':
570 dev_text = dev['xml']
571 if 'vpci' in dev:
572 dev_text = dev_text.replace('__vpci__', dev['vpci'])
573 if 'source file' in dev:
574 dev_text = dev_text.replace('__file__', dev['source file'])
575 if 'file format' in dev:
576 dev_text = dev_text.replace('__format__', dev['source file'])
577 if '__dev__' in dev_text:
578 dev_text = dev_text.replace('__dev__', vd_index)
579 vd_index = chr(ord(vd_index)+1)
580 text += dev_text
581 else:
582 return -1, 'Unknown device type ' + dev['type']
583
584 net_nb=0
585 bridge_interfaces = server.get('networks', [])
586 for v in bridge_interfaces:
587 #Get the brifge name
588 self.db_lock.acquire()
589 result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
590 self.db_lock.release()
591 if result <= 0:
592 print "create_xml_server ERROR getting nets",result, content
593 return -1, content
594 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
595 #I know it is not secure
596 #for v in sorted(desc['network interfaces'].itervalues()):
597 model = v.get("model", None)
598 if content[0]['provider']=='default':
599 text += self.tab() + "<interface type='network'>" + \
600 self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
601 elif content[0]['provider'][0:7]=='macvtap':
602 text += self.tab()+"<interface type='direct'>" + \
603 self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
604 self.tab() + "<target dev='macvtap0'/>"
605 if windows_os:
606 text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
607 elif model==None:
608 model = "virtio"
609 elif content[0]['provider'][0:6]=='bridge':
610 text += self.tab() + "<interface type='bridge'>" + \
611 self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
612 if windows_os:
613 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
614 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
615 elif model==None:
616 model = "virtio"
617 elif content[0]['provider'][0:3] == "OVS":
618 vlan = content[0]['provider'].replace('OVS:', '')
619 text += self.tab() + "<interface type='bridge'>" + \
620 self.inc_tab() + "<source bridge='ovim-" + vlan + "'/>"
621 else:
622 return -1, 'Unknown Bridge net provider ' + content[0]['provider']
623 if model!=None:
624 text += self.tab() + "<model type='" +model+ "'/>"
625 if v.get('mac_address', None) != None:
626 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
627 text += self.pci2xml(v.get('vpci',None))
628 text += self.dec_tab()+'</interface>'
629
630 net_nb += 1
631
632 interfaces = numa.get('interfaces', [])
633
634 net_nb=0
635 for v in interfaces:
636 if self.develop_mode: #map these interfaces to bridges
637 text += self.tab() + "<interface type='bridge'>" + \
638 self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
639 if windows_os:
640 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
641 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
642 else:
643 text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
644 if v.get('mac_address', None) != None:
645 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
646 text += self.pci2xml(v.get('vpci',None))
647 text += self.dec_tab()+'</interface>'
648 continue
649
650 if v['dedicated'] == 'yes': #passthrought
651 text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
652 self.inc_tab() + "<source>"
653 self.inc_tab()
654 text += self.pci2xml(v['source'])
655 text += self.dec_tab()+'</source>'
656 text += self.pci2xml(v.get('vpci',None))
657 if windows_os:
658 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
659 text += self.dec_tab()+'</hostdev>'
660 net_nb += 1
661 else: #sriov_interfaces
662 #skip not connected interfaces
663 if v.get("net_id") == None:
664 continue
665 text += self.tab() + "<interface type='hostdev' managed='yes'>"
666 self.inc_tab()
667 if v.get('mac_address', None) != None:
668 text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
669 text+= self.tab()+'<source>'
670 self.inc_tab()
671 text += self.pci2xml(v['source'])
672 text += self.dec_tab()+'</source>'
673 if v.get('vlan',None) != None:
674 text += self.tab() + "<vlan> <tag id='" + str(v['vlan']) + "'/> </vlan>"
675 text += self.pci2xml(v.get('vpci',None))
676 if windows_os:
677 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
678 text += self.dec_tab()+'</interface>'
679
680
681 text += self.dec_tab()+'</devices>'+\
682 self.dec_tab()+'</domain>'
683 return 0, text
684
685 def pci2xml(self, pci):
686 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
687 alows an empty pci text'''
688 if pci is None:
689 return ""
690 first_part = pci.split(':')
691 second_part = first_part[2].split('.')
692 return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
693 "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
694 "' function='0x" + second_part[1] + "'/>"
695
696 def tab(self):
697 """Return indentation according to xml_level"""
698 return "\n" + (' '*self.xml_level)
699
700 def inc_tab(self):
701 """Increment and return indentation according to xml_level"""
702 self.xml_level += 1
703 return self.tab()
704
705 def dec_tab(self):
706 """Decrement and return indentation according to xml_level"""
707 self.xml_level -= 1
708 return self.tab()
709
710 def create_ovs_bridge(self):
711 """
712 Create a bridge in compute OVS to allocate VMs
713 :return: True if success
714 """
715 if self.test:
716 return
717 command = 'sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true'
718 print self.name, ': command:', command
719 (_, stdout, _) = self.ssh_conn.exec_command(command)
720 content = stdout.read()
721 if len(content) == 0:
722 return True
723 else:
724 return False
725
726 def delete_port_to_ovs_bridge(self, vlan, net_uuid):
727 """
728 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
729 :param vlan: vlan port id
730 :param net_uuid: network id
731 :return:
732 """
733
734 if self.test:
735 return
736
737 port_name = 'ovim-' + vlan
738 command = 'sudo ovs-vsctl del-port br-int ' + port_name
739 print self.name, ': command:', command
740 (_, stdout, _) = self.ssh_conn.exec_command(command)
741 content = stdout.read()
742 if len(content) == 0:
743 return True
744 else:
745 return False
746
747 def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
748 """
749 Delete dhcp server process lining in namespace
750 :param vlan: segmentation id
751 :param net_uuid: network uuid
752 :param dhcp_path: conf fiel path that live in namespace side
753 :return:
754 """
755 if self.test:
756 return
757 if not self.is_dhcp_port_free(vlan, net_uuid):
758 return True
759
760 net_namespace = 'ovim-' + vlan
761 dhcp_path = os.path.join(dhcp_path, net_namespace)
762 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
763
764 command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_file
765 print self.name, ': command:', command
766 (_, stdout, _) = self.ssh_conn.exec_command(command)
767 content = stdout.read()
768
769 command = 'sudo ip netns exec ' + net_namespace + ' kill -9 ' + content
770 print self.name, ': command:', command
771 (_, stdout, _) = self.ssh_conn.exec_command(command)
772 content = stdout.read()
773
774 # if len(content) == 0:
775 # return True
776 # else:
777 # return False
778
779 def is_dhcp_port_free(self, host_id, net_uuid):
780 """
781 Check if any port attached to the a net in a vxlan mesh across computes nodes
782 :param host_id: host id
783 :param net_uuid: network id
784 :return: True if is not free
785 """
786 self.db_lock.acquire()
787 result, content = self.db.get_table(
788 FROM='ports',
789 WHERE={'p.type': 'instance:ovs', 'p.net_id': net_uuid}
790 )
791 self.db_lock.release()
792
793 if len(content) > 0:
794 return False
795 else:
796 return True
797
798 def is_port_free(self, host_id, net_uuid):
799 """
800 Check if there not ovs ports of a network in a compute host.
801 :param host_id: host id
802 :param net_uuid: network id
803 :return: True if is not free
804 """
805
806 self.db_lock.acquire()
807 result, content = self.db.get_table(
808 FROM='ports as p join instances as i on p.instance_id=i.uuid',
809 WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
810 )
811 self.db_lock.release()
812
813 if len(content) > 0:
814 return False
815 else:
816 return True
817
818 def add_port_to_ovs_bridge(self, vlan):
819 """
820 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
821 :param vlan: vlan port id
822 :return: True if success
823 """
824
825 if self.test:
826 return
827
828 port_name = 'ovim-' + vlan
829 command = 'sudo ovs-vsctl add-port br-int ' + port_name + ' tag=' + vlan
830 print self.name, ': command:', command
831 (_, stdout, _) = self.ssh_conn.exec_command(command)
832 content = stdout.read()
833 if len(content) == 0:
834 return True
835 else:
836 return False
837
838 def delete_dhcp_port(self, vlan, net_uuid):
839 """
840 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
841 :param vlan: segmentation id
842 :param net_uuid: network id
843 :return: True if success
844 """
845
846 if self.test:
847 return
848
849 if not self.is_dhcp_port_free(vlan, net_uuid):
850 return True
851 self.delete_dhcp_interfaces(vlan)
852 return True
853
854 def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
855 """
856 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
857 :param vlan:
858 :param net_uuid:
859 :return: True if success
860 """
861 if self.test:
862 return
863
864 if not self.is_port_free(vlan, net_uuid):
865 return True
866 self.delete_port_to_ovs_bridge(vlan, net_uuid)
867 self.delete_linux_bridge(vlan)
868 return True
869
870 def delete_linux_bridge(self, vlan):
871 """
872 Delete a linux bridge in a scpecific compute.
873 :param vlan: vlan port id
874 :return: True if success
875 """
876
877 if self.test:
878 return
879
880 port_name = 'ovim-' + vlan
881 command = 'sudo ip link set dev veth0-' + vlan + ' down'
882 print self.name, ': command:', command
883 (_, stdout, _) = self.ssh_conn.exec_command(command)
884 content = stdout.read()
885 #
886 # if len(content) != 0:
887 # return False
888
889 command = 'sudo ifconfig ' + port_name + ' down && sudo brctl delbr ' + port_name
890 print self.name, ': command:', command
891 (_, stdout, _) = self.ssh_conn.exec_command(command)
892 content = stdout.read()
893 if len(content) == 0:
894 return True
895 else:
896 return False
897
    def create_ovs_bridge_port(self, vlan):
        """
        Generate a linux bridge and attach the port to a OVS bridge
        :param vlan: vlan port id
        :return:
        """
        # no-op in test mode: there is no real host to configure
        if self.test:
            return
        # the linux bridge must exist before it can be plugged into OVS
        self.create_linux_bridge(vlan)
        self.add_port_to_ovs_bridge(vlan)
908
909 def create_linux_bridge(self, vlan):
910 """
911 Create a linux bridge with STP active
912 :param vlan: netowrk vlan id
913 :return:
914 """
915
916 if self.test:
917 return
918
919 port_name = 'ovim-' + vlan
920 command = 'sudo brctl show | grep ' + port_name
921 print self.name, ': command:', command
922 (_, stdout, _) = self.ssh_conn.exec_command(command)
923 content = stdout.read()
924
925 # if exist nothing to create
926 # if len(content) == 0:
927 # return False
928
929 command = 'sudo brctl addbr ' + port_name
930 print self.name, ': command:', command
931 (_, stdout, _) = self.ssh_conn.exec_command(command)
932 content = stdout.read()
933
934 # if len(content) == 0:
935 # return True
936 # else:
937 # return False
938
939 command = 'sudo brctl stp ' + port_name + ' on'
940 print self.name, ': command:', command
941 (_, stdout, _) = self.ssh_conn.exec_command(command)
942 content = stdout.read()
943
944 # if len(content) == 0:
945 # return True
946 # else:
947 # return False
948 command = 'sudo ip link set dev ' + port_name + ' up'
949 print self.name, ': command:', command
950 (_, stdout, _) = self.ssh_conn.exec_command(command)
951 content = stdout.read()
952
953 if len(content) == 0:
954 return True
955 else:
956 return False
957
958 def set_mac_dhcp_server(self, ip, mac, vlan, netmask, dhcp_path):
959 """
960 Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
961 :param ip: IP address asigned to a VM
962 :param mac: VM vnic mac to be macthed with the IP received
963 :param vlan: Segmentation id
964 :param netmask: netmask value
965 :param path: dhcp conf file path that live in namespace side
966 :return: True if success
967 """
968
969 if self.test:
970 return
971
972 net_namespace = 'ovim-' + vlan
973 dhcp_path = os.path.join(dhcp_path, net_namespace)
974 dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)
975
976 if not ip:
977 return False
978
979 ip_data = mac.upper() + ',' + ip
980
981 command = 'sudo ip netns exec ' + net_namespace + ' touch ' + dhcp_hostsdir
982 print self.name, ': command:', command
983 (_, stdout, _) = self.ssh_conn.exec_command(command)
984 content = stdout.read()
985
986 command = 'sudo ip netns exec ' + net_namespace + ' sudo bash -ec "echo ' + ip_data + ' >> ' + dhcp_hostsdir + '"'
987
988 print self.name, ': command:', command
989 (_, stdout, _) = self.ssh_conn.exec_command(command)
990 content = stdout.read()
991
992 if len(content) == 0:
993 return True
994 else:
995 return False
996
997 def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
998 """
999 Delete into dhcp conf file the ip assigned to a specific MAC address
1000
1001 :param ip: IP address asigned to a VM
1002 :param mac: VM vnic mac to be macthed with the IP received
1003 :param vlan: Segmentation id
1004 :param dhcp_path: dhcp conf file path that live in namespace side
1005 :return:
1006 """
1007
1008 if self.test:
1009 return
1010
1011 net_namespace = 'ovim-' + vlan
1012 dhcp_path = os.path.join(dhcp_path, net_namespace)
1013 dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)
1014
1015 if not ip:
1016 return False
1017
1018 ip_data = mac.upper() + ',' + ip
1019
1020 command = 'sudo ip netns exec ' + net_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
1021 print self.name, ': command:', command
1022 (_, stdout, _) = self.ssh_conn.exec_command(command)
1023 content = stdout.read()
1024
1025 if len(content) == 0:
1026 return True
1027 else:
1028 return False
1029
1030 def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway):
1031 """
1032 Generate a linux bridge and attache the port to a OVS bridge
1033 :param self:
1034 :param vlan: Segmentation id
1035 :param ip_range: IP dhcp range
1036 :param netmask: network netmask
1037 :param dhcp_path: dhcp conf file path that live in namespace side
1038 :param gateway: Gateway address for dhcp net
1039 :return: True if success
1040 """
1041
1042 if self.test:
1043 return
1044
1045 interface = 'tap-' + vlan
1046 net_namespace = 'ovim-' + vlan
1047 dhcp_path = os.path.join(dhcp_path, net_namespace)
1048 leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
1049 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
1050
1051 dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask
1052
1053 command = 'sudo ip netns exec ' + net_namespace + ' mkdir -p ' + dhcp_path
1054 print self.name, ': command:', command
1055 (_, stdout, _) = self.ssh_conn.exec_command(command)
1056 content = stdout.read()
1057
1058 pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
1059 command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_path
1060 print self.name, ': command:', command
1061 (_, stdout, _) = self.ssh_conn.exec_command(command)
1062 content = stdout.read()
1063 # check if pid is runing
1064 pid_status_path = content
1065 if content:
1066 command = "ps aux | awk '{print $2 }' | grep " + pid_status_path
1067 print self.name, ': command:', command
1068 (_, stdout, _) = self.ssh_conn.exec_command(command)
1069 content = stdout.read()
1070 if not content:
1071 command = 'sudo ip netns exec ' + net_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
1072 '--interface=' + interface + ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
1073 ' --dhcp-range ' + dhcp_range + ' --pid-file=' + pid_file + ' --dhcp-leasefile=' + leases_path + \
1074 ' --listen-address ' + gateway
1075
1076 print self.name, ': command:', command
1077 (_, stdout, _) = self.ssh_conn.exec_command(command)
1078 content = stdout.readline()
1079
1080 if len(content) == 0:
1081 return True
1082 else:
1083 return False
1084
1085 def delete_dhcp_interfaces(self, vlan):
1086 """
1087 Create a linux bridge with STP active
1088 :param vlan: netowrk vlan id
1089 :return:
1090 """
1091
1092 if self.test:
1093 return
1094
1095 net_namespace = 'ovim-' + vlan
1096 command = 'sudo ovs-vsctl del-port br-int ovs-tap-' + vlan
1097 print self.name, ': command:', command
1098 (_, stdout, _) = self.ssh_conn.exec_command(command)
1099 content = stdout.read()
1100
1101 command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' down'
1102 print self.name, ': command:', command
1103 (_, stdout, _) = self.ssh_conn.exec_command(command)
1104 content = stdout.read()
1105
1106 command = 'sudo ip link set dev ovs-tap-' + vlan + ' down'
1107 print self.name, ': command:', command
1108 (_, stdout, _) = self.ssh_conn.exec_command(command)
1109 content = stdout.read()
1110
1111 def create_dhcp_interfaces(self, vlan, ip, netmask):
1112 """
1113 Create a linux bridge with STP active
1114 :param vlan: segmentation id
1115 :param ip: Ip included in the dhcp range for the tap interface living in namesapce side
1116 :param netmask: dhcp net CIDR
1117 :return: True if success
1118 """
1119
1120 if self.test:
1121 return
1122
1123 net_namespace = 'ovim-' + vlan
1124 namespace_interface = 'tap-' + vlan
1125
1126 command = 'sudo ip netns add ' + net_namespace
1127 print self.name, ': command:', command
1128 (_, stdout, _) = self.ssh_conn.exec_command(command)
1129 content = stdout.read()
1130
1131 command = 'sudo ip link add tap-' + vlan + ' type veth peer name ovs-tap-' + vlan
1132 print self.name, ': command:', command
1133 (_, stdout, _) = self.ssh_conn.exec_command(command)
1134 content = stdout.read()
1135
1136 command = 'sudo ovs-vsctl add-port br-int ovs-tap-' + vlan + ' tag=' + vlan
1137 print self.name, ': command:', command
1138 (_, stdout, _) = self.ssh_conn.exec_command(command)
1139 content = stdout.read()
1140
1141 command = 'sudo ip link set tap-' + vlan + ' netns ' + net_namespace
1142 print self.name, ': command:', command
1143 (_, stdout, _) = self.ssh_conn.exec_command(command)
1144 content = stdout.read()
1145
1146 command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + vlan + ' up'
1147 print self.name, ': command:', command
1148 (_, stdout, _) = self.ssh_conn.exec_command(command)
1149 content = stdout.read()
1150
1151 command = 'sudo ip link set dev ovs-tap-' + vlan + ' up'
1152 print self.name, ': command:', command
1153 (_, stdout, _) = self.ssh_conn.exec_command(command)
1154 content = stdout.read()
1155
1156 command = 'sudo ip netns exec ' + net_namespace + ' ' + ' ifconfig ' + namespace_interface \
1157 + ' ' + ip + ' netmask ' + netmask
1158 print self.name, ': command:', command
1159 (_, stdout, _) = self.ssh_conn.exec_command(command)
1160 content = stdout.read()
1161
1162 if len(content) == 0:
1163 return True
1164 else:
1165 return False
1166
1167 def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
1168 """
1169 Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
1170 :param vxlan_interface: vlxan inteface name.
1171 :param remote_ip: tunnel endpoint remote compute ip.
1172 :return:
1173 """
1174 if self.test:
1175 return
1176 command = 'sudo ovs-vsctl add-port br-int ' + vxlan_interface + \
1177 ' -- set Interface ' + vxlan_interface + ' type=vxlan options:remote_ip=' + remote_ip + \
1178 ' -- set Port ' + vxlan_interface + ' other_config:stp-path-cost=10'
1179 print self.name, ': command:', command
1180 (_, stdout, _) = self.ssh_conn.exec_command(command)
1181 content = stdout.read()
1182 print content
1183 if len(content) == 0:
1184 return True
1185 else:
1186 return False
1187
1188 def delete_ovs_vxlan_tunnel(self, vxlan_interface):
1189 """
1190 Delete a vlxan tunnel port from a OVS brdige.
1191 :param vxlan_interface: vlxan name to be delete it.
1192 :return: True if success.
1193 """
1194 if self.test:
1195 return
1196 command = 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
1197 print self.name, ': command:', command
1198 (_, stdout, _) = self.ssh_conn.exec_command(command)
1199 content = stdout.read()
1200 print content
1201 if len(content) == 0:
1202 return True
1203 else:
1204 return False
1205
1206 def delete_ovs_bridge(self):
1207 """
1208 Delete a OVS bridge from a compute.
1209 :return: True if success
1210 """
1211 if self.test:
1212 return
1213 command = 'sudo ovs-vsctl del-br br-int'
1214 print self.name, ': command:', command
1215 (_, stdout, _) = self.ssh_conn.exec_command(command)
1216 content = stdout.read()
1217 if len(content) == 0:
1218 return True
1219 else:
1220 return False
1221
1222 def get_file_info(self, path):
1223 command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1224 print self.name, ': command:', command
1225 (_, stdout, _) = self.ssh_conn.exec_command(command)
1226 content = stdout.read()
1227 if len(content) == 0:
1228 return None # file does not exist
1229 else:
1230 return content.split(" ") #(permission, 1, owner, group, size, date, file)
1231
1232 def qemu_get_info(self, path):
1233 command = 'qemu-img info ' + path
1234 print self.name, ': command:', command
1235 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
1236 content = stdout.read()
1237 if len(content) == 0:
1238 error = stderr.read()
1239 print self.name, ": get_qemu_info error ", error
1240 raise paramiko.ssh_exception.SSHException("Error getting qemu_info: " + error)
1241 else:
1242 try:
1243 return yaml.load(content)
1244 except yaml.YAMLError as exc:
1245 text = ""
1246 if hasattr(exc, 'problem_mark'):
1247 mark = exc.problem_mark
1248 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
1249 print self.name, ": get_qemu_info yaml format Exception", text
1250 raise paramiko.ssh_exception.SSHException("Error getting qemu_info yaml format" + text)
1251
1252 def qemu_change_backing(self, inc_file, new_backing_file):
1253 command = 'qemu-img rebase -u -b ' + new_backing_file + ' ' + inc_file
1254 print self.name, ': command:', command
1255 (_, _, stderr) = self.ssh_conn.exec_command(command)
1256 content = stderr.read()
1257 if len(content) == 0:
1258 return 0
1259 else:
1260 print self.name, ": qemu_change_backing error: ", content
1261 return -1
1262
1263 def get_notused_filename(self, proposed_name, suffix=''):
1264 '''Look for a non existing file_name in the host
1265 proposed_name: proposed file name, includes path
1266 suffix: suffix to be added to the name, before the extention
1267 '''
1268 extension = proposed_name.rfind(".")
1269 slash = proposed_name.rfind("/")
1270 if extension < 0 or extension < slash: # no extension
1271 extension = len(proposed_name)
1272 target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
1273 info = self.get_file_info(target_name)
1274 if info is None:
1275 return target_name
1276
1277 index=0
1278 while info is not None:
1279 target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
1280 index+=1
1281 info = self.get_file_info(target_name)
1282 return target_name
1283
1284 def get_notused_path(self, proposed_path, suffix=''):
1285 '''Look for a non existing path at database for images
1286 proposed_path: proposed file name, includes path
1287 suffix: suffix to be added to the name, before the extention
1288 '''
1289 extension = proposed_path.rfind(".")
1290 if extension < 0:
1291 extension = len(proposed_path)
1292 if suffix != None:
1293 target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
1294 index=0
1295 while True:
1296 r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
1297 if r<=0:
1298 return target_path
1299 target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
1300 index+=1
1301
1302
1303 def delete_file(self, file_name):
1304 command = 'rm -f '+file_name
1305 print self.name, ': command:', command
1306 (_, _, stderr) = self.ssh_conn.exec_command(command)
1307 error_msg = stderr.read()
1308 if len(error_msg) > 0:
1309 raise paramiko.ssh_exception.SSHException("Error deleting file: " + error_msg)
1310
1311 def copy_file(self, source, destination, perserve_time=True):
1312 if source[0:4]=="http":
1313 command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1314 dst=destination, src=source, dst_result=destination + ".result" )
1315 else:
1316 command = 'cp --no-preserve=mode'
1317 if perserve_time:
1318 command += ' --preserve=timestamps'
1319 command += " '{}' '{}'".format(source, destination)
1320 print self.name, ': command:', command
1321 (_, _, stderr) = self.ssh_conn.exec_command(command)
1322 error_msg = stderr.read()
1323 if len(error_msg) > 0:
1324 raise paramiko.ssh_exception.SSHException("Error copying image to local host: " + error_msg)
1325
    def copy_remote_file(self, remote_file, use_incremental):
        '''Copy a file from the repository to the local image folder and
        recursively copy its backing files when the remote file is incremental.
        Reads and/or modifies self.localinfo['files'], which caches the
        unmodified local copies of images.
        params:
            remote_file: path of remote file
            use_incremental: None (leave the decision to this function), True, False
        return:
            local_file: name of local file
            qemu_info: dict with qemu information of local file
            use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
        '''

        use_incremental_out = use_incremental
        new_backing_file = None
        local_file = None
        file_from_local = True

        #in case incremental use is not decided, take the decision depending on the image
        #avoid the use of incremental if this image is already incremental
        # http sources cannot be inspected with qemu-img, so they are never
        # treated as local repository files
        if remote_file[0:4] == "http":
            file_from_local = False
        if file_from_local:
            qemu_remote_info = self.qemu_get_info(remote_file)
        if use_incremental_out==None:
            use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)
        #copy recursivelly the backing files
        if file_from_local and 'backing file' in qemu_remote_info:
            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

        #check if remote file is present locally
        if use_incremental_out and remote_file in self.localinfo['files']:
            local_file = self.localinfo['files'][remote_file]
            local_file_info = self.get_file_info(local_file)
            if file_from_local:
                remote_file_info = self.get_file_info(remote_file)
            if local_file_info == None:
                # cached copy has disappeared from disk; force a re-copy
                local_file = None
            elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
                #local copy of file not valid because date or size are different.
                #TODO DELETE local file if this file is not used by any active virtual machine
                try:
                    self.delete_file(local_file)
                    del self.localinfo['files'][remote_file]
                except Exception:
                    pass
                local_file = None
            else: #check that the local file has the same backing file, or there are not backing at all
                qemu_info = self.qemu_get_info(local_file)
                if new_backing_file != qemu_info.get('backing file'):
                    local_file = None

        if local_file == None: #copy the file
            img_name= remote_file.split('/') [-1]
            img_local = self.image_path + '/' + img_name
            local_file = self.get_notused_filename(img_local)
            self.copy_file(remote_file, local_file, use_incremental_out)

            if use_incremental_out:
                # remember the local copy so next launches can reuse it
                self.localinfo['files'][remote_file] = local_file
            if new_backing_file:
                self.qemu_change_backing(local_file, new_backing_file)
            qemu_info = self.qemu_get_info(local_file)

        return local_file, qemu_info, use_incremental_out
1393
    def launch_server(self, conn, server, rebuild=False, domain=None):
        """Start the libvirt domain of a server on this host.
        Copies the required images to the host (optionally as incremental
        qcow2 files), builds the libvirt XML and creates the domain.
        Arguments:
            conn: libvirt connection to this host
            server: dict with instance data ('uuid', 'image_id', optional
                    'metadata', 'paused', ...)
            rebuild: when True, previous incremental files are deleted first
            domain: already existing libvirt domain; if given and not
                    rebuilding, it is simply resumed
        Returns (0, 'Success') on success, (<0, error_text) on failure.
        """
        if self.test:
            # sleep a random time to make the fake launch more realistic
            time.sleep(random.randint(20,150))
            return 0, 'Success'

        server_id = server['uuid']
        paused = server.get('paused','no')
        try:
            # an existing, non-rebuild domain only needs to be resumed
            if domain!=None and rebuild==False:
                domain.resume()
                #self.server_status[server_id] = 'ACTIVE'
                return 0, 'Success'

            self.db_lock.acquire()
            result, server_data = self.db.get_instance(server_id)
            self.db_lock.release()
            if result <= 0:
                print self.name, ": launch_server ERROR getting server from DB",result, server_data
                return result, server_data

            #0: get image metadata
            server_metadata = server.get('metadata', {})
            use_incremental = None

            if "use_incremental" in server_metadata:
                use_incremental = False if server_metadata["use_incremental"]=="no" else True

            server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
            if rebuild:
                #delete previous incremental files
                for file_ in server_host_files.values():
                    self.delete_file(file_['source file'] )
                server_host_files={}

            #1: obtain aditional devices (disks)
            #Put as first device the main disk
            devices = [  {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
            if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
                devices += server_data['extended']['devices']

            for dev in devices:
                # devices without an image (e.g. empty volumes) need no copy
                if dev['image_id'] == None:
                    continue

                self.db_lock.acquire()
                result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
                                                    WHERE={'uuid': dev['image_id']})
                self.db_lock.release()
                if result <= 0:
                    error_text = "ERROR", result, content, "when getting image", dev['image_id']
                    print self.name, ": launch_server", error_text
                    return -1, error_text
                if content[0]['metadata'] is not None:
                    dev['metadata'] = json.loads(content[0]['metadata'])
                else:
                    dev['metadata'] = {}

                # reuse a file already copied for this server, if any
                if dev['image_id'] in server_host_files:
                    dev['source file'] = server_host_files[ dev['image_id'] ] ['source file'] #local path
                    dev['file format'] = server_host_files[ dev['image_id'] ] ['file format'] # raw or qcow2
                    continue

                #2: copy image to host
                remote_file = content[0]['path']
                use_incremental_image = use_incremental
                if dev['metadata'].get("use_incremental") == "no":
                    use_incremental_image = False
                local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)

                #create incremental image
                if use_incremental_image:
                    local_file_inc = self.get_notused_filename(local_file, '.inc')
                    command = 'qemu-img create -f qcow2 '+local_file_inc+ ' -o backing_file='+ local_file
                    print 'command:', command
                    (_, _, stderr) = self.ssh_conn.exec_command(command)
                    error_msg = stderr.read()
                    if len(error_msg) > 0:
                        raise paramiko.ssh_exception.SSHException("Error creating incremental file: " + error_msg)
                    local_file = local_file_inc
                    qemu_info = {'file format':'qcow2'}

                server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}

                dev['source file'] = local_file
                dev['file format'] = qemu_info['file format']

            # persist the per-server file cache on next localinfo flush
            self.localinfo['server_files'][ server['uuid'] ] = server_host_files
            self.localinfo_dirty = True

            #3 Create XML
            result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
            if result <0:
                print self.name, ": create xml server error:", xml
                return -2, xml
            print self.name, ": create xml:", xml
            atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
            #4 Start the domain
            if not rebuild: #ensures that any pending destroying server is done
                self.server_forceoff(True)
            #print self.name, ": launching instance" #, xml
            conn.createXML(xml, atribute)
            #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'

            return 0, 'Success'

        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            print self.name, ": launch_server(%s) ssh Exception: %s" %(server_id, text)
            if "SSH session not active" in text:
                self.ssh_connect()
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            print self.name, ": launch_server(%s) libvirt Exception: %s" %(server_id, text)
        except Exception as e:
            text = str(e)
            print self.name, ": launch_server(%s) Exception: %s" %(server_id, text)
        return -1, text
1511
    def update_servers_status(self):
        """Refresh self.server_status with the current libvirt state of every
        domain on this host, persisting status changes to the 'instances'
        table. Does nothing in test mode or when no server is tracked.
        """
        # # virDomainState
        # VIR_DOMAIN_NOSTATE = 0
        # VIR_DOMAIN_RUNNING = 1
        # VIR_DOMAIN_BLOCKED = 2
        # VIR_DOMAIN_PAUSED = 3
        # VIR_DOMAIN_SHUTDOWN = 4
        # VIR_DOMAIN_SHUTOFF = 5
        # VIR_DOMAIN_CRASHED = 6
        # VIR_DOMAIN_PMSUSPENDED = 7   #TODO suspended

        if self.test or len(self.server_status)==0:
            return

        try:
            # map every libvirt domain uuid to an openvim status string
            conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
            domains= conn.listAllDomains()
            domain_dict={}
            for domain in domains:
                uuid = domain.UUIDString() ;
                libvirt_status = domain.state()
                #print libvirt_status
                if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
                    new_status = "ACTIVE"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
                    new_status = "PAUSED"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
                    new_status = "INACTIVE"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
                    new_status = "ERROR"
                else:
                    new_status = None
                domain_dict[uuid] = new_status
            conn.close()
        except host_thread.lvirt_module.libvirtError as e:
            print self.name, ": get_state() Exception '", e.get_error_message()
            return

        for server_id, current_status in self.server_status.iteritems():
            new_status = None
            if server_id in domain_dict:
                new_status = domain_dict[server_id]
            else:
                # a tracked server without a libvirt domain is not running
                new_status = "INACTIVE"

            if new_status == None or new_status == current_status:
                continue
            if new_status == 'INACTIVE' and current_status == 'ERROR':
                continue #keep ERROR status, because obviously this machine is not running
            #change status
            print self.name, ": server ", server_id, "status change from ", current_status, "to", new_status
            STATUS={'progress':100, 'status':new_status}
            if new_status == 'ERROR':
                STATUS['last_error'] = 'machine has crashed'
            self.db_lock.acquire()
            r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
            self.db_lock.release()
            if r>=0:
                # only track the new status once the DB update succeeded
                self.server_status[server_id] = new_status
1571
    def action_on_server(self, req, last_retry=True):
        '''Perform an action on a req
        Attributes:
            req: dictionary that contain:
                server properties: 'uuid','name','tenant_id','status'
                action: 'action'
                host properties: 'user', 'ip_name'
        return (error, text)
             0: No error. VM is updated to new state,
            -1: Invalid action, as trying to pause a PAUSED VM
            -2: Error accessing host
            -3: VM nor present
            -4: Error at DB access
            -5: Error while trying to perform action. VM is updated to ERROR
        '''
        server_id = req['uuid']
        conn = None
        new_status = None
        old_status = req['status']
        last_error = None

        if self.test:
            # test mode: fake the state transition without touching libvirt
            if 'terminate' in req['action']:
                new_status = 'deleted'
            elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
                if req['status']!='ERROR':
                    time.sleep(5)
                    new_status = 'INACTIVE'
            elif 'start' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE'
            elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE' : new_status = 'ACTIVE'
            elif 'pause' in req['action'] and req['status']!='ERROR': new_status = 'PAUSED'
            elif 'reboot' in req['action'] and req['status']!='ERROR': new_status = 'ACTIVE'
            elif 'rebuild' in req['action']:
                time.sleep(random.randint(20,150))
                new_status = 'ACTIVE'
            elif 'createImage' in req['action']:
                time.sleep(5)
                self.create_image(None, req)
        else:
            try:
                conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
                try:
                    dom = conn.lookupByUUIDString(server_id)
                except host_thread.lvirt_module.libvirtError as e:
                    text = e.get_error_message()
                    # a missing domain is expected for some actions; any other
                    # lookup failure is propagated
                    if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                        dom = None
                    else:
                        print self.name, ": action_on_server(",server_id,") libvirt exception:", text
                        raise e

                if 'forceOff' in req['action']:
                    if dom == None:
                        print self.name, ": action_on_server(",server_id,") domain not running"
                    else:
                        try:
                            print self.name, ": sending DESTROY to server", server_id
                            dom.destroy()
                        except Exception as e:
                            # destroying an already stopped domain is not an error
                            if "domain is not running" not in e.get_error_message():
                                print self.name, ": action_on_server(",server_id,") Exception while sending force off:", e.get_error_message()
                                last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                                new_status = 'ERROR'

                elif 'terminate' in req['action']:
                    if dom == None:
                        print self.name, ": action_on_server(",server_id,") domain not running"
                        new_status = 'deleted'
                    else:
                        try:
                            if req['action']['terminate'] == 'force':
                                print self.name, ": sending DESTROY to server", server_id
                                dom.destroy()
                                new_status = 'deleted'
                            else:
                                # graceful shutdown: re-check in 10 seconds
                                print self.name, ": sending SHUTDOWN to server", server_id
                                dom.shutdown()
                                self.pending_terminate_server.append( (time.time()+10,server_id) )
                        except Exception as e:
                            print self.name, ": action_on_server(",server_id,") Exception while destroy:", e.get_error_message()
                            last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                            new_status = 'ERROR'
                            if "domain is not running" in e.get_error_message():
                                try:
                                    dom.undefine()
                                    new_status = 'deleted'
                                except Exception:
                                    print self.name, ": action_on_server(",server_id,") Exception while undefine:", e.get_error_message()
                                    last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
                                #Exception: 'virDomainDetachDevice() failed'
                    if new_status=='deleted':
                        # forget local state and image files of the deleted server
                        if server_id in self.server_status:
                            del self.server_status[server_id]
                        if req['uuid'] in self.localinfo['server_files']:
                            for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
                                try:
                                    self.delete_file(file_['source file'])
                                except Exception:
                                    pass
                            del self.localinfo['server_files'][ req['uuid'] ]
                            self.localinfo_dirty = True

                elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
                    try:
                        if dom == None:
                            print self.name, ": action_on_server(",server_id,") domain not running"
                        else:
                            dom.shutdown()
#                            new_status = 'INACTIVE'
                        #TODO: check status for changing at database
                    except Exception as e:
                        new_status = 'ERROR'
                        print self.name, ": action_on_server(",server_id,") Exception while shutdown:", e.get_error_message()
                        last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()

                elif 'rebuild' in req['action']:
                    if dom != None:
                        dom.destroy()
                    r = self.launch_server(conn, req, True, None)
                    if r[0] <0:
                        new_status = 'ERROR'
                        last_error = r[1]
                    else:
                        new_status = 'ACTIVE'
                elif 'start' in req['action']:
                    # The instance is only create in DB but not yet at libvirt domain, needs to be create
                    rebuild = True if req['action']['start'] == 'rebuild' else False
                    r = self.launch_server(conn, req, rebuild, dom)
                    if r[0] <0:
                        new_status = 'ERROR'
                        last_error = r[1]
                    else:
                        new_status = 'ACTIVE'

                elif 'resume' in req['action']:
                    try:
                        if dom == None:
                            pass
                        else:
                            dom.resume()
#                            new_status = 'ACTIVE'
                    except Exception as e:
                        print self.name, ": action_on_server(",server_id,") Exception while resume:", e.get_error_message()

                elif 'pause' in req['action']:
                    try:
                        if dom == None:
                            pass
                        else:
                            dom.suspend()
#                            new_status = 'PAUSED'
                    except Exception as e:
                        print self.name, ": action_on_server(",server_id,") Exception while pause:", e.get_error_message()

                elif 'reboot' in req['action']:
                    try:
                        if dom == None:
                            pass
                        else:
                            dom.reboot()
                        print self.name, ": action_on_server(",server_id,") reboot:"
                        #new_status = 'ACTIVE'
                    except Exception as e:
                        print self.name, ": action_on_server(",server_id,") Exception while reboot:", e.get_error_message()
                elif 'createImage' in req['action']:
                    self.create_image(dom, req)


                conn.close()
            except host_thread.lvirt_module.libvirtError as e:
                if conn is not None: conn.close()
                text = e.get_error_message()
                new_status = "ERROR"
                last_error = text
                print self.name, ": action_on_server(",server_id,") Exception '", text
                if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                    print self.name, ": action_on_server(",server_id,") Exception removed from host"
        #end of if self.test
        if new_status == None:
            return 1

        print self.name, ": action_on_server(",server_id,") new status", new_status, last_error
        UPDATE = {'progress':100, 'status':new_status}

        if new_status=='ERROR':
            if not last_retry:  #if there will be another retry do not update database
                return -1
            elif 'terminate' in req['action']:
                #PUT a log in the database
                print self.name, ": PANIC deleting server", server_id, last_error
                self.db_lock.acquire()
                self.db.new_row('logs',
                            {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
                             'description':'PANIC deleting server from host '+self.name+': '+last_error}
                        )
                self.db_lock.release()
                if server_id in self.server_status:
                    del self.server_status[server_id]
                return -1
            else:
                UPDATE['last_error'] = last_error
        if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
            self.db_lock.acquire()
            self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
            self.server_status[server_id] = new_status
            self.db_lock.release()
        if new_status == 'ERROR':
            return -1
        return 1
1781
1782
1783 def restore_iface(self, name, mac, lib_conn=None):
1784 ''' make an ifdown, ifup to restore default parameter of na interface
1785 Params:
1786 mac: mac address of the interface
1787 lib_conn: connection to the libvirt, if None a new connection is created
1788 Return 0,None if ok, -1,text if fails
1789 '''
1790 conn=None
1791 ret = 0
1792 error_text=None
1793 if self.test:
1794 print self.name, ": restore_iface '%s' %s" % (name, mac)
1795 return 0, None
1796 try:
1797 if not lib_conn:
1798 conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
1799 else:
1800 conn = lib_conn
1801
1802 #wait to the pending VM deletion
1803 #TODO.Revise self.server_forceoff(True)
1804
1805 iface = conn.interfaceLookupByMACString(mac)
1806 iface.destroy()
1807 iface.create()
1808 print self.name, ": restore_iface '%s' %s" % (name, mac)
1809 except host_thread.lvirt_module.libvirtError as e:
1810 error_text = e.get_error_message()
1811 print self.name, ": restore_iface '%s' '%s' libvirt exception: %s" %(name, mac, error_text)
1812 ret=-1
1813 finally:
1814 if lib_conn is None and conn is not None:
1815 conn.close()
1816 return ret, error_text
1817
1818
1819 def create_image(self,dom, req):
1820 if self.test:
1821 if 'path' in req['action']['createImage']:
1822 file_dst = req['action']['createImage']['path']
1823 else:
1824 createImage=req['action']['createImage']
1825 img_name= createImage['source']['path']
1826 index=img_name.rfind('/')
1827 file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
1828 image_status='ACTIVE'
1829 else:
1830 for retry in (0,1):
1831 try:
1832 server_id = req['uuid']
1833 createImage=req['action']['createImage']
1834 file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
1835 if 'path' in req['action']['createImage']:
1836 file_dst = req['action']['createImage']['path']
1837 else:
1838 img_name= createImage['source']['path']
1839 index=img_name.rfind('/')
1840 file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')
1841
1842 self.copy_file(file_orig, file_dst)
1843 qemu_info = self.qemu_get_info(file_orig)
1844 if 'backing file' in qemu_info:
1845 for k,v in self.localinfo['files'].items():
1846 if v==qemu_info['backing file']:
1847 self.qemu_change_backing(file_dst, k)
1848 break
1849 image_status='ACTIVE'
1850 break
1851 except paramiko.ssh_exception.SSHException as e:
1852 image_status='ERROR'
1853 error_text = e.args[0]
1854 print self.name, "': create_image(",server_id,") ssh Exception:", error_text
1855 if "SSH session not active" in error_text and retry==0:
1856 self.ssh_connect()
1857 except Exception as e:
1858 image_status='ERROR'
1859 error_text = str(e)
1860 print self.name, "': create_image(",server_id,") Exception:", error_text
1861
1862 #TODO insert a last_error at database
1863 self.db_lock.acquire()
1864 self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
1865 {'uuid':req['new_image']['uuid']}, log=True)
1866 self.db_lock.release()
1867
1868 def edit_iface(self, port_id, old_net, new_net):
1869 #This action imply remove and insert interface to put proper parameters
1870 if self.test:
1871 time.sleep(1)
1872 else:
1873 #get iface details
1874 self.db_lock.acquire()
1875 r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
1876 WHERE={'port_id': port_id})
1877 self.db_lock.release()
1878 if r<0:
1879 print self.name, ": edit_iface(",port_id,") DDBB error:", c
1880 return
1881 elif r==0:
1882 print self.name, ": edit_iface(",port_id,") por not found"
1883 return
1884 port=c[0]
1885 if port["model"]!="VF":
1886 print self.name, ": edit_iface(",port_id,") ERROR model must be VF"
1887 return
1888 #create xml detach file
1889 xml=[]
1890 self.xml_level = 2
1891 xml.append("<interface type='hostdev' managed='yes'>")
1892 xml.append(" <mac address='" +port['mac']+ "'/>")
1893 xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
1894 xml.append('</interface>')
1895
1896
1897 try:
1898 conn=None
1899 conn = host_thread.lvirt_module.open("qemu+ssh://"+self.user+"@"+self.host+"/system")
1900 dom = conn.lookupByUUIDString(port["instance_id"])
1901 if old_net:
1902 text="\n".join(xml)
1903 print self.name, ": edit_iface detaching SRIOV interface", text
1904 dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
1905 if new_net:
1906 xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
1907 self.xml_level = 1
1908 xml.append(self.pci2xml(port.get('vpci',None)) )
1909 xml.append('</interface>')
1910 text="\n".join(xml)
1911 print self.name, ": edit_iface attaching SRIOV interface", text
1912 dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
1913
1914 except host_thread.lvirt_module.libvirtError as e:
1915 text = e.get_error_message()
1916 print self.name, ": edit_iface(",port["instance_id"],") libvirt exception:", text
1917
1918 finally:
1919 if conn is not None: conn.close()
1920
1921
1922 def create_server(server, db, db_lock, only_of_ports):
1923 #print "server"
1924 #print "server"
1925 #print server
1926 #print "server"
1927 #print "server"
1928 #try:
1929 # host_id = server.get('host_id', None)
1930 extended = server.get('extended', None)
1931
1932 # print '----------------------'
1933 # print json.dumps(extended, indent=4)
1934
1935 requirements={}
1936 requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
1937 requirements['ram'] = server['flavor'].get('ram', 0)
1938 if requirements['ram']== None:
1939 requirements['ram'] = 0
1940 requirements['vcpus'] = server['flavor'].get('vcpus', 0)
1941 if requirements['vcpus']== None:
1942 requirements['vcpus'] = 0
1943 #If extended is not defined get requirements from flavor
1944 if extended is None:
1945 #If extended is defined in flavor convert to dictionary and use it
1946 if 'extended' in server['flavor'] and server['flavor']['extended'] != None:
1947 json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
1948 extended = json.loads(json_acceptable_string)
1949 else:
1950 extended = None
1951 #print json.dumps(extended, indent=4)
1952
1953 #For simplicity only one numa VM are supported in the initial implementation
1954 if extended != None:
1955 numas = extended.get('numas', [])
1956 if len(numas)>1:
1957 return (-2, "Multi-NUMA VMs are not supported yet")
1958 #elif len(numas)<1:
1959 # return (-1, "At least one numa must be specified")
1960
1961 #a for loop is used in order to be ready to multi-NUMA VMs
1962 request = []
1963 for numa in numas:
1964 numa_req = {}
1965 numa_req['memory'] = numa.get('memory', 0)
1966 if 'cores' in numa:
1967 numa_req['proc_req_nb'] = numa['cores'] #number of cores or threads to be reserved
1968 numa_req['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
1969 numa_req['proc_req_list'] = numa.get('cores-id', None) #list of ids to be assigned to the cores or threads
1970 elif 'paired-threads' in numa:
1971 numa_req['proc_req_nb'] = numa['paired-threads']
1972 numa_req['proc_req_type'] = 'paired-threads'
1973 numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
1974 elif 'threads' in numa:
1975 numa_req['proc_req_nb'] = numa['threads']
1976 numa_req['proc_req_type'] = 'threads'
1977 numa_req['proc_req_list'] = numa.get('threads-id', None)
1978 else:
1979 numa_req['proc_req_nb'] = 0 # by default
1980 numa_req['proc_req_type'] = 'threads'
1981
1982
1983
1984 #Generate a list of sriov and another for physical interfaces
1985 interfaces = numa.get('interfaces', [])
1986 sriov_list = []
1987 port_list = []
1988 for iface in interfaces:
1989 iface['bandwidth'] = int(iface['bandwidth'])
1990 if iface['dedicated'][:3]=='yes':
1991 port_list.append(iface)
1992 else:
1993 sriov_list.append(iface)
1994
1995 #Save lists ordered from more restrictive to less bw requirements
1996 numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
1997 numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)
1998
1999
2000 request.append(numa_req)
2001
2002 # print "----------\n"+json.dumps(request[0], indent=4)
2003 # print '----------\n\n'
2004
2005 #Search in db for an appropriate numa for each requested numa
2006 #at the moment multi-NUMA VMs are not supported
2007 if len(request)>0:
2008 requirements['numa'].update(request[0])
2009 if requirements['numa']['memory']>0:
2010 requirements['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2011 elif requirements['ram']==0:
2012 return (-1, "Memory information not set neither at extended field not at ram")
2013 if requirements['numa']['proc_req_nb']>0:
2014 requirements['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2015 elif requirements['vcpus']==0:
2016 return (-1, "Processor information not set neither at extended field not at vcpus")
2017
2018
2019 db_lock.acquire()
2020 result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
2021 db_lock.release()
2022
2023 if result == -1:
2024 return (-1, content)
2025
2026 numa_id = content['numa_id']
2027 host_id = content['host_id']
2028
2029 #obtain threads_id and calculate pinning
2030 cpu_pinning = []
2031 reserved_threads=[]
2032 if requirements['numa']['proc_req_nb']>0:
2033 db_lock.acquire()
2034 result, content = db.get_table(FROM='resources_core',
2035 SELECT=('id','core_id','thread_id'),
2036 WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} )
2037 db_lock.release()
2038 if result <= 0:
2039 print content
2040 return -1, content
2041
2042 #convert rows to a dictionary indexed by core_id
2043 cores_dict = {}
2044 for row in content:
2045 if not row['core_id'] in cores_dict:
2046 cores_dict[row['core_id']] = []
2047 cores_dict[row['core_id']].append([row['thread_id'],row['id']])
2048
2049 #In case full cores are requested
2050 paired = 'N'
2051 if requirements['numa']['proc_req_type'] == 'cores':
2052 #Get/create the list of the vcpu_ids
2053 vcpu_id_list = requirements['numa']['proc_req_list']
2054 if vcpu_id_list == None:
2055 vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
2056
2057 for threads in cores_dict.itervalues():
2058 #we need full cores
2059 if len(threads) != 2:
2060 continue
2061
2062 #set pinning for the first thread
2063 cpu_pinning.append( [ vcpu_id_list.pop(0), threads[0][0], threads[0][1] ] )
2064
2065 #reserve so it is not used the second thread
2066 reserved_threads.append(threads[1][1])
2067
2068 if len(vcpu_id_list) == 0:
2069 break
2070
2071 #In case paired threads are requested
2072 elif requirements['numa']['proc_req_type'] == 'paired-threads':
2073 paired = 'Y'
2074 #Get/create the list of the vcpu_ids
2075 if requirements['numa']['proc_req_list'] != None:
2076 vcpu_id_list = []
2077 for pair in requirements['numa']['proc_req_list']:
2078 if len(pair)!=2:
2079 return -1, "Field paired-threads-id not properly specified"
2080 return
2081 vcpu_id_list.append(pair[0])
2082 vcpu_id_list.append(pair[1])
2083 else:
2084 vcpu_id_list = range(0,2*int(requirements['numa']['proc_req_nb']))
2085
2086 for threads in cores_dict.itervalues():
2087 #we need full cores
2088 if len(threads) != 2:
2089 continue
2090 #set pinning for the first thread
2091 cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
2092
2093 #set pinning for the second thread
2094 cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
2095
2096 if len(vcpu_id_list) == 0:
2097 break
2098
2099 #In case normal threads are requested
2100 elif requirements['numa']['proc_req_type'] == 'threads':
2101 #Get/create the list of the vcpu_ids
2102 vcpu_id_list = requirements['numa']['proc_req_list']
2103 if vcpu_id_list == None:
2104 vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
2105
2106 for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
2107 threads = cores_dict[threads_index]
2108 #set pinning for the first thread
2109 cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
2110
2111 #if exists, set pinning for the second thread
2112 if len(threads) == 2 and len(vcpu_id_list) != 0:
2113 cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
2114
2115 if len(vcpu_id_list) == 0:
2116 break
2117
2118 #Get the source pci addresses for the selected numa
2119 used_sriov_ports = []
2120 for port in requirements['numa']['sriov_list']:
2121 db_lock.acquire()
2122 result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
2123 db_lock.release()
2124 if result <= 0:
2125 print content
2126 return -1, content
2127 for row in content:
2128 if row['id'] in used_sriov_ports or row['id']==port['port_id']:
2129 continue
2130 port['pci'] = row['pci']
2131 if 'mac_address' not in port:
2132 port['mac_address'] = row['mac']
2133 del port['mac']
2134 port['port_id']=row['id']
2135 port['Mbps_used'] = port['bandwidth']
2136 used_sriov_ports.append(row['id'])
2137 break
2138
2139 for port in requirements['numa']['port_list']:
2140 port['Mbps_used'] = None
2141 if port['dedicated'] != "yes:sriov":
2142 port['mac_address'] = port['mac']
2143 del port['mac']
2144 continue
2145 db_lock.acquire()
2146 result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
2147 db_lock.release()
2148 if result <= 0:
2149 print content
2150 return -1, content
2151 port['Mbps_used'] = content[0]['Mbps']
2152 for row in content:
2153 if row['id'] in used_sriov_ports or row['id']==port['port_id']:
2154 continue
2155 port['pci'] = row['pci']
2156 if 'mac_address' not in port:
2157 port['mac_address'] = row['mac'] # mac cannot be set to passthrough ports
2158 del port['mac']
2159 port['port_id']=row['id']
2160 used_sriov_ports.append(row['id'])
2161 break
2162
2163 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2164 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2165
2166 server['host_id'] = host_id
2167
2168
2169 #Generate dictionary for saving in db the instance resources
2170 resources = {}
2171 resources['bridged-ifaces'] = []
2172
2173 numa_dict = {}
2174 numa_dict['interfaces'] = []
2175
2176 numa_dict['interfaces'] += requirements['numa']['port_list']
2177 numa_dict['interfaces'] += requirements['numa']['sriov_list']
2178
2179 #Check bridge information
2180 unified_dataplane_iface=[]
2181 unified_dataplane_iface += requirements['numa']['port_list']
2182 unified_dataplane_iface += requirements['numa']['sriov_list']
2183
2184 for control_iface in server.get('networks', []):
2185 control_iface['net_id']=control_iface.pop('uuid')
2186 #Get the brifge name
2187 db_lock.acquire()
2188 result, content = db.get_table(FROM='nets',
2189 SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp',
2190 'dhcp_first_ip', 'dhcp_last_ip', 'cidr'),
2191 WHERE={'uuid': control_iface['net_id']})
2192 db_lock.release()
2193 if result < 0:
2194 pass
2195 elif result==0:
2196 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface['net_id']
2197 else:
2198 network=content[0]
2199 if control_iface.get("type", 'virtual') == 'virtual':
2200 if network['type']!='bridge_data' and network['type']!='bridge_man':
2201 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
2202 resources['bridged-ifaces'].append(control_iface)
2203 if network.get("provider") and network["provider"][0:3] == "OVS":
2204 control_iface["type"] = "instance:ovs"
2205 else:
2206 control_iface["type"] = "instance:bridge"
2207 if network.get("vlan"):
2208 control_iface["vlan"] = network["vlan"]
2209
2210 if network.get("enable_dhcp") == 'true':
2211 control_iface["enable_dhcp"] = network.get("enable_dhcp")
2212 control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
2213 control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
2214 control_iface["cidr"] = network["cidr"]
2215 else:
2216 if network['type']!='data' and network['type']!='ptp':
2217 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
2218 #dataplane interface, look for it in the numa tree and asign this network
2219 iface_found=False
2220 for dataplane_iface in numa_dict['interfaces']:
2221 if dataplane_iface['name'] == control_iface.get("name"):
2222 if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
2223 (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
2224 (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared") :
2225 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2226 (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
2227 dataplane_iface['uuid'] = control_iface['net_id']
2228 if dataplane_iface['dedicated'] == "no":
2229 dataplane_iface['vlan'] = network['vlan']
2230 if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
2231 dataplane_iface['mac_address'] = control_iface.get("mac_address")
2232 if control_iface.get("vpci"):
2233 dataplane_iface['vpci'] = control_iface.get("vpci")
2234 iface_found=True
2235 break
2236 if not iface_found:
2237 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface.get("name")
2238
2239 resources['host_id'] = host_id
2240 resources['image_id'] = server['image_id']
2241 resources['flavor_id'] = server['flavor_id']
2242 resources['tenant_id'] = server['tenant_id']
2243 resources['ram'] = requirements['ram']
2244 resources['vcpus'] = requirements['vcpus']
2245 resources['status'] = 'CREATING'
2246
2247 if 'description' in server: resources['description'] = server['description']
2248 if 'name' in server: resources['name'] = server['name']
2249
2250 resources['extended'] = {} #optional
2251 resources['extended']['numas'] = []
2252 numa_dict['numa_id'] = numa_id
2253 numa_dict['memory'] = requirements['numa']['memory']
2254 numa_dict['cores'] = []
2255
2256 for core in cpu_pinning:
2257 numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
2258 for core in reserved_threads:
2259 numa_dict['cores'].append({'id': core})
2260 resources['extended']['numas'].append(numa_dict)
2261 if extended!=None and 'devices' in extended: #TODO allow extra devices without numa
2262 resources['extended']['devices'] = extended['devices']
2263
2264
2265 print '===================================={'
2266 print json.dumps(resources, indent=4)
2267 print '====================================}'
2268
2269 return 0, resources
2270