1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
25 This is a thread that interacts with the host and with libvirt to manage VMs.
26 One thread will be launched per host.
28 __author__
= "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__
= "$10-jul-2014 12:07:15$"
43 from jsonschema
import validate
as js_v
, exceptions
as js_e
44 from vim_schema
import localinfo_schema
, hostinfo_schema
47 class host_thread(threading
.Thread
):
50 def __init__(self
, name
, host
, user
, db
, db_lock
, test
, image_path
, host_id
, version
, develop_mode
,
51 develop_bridge_iface
, password
=None, keyfile
= None, logger_name
=None, debug
=None):
56 'host','user': host ip or name to manage and user
57 'db', 'db_lock': database class and lock to use it in exclusion
59 threading
.Thread
.__init
__(self
)
64 self
.db_lock
= db_lock
66 self
.password
= password
67 self
.keyfile
= keyfile
68 self
.localinfo_dirty
= False
70 if not test
and not host_thread
.lvirt_module
:
72 module_info
= imp
.find_module("libvirt")
73 host_thread
.lvirt_module
= imp
.load_module("libvirt", *module_info
)
74 except (IOError, ImportError) as e
:
75 raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e
))
77 self
.logger_name
= logger_name
79 self
.logger_name
= "openvim.host."+name
80 self
.logger
= logging
.getLogger(self
.logger_name
)
82 self
.logger
.setLevel(getattr(logging
, debug
))
85 self
.develop_mode
= develop_mode
86 self
.develop_bridge_iface
= develop_bridge_iface
87 self
.image_path
= image_path
88 self
.host_id
= host_id
89 self
.version
= version
94 self
.server_status
= {} #dictionary with pairs server_uuid:server_status
95 self
.pending_terminate_server
=[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
96 self
.next_update_server_status
= 0 #time when must be check servers status
100 self
.queueLock
= threading
.Lock()
101 self
.taskQueue
= Queue
.Queue(2000)
103 self
.lvirt_conn_uri
= "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
104 user
=self
.user
, host
=self
.host
)
106 self
.lvirt_conn_uri
+= "&keyfile=" + keyfile
108 def ssh_connect(self
):
111 self
.ssh_conn
= paramiko
.SSHClient()
112 self
.ssh_conn
.set_missing_host_key_policy(paramiko
.AutoAddPolicy())
113 self
.ssh_conn
.load_system_host_keys()
114 self
.ssh_conn
.connect(self
.host
, username
=self
.user
, password
=self
.password
, key_filename
=self
.keyfile
,
116 except paramiko
.ssh_exception
.SSHException
as e
:
118 self
.logger
.error("ssh_connect ssh Exception: " + text
)
120 def load_localinfo(self
):
126 command
= 'mkdir -p ' + self
.image_path
127 # print self.name, ': command:', command
128 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
129 content
= stderr
.read()
131 self
.logger
.error("command: '%s' stderr: '%s'", command
, content
)
133 command
= 'cat ' + self
.image_path
+ '/.openvim.yaml'
134 # print self.name, ': command:', command
135 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
136 content
= stdout
.read()
137 if len(content
) == 0:
138 self
.logger
.error("command: '%s' stderr='%s'", command
, stderr
.read())
139 raise paramiko
.ssh_exception
.SSHException("Error empty file, command: '{}'".format(command
))
140 self
.localinfo
= yaml
.load(content
)
141 js_v(self
.localinfo
, localinfo_schema
)
142 self
.localinfo_dirty
= False
143 if 'server_files' not in self
.localinfo
:
144 self
.localinfo
['server_files'] = {}
145 self
.logger
.debug("localinfo load from host")
148 except paramiko
.ssh_exception
.SSHException
as e
:
150 self
.logger
.error("load_localinfo ssh Exception: " + text
)
151 except host_thread
.lvirt_module
.libvirtError
as e
:
152 text
= e
.get_error_message()
153 self
.logger
.error("load_localinfo libvirt Exception: " + text
)
154 except yaml
.YAMLError
as exc
:
156 if hasattr(exc
, 'problem_mark'):
157 mark
= exc
.problem_mark
158 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
159 self
.logger
.error("load_localinfo yaml format Exception " + text
)
160 except js_e
.ValidationError
as e
:
162 if len(e
.path
)>0: text
=" at '" + ":".join(map(str, e
.path
))+"'"
163 self
.logger
.error("load_localinfo format Exception: %s %s", text
, str(e
))
164 except Exception as e
:
166 self
.logger
.error("load_localinfo Exception: " + text
)
168 #not loaded, insert a default data and force saving by activating dirty flag
169 self
.localinfo
= {'files':{}, 'server_files':{} }
170 #self.localinfo_dirty=True
171 self
.localinfo_dirty
=False
173 def load_hostinfo(self
):
181 command
= 'cat ' + self
.image_path
+ '/hostinfo.yaml'
182 #print self.name, ': command:', command
183 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
184 content
= stdout
.read()
185 if len(content
) == 0:
186 self
.logger
.error("command: '%s' stderr: '%s'", command
, stderr
.read())
187 raise paramiko
.ssh_exception
.SSHException("Error empty file ")
188 self
.hostinfo
= yaml
.load(content
)
189 js_v(self
.hostinfo
, hostinfo_schema
)
190 self
.logger
.debug("hostlinfo load from host " + str(self
.hostinfo
))
193 except paramiko
.ssh_exception
.SSHException
as e
:
195 self
.logger
.error("load_hostinfo ssh Exception: " + text
)
196 except host_thread
.lvirt_module
.libvirtError
as e
:
197 text
= e
.get_error_message()
198 self
.logger
.error("load_hostinfo libvirt Exception: " + text
)
199 except yaml
.YAMLError
as exc
:
201 if hasattr(exc
, 'problem_mark'):
202 mark
= exc
.problem_mark
203 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
204 self
.logger
.error("load_hostinfo yaml format Exception " + text
)
205 except js_e
.ValidationError
as e
:
207 if len(e
.path
)>0: text
=" at '" + ":".join(map(str, e
.path
))+"'"
208 self
.logger
.error("load_hostinfo format Exception: %s %s", text
, e
.message
)
209 except Exception as e
:
211 self
.logger
.error("load_hostinfo Exception: " + text
)
213 #not loaded, insert a default data
216 def save_localinfo(self
, tries
=3):
218 self
.localinfo_dirty
= False
225 command
= 'cat > ' + self
.image_path
+ '/.openvim.yaml'
226 self
.logger
.debug("command:" + command
)
227 (stdin
, _
, _
) = self
.ssh_conn
.exec_command(command
)
228 yaml
.safe_dump(self
.localinfo
, stdin
, explicit_start
=True, indent
=4, default_flow_style
=False, tags
=False, encoding
='utf-8', allow_unicode
=True)
229 self
.localinfo_dirty
= False
232 except paramiko
.ssh_exception
.SSHException
as e
:
234 self
.logger
.error("save_localinfo ssh Exception: " + text
)
235 if "SSH session not active" in text
:
237 except host_thread
.lvirt_module
.libvirtError
as e
:
238 text
= e
.get_error_message()
239 self
.logger
.error("save_localinfo libvirt Exception: " + text
)
240 except yaml
.YAMLError
as exc
:
242 if hasattr(exc
, 'problem_mark'):
243 mark
= exc
.problem_mark
244 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
245 self
.logger
.error("save_localinfo yaml format Exception " + text
)
246 except Exception as e
:
248 self
.logger
.error("save_localinfo Exception: " + text
)
250 def load_servers_from_db(self
):
251 self
.db_lock
.acquire()
252 r
,c
= self
.db
.get_table(SELECT
=('uuid','status', 'image_id'), FROM
='instances', WHERE
={'host_id': self
.host_id
})
253 self
.db_lock
.release()
255 self
.server_status
= {}
257 self
.logger
.error("Error getting data from database: " + c
)
260 self
.server_status
[ server
['uuid'] ] = server
['status']
262 #convert from old version to new one
263 if 'inc_files' in self
.localinfo
and server
['uuid'] in self
.localinfo
['inc_files']:
264 server_files_dict
= {'source file': self
.localinfo
['inc_files'][ server
['uuid'] ] [0], 'file format':'raw' }
265 if server_files_dict
['source file'][-5:] == 'qcow2':
266 server_files_dict
['file format'] = 'qcow2'
268 self
.localinfo
['server_files'][ server
['uuid'] ] = { server
['image_id'] : server_files_dict
}
269 if 'inc_files' in self
.localinfo
:
270 del self
.localinfo
['inc_files']
271 self
.localinfo_dirty
= True
273 def delete_unused_files(self
):
274 '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
275 Deletes unused entries at self.loacalinfo and the corresponding local files.
276 The only reason for this mismatch is the manual deletion of instances (VM) at database
280 for uuid
,images
in self
.localinfo
['server_files'].items():
281 if uuid
not in self
.server_status
:
282 for localfile
in images
.values():
284 self
.logger
.debug("deleting file '%s' of unused server '%s'", localfile
['source file'], uuid
)
285 self
.delete_file(localfile
['source file'])
286 except paramiko
.ssh_exception
.SSHException
as e
:
287 self
.logger
.error("Exception deleting file '%s': %s", localfile
['source file'], str(e
))
288 del self
.localinfo
['server_files'][uuid
]
289 self
.localinfo_dirty
= True
291 def insert_task(self
, task
, *aditional
):
293 self
.queueLock
.acquire()
294 task
= self
.taskQueue
.put( (task
,) + aditional
, timeout
=5)
295 self
.queueLock
.release()
298 return -1, "timeout inserting a task over host " + self
.name
302 self
.load_localinfo()
304 self
.load_servers_from_db()
305 self
.delete_unused_files()
308 self
.queueLock
.acquire()
309 if not self
.taskQueue
.empty():
310 task
= self
.taskQueue
.get()
313 self
.queueLock
.release()
317 if self
.localinfo_dirty
:
318 self
.save_localinfo()
319 elif self
.next_update_server_status
< now
:
320 self
.update_servers_status()
321 self
.next_update_server_status
= now
+ 5
322 elif len(self
.pending_terminate_server
)>0 and self
.pending_terminate_server
[0][0]<now
:
323 self
.server_forceoff()
328 if task
[0] == 'instance':
329 self
.logger
.debug("processing task instance " + str(task
[1]['action']))
333 r
= self
.action_on_server(task
[1], retry
==2)
336 elif task
[0] == 'image':
338 elif task
[0] == 'exit':
339 self
.logger
.debug("processing task exit")
342 elif task
[0] == 'reload':
343 self
.logger
.debug("processing task reload terminating and relaunching")
346 elif task
[0] == 'edit-iface':
347 self
.logger
.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
348 task
[1], task
[2], task
[3]))
349 self
.edit_iface(task
[1], task
[2], task
[3])
350 elif task
[0] == 'restore-iface':
351 self
.logger
.debug("processing task restore-iface={} mac={}".format(task
[1], task
[2]))
352 self
.restore_iface(task
[1], task
[2])
353 elif task
[0] == 'new-ovsbridge':
354 self
.logger
.debug("Creating compute OVS bridge")
355 self
.create_ovs_bridge()
356 elif task
[0] == 'new-vxlan':
357 self
.logger
.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task
[1], task
[2]))
358 self
.create_ovs_vxlan_tunnel(task
[1], task
[2])
359 elif task
[0] == 'del-ovsbridge':
360 self
.logger
.debug("Deleting OVS bridge")
361 self
.delete_ovs_bridge()
362 elif task
[0] == 'del-vxlan':
363 self
.logger
.debug("Deleting vxlan {} tunnel".format(task
[1]))
364 self
.delete_ovs_vxlan_tunnel(task
[1])
365 elif task
[0] == 'create-ovs-bridge-port':
366 self
.logger
.debug("Adding port ovim-{} to OVS bridge".format(task
[1]))
367 self
.create_ovs_bridge_port(task
[1])
368 elif task
[0] == 'del-ovs-port':
369 self
.logger
.debug("Delete bridge attached to ovs port vlan {} net {}".format(task
[1], task
[2]))
370 self
.delete_bridge_port_attached_to_ovs(task
[1], task
[2])
372 self
.logger
.debug("unknown task " + str(task
))
374 except Exception as e
:
375 self
.logger
.critical("Unexpected exception at run: " + str(e
), exc_info
=True)
377 def server_forceoff(self
, wait_until_finished
=False):
378 while len(self
.pending_terminate_server
)>0:
380 if self
.pending_terminate_server
[0][0]>now
:
381 if wait_until_finished
:
386 req
={'uuid':self
.pending_terminate_server
[0][1],
387 'action':{'terminate':'force'},
390 self
.action_on_server(req
)
391 self
.pending_terminate_server
.pop(0)
395 self
.server_forceoff(True)
396 if self
.localinfo_dirty
:
397 self
.save_localinfo()
399 self
.ssh_conn
.close()
400 except Exception as e
:
402 self
.logger
.error("terminate Exception: " + text
)
403 self
.logger
.debug("exit from host_thread")
405 def get_local_iface_name(self
, generic_name
):
406 if self
.hostinfo
!= None and "iface_names" in self
.hostinfo
and generic_name
in self
.hostinfo
["iface_names"]:
407 return self
.hostinfo
["iface_names"][generic_name
]
410 def create_xml_server(self
, server
, dev_list
, server_metadata
={}):
411 """Function that implements the generation of the VM XML definition.
412 Additional devices are in dev_list list
413 The main disk is upon dev_list[0]"""
415 #get if operating system is Windows
417 os_type
= server_metadata
.get('os_type', None)
418 if os_type
== None and 'metadata' in dev_list
[0]:
419 os_type
= dev_list
[0]['metadata'].get('os_type', None)
420 if os_type
!= None and os_type
.lower() == "windows":
422 #get type of hard disk bus
423 bus_ide
= True if windows_os
else False
424 bus
= server_metadata
.get('bus', None)
425 if bus
== None and 'metadata' in dev_list
[0]:
426 bus
= dev_list
[0]['metadata'].get('bus', None)
428 bus_ide
= True if bus
=='ide' else False
432 text
= "<domain type='kvm'>"
434 topo
= server_metadata
.get('topology', None)
435 if topo
== None and 'metadata' in dev_list
[0]:
436 topo
= dev_list
[0]['metadata'].get('topology', None)
438 name
= server
.get('name','') + "_" + server
['uuid']
439 name
= name
[:58] #qemu impose a length limit of 59 chars or not start. Using 58
440 text
+= self
.inc_tab() + "<name>" + name
+ "</name>"
442 text
+= self
.tab() + "<uuid>" + server
['uuid'] + "</uuid>"
445 if 'extended' in server
and server
['extended']!=None and 'numas' in server
['extended']:
446 numa
= server
['extended']['numas'][0]
449 memory
= int(numa
.get('memory',0))*1024*1024 #in KiB
451 memory
= int(server
['ram'])*1024;
453 if not self
.develop_mode
:
456 return -1, 'No memory assigned to instance'
458 text
+= self
.tab() + "<memory unit='KiB'>" +memory
+"</memory>"
459 text
+= self
.tab() + "<currentMemory unit='KiB'>" +memory
+ "</currentMemory>"
461 text
+= self
.tab()+'<memoryBacking>'+ \
462 self
.inc_tab() + '<hugepages/>'+ \
463 self
.dec_tab()+ '</memoryBacking>'
466 use_cpu_pinning
=False
467 vcpus
= int(server
.get("vcpus",0))
469 if 'cores-source' in numa
:
471 for index
in range(0, len(numa
['cores-source'])):
472 cpu_pinning
.append( [ numa
['cores-id'][index
], numa
['cores-source'][index
] ] )
474 if 'threads-source' in numa
:
476 for index
in range(0, len(numa
['threads-source'])):
477 cpu_pinning
.append( [ numa
['threads-id'][index
], numa
['threads-source'][index
] ] )
479 if 'paired-threads-source' in numa
:
481 for index
in range(0, len(numa
['paired-threads-source'])):
482 cpu_pinning
.append( [numa
['paired-threads-id'][index
][0], numa
['paired-threads-source'][index
][0] ] )
483 cpu_pinning
.append( [numa
['paired-threads-id'][index
][1], numa
['paired-threads-source'][index
][1] ] )
486 if use_cpu_pinning
and not self
.develop_mode
:
487 text
+= self
.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning
)) +"</vcpu>" + \
488 self
.tab()+'<cputune>'
490 for i
in range(0, len(cpu_pinning
)):
491 text
+= self
.tab() + "<vcpupin vcpu='" +str(cpu_pinning
[i
][0])+ "' cpuset='" +str(cpu_pinning
[i
][1]) +"'/>"
492 text
+= self
.dec_tab()+'</cputune>'+ \
493 self
.tab() + '<numatune>' +\
494 self
.inc_tab() + "<memory mode='strict' nodeset='" +str(numa
['source'])+ "'/>" +\
495 self
.dec_tab() + '</numatune>'
498 return -1, "Instance without number of cpus"
499 text
+= self
.tab()+"<vcpu>" + str(vcpus
) + "</vcpu>"
504 if dev
['type']=='cdrom' :
507 text
+= self
.tab()+ '<os>' + \
508 self
.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
510 text
+= self
.tab() + "<boot dev='cdrom'/>"
511 text
+= self
.tab() + "<boot dev='hd'/>" + \
512 self
.dec_tab()+'</os>'
514 text
+= self
.tab()+'<features>'+\
515 self
.inc_tab()+'<acpi/>' +\
516 self
.tab()+'<apic/>' +\
517 self
.tab()+'<pae/>'+ \
518 self
.dec_tab() +'</features>'
519 if topo
== "oneSocket:hyperthreading":
521 return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
522 text
+= self
.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % vcpus
/2
523 elif windows_os
or topo
== "oneSocket":
524 text
+= self
.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
526 text
+= self
.tab() + "<cpu mode='host-model'></cpu>"
527 text
+= self
.tab() + "<clock offset='utc'/>" +\
528 self
.tab() + "<on_poweroff>preserve</on_poweroff>" + \
529 self
.tab() + "<on_reboot>restart</on_reboot>" + \
530 self
.tab() + "<on_crash>restart</on_crash>"
531 text
+= self
.tab() + "<devices>" + \
532 self
.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
533 self
.tab() + "<serial type='pty'>" +\
534 self
.inc_tab() + "<target port='0'/>" + \
535 self
.dec_tab() + "</serial>" +\
536 self
.tab() + "<console type='pty'>" + \
537 self
.inc_tab()+ "<target type='serial' port='0'/>" + \
538 self
.dec_tab()+'</console>'
540 text
+= self
.tab() + "<controller type='usb' index='0'/>" + \
541 self
.tab() + "<controller type='ide' index='0'/>" + \
542 self
.tab() + "<input type='mouse' bus='ps2'/>" + \
543 self
.tab() + "<sound model='ich6'/>" + \
544 self
.tab() + "<video>" + \
545 self
.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
546 self
.dec_tab() + "</video>" + \
547 self
.tab() + "<memballoon model='virtio'/>" + \
548 self
.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
550 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
551 #> self.dec_tab()+'</hostdev>\n' +\
552 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
554 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
556 #If image contains 'GRAPH' include graphics
557 #if 'GRAPH' in image:
558 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
559 self
.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
560 self
.dec_tab() + "</graphics>"
564 bus_ide_dev
= bus_ide
565 if dev
['type']=='cdrom' or dev
['type']=='disk':
566 if dev
['type']=='cdrom':
568 text
+= self
.tab() + "<disk type='file' device='"+dev
['type']+"'>"
569 if 'file format' in dev
:
570 text
+= self
.inc_tab() + "<driver name='qemu' type='" +dev
['file format']+ "' cache='writethrough'/>"
571 if 'source file' in dev
:
572 text
+= self
.tab() + "<source file='" +dev
['source file']+ "'/>"
573 #elif v['type'] == 'block':
574 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
576 # return -1, 'Unknown disk type ' + v['type']
577 vpci
= dev
.get('vpci',None)
579 vpci
= dev
['metadata'].get('vpci',None)
580 text
+= self
.pci2xml(vpci
)
583 text
+= self
.tab() + "<target dev='hd" +vd_index
+ "' bus='ide'/>" #TODO allows several type of disks
585 text
+= self
.tab() + "<target dev='vd" +vd_index
+ "' bus='virtio'/>"
586 text
+= self
.dec_tab() + '</disk>'
587 vd_index
= chr(ord(vd_index
)+1)
588 elif dev
['type']=='xml':
589 dev_text
= dev
['xml']
591 dev_text
= dev_text
.replace('__vpci__', dev
['vpci'])
592 if 'source file' in dev
:
593 dev_text
= dev_text
.replace('__file__', dev
['source file'])
594 if 'file format' in dev
:
595 dev_text
= dev_text
.replace('__format__', dev
['source file'])
596 if '__dev__' in dev_text
:
597 dev_text
= dev_text
.replace('__dev__', vd_index
)
598 vd_index
= chr(ord(vd_index
)+1)
601 return -1, 'Unknown device type ' + dev
['type']
604 bridge_interfaces
= server
.get('networks', [])
605 for v
in bridge_interfaces
:
607 self
.db_lock
.acquire()
608 result
, content
= self
.db
.get_table(FROM
='nets', SELECT
=('provider',),WHERE
={'uuid':v
['net_id']} )
609 self
.db_lock
.release()
611 self
.logger
.error("create_xml_server ERROR %d getting nets %s", result
, content
)
613 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
614 #I know it is not secure
615 #for v in sorted(desc['network interfaces'].itervalues()):
616 model
= v
.get("model", None)
617 if content
[0]['provider']=='default':
618 text
+= self
.tab() + "<interface type='network'>" + \
619 self
.inc_tab() + "<source network='" +content
[0]['provider']+ "'/>"
620 elif content
[0]['provider'][0:7]=='macvtap':
621 text
+= self
.tab()+"<interface type='direct'>" + \
622 self
.inc_tab() + "<source dev='" + self
.get_local_iface_name(content
[0]['provider'][8:]) + "' mode='bridge'/>" + \
623 self
.tab() + "<target dev='macvtap0'/>"
625 text
+= self
.tab() + "<alias name='net" + str(net_nb
) + "'/>"
628 elif content
[0]['provider'][0:6]=='bridge':
629 text
+= self
.tab() + "<interface type='bridge'>" + \
630 self
.inc_tab()+"<source bridge='" +self
.get_local_iface_name(content
[0]['provider'][7:])+ "'/>"
632 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
633 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
636 elif content
[0]['provider'][0:3] == "OVS":
637 vlan
= content
[0]['provider'].replace('OVS:', '')
638 text
+= self
.tab() + "<interface type='bridge'>" + \
639 self
.inc_tab() + "<source bridge='ovim-" + str(vlan
) + "'/>"
641 return -1, 'Unknown Bridge net provider ' + content
[0]['provider']
643 text
+= self
.tab() + "<model type='" +model
+ "'/>"
644 if v
.get('mac_address', None) != None:
645 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
646 text
+= self
.pci2xml(v
.get('vpci',None))
647 text
+= self
.dec_tab()+'</interface>'
651 interfaces
= numa
.get('interfaces', [])
655 if self
.develop_mode
: #map these interfaces to bridges
656 text
+= self
.tab() + "<interface type='bridge'>" + \
657 self
.inc_tab()+"<source bridge='" +self
.develop_bridge_iface
+ "'/>"
659 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
660 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
662 text
+= self
.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
663 if v
.get('mac_address', None) != None:
664 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
665 text
+= self
.pci2xml(v
.get('vpci',None))
666 text
+= self
.dec_tab()+'</interface>'
669 if v
['dedicated'] == 'yes': #passthrought
670 text
+= self
.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
671 self
.inc_tab() + "<source>"
673 text
+= self
.pci2xml(v
['source'])
674 text
+= self
.dec_tab()+'</source>'
675 text
+= self
.pci2xml(v
.get('vpci',None))
677 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
678 text
+= self
.dec_tab()+'</hostdev>'
680 else: #sriov_interfaces
681 #skip not connected interfaces
682 if v
.get("net_id") == None:
684 text
+= self
.tab() + "<interface type='hostdev' managed='yes'>"
686 if v
.get('mac_address', None) != None:
687 text
+= self
.tab() + "<mac address='" +v
['mac_address']+ "'/>"
688 text
+= self
.tab()+'<source>'
690 text
+= self
.pci2xml(v
['source'])
691 text
+= self
.dec_tab()+'</source>'
692 if v
.get('vlan',None) != None:
693 text
+= self
.tab() + "<vlan> <tag id='" + str(v
['vlan']) + "'/> </vlan>"
694 text
+= self
.pci2xml(v
.get('vpci',None))
696 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
697 text
+= self
.dec_tab()+'</interface>'
700 text
+= self
.dec_tab()+'</devices>'+\
701 self
.dec_tab()+'</domain>'
704 def pci2xml(self
, pci
):
705 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
706 alows an empty pci text'''
709 first_part
= pci
.split(':')
710 second_part
= first_part
[2].split('.')
711 return self
.tab() + "<address type='pci' domain='0x" + first_part
[0] + \
712 "' bus='0x" + first_part
[1] + "' slot='0x" + second_part
[0] + \
713 "' function='0x" + second_part
[1] + "'/>"
716 """Return indentation according to xml_level"""
717 return "\n" + (' '*self
.xml_level
)
720 """Increment and return indentation according to xml_level"""
725 """Decrement and return indentation according to xml_level"""
729 def create_ovs_bridge(self
):
731 Create a bridge in compute OVS to allocate VMs
732 :return: True if success
737 command
= 'sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true'
738 self
.logger
.debug("command: " + command
)
739 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
740 content
= stdout
.read()
741 if len(content
) == 0:
745 except paramiko
.ssh_exception
.SSHException
as e
:
746 self
.logger
.error("create_ovs_bridge ssh Exception: " + str(e
))
747 if "SSH session not active" in str(e
):
751 def delete_port_to_ovs_bridge(self
, vlan
, net_uuid
):
753 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
754 :param vlan: vlan port id
755 :param net_uuid: network id
762 port_name
= 'ovim-' + str(vlan
)
763 command
= 'sudo ovs-vsctl del-port br-int ' + port_name
764 self
.logger
.debug("command: " + command
)
765 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
766 content
= stdout
.read()
767 if len(content
) == 0:
771 except paramiko
.ssh_exception
.SSHException
as e
:
772 self
.logger
.error("delete_port_to_ovs_bridge ssh Exception: " + str(e
))
773 if "SSH session not active" in str(e
):
777 def delete_dhcp_server(self
, vlan
, net_uuid
, dhcp_path
):
779 Delete dhcp server process lining in namespace
780 :param vlan: segmentation id
781 :param net_uuid: network uuid
782 :param dhcp_path: conf fiel path that live in namespace side
787 if not self
.is_dhcp_port_free(vlan
, net_uuid
):
790 net_namespace
= 'ovim-' + str(vlan
)
791 dhcp_path
= os
.path
.join(dhcp_path
, net_namespace
)
792 pid_file
= os
.path
.join(dhcp_path
, 'dnsmasq.pid')
794 command
= 'sudo ip netns exec ' + net_namespace
+ ' cat ' + pid_file
795 self
.logger
.debug("command: " + command
)
796 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
797 content
= stdout
.read()
799 command
= 'sudo ip netns exec ' + net_namespace
+ ' kill -9 ' + content
800 self
.logger
.debug("command: " + command
)
801 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
802 content
= stdout
.read()
804 # if len(content) == 0:
808 except paramiko
.ssh_exception
.SSHException
as e
:
809 self
.logger
.error("delete_dhcp_server ssh Exception: " + str(e
))
810 if "SSH session not active" in str(e
):
814 def is_dhcp_port_free(self
, host_id
, net_uuid
):
816 Check if any port attached to the a net in a vxlan mesh across computes nodes
817 :param host_id: host id
818 :param net_uuid: network id
819 :return: True if is not free
821 self
.db_lock
.acquire()
822 result
, content
= self
.db
.get_table(
824 WHERE
={'type': 'instance:ovs', 'net_id': net_uuid
}
826 self
.db_lock
.release()
833 def is_port_free(self
, host_id
, net_uuid
):
835 Check if there not ovs ports of a network in a compute host.
836 :param host_id: host id
837 :param net_uuid: network id
838 :return: True if is not free
841 self
.db_lock
.acquire()
842 result
, content
= self
.db
.get_table(
843 FROM
='ports as p join instances as i on p.instance_id=i.uuid',
844 WHERE
={"i.host_id": self
.host_id
, 'p.type': 'instance:ovs', 'p.net_id': net_uuid
}
846 self
.db_lock
.release()
853 def add_port_to_ovs_bridge(self
, vlan
):
855 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
856 :param vlan: vlan port id
857 :return: True if success
863 port_name
= 'ovim-' + str(vlan
)
864 command
= 'sudo ovs-vsctl add-port br-int ' + port_name
+ ' tag=' + str(vlan
)
865 self
.logger
.debug("command: " + command
)
866 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
867 content
= stdout
.read()
868 if len(content
) == 0:
872 except paramiko
.ssh_exception
.SSHException
as e
:
873 self
.logger
.error("add_port_to_ovs_bridge ssh Exception: " + str(e
))
874 if "SSH session not active" in str(e
):
878 def delete_dhcp_port(self
, vlan
, net_uuid
):
880 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
881 :param vlan: segmentation id
882 :param net_uuid: network id
883 :return: True if success
889 if not self
.is_dhcp_port_free(vlan
, net_uuid
):
891 self
.delete_dhcp_interfaces(vlan
)
894 def delete_bridge_port_attached_to_ovs(self
, vlan
, net_uuid
):
896 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
899 :return: True if success
904 if not self
.is_port_free(vlan
, net_uuid
):
906 self
.delete_port_to_ovs_bridge(vlan
, net_uuid
)
907 self
.delete_linux_bridge(vlan
)
910 def delete_linux_bridge(self
, vlan
):
912 Delete a linux bridge in a scpecific compute.
913 :param vlan: vlan port id
914 :return: True if success
920 port_name
= 'ovim-' + str(vlan
)
921 command
= 'sudo ip link set dev veth0-' + str(vlan
) + ' down'
922 self
.logger
.debug("command: " + command
)
923 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
924 # content = stdout.read()
926 # if len(content) != 0:
928 command
= 'sudo ifconfig ' + port_name
+ ' down && sudo brctl delbr ' + port_name
929 self
.logger
.debug("command: " + command
)
930 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
931 content
= stdout
.read()
932 if len(content
) == 0:
936 except paramiko
.ssh_exception
.SSHException
as e
:
937 self
.logger
.error("delete_linux_bridge ssh Exception: " + str(e
))
938 if "SSH session not active" in str(e
):
942 def create_ovs_bridge_port(self
, vlan
):
944 Generate a linux bridge and attache the port to a OVS bridge
945 :param vlan: vlan port id
950 self
.create_linux_bridge(vlan
)
951 self
.add_port_to_ovs_bridge(vlan
)
953 def create_linux_bridge(self
, vlan
):
955 Create a linux bridge with STP active
956 :param vlan: netowrk vlan id
963 port_name
= 'ovim-' + str(vlan
)
964 command
= 'sudo brctl show | grep ' + port_name
965 self
.logger
.debug("command: " + command
)
966 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
967 content
= stdout
.read()
969 # if exist nothing to create
970 # if len(content) == 0:
973 command
= 'sudo brctl addbr ' + port_name
974 self
.logger
.debug("command: " + command
)
975 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
976 content
= stdout
.read()
978 # if len(content) == 0:
983 command
= 'sudo brctl stp ' + port_name
+ ' on'
984 self
.logger
.debug("command: " + command
)
985 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
986 content
= stdout
.read()
988 # if len(content) == 0:
992 command
= 'sudo ip link set dev ' + port_name
+ ' up'
993 self
.logger
.debug("command: " + command
)
994 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
995 content
= stdout
.read()
997 if len(content
) == 0:
1001 except paramiko
.ssh_exception
.SSHException
as e
:
1002 self
.logger
.error("create_linux_bridge ssh Exception: " + str(e
))
1003 if "SSH session not active" in str(e
):
1007 def set_mac_dhcp_server(self
, ip
, mac
, vlan
, netmask
, dhcp_path
):
1009 Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
1010 :param ip: IP address asigned to a VM
1011 :param mac: VM vnic mac to be macthed with the IP received
1012 :param vlan: Segmentation id
1013 :param netmask: netmask value
1014 :param path: dhcp conf file path that live in namespace side
1015 :return: True if success
1021 net_namespace
= 'ovim-' + str(vlan
)
1022 dhcp_path
= os
.path
.join(dhcp_path
, net_namespace
)
1023 dhcp_hostsdir
= os
.path
.join(dhcp_path
, net_namespace
)
1028 ip_data
= mac
.upper() + ',' + ip
1030 command
= 'sudo ip netns exec ' + net_namespace
+ ' touch ' + dhcp_hostsdir
1031 self
.logger
.debug("command: " + command
)
1032 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1033 content
= stdout
.read()
1035 command
= 'sudo ip netns exec ' + net_namespace
+ ' sudo bash -ec "echo ' + ip_data
+ ' >> ' + dhcp_hostsdir
+ '"'
1037 self
.logger
.debug("command: " + command
)
1038 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1039 content
= stdout
.read()
1041 if len(content
) == 0:
1045 except paramiko
.ssh_exception
.SSHException
as e
:
1046 self
.logger
.error("set_mac_dhcp_server ssh Exception: " + str(e
))
1047 if "SSH session not active" in str(e
):
1051 def delete_mac_dhcp_server(self
, ip
, mac
, vlan
, dhcp_path
):
1053 Delete into dhcp conf file the ip assigned to a specific MAC address
1055 :param ip: IP address asigned to a VM
1056 :param mac: VM vnic mac to be macthed with the IP received
1057 :param vlan: Segmentation id
1058 :param dhcp_path: dhcp conf file path that live in namespace side
1065 net_namespace
= 'ovim-' + str(vlan
)
1066 dhcp_path
= os
.path
.join(dhcp_path
, net_namespace
)
1067 dhcp_hostsdir
= os
.path
.join(dhcp_path
, net_namespace
)
1072 ip_data
= mac
.upper() + ',' + ip
1074 command
= 'sudo ip netns exec ' + net_namespace
+ ' sudo sed -i \'/' + ip_data
+ '/d\' ' + dhcp_hostsdir
1075 self
.logger
.debug("command: " + command
)
1076 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1077 content
= stdout
.read()
1079 if len(content
) == 0:
1084 except paramiko
.ssh_exception
.SSHException
as e
:
1085 self
.logger
.error("set_mac_dhcp_server ssh Exception: " + str(e
))
1086 if "SSH session not active" in str(e
):
1090 def launch_dhcp_server(self
, vlan
, ip_range
, netmask
, dhcp_path
, gateway
):
1092 Generate a linux bridge and attache the port to a OVS bridge
1094 :param vlan: Segmentation id
1095 :param ip_range: IP dhcp range
1096 :param netmask: network netmask
1097 :param dhcp_path: dhcp conf file path that live in namespace side
1098 :param gateway: Gateway address for dhcp net
1099 :return: True if success
1105 interface
= 'tap-' + str(vlan
)
1106 net_namespace
= 'ovim-' + str(vlan
)
1107 dhcp_path
= os
.path
.join(dhcp_path
, net_namespace
)
1108 leases_path
= os
.path
.join(dhcp_path
, "dnsmasq.leases")
1109 pid_file
= os
.path
.join(dhcp_path
, 'dnsmasq.pid')
1111 dhcp_range
= ip_range
[0] + ',' + ip_range
[1] + ',' + netmask
1113 command
= 'sudo ip netns exec ' + net_namespace
+ ' mkdir -p ' + dhcp_path
1114 self
.logger
.debug("command: " + command
)
1115 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1116 content
= stdout
.read()
1118 pid_path
= os
.path
.join(dhcp_path
, 'dnsmasq.pid')
1119 command
= 'sudo ip netns exec ' + net_namespace
+ ' cat ' + pid_path
1120 self
.logger
.debug("command: " + command
)
1121 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1122 content
= stdout
.read()
1123 # check if pid is runing
1124 pid_status_path
= content
1126 command
= "ps aux | awk '{print $2 }' | grep " + pid_status_path
1127 self
.logger
.debug("command: " + command
)
1128 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1129 content
= stdout
.read()
1131 command
= 'sudo ip netns exec ' + net_namespace
+ ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
1132 '--interface=' + interface
+ ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path
+ \
1133 ' --dhcp-range ' + dhcp_range
+ ' --pid-file=' + pid_file
+ ' --dhcp-leasefile=' + leases_path
+ \
1134 ' --listen-address ' + gateway
1136 self
.logger
.debug("command: " + command
)
1137 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1138 content
= stdout
.readline()
1140 if len(content
) == 0:
1144 except paramiko
.ssh_exception
.SSHException
as e
:
1145 self
.logger
.error("launch_dhcp_server ssh Exception: " + str(e
))
1146 if "SSH session not active" in str(e
):
1150 def delete_dhcp_interfaces(self
, vlan
):
1152 Create a linux bridge with STP active
1153 :param vlan: netowrk vlan id
1160 net_namespace
= 'ovim-' + str(vlan
)
1161 command
= 'sudo ovs-vsctl del-port br-int ovs-tap-' + str(vlan
)
1162 self
.logger
.debug("command: " + command
)
1163 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1164 content
= stdout
.read()
1166 command
= 'sudo ip netns exec ' + net_namespace
+ ' ip link set dev tap-' + str(vlan
) + ' down'
1167 self
.logger
.debug("command: " + command
)
1168 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1169 content
= stdout
.read()
1171 command
= 'sudo ip link set dev ovs-tap-' + str(vlan
) + ' down'
1172 self
.logger
.debug("command: " + command
)
1173 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1174 content
= stdout
.read()
1175 except paramiko
.ssh_exception
.SSHException
as e
:
1176 self
.logger
.error("delete_dhcp_interfaces ssh Exception: " + str(e
))
1177 if "SSH session not active" in str(e
):
1181 def create_dhcp_interfaces(self
, vlan
, ip_listen_address
, netmask
):
1183 Create a linux bridge with STP active
1184 :param vlan: segmentation id
1185 :param ip_listen_address: Listen Ip address for the dhcp service, the tap interface living in namesapce side
1186 :param netmask: dhcp net CIDR
1187 :return: True if success
1193 net_namespace
= 'ovim-' + str(vlan
)
1194 namespace_interface
= 'tap-' + str(vlan
)
1196 command
= 'sudo ip netns add ' + net_namespace
1197 self
.logger
.debug("command: " + command
)
1198 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1199 content
= stdout
.read()
1201 command
= 'sudo ip link add tap-' + str(vlan
) + ' type veth peer name ovs-tap-' + str(vlan
)
1202 self
.logger
.debug("command: " + command
)
1203 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1204 content
= stdout
.read()
1206 command
= 'sudo ovs-vsctl add-port br-int ovs-tap-' + str(vlan
) + ' tag=' + str(vlan
)
1207 self
.logger
.debug("command: " + command
)
1208 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1209 content
= stdout
.read()
1211 command
= 'sudo ip link set tap-' + str(vlan
) + ' netns ' + net_namespace
1212 self
.logger
.debug("command: " + command
)
1213 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1214 content
= stdout
.read()
1216 command
= 'sudo ip netns exec ' + net_namespace
+ ' ip link set dev tap-' + str(vlan
) + ' up'
1217 self
.logger
.debug("command: " + command
)
1218 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1219 content
= stdout
.read()
1221 command
= 'sudo ip link set dev ovs-tap-' + str(vlan
) + ' up'
1222 self
.logger
.debug("command: " + command
)
1223 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1224 content
= stdout
.read()
1226 command
= 'sudo ip netns exec ' + net_namespace
+ ' ip link set dev lo up'
1227 self
.logger
.debug("command: " + command
)
1228 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1229 content
= stdout
.read()
1231 command
= 'sudo ip netns exec ' + net_namespace
+ ' ' + ' ifconfig ' + namespace_interface \
1232 + ' ' + ip_listen_address
+ ' netmask ' + netmask
1233 self
.logger
.debug("command: " + command
)
1234 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1235 content
= stdout
.read()
1237 if len(content
) == 0:
1241 except paramiko
.ssh_exception
.SSHException
as e
:
1242 self
.logger
.error("create_dhcp_interfaces ssh Exception: " + str(e
))
1243 if "SSH session not active" in str(e
):
1248 def create_ovs_vxlan_tunnel(self
, vxlan_interface
, remote_ip
):
1250 Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
1251 :param vxlan_interface: vlxan inteface name.
1252 :param remote_ip: tunnel endpoint remote compute ip.
1258 command
= 'sudo ovs-vsctl add-port br-int ' + vxlan_interface
+ \
1259 ' -- set Interface ' + vxlan_interface
+ ' type=vxlan options:remote_ip=' + remote_ip
+ \
1260 ' -- set Port ' + vxlan_interface
+ ' other_config:stp-path-cost=10'
1261 self
.logger
.debug("command: " + command
)
1262 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1263 content
= stdout
.read()
1265 if len(content
) == 0:
1269 except paramiko
.ssh_exception
.SSHException
as e
:
1270 self
.logger
.error("create_ovs_vxlan_tunnel ssh Exception: " + str(e
))
1271 if "SSH session not active" in str(e
):
1275 def delete_ovs_vxlan_tunnel(self
, vxlan_interface
):
1277 Delete a vlxan tunnel port from a OVS brdige.
1278 :param vxlan_interface: vlxan name to be delete it.
1279 :return: True if success.
1284 command
= 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
1285 self
.logger
.debug("command: " + command
)
1286 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1287 content
= stdout
.read()
1289 if len(content
) == 0:
1293 except paramiko
.ssh_exception
.SSHException
as e
:
1294 self
.logger
.error("delete_ovs_vxlan_tunnel ssh Exception: " + str(e
))
1295 if "SSH session not active" in str(e
):
1299 def delete_ovs_bridge(self
):
1301 Delete a OVS bridge from a compute.
1302 :return: True if success
1307 command
= 'sudo ovs-vsctl del-br br-int'
1308 self
.logger
.debug("command: " + command
)
1309 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1310 content
= stdout
.read()
1311 if len(content
) == 0:
1315 except paramiko
.ssh_exception
.SSHException
as e
:
1316 self
.logger
.error("delete_ovs_bridge ssh Exception: " + str(e
))
1317 if "SSH session not active" in str(e
):
1321 def get_file_info(self
, path
):
1322 command
= 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1323 self
.logger
.debug("command: " + command
)
1324 (_
, stdout
, _
) = self
.ssh_conn
.exec_command(command
)
1325 content
= stdout
.read()
1326 if len(content
) == 0:
1327 return None # file does not exist
1329 return content
.split(" ") # (permission, 1, owner, group, size, date, file)
1331 def qemu_get_info(self
, path
):
1332 command
= 'qemu-img info ' + path
1333 self
.logger
.debug("command: " + command
)
1334 (_
, stdout
, stderr
) = self
.ssh_conn
.exec_command(command
)
1335 content
= stdout
.read()
1336 if len(content
) == 0:
1337 error
= stderr
.read()
1338 self
.logger
.error("get_qemu_info error " + error
)
1339 raise paramiko
.ssh_exception
.SSHException("Error getting qemu_info: " + error
)
1342 return yaml
.load(content
)
1343 except yaml
.YAMLError
as exc
:
1345 if hasattr(exc
, 'problem_mark'):
1346 mark
= exc
.problem_mark
1347 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
1348 self
.logger
.error("get_qemu_info yaml format Exception " + text
)
1349 raise paramiko
.ssh_exception
.SSHException("Error getting qemu_info yaml format" + text
)
1351 def qemu_change_backing(self
, inc_file
, new_backing_file
):
1352 command
= 'qemu-img rebase -u -b ' + new_backing_file
+ ' ' + inc_file
1353 self
.logger
.debug("command: " + command
)
1354 (_
, _
, stderr
) = self
.ssh_conn
.exec_command(command
)
1355 content
= stderr
.read()
1356 if len(content
) == 0:
1359 self
.logger
.error("qemu_change_backing error: " + content
)
1362 def get_notused_filename(self
, proposed_name
, suffix
=''):
1363 '''Look for a non existing file_name in the host
1364 proposed_name: proposed file name, includes path
1365 suffix: suffix to be added to the name, before the extention
1367 extension
= proposed_name
.rfind(".")
1368 slash
= proposed_name
.rfind("/")
1369 if extension
< 0 or extension
< slash
: # no extension
1370 extension
= len(proposed_name
)
1371 target_name
= proposed_name
[:extension
] + suffix
+ proposed_name
[extension
:]
1372 info
= self
.get_file_info(target_name
)
1377 while info
is not None:
1378 target_name
= proposed_name
[:extension
] + suffix
+ "-" + str(index
) + proposed_name
[extension
:]
1380 info
= self
.get_file_info(target_name
)
1383 def get_notused_path(self
, proposed_path
, suffix
=''):
1384 '''Look for a non existing path at database for images
1385 proposed_path: proposed file name, includes path
1386 suffix: suffix to be added to the name, before the extention
1388 extension
= proposed_path
.rfind(".")
1390 extension
= len(proposed_path
)
1392 target_path
= proposed_path
[:extension
] + suffix
+ proposed_path
[extension
:]
1395 r
,_
=self
.db
.get_table(FROM
="images",WHERE
={"path":target_path
})
1398 target_path
= proposed_path
[:extension
] + suffix
+ "-" + str(index
) + proposed_path
[extension
:]
1402 def delete_file(self
, file_name
):
1403 command
= 'rm -f '+file_name
1404 self
.logger
.debug("command: " + command
)
1405 (_
, _
, stderr
) = self
.ssh_conn
.exec_command(command
)
1406 error_msg
= stderr
.read()
1407 if len(error_msg
) > 0:
1408 raise paramiko
.ssh_exception
.SSHException("Error deleting file: " + error_msg
)
1410 def copy_file(self
, source
, destination
, perserve_time
=True):
1411 if source
[0:4]=="http":
1412 command
= "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1413 dst
=destination
, src
=source
, dst_result
=destination
+ ".result" )
1415 command
= 'cp --no-preserve=mode'
1417 command
+= ' --preserve=timestamps'
1418 command
+= " '{}' '{}'".format(source
, destination
)
1419 self
.logger
.debug("command: " + command
)
1420 (_
, _
, stderr
) = self
.ssh_conn
.exec_command(command
)
1421 error_msg
= stderr
.read()
1422 if len(error_msg
) > 0:
1423 raise paramiko
.ssh_exception
.SSHException("Error copying image to local host: " + error_msg
)
1425 def copy_remote_file(self
, remote_file
, use_incremental
):
1426 ''' Copy a file from the repository to local folder and recursively
1427 copy the backing files in case the remote file is incremental
1428 Read and/or modified self.localinfo['files'] that contain the
1429 unmodified copies of images in the local path
1431 remote_file: path of remote file
1432 use_incremental: None (leave the decision to this function), True, False
1434 local_file: name of local file
1435 qemu_info: dict with quemu information of local file
1436 use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
1439 use_incremental_out
= use_incremental
1440 new_backing_file
= None
1442 file_from_local
= True
1444 #in case incremental use is not decided, take the decision depending on the image
1445 #avoid the use of incremental if this image is already incremental
1446 if remote_file
[0:4] == "http":
1447 file_from_local
= False
1449 qemu_remote_info
= self
.qemu_get_info(remote_file
)
1450 if use_incremental_out
==None:
1451 use_incremental_out
= not ( file_from_local
and 'backing file' in qemu_remote_info
)
1452 #copy recursivelly the backing files
1453 if file_from_local
and 'backing file' in qemu_remote_info
:
1454 new_backing_file
, _
, _
= self
.copy_remote_file(qemu_remote_info
['backing file'], True)
1456 #check if remote file is present locally
1457 if use_incremental_out
and remote_file
in self
.localinfo
['files']:
1458 local_file
= self
.localinfo
['files'][remote_file
]
1459 local_file_info
= self
.get_file_info(local_file
)
1461 remote_file_info
= self
.get_file_info(remote_file
)
1462 if local_file_info
== None:
1464 elif file_from_local
and (local_file_info
[4]!=remote_file_info
[4] or local_file_info
[5]!=remote_file_info
[5]):
1465 #local copy of file not valid because date or size are different.
1466 #TODO DELETE local file if this file is not used by any active virtual machine
1468 self
.delete_file(local_file
)
1469 del self
.localinfo
['files'][remote_file
]
1473 else: #check that the local file has the same backing file, or there are not backing at all
1474 qemu_info
= self
.qemu_get_info(local_file
)
1475 if new_backing_file
!= qemu_info
.get('backing file'):
1479 if local_file
== None: #copy the file
1480 img_name
= remote_file
.split('/') [-1]
1481 img_local
= self
.image_path
+ '/' + img_name
1482 local_file
= self
.get_notused_filename(img_local
)
1483 self
.copy_file(remote_file
, local_file
, use_incremental_out
)
1485 if use_incremental_out
:
1486 self
.localinfo
['files'][remote_file
] = local_file
1487 if new_backing_file
:
1488 self
.qemu_change_backing(local_file
, new_backing_file
)
1489 qemu_info
= self
.qemu_get_info(local_file
)
1491 return local_file
, qemu_info
, use_incremental_out
1493 def launch_server(self
, conn
, server
, rebuild
=False, domain
=None):
1495 time
.sleep(random
.randint(20,150)) #sleep random timeto be make it a bit more real
1498 server_id
= server
['uuid']
1499 paused
= server
.get('paused','no')
1501 if domain
!=None and rebuild
==False:
1503 #self.server_status[server_id] = 'ACTIVE'
1506 self
.db_lock
.acquire()
1507 result
, server_data
= self
.db
.get_instance(server_id
)
1508 self
.db_lock
.release()
1510 self
.logger
.error("launch_server ERROR getting server from DB %d %s", result
, server_data
)
1511 return result
, server_data
1513 #0: get image metadata
1514 server_metadata
= server
.get('metadata', {})
1515 use_incremental
= None
1517 if "use_incremental" in server_metadata
:
1518 use_incremental
= False if server_metadata
["use_incremental"] == "no" else True
1520 server_host_files
= self
.localinfo
['server_files'].get( server
['uuid'], {})
1522 #delete previous incremental files
1523 for file_
in server_host_files
.values():
1524 self
.delete_file(file_
['source file'] )
1525 server_host_files
={}
1527 #1: obtain aditional devices (disks)
1528 #Put as first device the main disk
1529 devices
= [ {"type":"disk", "image_id":server
['image_id'], "vpci":server_metadata
.get('vpci', None) } ]
1530 if 'extended' in server_data
and server_data
['extended']!=None and "devices" in server_data
['extended']:
1531 devices
+= server_data
['extended']['devices']
1534 if dev
['image_id'] == None:
1537 self
.db_lock
.acquire()
1538 result
, content
= self
.db
.get_table(FROM
='images', SELECT
=('path', 'metadata'),
1539 WHERE
={'uuid': dev
['image_id']})
1540 self
.db_lock
.release()
1542 error_text
= "ERROR", result
, content
, "when getting image", dev
['image_id']
1543 self
.logger
.error("launch_server " + error_text
)
1544 return -1, error_text
1545 if content
[0]['metadata'] is not None:
1546 dev
['metadata'] = json
.loads(content
[0]['metadata'])
1548 dev
['metadata'] = {}
1550 if dev
['image_id'] in server_host_files
:
1551 dev
['source file'] = server_host_files
[ dev
['image_id'] ] ['source file'] #local path
1552 dev
['file format'] = server_host_files
[ dev
['image_id'] ] ['file format'] # raw or qcow2
1555 #2: copy image to host
1556 remote_file
= content
[0]['path']
1557 use_incremental_image
= use_incremental
1558 if dev
['metadata'].get("use_incremental") == "no":
1559 use_incremental_image
= False
1560 local_file
, qemu_info
, use_incremental_image
= self
.copy_remote_file(remote_file
, use_incremental_image
)
1562 #create incremental image
1563 if use_incremental_image
:
1564 local_file_inc
= self
.get_notused_filename(local_file
, '.inc')
1565 command
= 'qemu-img create -f qcow2 '+local_file_inc
+ ' -o backing_file='+ local_file
1566 self
.logger
.debug("command: " + command
)
1567 (_
, _
, stderr
) = self
.ssh_conn
.exec_command(command
)
1568 error_msg
= stderr
.read()
1569 if len(error_msg
) > 0:
1570 raise paramiko
.ssh_exception
.SSHException("Error creating incremental file: " + error_msg
)
1571 local_file
= local_file_inc
1572 qemu_info
= {'file format':'qcow2'}
1574 server_host_files
[ dev
['image_id'] ] = {'source file': local_file
, 'file format': qemu_info
['file format']}
1576 dev
['source file'] = local_file
1577 dev
['file format'] = qemu_info
['file format']
1579 self
.localinfo
['server_files'][ server
['uuid'] ] = server_host_files
1580 self
.localinfo_dirty
= True
1583 result
, xml
= self
.create_xml_server(server_data
, devices
, server_metadata
) #local_file
1585 self
.logger
.error("create xml server error: " + xml
)
1587 self
.logger
.debug("create xml: " + xml
)
1588 atribute
= host_thread
.lvirt_module
.VIR_DOMAIN_START_PAUSED
if paused
== "yes" else 0
1590 if not rebuild
: #ensures that any pending destroying server is done
1591 self
.server_forceoff(True)
1592 #self.logger.debug("launching instance " + xml)
1593 conn
.createXML(xml
, atribute
)
1594 #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
1598 except paramiko
.ssh_exception
.SSHException
as e
:
1600 self
.logger
.error("launch_server id='%s' ssh Exception: %s", server_id
, text
)
1601 if "SSH session not active" in text
:
1603 except host_thread
.lvirt_module
.libvirtError
as e
:
1604 text
= e
.get_error_message()
1605 self
.logger
.error("launch_server id='%s' libvirt Exception: %s", server_id
, text
)
1606 except Exception as e
:
1608 self
.logger
.error("launch_server id='%s' Exception: %s", server_id
, text
)
1611 def update_servers_status(self
):
1613 # VIR_DOMAIN_NOSTATE = 0
1614 # VIR_DOMAIN_RUNNING = 1
1615 # VIR_DOMAIN_BLOCKED = 2
1616 # VIR_DOMAIN_PAUSED = 3
1617 # VIR_DOMAIN_SHUTDOWN = 4
1618 # VIR_DOMAIN_SHUTOFF = 5
1619 # VIR_DOMAIN_CRASHED = 6
1620 # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended
1622 if self
.test
or len(self
.server_status
)==0:
1626 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
1627 domains
= conn
.listAllDomains()
1629 for domain
in domains
:
1630 uuid
= domain
.UUIDString() ;
1631 libvirt_status
= domain
.state()
1632 #print libvirt_status
1633 if libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_RUNNING
or libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_SHUTDOWN
:
1634 new_status
= "ACTIVE"
1635 elif libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_PAUSED
:
1636 new_status
= "PAUSED"
1637 elif libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_SHUTOFF
:
1638 new_status
= "INACTIVE"
1639 elif libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_CRASHED
:
1640 new_status
= "ERROR"
1643 domain_dict
[uuid
] = new_status
1645 except host_thread
.lvirt_module
.libvirtError
as e
:
1646 self
.logger
.error("get_state() Exception " + e
.get_error_message())
1649 for server_id
, current_status
in self
.server_status
.iteritems():
1651 if server_id
in domain_dict
:
1652 new_status
= domain_dict
[server_id
]
1654 new_status
= "INACTIVE"
1656 if new_status
== None or new_status
== current_status
:
1658 if new_status
== 'INACTIVE' and current_status
== 'ERROR':
1659 continue #keep ERROR status, because obviously this machine is not running
1661 self
.logger
.debug("server id='%s' status change from '%s' to '%s'", server_id
, current_status
, new_status
)
1662 STATUS
={'progress':100, 'status':new_status
}
1663 if new_status
== 'ERROR':
1664 STATUS
['last_error'] = 'machine has crashed'
1665 self
.db_lock
.acquire()
1666 r
,_
= self
.db
.update_rows('instances', STATUS
, {'uuid':server_id
}, log
=False)
1667 self
.db_lock
.release()
1669 self
.server_status
[server_id
] = new_status
1671 def action_on_server(self
, req
, last_retry
=True):
1672 '''Perform an action on a req
1674 req: dictionary that contain:
1675 server properties: 'uuid','name','tenant_id','status'
1677 host properties: 'user', 'ip_name'
1678 return (error, text)
1679 0: No error. VM is updated to new state,
1680 -1: Invalid action, as trying to pause a PAUSED VM
1681 -2: Error accessing host
1683 -4: Error at DB access
1684 -5: Error while trying to perform action. VM is updated to ERROR
1686 server_id
= req
['uuid']
1689 old_status
= req
['status']
1693 if 'terminate' in req
['action']:
1694 new_status
= 'deleted'
1695 elif 'shutoff' in req
['action'] or 'shutdown' in req
['action'] or 'forceOff' in req
['action']:
1696 if req
['status']!='ERROR':
1698 new_status
= 'INACTIVE'
1699 elif 'start' in req
['action'] and req
['status']!='ERROR':
1700 new_status
= 'ACTIVE'
1701 elif 'resume' in req
['action'] and req
['status']!='ERROR' and req
['status']!='INACTIVE':
1702 new_status
= 'ACTIVE'
1703 elif 'pause' in req
['action'] and req
['status']!='ERROR':
1704 new_status
= 'PAUSED'
1705 elif 'reboot' in req
['action'] and req
['status']!='ERROR':
1706 new_status
= 'ACTIVE'
1707 elif 'rebuild' in req
['action']:
1708 time
.sleep(random
.randint(20,150))
1709 new_status
= 'ACTIVE'
1710 elif 'createImage' in req
['action']:
1712 self
.create_image(None, req
)
1715 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
1717 dom
= conn
.lookupByUUIDString(server_id
)
1718 except host_thread
.lvirt_module
.libvirtError
as e
:
1719 text
= e
.get_error_message()
1720 if 'LookupByUUIDString' in text
or 'Domain not found' in text
or 'No existe un dominio coincidente' in text
:
1723 self
.logger
.error("action_on_server id='%s' libvirt exception: %s", server_id
, text
)
1726 if 'forceOff' in req
['action']:
1728 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
1731 self
.logger
.debug("sending DESTROY to server id='%s'", server_id
)
1733 except Exception as e
:
1734 if "domain is not running" not in e
.get_error_message():
1735 self
.logger
.error("action_on_server id='%s' Exception while sending force off: %s",
1736 server_id
, e
.get_error_message())
1737 last_error
= 'action_on_server Exception while destroy: ' + e
.get_error_message()
1738 new_status
= 'ERROR'
1740 elif 'terminate' in req
['action']:
1742 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
1743 new_status
= 'deleted'
1746 if req
['action']['terminate'] == 'force':
1747 self
.logger
.debug("sending DESTROY to server id='%s'", server_id
)
1749 new_status
= 'deleted'
1751 self
.logger
.debug("sending SHUTDOWN to server id='%s'", server_id
)
1753 self
.pending_terminate_server
.append( (time
.time()+10,server_id
) )
1754 except Exception as e
:
1755 self
.logger
.error("action_on_server id='%s' Exception while destroy: %s",
1756 server_id
, e
.get_error_message())
1757 last_error
= 'action_on_server Exception while destroy: ' + e
.get_error_message()
1758 new_status
= 'ERROR'
1759 if "domain is not running" in e
.get_error_message():
1762 new_status
= 'deleted'
1764 self
.logger
.error("action_on_server id='%s' Exception while undefine: %s",
1765 server_id
, e
.get_error_message())
1766 last_error
= 'action_on_server Exception2 while undefine:', e
.get_error_message()
1767 #Exception: 'virDomainDetachDevice() failed'
1768 if new_status
=='deleted':
1769 if server_id
in self
.server_status
:
1770 del self
.server_status
[server_id
]
1771 if req
['uuid'] in self
.localinfo
['server_files']:
1772 for file_
in self
.localinfo
['server_files'][ req
['uuid'] ].values():
1774 self
.delete_file(file_
['source file'])
1777 del self
.localinfo
['server_files'][ req
['uuid'] ]
1778 self
.localinfo_dirty
= True
1780 elif 'shutoff' in req
['action'] or 'shutdown' in req
['action']:
1783 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
1786 # new_status = 'INACTIVE'
1787 #TODO: check status for changing at database
1788 except Exception as e
:
1789 new_status
= 'ERROR'
1790 self
.logger
.error("action_on_server id='%s' Exception while shutdown: %s",
1791 server_id
, e
.get_error_message())
1792 last_error
= 'action_on_server Exception while shutdown: ' + e
.get_error_message()
1794 elif 'rebuild' in req
['action']:
1797 r
= self
.launch_server(conn
, req
, True, None)
1799 new_status
= 'ERROR'
1802 new_status
= 'ACTIVE'
1803 elif 'start' in req
['action']:
1804 # The instance is only create in DB but not yet at libvirt domain, needs to be create
1805 rebuild
= True if req
['action']['start'] == 'rebuild' else False
1806 r
= self
.launch_server(conn
, req
, rebuild
, dom
)
1808 new_status
= 'ERROR'
1811 new_status
= 'ACTIVE'
1813 elif 'resume' in req
['action']:
1819 # new_status = 'ACTIVE'
1820 except Exception as e
:
1821 self
.logger
.error("action_on_server id='%s' Exception while resume: %s",
1822 server_id
, e
.get_error_message())
1824 elif 'pause' in req
['action']:
1830 # new_status = 'PAUSED'
1831 except Exception as e
:
1832 self
.logger
.error("action_on_server id='%s' Exception while pause: %s",
1833 server_id
, e
.get_error_message())
1835 elif 'reboot' in req
['action']:
1841 self
.logger
.debug("action_on_server id='%s' reboot:", server_id
)
1842 #new_status = 'ACTIVE'
1843 except Exception as e
:
1844 self
.logger
.error("action_on_server id='%s' Exception while reboot: %s",
1845 server_id
, e
.get_error_message())
1846 elif 'createImage' in req
['action']:
1847 self
.create_image(dom
, req
)
1851 except host_thread
.lvirt_module
.libvirtError
as e
:
1852 if conn
is not None: conn
.close()
1853 text
= e
.get_error_message()
1854 new_status
= "ERROR"
1856 if 'LookupByUUIDString' in text
or 'Domain not found' in text
or 'No existe un dominio coincidente' in text
:
1857 self
.logger
.debug("action_on_server id='%s' Exception removed from host", server_id
)
1859 self
.logger
.error("action_on_server id='%s' Exception %s", server_id
, text
)
1860 #end of if self.test
1861 if new_status
== None:
1864 self
.logger
.debug("action_on_server id='%s' new status=%s %s",server_id
, new_status
, last_error
)
1865 UPDATE
= {'progress':100, 'status':new_status
}
1867 if new_status
=='ERROR':
1868 if not last_retry
: #if there will be another retry do not update database
1870 elif 'terminate' in req
['action']:
1871 #PUT a log in the database
1872 self
.logger
.error("PANIC deleting server id='%s' %s", server_id
, last_error
)
1873 self
.db_lock
.acquire()
1874 self
.db
.new_row('logs',
1875 {'uuid':server_id
, 'tenant_id':req
['tenant_id'], 'related':'instances','level':'panic',
1876 'description':'PANIC deleting server from host '+self
.name
+': '+last_error
}
1878 self
.db_lock
.release()
1879 if server_id
in self
.server_status
:
1880 del self
.server_status
[server_id
]
1883 UPDATE
['last_error'] = last_error
1884 if new_status
!= 'deleted' and (new_status
!= old_status
or new_status
== 'ERROR') :
1885 self
.db_lock
.acquire()
1886 self
.db
.update_rows('instances', UPDATE
, {'uuid':server_id
}, log
=True)
1887 self
.server_status
[server_id
] = new_status
1888 self
.db_lock
.release()
1889 if new_status
== 'ERROR':
1894 def restore_iface(self
, name
, mac
, lib_conn
=None):
1895 ''' make an ifdown, ifup to restore default parameter of na interface
1897 mac: mac address of the interface
1898 lib_conn: connection to the libvirt, if None a new connection is created
1899 Return 0,None if ok, -1,text if fails
1905 self
.logger
.debug("restore_iface '%s' %s", name
, mac
)
1909 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
1913 #wait to the pending VM deletion
1914 #TODO.Revise self.server_forceoff(True)
1916 iface
= conn
.interfaceLookupByMACString(mac
)
1917 if iface
.isActive():
1920 self
.logger
.debug("restore_iface '%s' %s", name
, mac
)
1921 except host_thread
.lvirt_module
.libvirtError
as e
:
1922 error_text
= e
.get_error_message()
1923 self
.logger
.error("restore_iface '%s' '%s' libvirt exception: %s", name
, mac
, error_text
)
1926 if lib_conn
is None and conn
is not None:
1928 return ret
, error_text
1931 def create_image(self
,dom
, req
):
1933 if 'path' in req
['action']['createImage']:
1934 file_dst
= req
['action']['createImage']['path']
1936 createImage
=req
['action']['createImage']
1937 img_name
= createImage
['source']['path']
1938 index
=img_name
.rfind('/')
1939 file_dst
= self
.get_notused_path(img_name
[:index
+1] + createImage
['name'] + '.qcow2')
1940 image_status
='ACTIVE'
1944 server_id
= req
['uuid']
1945 createImage
=req
['action']['createImage']
1946 file_orig
= self
.localinfo
['server_files'][server_id
] [ createImage
['source']['image_id'] ] ['source file']
1947 if 'path' in req
['action']['createImage']:
1948 file_dst
= req
['action']['createImage']['path']
1950 img_name
= createImage
['source']['path']
1951 index
=img_name
.rfind('/')
1952 file_dst
= self
.get_notused_filename(img_name
[:index
+1] + createImage
['name'] + '.qcow2')
1954 self
.copy_file(file_orig
, file_dst
)
1955 qemu_info
= self
.qemu_get_info(file_orig
)
1956 if 'backing file' in qemu_info
:
1957 for k
,v
in self
.localinfo
['files'].items():
1958 if v
==qemu_info
['backing file']:
1959 self
.qemu_change_backing(file_dst
, k
)
1961 image_status
='ACTIVE'
1963 except paramiko
.ssh_exception
.SSHException
as e
:
1964 image_status
='ERROR'
1965 error_text
= e
.args
[0]
1966 self
.logger
.error("create_image id='%s' ssh Exception: %s", server_id
, error_text
)
1967 if "SSH session not active" in error_text
and retry
==0:
1969 except Exception as e
:
1970 image_status
='ERROR'
1972 self
.logger
.error("create_image id='%s' Exception: %s", server_id
, error_text
)
1974 #TODO insert a last_error at database
1975 self
.db_lock
.acquire()
1976 self
.db
.update_rows('images', {'status':image_status
, 'progress': 100, 'path':file_dst
},
1977 {'uuid':req
['new_image']['uuid']}, log
=True)
1978 self
.db_lock
.release()
1980 def edit_iface(self
, port_id
, old_net
, new_net
):
1981 #This action imply remove and insert interface to put proper parameters
1986 self
.db_lock
.acquire()
1987 r
,c
= self
.db
.get_table(FROM
='ports as p join resources_port as rp on p.uuid=rp.port_id',
1988 WHERE
={'port_id': port_id
})
1989 self
.db_lock
.release()
1991 self
.logger
.error("edit_iface %s DDBB error: %s", port_id
, c
)
1994 self
.logger
.error("edit_iface %s port not found", port_id
)
1997 if port
["model"]!="VF":
1998 self
.logger
.error("edit_iface %s ERROR model must be VF", port_id
)
2000 #create xml detach file
2003 xml
.append("<interface type='hostdev' managed='yes'>")
2004 xml
.append(" <mac address='" +port
['mac']+ "'/>")
2005 xml
.append(" <source>"+ self
.pci2xml(port
['pci'])+"\n </source>")
2006 xml
.append('</interface>')
2011 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
2012 dom
= conn
.lookupByUUIDString(port
["instance_id"])
2015 self
.logger
.debug("edit_iface detaching SRIOV interface " + text
)
2016 dom
.detachDeviceFlags(text
, flags
=host_thread
.lvirt_module
.VIR_DOMAIN_AFFECT_LIVE
)
2018 xml
[-1] =" <vlan> <tag id='" + str(port
['vlan']) + "'/> </vlan>"
2020 xml
.append(self
.pci2xml(port
.get('vpci',None)) )
2021 xml
.append('</interface>')
2023 self
.logger
.debug("edit_iface attaching SRIOV interface " + text
)
2024 dom
.attachDeviceFlags(text
, flags
=host_thread
.lvirt_module
.VIR_DOMAIN_AFFECT_LIVE
)
2026 except host_thread
.lvirt_module
.libvirtError
as e
:
2027 text
= e
.get_error_message()
2028 self
.logger
.error("edit_iface %s libvirt exception: %s", port
["instance_id"], text
)
2031 if conn
is not None: conn
.close()
2034 def create_server(server
, db
, db_lock
, only_of_ports
):
2035 extended
= server
.get('extended', None)
2037 requirements
['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
2038 requirements
['ram'] = server
['flavor'].get('ram', 0)
2039 if requirements
['ram']== None:
2040 requirements
['ram'] = 0
2041 requirements
['vcpus'] = server
['flavor'].get('vcpus', 0)
2042 if requirements
['vcpus']== None:
2043 requirements
['vcpus'] = 0
2044 #If extended is not defined get requirements from flavor
2045 if extended
is None:
2046 #If extended is defined in flavor convert to dictionary and use it
2047 if 'extended' in server
['flavor'] and server
['flavor']['extended'] != None:
2048 json_acceptable_string
= server
['flavor']['extended'].replace("'", "\"")
2049 extended
= json
.loads(json_acceptable_string
)
2052 #print json.dumps(extended, indent=4)
2054 #For simplicity only one numa VM are supported in the initial implementation
2055 if extended
!= None:
2056 numas
= extended
.get('numas', [])
2058 return (-2, "Multi-NUMA VMs are not supported yet")
2060 # return (-1, "At least one numa must be specified")
2062 #a for loop is used in order to be ready to multi-NUMA VMs
2066 numa_req
['memory'] = numa
.get('memory', 0)
2068 numa_req
['proc_req_nb'] = numa
['cores'] #number of cores or threads to be reserved
2069 numa_req
['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
2070 numa_req
['proc_req_list'] = numa
.get('cores-id', None) #list of ids to be assigned to the cores or threads
2071 elif 'paired-threads' in numa
:
2072 numa_req
['proc_req_nb'] = numa
['paired-threads']
2073 numa_req
['proc_req_type'] = 'paired-threads'
2074 numa_req
['proc_req_list'] = numa
.get('paired-threads-id', None)
2075 elif 'threads' in numa
:
2076 numa_req
['proc_req_nb'] = numa
['threads']
2077 numa_req
['proc_req_type'] = 'threads'
2078 numa_req
['proc_req_list'] = numa
.get('threads-id', None)
2080 numa_req
['proc_req_nb'] = 0 # by default
2081 numa_req
['proc_req_type'] = 'threads'
2085 #Generate a list of sriov and another for physical interfaces
2086 interfaces
= numa
.get('interfaces', [])
2089 for iface
in interfaces
:
2090 iface
['bandwidth'] = int(iface
['bandwidth'])
2091 if iface
['dedicated'][:3]=='yes':
2092 port_list
.append(iface
)
2094 sriov_list
.append(iface
)
2096 #Save lists ordered from more restrictive to less bw requirements
2097 numa_req
['sriov_list'] = sorted(sriov_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2098 numa_req
['port_list'] = sorted(port_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2101 request
.append(numa_req
)
2103 # print "----------\n"+json.dumps(request[0], indent=4)
2104 # print '----------\n\n'
2106 #Search in db for an appropriate numa for each requested numa
2107 #at the moment multi-NUMA VMs are not supported
2109 requirements
['numa'].update(request
[0])
2110 if requirements
['numa']['memory']>0:
2111 requirements
['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2112 elif requirements
['ram']==0:
2113 return (-1, "Memory information not set neither at extended field not at ram")
2114 if requirements
['numa']['proc_req_nb']>0:
2115 requirements
['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2116 elif requirements
['vcpus']==0:
2117 return (-1, "Processor information not set neither at extended field not at vcpus")
2121 result
, content
= db
.get_numas(requirements
, server
.get('host_id', None), only_of_ports
)
2125 return (-1, content
)
2127 numa_id
= content
['numa_id']
2128 host_id
= content
['host_id']
2130 #obtain threads_id and calculate pinning
2133 if requirements
['numa']['proc_req_nb']>0:
2135 result
, content
= db
.get_table(FROM
='resources_core',
2136 SELECT
=('id','core_id','thread_id'),
2137 WHERE
={'numa_id':numa_id
,'instance_id': None, 'status':'ok'} )
2143 #convert rows to a dictionary indexed by core_id
2146 if not row
['core_id'] in cores_dict
:
2147 cores_dict
[row
['core_id']] = []
2148 cores_dict
[row
['core_id']].append([row
['thread_id'],row
['id']])
2150 #In case full cores are requested
2152 if requirements
['numa']['proc_req_type'] == 'cores':
2153 #Get/create the list of the vcpu_ids
2154 vcpu_id_list
= requirements
['numa']['proc_req_list']
2155 if vcpu_id_list
== None:
2156 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2158 for threads
in cores_dict
.itervalues():
2160 if len(threads
) != 2:
2163 #set pinning for the first thread
2164 cpu_pinning
.append( [ vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1] ] )
2166 #reserve so it is not used the second thread
2167 reserved_threads
.append(threads
[1][1])
2169 if len(vcpu_id_list
) == 0:
2172 #In case paired threads are requested
2173 elif requirements
['numa']['proc_req_type'] == 'paired-threads':
2175 #Get/create the list of the vcpu_ids
2176 if requirements
['numa']['proc_req_list'] != None:
2178 for pair
in requirements
['numa']['proc_req_list']:
2180 return -1, "Field paired-threads-id not properly specified"
2182 vcpu_id_list
.append(pair
[0])
2183 vcpu_id_list
.append(pair
[1])
2185 vcpu_id_list
= range(0,2*int(requirements
['numa']['proc_req_nb']))
2187 for threads
in cores_dict
.itervalues():
2189 if len(threads
) != 2:
2191 #set pinning for the first thread
2192 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2194 #set pinning for the second thread
2195 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2197 if len(vcpu_id_list
) == 0:
2200 #In case normal threads are requested
2201 elif requirements
['numa']['proc_req_type'] == 'threads':
2202 #Get/create the list of the vcpu_ids
2203 vcpu_id_list
= requirements
['numa']['proc_req_list']
2204 if vcpu_id_list
== None:
2205 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2207 for threads_index
in sorted(cores_dict
, key
=lambda k
: len(cores_dict
[k
])):
2208 threads
= cores_dict
[threads_index
]
2209 #set pinning for the first thread
2210 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2212 #if exists, set pinning for the second thread
2213 if len(threads
) == 2 and len(vcpu_id_list
) != 0:
2214 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2216 if len(vcpu_id_list
) == 0:
2219 #Get the source pci addresses for the selected numa
2220 used_sriov_ports
= []
2221 for port
in requirements
['numa']['sriov_list']:
2223 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2229 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2231 port
['pci'] = row
['pci']
2232 if 'mac_address' not in port
:
2233 port
['mac_address'] = row
['mac']
2235 port
['port_id']=row
['id']
2236 port
['Mbps_used'] = port
['bandwidth']
2237 used_sriov_ports
.append(row
['id'])
2240 for port
in requirements
['numa']['port_list']:
2241 port
['Mbps_used'] = None
2242 if port
['dedicated'] != "yes:sriov":
2243 port
['mac_address'] = port
['mac']
2247 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac', 'Mbps'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2252 port
['Mbps_used'] = content
[0]['Mbps']
2254 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2256 port
['pci'] = row
['pci']
2257 if 'mac_address' not in port
:
2258 port
['mac_address'] = row
['mac'] # mac cannot be set to passthrough ports
2260 port
['port_id']=row
['id']
2261 used_sriov_ports
.append(row
['id'])
2264 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2265 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2267 server
['host_id'] = host_id
2269 #Generate dictionary for saving in db the instance resources
2271 resources
['bridged-ifaces'] = []
2274 numa_dict
['interfaces'] = []
2276 numa_dict
['interfaces'] += requirements
['numa']['port_list']
2277 numa_dict
['interfaces'] += requirements
['numa']['sriov_list']
2279 #Check bridge information
2280 unified_dataplane_iface
=[]
2281 unified_dataplane_iface
+= requirements
['numa']['port_list']
2282 unified_dataplane_iface
+= requirements
['numa']['sriov_list']
2284 for control_iface
in server
.get('networks', []):
2285 control_iface
['net_id']=control_iface
.pop('uuid')
2286 #Get the brifge name
2288 result
, content
= db
.get_table(FROM
='nets',
2289 SELECT
=('name', 'type', 'vlan', 'provider', 'enable_dhcp',
2290 'dhcp_first_ip', 'dhcp_last_ip', 'cidr'),
2291 WHERE
={'uuid': control_iface
['net_id']})
2296 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface
['net_id']
2299 if control_iface
.get("type", 'virtual') == 'virtual':
2300 if network
['type']!='bridge_data' and network
['type']!='bridge_man':
2301 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface
['net_id']
2302 resources
['bridged-ifaces'].append(control_iface
)
2303 if network
.get("provider") and network
["provider"][0:3] == "OVS":
2304 control_iface
["type"] = "instance:ovs"
2306 control_iface
["type"] = "instance:bridge"
2307 if network
.get("vlan"):
2308 control_iface
["vlan"] = network
["vlan"]
2310 if network
.get("enable_dhcp") == 'true':
2311 control_iface
["enable_dhcp"] = network
.get("enable_dhcp")
2312 control_iface
["dhcp_first_ip"] = network
["dhcp_first_ip"]
2313 control_iface
["dhcp_last_ip"] = network
["dhcp_last_ip"]
2314 control_iface
["cidr"] = network
["cidr"]
2316 if network
['type']!='data' and network
['type']!='ptp':
2317 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface
['net_id']
2318 #dataplane interface, look for it in the numa tree and asign this network
2320 for dataplane_iface
in numa_dict
['interfaces']:
2321 if dataplane_iface
['name'] == control_iface
.get("name"):
2322 if (dataplane_iface
['dedicated'] == "yes" and control_iface
["type"] != "PF") or \
2323 (dataplane_iface
['dedicated'] == "no" and control_iface
["type"] != "VF") or \
2324 (dataplane_iface
['dedicated'] == "yes:sriov" and control_iface
["type"] != "VFnotShared") :
2325 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2326 (control_iface
.get("name"), dataplane_iface
['dedicated'], control_iface
["type"])
2327 dataplane_iface
['uuid'] = control_iface
['net_id']
2328 if dataplane_iface
['dedicated'] == "no":
2329 dataplane_iface
['vlan'] = network
['vlan']
2330 if dataplane_iface
['dedicated'] != "yes" and control_iface
.get("mac_address"):
2331 dataplane_iface
['mac_address'] = control_iface
.get("mac_address")
2332 if control_iface
.get("vpci"):
2333 dataplane_iface
['vpci'] = control_iface
.get("vpci")
2337 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface
.get("name")
2339 resources
['host_id'] = host_id
2340 resources
['image_id'] = server
['image_id']
2341 resources
['flavor_id'] = server
['flavor_id']
2342 resources
['tenant_id'] = server
['tenant_id']
2343 resources
['ram'] = requirements
['ram']
2344 resources
['vcpus'] = requirements
['vcpus']
2345 resources
['status'] = 'CREATING'
2347 if 'description' in server
: resources
['description'] = server
['description']
2348 if 'name' in server
: resources
['name'] = server
['name']
2350 resources
['extended'] = {} #optional
2351 resources
['extended']['numas'] = []
2352 numa_dict
['numa_id'] = numa_id
2353 numa_dict
['memory'] = requirements
['numa']['memory']
2354 numa_dict
['cores'] = []
2356 for core
in cpu_pinning
:
2357 numa_dict
['cores'].append({'id': core
[2], 'vthread': core
[0], 'paired': paired
})
2358 for core
in reserved_threads
:
2359 numa_dict
['cores'].append({'id': core
})
2360 resources
['extended']['numas'].append(numa_dict
)
2361 if extended
!=None and 'devices' in extended
: #TODO allow extra devices without numa
2362 resources
['extended']['devices'] = extended
['devices']
2365 # '===================================={'
2366 #print json.dumps(resources, indent=4)
2367 #print '====================================}'