[osm/openvim.git] / osm_openvim / host_thread.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
24 '''
25 This is a thread that interacts with the host and with libvirt to manage VMs.
26 One thread is launched per host.
27 '''
28 __author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__ = "$10-jul-2014 12:07:15$"
30
31 import json
32 import yaml
33 import threading
34 import time
35 import Queue
36 import paramiko
37 # import subprocess
38 # import libvirt
39 import imp
40 import random
41 import os
42 import logging
43 from jsonschema import validate as js_v, exceptions as js_e
44 from vim_schema import localinfo_schema, hostinfo_schema
45
46
47 class host_thread(threading.Thread):
48 lvirt_module = None
49
50 def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
51 develop_bridge_iface, password=None, keyfile = None, logger_name=None, debug=None):
52 '''Init a thread.
53 Arguments:
54 'id' number of thread
55 'name' name of thread
56 'host','user': host ip or name to manage and user
57 'db', 'db_lock': database class and lock to use it in exclusion
58 '''
59 threading.Thread.__init__(self)
60 self.name = name
61 self.host = host
62 self.user = user
63 self.db = db
64 self.db_lock = db_lock
65 self.test = test
66 self.password = password
67 self.keyfile = keyfile
68 self.localinfo_dirty = False
69
70 if not test and not host_thread.lvirt_module:
71 try:
72 module_info = imp.find_module("libvirt")
73 host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
74 except (IOError, ImportError) as e:
75 raise ImportError("Cannot import python-libvirt. Openvim not properly installed: " + str(e))
76 if logger_name:
77 self.logger_name = logger_name
78 else:
79 self.logger_name = "openvim.host."+name
80 self.logger = logging.getLogger(self.logger_name)
81 if debug:
82 self.logger.setLevel(getattr(logging, debug))
83
84
85 self.develop_mode = develop_mode
86 self.develop_bridge_iface = develop_bridge_iface
87 self.image_path = image_path
88 self.empty_image_path = image_path
89 self.host_id = host_id
90 self.version = version
91
92 self.xml_level = 0
93 #self.pending ={}
94
95 self.server_status = {} #dictionary with pairs server_uuid:server_status
96 self.pending_terminate_server =[] #list of pairs (time, server_uuid): time at which to send a forced terminate to a server being destroyed
97 self.next_update_server_status = 0 #time when the servers status must be checked
98
99 self.hostinfo = None
100
101 self.queueLock = threading.Lock()
102 self.taskQueue = Queue.Queue(2000)
103 self.ssh_conn = None
104 self.connectivity = True
105 self.lvirt_conn_uri = "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
106 user=self.user, host=self.host)
107 if keyfile:
108 self.lvirt_conn_uri += "&keyfile=" + keyfile
109
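# Illustrative example (hypothetical values): for user 'admin', host '10.0.0.5' and
# keyfile '/root/.ssh/id_rsa', the URI built above would be
#   qemu+ssh://admin@10.0.0.5/system?no_tty=1&no_verify=1&keyfile=/root/.ssh/id_rsa
# which is later used to open the libvirt connection to this host.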
110 def ssh_connect(self):
111 try:
112 # Connect SSH
113 self.ssh_conn = paramiko.SSHClient()
114 self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
115 self.ssh_conn.load_system_host_keys()
116 self.ssh_conn.connect(self.host, username=self.user, password=self.password, key_filename=self.keyfile,
117 timeout=10) #, None)
118 except paramiko.ssh_exception.SSHException as e:
119 text = e.args[0]
120 self.logger.error("ssh_connect ssh Exception: " + text)
121
122 def check_connectivity(self):
123 if not self.test:
124
125 try:
126 if not self.ssh_conn:
127 self.ssh_connect()
128
129 command = 'sudo brctl show'
130 (_, stdout, stderr) = self.ssh_conn.exec_command(command, timeout=10)
131 content = stderr.read()
132 if len(content) > 0:
133 self.connectivity = False
134 self.logger.error("ssh connection error")
135 except paramiko.ssh_exception.SSHException as e:
136 text = e.args[0]
137 self.connectivity = False
138 self.logger.error("ssh_connect ssh Exception: " + text)
139 raise paramiko.ssh_exception.SSHException("ssh connection error")
140 except Exception as e:
141 self.connectivity = False
142 raise paramiko.ssh_exception.SSHException("ssh connection error: " + str(e))
143
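# Note: check_connectivity() probes the host by running 'sudo brctl show' over the ssh
# channel; any stderr output or ssh failure sets self.connectivity = False, which makes
# several of the OVS helper methods below (e.g. create_ovs_bridge) return immediately
# without touching the host.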
144 def load_localinfo(self):
145 if not self.test:
146 try:
147 # Connect SSH
148 self.ssh_connect()
149
150 command = 'mkdir -p ' + self.image_path
151 # print self.name, ': command:', command
152 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
153 content = stderr.read()
154 if len(content) > 0:
155 self.logger.error("command: '%s' stderr: '%s'", command, content)
156
157 command = 'cat ' + self.image_path + '/.openvim.yaml'
158 # print self.name, ': command:', command
159 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
160 content = stdout.read()
161 if len(content) == 0:
162 self.logger.error("command: '%s' stderr='%s'", command, stderr.read())
163 raise paramiko.ssh_exception.SSHException("Error empty file, command: '{}'".format(command))
164 self.localinfo = yaml.load(content)
165 js_v(self.localinfo, localinfo_schema)
166 self.localinfo_dirty = False
167 if 'server_files' not in self.localinfo:
168 self.localinfo['server_files'] = {}
169 self.logger.debug("localinfo load from host")
170 return
171
172 except paramiko.ssh_exception.SSHException as e:
173 text = e.args[0]
174 self.logger.error("load_localinfo ssh Exception: " + text)
175 except host_thread.lvirt_module.libvirtError as e:
176 text = e.get_error_message()
177 self.logger.error("load_localinfo libvirt Exception: " + text)
178 except yaml.YAMLError as exc:
179 text = ""
180 if hasattr(exc, 'problem_mark'):
181 mark = exc.problem_mark
182 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
183 self.logger.error("load_localinfo yaml format Exception " + text)
184 except js_e.ValidationError as e:
185 text = ""
186 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
187 self.logger.error("load_localinfo format Exception: %s %s", text, str(e))
188 except Exception as e:
189 text = str(e)
190 self.logger.error("load_localinfo Exception: " + text)
191
192 #not loaded, insert default data; do not force saving (dirty flag is left False)
193 self.localinfo = {'files':{}, 'server_files':{} }
194 #self.localinfo_dirty=True
195 self.localinfo_dirty=False
196
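# Illustrative sketch of the .openvim.yaml file handled above (hypothetical uuids/paths),
# matching the structure used later by load_servers_from_db():
#   ---
#   files: {}
#   server_files:
#       <server-uuid>:
#           <image-uuid>: {'source file': '/opt/VNF/images/vm1.qcow2', 'file format': 'qcow2'}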
197 def load_hostinfo(self):
198 if self.test:
199 return
200 try:
201 #Connect SSH
202 self.ssh_connect()
203
204
205 command = 'cat ' + self.image_path + '/hostinfo.yaml'
206 #print self.name, ': command:', command
207 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
208 content = stdout.read()
209 if len(content) == 0:
210 self.logger.error("command: '%s' stderr: '%s'", command, stderr.read())
211 raise paramiko.ssh_exception.SSHException("Error empty file, command: '{}'".format(command))
212 self.hostinfo = yaml.load(content)
213 js_v(self.hostinfo, hostinfo_schema)
214 self.logger.debug("hostinfo loaded from host " + str(self.hostinfo))
215 return
216
217 except paramiko.ssh_exception.SSHException as e:
218 text = e.args[0]
219 self.logger.error("load_hostinfo ssh Exception: " + text)
220 except host_thread.lvirt_module.libvirtError as e:
221 text = e.get_error_message()
222 self.logger.error("load_hostinfo libvirt Exception: " + text)
223 except yaml.YAMLError as exc:
224 text = ""
225 if hasattr(exc, 'problem_mark'):
226 mark = exc.problem_mark
227 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
228 self.logger.error("load_hostinfo yaml format Exception " + text)
229 except js_e.ValidationError as e:
230 text = ""
231 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
232 self.logger.error("load_hostinfo format Exception: %s %s", text, str(e))
233 except Exception as e:
234 text = str(e)
235 self.logger.error("load_hostinfo Exception: " + text)
236
237 #not loaded, keep hostinfo empty
238 self.hostinfo = None
239
240 def save_localinfo(self, tries=3):
241 if self.test:
242 self.localinfo_dirty = False
243 return
244
245 while tries > 0:
246 tries-=1
247
248 try:
249 command = 'cat > ' + self.image_path + '/.openvim.yaml'
250 self.logger.debug("command:" + command)
251 (stdin, _, _) = self.ssh_conn.exec_command(command)
252 yaml.safe_dump(self.localinfo, stdin, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
253
254 self.localinfo_dirty = False
255 break #while tries
256
257 except paramiko.ssh_exception.SSHException as e:
258 text = e.args[0]
259 self.logger.error("save_localinfo ssh Exception: " + text)
260 if "SSH session not active" in text:
261 self.ssh_connect()
262 except host_thread.lvirt_module.libvirtError as e:
263 text = e.get_error_message()
264 self.logger.error("save_localinfo libvirt Exception: " + text)
265 except yaml.YAMLError as exc:
266 text = ""
267 if hasattr(exc, 'problem_mark'):
268 mark = exc.problem_mark
269 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
270 self.logger.error("save_localinfo yaml format Exception " + text)
271 except Exception as e:
272 text = str(e)
273 self.logger.error("save_localinfo Exception: " + text)
274
275 def load_servers_from_db(self):
276 self.db_lock.acquire()
277 r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
278 self.db_lock.release()
279
280 self.server_status = {}
281 if r<0:
282 self.logger.error("Error getting data from database: " + c)
283 return
284 for server in c:
285 self.server_status[ server['uuid'] ] = server['status']
286
287 #convert from old version to new one
288 if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
289 server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
290 if server_files_dict['source file'][-5:] == 'qcow2':
291 server_files_dict['file format'] = 'qcow2'
292
293 self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
294 if 'inc_files' in self.localinfo:
295 del self.localinfo['inc_files']
296 self.localinfo_dirty = True
297
298 def delete_unused_files(self):
299 '''Compares self.localinfo['server_files'] content with the servers actually present in self.server_status (obtained from the database).
300 Deletes unused entries at self.localinfo and the corresponding local files.
301 The only reason for this mismatch is the manual deletion of instances (VMs) at the database.
302 '''
303 if self.test:
304 return
305 for uuid,images in self.localinfo['server_files'].items():
306 if uuid not in self.server_status:
307 for localfile in images.values():
308 try:
309 self.logger.debug("deleting file '%s' of unused server '%s'", localfile['source file'], uuid)
310 self.delete_file(localfile['source file'])
311 except paramiko.ssh_exception.SSHException as e:
312 self.logger.error("Exception deleting file '%s': %s", localfile['source file'], str(e))
313 del self.localinfo['server_files'][uuid]
314 self.localinfo_dirty = True
315
316 def insert_task(self, task, *aditional):
317 try:
318 # hold the queue lock only while inserting; it is released even if Queue.Full is raised
319 with self.queueLock:
320 self.taskQueue.put((task,) + aditional, timeout=5)
321 return 1, None
322 except Queue.Full:
323 return -1, "timeout inserting a task over host " + self.name
324
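# Usage sketch: other openvim components enqueue work for this thread as tuples whose
# first element selects the action in run() below; e.g. a forced terminate request like
# the one built in server_forceoff() could be queued as
#   rc, msg = thread.insert_task('instance', {'uuid': server_uuid,
#                                             'action': {'terminate': 'force'},
#                                             'status': None})
# ('thread' and 'server_uuid' are hypothetical caller-side names).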
325 def run(self):
326 while True:
327 self.load_localinfo()
328 self.load_hostinfo()
329 self.load_servers_from_db()
330 self.delete_unused_files()
331 while True:
332 try:
333 self.queueLock.acquire()
334 if not self.taskQueue.empty():
335 task = self.taskQueue.get()
336 else:
337 task = None
338 self.queueLock.release()
339
340 if task is None:
341 now=time.time()
342 if self.localinfo_dirty:
343 self.save_localinfo()
344 elif self.next_update_server_status < now:
345 self.update_servers_status()
346 self.next_update_server_status = now + 5
347 elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
348 self.server_forceoff()
349 else:
350 time.sleep(1)
351 continue
352
353 if task[0] == 'instance':
354 self.logger.debug("processing task instance " + str(task[1]['action']))
355 retry = 0
356 while retry < 2:
357 retry += 1
358 r = self.action_on_server(task[1], retry==2)
359 if r >= 0:
360 break
361 elif task[0] == 'image':
362 pass
363 elif task[0] == 'exit':
364 self.logger.debug("processing task exit")
365 self.terminate()
366 return 0
367 elif task[0] == 'reload':
368 self.logger.debug("processing task reload terminating and relaunching")
369 self.terminate()
370 break
371 elif task[0] == 'edit-iface':
372 self.logger.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
373 task[1], task[2], task[3]))
374 self.edit_iface(task[1], task[2], task[3])
375 elif task[0] == 'restore-iface':
376 self.logger.debug("processing task restore-iface={} mac={}".format(task[1], task[2]))
377 self.restore_iface(task[1], task[2])
378 elif task[0] == 'new-ovsbridge':
379 self.logger.debug("Creating compute OVS bridge")
380 self.create_ovs_bridge()
381 elif task[0] == 'new-vxlan':
382 self.logger.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task[1], task[2]))
383 self.create_ovs_vxlan_tunnel(task[1], task[2])
384 elif task[0] == 'del-ovsbridge':
385 self.logger.debug("Deleting OVS bridge")
386 self.delete_ovs_bridge()
387 elif task[0] == 'del-vxlan':
388 self.logger.debug("Deleting vxlan {} tunnel".format(task[1]))
389 self.delete_ovs_vxlan_tunnel(task[1])
390 elif task[0] == 'create-ovs-bridge-port':
391 self.logger.debug("Adding port ovim-{} to OVS bridge".format(task[1]))
392 self.create_ovs_bridge_port(task[1])
393 elif task[0] == 'del-ovs-port':
394 self.logger.debug("Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2]))
395 self.delete_bridge_port_attached_to_ovs(task[1], task[2])
396 else:
397 self.logger.debug("unknown task " + str(task))
398
399 except Exception as e:
400 self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
401
402 def server_forceoff(self, wait_until_finished=False):
403 while len(self.pending_terminate_server)>0:
404 now = time.time()
405 if self.pending_terminate_server[0][0]>now:
406 if wait_until_finished:
407 time.sleep(1)
408 continue
409 else:
410 return
411 req={'uuid':self.pending_terminate_server[0][1],
412 'action':{'terminate':'force'},
413 'status': None
414 }
415 self.action_on_server(req)
416 self.pending_terminate_server.pop(0)
417
418 def terminate(self):
419 try:
420 self.server_forceoff(True)
421 if self.localinfo_dirty:
422 self.save_localinfo()
423 if not self.test:
424 self.ssh_conn.close()
425 except Exception as e:
426 text = str(e)
427 self.logger.error("terminate Exception: " + text)
428 self.logger.debug("exit from host_thread")
429
430 def get_local_iface_name(self, generic_name):
431 if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
432 return self.hostinfo["iface_names"][generic_name]
433 return generic_name
434
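# Illustrative hostinfo.yaml fragment used by get_local_iface_name (hypothetical values):
#   iface_names:
#       macvtap0: eno1
#       bridge0: br-mgmt
# so a network provider such as 'macvtap:macvtap0' is mapped to host interface 'eno1'.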
435 def create_xml_server(self, server, dev_list, server_metadata={}):
436 """Function that implements the generation of the VM XML definition.
437 Additional devices are in dev_list list
438 The main disk is dev_list[0]."""
439
440 #get if operating system is Windows
441 windows_os = False
442 os_type = server_metadata.get('os_type', None)
443 if os_type == None and 'metadata' in dev_list[0]:
444 os_type = dev_list[0]['metadata'].get('os_type', None)
445 if os_type != None and os_type.lower() == "windows":
446 windows_os = True
447 #get type of hard disk bus
448 bus_ide = True if windows_os else False
449 bus = server_metadata.get('bus', None)
450 if bus == None and 'metadata' in dev_list[0]:
451 bus = dev_list[0]['metadata'].get('bus', None)
452 if bus != None:
453 bus_ide = True if bus=='ide' else False
454
455 self.xml_level = 0
456
457 text = "<domain type='kvm'>"
458 #get topology
459 topo = server_metadata.get('topology', None)
460 if topo == None and 'metadata' in dev_list[0]:
461 topo = dev_list[0]['metadata'].get('topology', None)
462 #name
463 name = server.get('name', '')[:28] + "_" + server['uuid'][:28] #qemu imposes a name length limit of 59 chars, otherwise the domain does not start. Using 58
464 text += self.inc_tab() + "<name>" + name+ "</name>"
465 #uuid
466 text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>"
467
468 numa={}
469 if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
470 numa = server['extended']['numas'][0]
471 #memory
472 use_huge = False
473 memory = int(numa.get('memory',0))*1024*1024 #in KiB
474 if memory==0:
475 memory = int(server['ram'])*1024
476 else:
477 if not self.develop_mode:
478 use_huge = True
479 if memory==0:
480 return -1, 'No memory assigned to instance'
481 memory = str(memory)
482 text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>"
483 text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
484 if use_huge:
485 text += self.tab()+'<memoryBacking>'+ \
486 self.inc_tab() + '<hugepages/>'+ \
487 self.dec_tab()+ '</memoryBacking>'
488
489 #cpu
490 use_cpu_pinning=False
491 vcpus = int(server.get("vcpus",0))
492 cpu_pinning = []
493 if 'cores-source' in numa:
494 use_cpu_pinning=True
495 for index in range(0, len(numa['cores-source'])):
496 cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
497 vcpus += 1
498 if 'threads-source' in numa:
499 use_cpu_pinning=True
500 for index in range(0, len(numa['threads-source'])):
501 cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
502 vcpus += 1
503 if 'paired-threads-source' in numa:
504 use_cpu_pinning=True
505 for index in range(0, len(numa['paired-threads-source'])):
506 cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
507 cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
508 vcpus += 2
509
510 if use_cpu_pinning and not self.develop_mode:
511 text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
512 self.tab()+'<cputune>'
513 self.xml_level += 1
514 for i in range(0, len(cpu_pinning)):
515 text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
516 text += self.dec_tab()+'</cputune>'+ \
517 self.tab() + '<numatune>' +\
518 self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
519 self.dec_tab() + '</numatune>'
520 else:
521 if vcpus==0:
522 return -1, "Instance without number of cpus"
523 text += self.tab()+"<vcpu>" + str(vcpus) + "</vcpu>"
524
525 #boot
526 boot_cdrom = False
527 for dev in dev_list:
528 if dev['type']=='cdrom' :
529 boot_cdrom = True
530 break
531 text += self.tab()+ '<os>' + \
532 self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
533 if boot_cdrom:
534 text += self.tab() + "<boot dev='cdrom'/>"
535 text += self.tab() + "<boot dev='hd'/>" + \
536 self.dec_tab()+'</os>'
537 #features
538 text += self.tab()+'<features>'+\
539 self.inc_tab()+'<acpi/>' +\
540 self.tab()+'<apic/>' +\
541 self.tab()+'<pae/>'+ \
542 self.dec_tab() +'</features>'
543 if topo == "oneSocket:hyperthreading":
544 if vcpus % 2 != 0:
545 return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
546 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % (vcpus/2)
547 elif windows_os or topo == "oneSocket":
548 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
549 else:
550 text += self.tab() + "<cpu mode='host-model'></cpu>"
551 text += self.tab() + "<clock offset='utc'/>" +\
552 self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
553 self.tab() + "<on_reboot>restart</on_reboot>" + \
554 self.tab() + "<on_crash>restart</on_crash>"
555 text += self.tab() + "<devices>" + \
556 self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
557 self.tab() + "<serial type='pty'>" +\
558 self.inc_tab() + "<target port='0'/>" + \
559 self.dec_tab() + "</serial>" +\
560 self.tab() + "<console type='pty'>" + \
561 self.inc_tab()+ "<target type='serial' port='0'/>" + \
562 self.dec_tab()+'</console>'
563 if windows_os:
564 text += self.tab() + "<controller type='usb' index='0'/>" + \
565 self.tab() + "<controller type='ide' index='0'/>" + \
566 self.tab() + "<input type='mouse' bus='ps2'/>" + \
567 self.tab() + "<sound model='ich6'/>" + \
568 self.tab() + "<video>" + \
569 self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
570 self.dec_tab() + "</video>" + \
571 self.tab() + "<memballoon model='virtio'/>" + \
572 self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
573
574 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
575 #> self.dec_tab()+'</hostdev>\n' +\
576 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
577 if windows_os:
578 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
579 else:
580 #If image contains 'GRAPH' include graphics
581 #if 'GRAPH' in image:
582 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
583 self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
584 self.dec_tab() + "</graphics>"
585
586 vd_index = 'a'
587 for dev in dev_list:
588 bus_ide_dev = bus_ide
589 if dev['type']=='cdrom' or dev['type']=='disk':
590 if dev['type']=='cdrom':
591 bus_ide_dev = True
592 text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
593 if 'file format' in dev:
594 text += self.inc_tab() + "<driver name='qemu' type='" +dev['file format']+ "' cache='writethrough'/>"
595 if 'source file' in dev:
596 text += self.tab() + "<source file='" +dev['source file']+ "'/>"
597 #elif v['type'] == 'block':
598 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
599 #else:
600 # return -1, 'Unknown disk type ' + v['type']
601 vpci = dev.get('vpci',None)
602 if vpci == None and 'metadata' in dev:
603 vpci = dev['metadata'].get('vpci',None)
604 text += self.pci2xml(vpci)
605
606 if bus_ide_dev:
607 text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>" #TODO allows several type of disks
608 else:
609 text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>"
610 text += self.dec_tab() + '</disk>'
611 vd_index = chr(ord(vd_index)+1)
612 elif dev['type']=='xml':
613 dev_text = dev['xml']
614 if 'vpci' in dev:
615 dev_text = dev_text.replace('__vpci__', dev['vpci'])
616 if 'source file' in dev:
617 dev_text = dev_text.replace('__file__', dev['source file'])
618 if 'file format' in dev:
619 dev_text = dev_text.replace('__format__', dev['file format'])
620 if '__dev__' in dev_text:
621 dev_text = dev_text.replace('__dev__', vd_index)
622 vd_index = chr(ord(vd_index)+1)
623 text += dev_text
624 else:
625 return -1, 'Unknown device type ' + dev['type']
626
627 net_nb=0
628 bridge_interfaces = server.get('networks', [])
629 for v in bridge_interfaces:
630 #Get the bridge name
631 self.db_lock.acquire()
632 result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
633 self.db_lock.release()
634 if result <= 0:
635 self.logger.error("create_xml_server ERROR %d getting nets %s", result, content)
636 return -1, content
637 #ALF: Allow for the moment the 'default' bridge net because it is convenient for providing internet access to the VM
638 #I know it is not secure
639 #for v in sorted(desc['network interfaces'].itervalues()):
640 model = v.get("model", None)
641 if content[0]['provider']=='default':
642 text += self.tab() + "<interface type='network'>" + \
643 self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
644 elif content[0]['provider'][0:7]=='macvtap':
645 text += self.tab()+"<interface type='direct'>" + \
646 self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
647 self.tab() + "<target dev='macvtap0'/>"
648 if windows_os:
649 text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
650 elif model==None:
651 model = "virtio"
652 elif content[0]['provider'][0:6]=='bridge':
653 text += self.tab() + "<interface type='bridge'>" + \
654 self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
655 if windows_os:
656 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
657 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
658 elif model==None:
659 model = "virtio"
660 elif content[0]['provider'][0:3] == "OVS":
661 vlan = content[0]['provider'].replace('OVS:', '')
662 text += self.tab() + "<interface type='bridge'>" + \
663 self.inc_tab() + "<source bridge='ovim-" + str(vlan) + "'/>"
664 else:
665 return -1, 'Unknown Bridge net provider ' + content[0]['provider']
666 if model!=None:
667 text += self.tab() + "<model type='" +model+ "'/>"
668 if v.get('mac_address', None) != None:
669 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
670 text += self.pci2xml(v.get('vpci',None))
671 text += self.dec_tab()+'</interface>'
672
673 net_nb += 1
674
675 interfaces = numa.get('interfaces', [])
676
677 net_nb=0
678 for v in interfaces:
679 if self.develop_mode: #map these interfaces to bridges
680 text += self.tab() + "<interface type='bridge'>" + \
681 self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
682 if windows_os:
683 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
684 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
685 else:
686 text += self.tab() + "<model type='e1000'/>" #e1000 is more likely to be supported than 'virtio'
687 if v.get('mac_address', None) != None:
688 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
689 text += self.pci2xml(v.get('vpci',None))
690 text += self.dec_tab()+'</interface>'
691 continue
692
693 if v['dedicated'] == 'yes': #passthrough
694 text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
695 self.inc_tab() + "<source>"
696 self.inc_tab()
697 text += self.pci2xml(v['source'])
698 text += self.dec_tab()+'</source>'
699 text += self.pci2xml(v.get('vpci',None))
700 if windows_os:
701 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
702 text += self.dec_tab()+'</hostdev>'
703 net_nb += 1
704 else: #sriov_interfaces
705 #skip not connected interfaces
706 if v.get("net_id") == None:
707 continue
708 text += self.tab() + "<interface type='hostdev' managed='yes'>"
709 self.inc_tab()
710 if v.get('mac_address', None) != None:
711 text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
712 text+= self.tab()+'<source>'
713 self.inc_tab()
714 text += self.pci2xml(v['source'])
715 text += self.dec_tab()+'</source>'
716 if v.get('vlan',None) != None:
717 text += self.tab() + "<vlan> <tag id='" + str(v['vlan']) + "'/> </vlan>"
718 text += self.pci2xml(v.get('vpci',None))
719 if windows_os:
720 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
721 text += self.dec_tab()+'</interface>'
722
723
724 text += self.dec_tab()+'</devices>'+\
725 self.dec_tab()+'</domain>'
726 return 0, text
727
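# Minimal illustrative input for create_xml_server (hypothetical values):
#   server = {'uuid': '00000000-0000-0000-0000-000000000001', 'name': 'vm1',
#             'ram': 2048, 'vcpus': 2, 'networks': []}
#   dev_list = [{'type': 'disk', 'file format': 'qcow2', 'source file': '/opt/VNF/images/vm1.qcow2'}]
# create_xml_server(server, dev_list) returns (0, xml_text) on success or (-1, error_text) on error.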
728 def pci2xml(self, pci):
729 '''From a PCI address text XXXX:XX:XX.X generates the xml content of <address>.
730 Allows an empty pci text.'''
731 if pci is None:
732 return ""
733 first_part = pci.split(':')
734 second_part = first_part[2].split('.')
735 return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
736 "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
737 "' function='0x" + second_part[1] + "'/>"
738
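# Example: pci2xml("0000:04:10.1") returns, at the current xml_level indentation,
#   <address type='pci' domain='0x0000' bus='0x04' slot='0x10' function='0x1'/>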
739 def tab(self):
740 """Return indentation according to xml_level"""
741 return "\n" + (' '*self.xml_level)
742
743 def inc_tab(self):
744 """Increment and return indentation according to xml_level"""
745 self.xml_level += 1
746 return self.tab()
747
748 def dec_tab(self):
749 """Decrement and return indentation according to xml_level"""
750 self.xml_level -= 1
751 return self.tab()
752
753 def create_ovs_bridge(self):
754 """
755 Create the OVS integration bridge (br-int) in the compute node to attach VMs
756 :return: True if success
757 """
758 if self.test or not self.connectivity:
759 return True
760
761
762 try:
763 command = 'sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true'
764 self.logger.debug("command: " + command)
765 (_, stdout, _) = self.ssh_conn.exec_command(command)
766 content = stdout.read()
767 if len(content) == 0:
768 return True
769 else:
770 return False
771 except paramiko.ssh_exception.SSHException as e:
772 self.logger.error("create_ovs_bridge ssh Exception: " + str(e))
773 if "SSH session not active" in str(e):
774 self.ssh_connect()
775 return False
776
777 def delete_port_to_ovs_bridge(self, vlan, net_uuid):
778 """
779 Delete a linux bridge port attached to an OVS bridge; if the port is not free it is not removed
780 :param vlan: vlan port id
781 :param net_uuid: network id
782 :return:
783 """
784
785 if self.test or not self.connectivity:
786 return True
787 try:
788 port_name = 'ovim-' + str(vlan)
789 command = 'sudo ovs-vsctl del-port br-int ' + port_name
790 self.logger.debug("command: " + command)
791 (_, stdout, _) = self.ssh_conn.exec_command(command)
792 content = stdout.read()
793 if len(content) == 0:
794 return True
795 else:
796 return False
797 except paramiko.ssh_exception.SSHException as e:
798 self.logger.error("delete_port_to_ovs_bridge ssh Exception: " + str(e))
799 if "SSH session not active" in str(e):
800 self.ssh_connect()
801 return False
802
803 def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
804 """
805 Delete the dhcp server process living in the namespace
806 :param vlan: segmentation id
807 :param net_uuid: network uuid
808 :param dhcp_path: conf file path that lives in the namespace side
809 :return:
810 """
811 if self.test or not self.connectivity:
812 return True
813 if not self.is_dhcp_port_free(vlan, net_uuid):
814 return True
815 try:
816 dhcp_namespace = str(vlan) + '-dnsmasq'
817 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
818 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
819
820 command = 'sudo ip netns exec ' + dhcp_namespace + ' cat ' + pid_file
821 self.logger.debug("command: " + command)
822 (_, stdout, _) = self.ssh_conn.exec_command(command)
823 content = stdout.read()
824
825 command = 'sudo ip netns exec ' + dhcp_namespace + ' kill -9 ' + content
826 self.logger.debug("command: " + command)
827 (_, stdout, _) = self.ssh_conn.exec_command(command)
828 content = stdout.read()
829
830 # if len(content) == 0:
831 # return True
832 # else:
833 # return False
834 except paramiko.ssh_exception.SSHException as e:
835 self.logger.error("delete_dhcp_server ssh Exception: " + str(e))
836 if "SSH session not active" in str(e):
837 self.ssh_connect()
838 return False
839
840 def is_dhcp_port_free(self, host_id, net_uuid):
841 """
842 Check whether no port is attached to the net in the vxlan mesh across compute nodes
843 :param host_id: host id
844 :param net_uuid: network id
845 :return: True if the port is free (no ports attached)
846 """
847 self.db_lock.acquire()
848 result, content = self.db.get_table(
849 FROM='ports',
850 WHERE={'type': 'instance:ovs', 'net_id': net_uuid}
851 )
852 self.db_lock.release()
853
854 if len(content) > 0:
855 return False
856 else:
857 return True
858
859 def is_port_free(self, host_id, net_uuid):
860 """
861 Check whether there are no OVS ports of a network in this compute host.
862 :param host_id: host id
863 :param net_uuid: network id
864 :return: True if the port is free (no ports attached)
865 """
866
867 self.db_lock.acquire()
868 result, content = self.db.get_table(
869 FROM='ports as p join instances as i on p.instance_id=i.uuid',
870 WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
871 )
872 self.db_lock.release()
873
874 if len(content) > 0:
875 return False
876 else:
877 return True
878
879 def add_port_to_ovs_bridge(self, vlan):
880 """
881 Add a linux bridge as a port to an OVS bridge and set a vlan tag for that specific linux bridge
882 :param vlan: vlan port id
883 :return: True if success
884 """
885
886 if self.test:
887 return True
888 try:
889 port_name = 'ovim-' + str(vlan)
890 command = 'sudo ovs-vsctl add-port br-int ' + port_name + ' tag=' + str(vlan)
891 self.logger.debug("command: " + command)
892 (_, stdout, _) = self.ssh_conn.exec_command(command)
893 content = stdout.read()
894 if len(content) == 0:
895 return True
896 else:
897 return False
898 except paramiko.ssh_exception.SSHException as e:
899 self.logger.error("add_port_to_ovs_bridge ssh Exception: " + str(e))
900 if "SSH session not active" in str(e):
901 self.ssh_connect()
902 return False
903
904 def delete_dhcp_port(self, vlan, net_uuid):
905 """
906 Delete the dhcp namespace interfaces of a net when its dhcp port is no longer used.
907 :param vlan: segmentation id
908 :param net_uuid: network id
909 :return: True if success
910 """
911
912 if self.test:
913 return True
914
915 if not self.is_dhcp_port_free(vlan, net_uuid):
916 return True
917 self.delete_dhcp_interfaces(vlan)
918 return True
919
920 def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
921 """
922 Delete a linux bridge port attached to an existing OVS bridge, and the linux bridge itself.
923 :param vlan:
924 :param net_uuid:
925 :return: True if success
926 """
927 if self.test:
928 return
929
930 if not self.is_port_free(vlan, net_uuid):
931 return True
932 self.delete_port_to_ovs_bridge(vlan, net_uuid)
933 self.delete_linux_bridge(vlan)
934 return True
935
936 def delete_linux_bridge(self, vlan):
937 """
938 Delete a linux bridge in a specific compute.
939 :param vlan: vlan port id
940 :return: True if success
941 """
942
943 if self.test:
944 return True
945 try:
946 port_name = 'ovim-' + str(vlan)
947 command = 'sudo ip link set dev ovim-' + str(vlan) + ' down'
948 self.logger.debug("command: " + command)
949 (_, stdout, _) = self.ssh_conn.exec_command(command)
950 # content = stdout.read()
951 #
952 # if len(content) != 0:
953 # return False
954 command = 'sudo ifconfig ' + port_name + ' down && sudo brctl delbr ' + port_name
955 self.logger.debug("command: " + command)
956 (_, stdout, _) = self.ssh_conn.exec_command(command)
957 content = stdout.read()
958 if len(content) == 0:
959 return True
960 else:
961 return False
962 except paramiko.ssh_exception.SSHException as e:
963 self.logger.error("delete_linux_bridge ssh Exception: " + str(e))
964 if "SSH session not active" in str(e):
965 self.ssh_connect()
966 return False
967
968 def remove_link_bridge_to_ovs(self, vlan, link):
969 """
970 Delete a linux provider net connection to a tenant net
971 :param vlan: vlan port id
972 :param link: link name
973 :return: True if success
974 """
975
976 if self.test:
977 return True
978 try:
979 br_tap_name = str(vlan) + '-vethBO'
980 br_ovs_name = str(vlan) + '-vethOB'
981
982 # Delete ovs veth pair
983 command = 'sudo ip link set dev {} down'.format(br_ovs_name)
984 self.logger.debug("command: " + command)
985 (_, stdout, _) = self.ssh_conn.exec_command(command)
986 content = stdout.read()
987
988 command = 'sudo ovs-vsctl del-port br-int {}'.format(br_ovs_name)
989 self.logger.debug("command: " + command)
990 (_, stdout, _) = self.ssh_conn.exec_command(command)
991 content = stdout.read()
992
993 # Delete br veth pair
994 command = 'sudo ip link set dev {} down'.format(br_tap_name)
995 self.logger.debug("command: " + command)
996 (_, stdout, _) = self.ssh_conn.exec_command(command)
997 content = stdout.read()
998
999 # Delete br veth interface from the bridge
1000 command = 'sudo brctl delif {} {}'.format(link, br_tap_name)
1001 self.logger.debug("command: " + command)
1002 (_, stdout, _) = self.ssh_conn.exec_command(command)
1003 content = stdout.read()
1004
1005 # Set the linked bridge interface down
1006 command = 'sudo ip link set dev {} down'.format(link)
1007 self.logger.debug("command: " + command)
1008 (_, stdout, _) = self.ssh_conn.exec_command(command)
1009 content = stdout.read()
1010
1011 if len(content) == 0:
1012 return True
1013 else:
1014 return False
1015 except paramiko.ssh_exception.SSHException as e:
1016 self.logger.error("remove_link_bridge_to_ovs ssh Exception: " + str(e))
1017 if "SSH session not active" in str(e):
1018 self.ssh_connect()
1019 return False
1020
1021 def create_ovs_bridge_port(self, vlan):
1022 """
1023 Generate a linux bridge and attach it as a port to the OVS bridge
1024 :param vlan: vlan port id
1025 :return:
1026 """
1027 if self.test:
1028 return
1029 self.create_linux_bridge(vlan)
1030 self.add_port_to_ovs_bridge(vlan)
1031
1032 def create_linux_bridge(self, vlan):
1033 """
1034 Create a linux bridge with STP active
1035 :param vlan: network vlan id
1036 :return:
1037 """
1038
1039 if self.test:
1040 return True
1041 try:
1042 port_name = 'ovim-' + str(vlan)
1043 command = 'sudo brctl show | grep ' + port_name
1044 self.logger.debug("command: " + command)
1045 (_, stdout, _) = self.ssh_conn.exec_command(command)
1046 content = stdout.read()
1047
1048 # if it already exists there is nothing to create
1049 # if len(content) == 0:
1050 # return False
1051
1052 command = 'sudo brctl addbr ' + port_name
1053 self.logger.debug("command: " + command)
1054 (_, stdout, _) = self.ssh_conn.exec_command(command)
1055 content = stdout.read()
1056
1057 # if len(content) == 0:
1058 # return True
1059 # else:
1060 # return False
1061
1062 command = 'sudo brctl stp ' + port_name + ' on'
1063 self.logger.debug("command: " + command)
1064 (_, stdout, _) = self.ssh_conn.exec_command(command)
1065 content = stdout.read()
1066
1067 # if len(content) == 0:
1068 # return True
1069 # else:
1070 # return False
1071 command = 'sudo ip link set dev ' + port_name + ' up'
1072 self.logger.debug("command: " + command)
1073 (_, stdout, _) = self.ssh_conn.exec_command(command)
1074 content = stdout.read()
1075
1076 if len(content) == 0:
1077 return True
1078 else:
1079 return False
1080 except paramiko.ssh_exception.SSHException as e:
1081 self.logger.error("create_linux_bridge ssh Exception: " + str(e))
1082 if "SSH session not active" in str(e):
1083 self.ssh_connect()
1084 return False
1085
1086 def set_mac_dhcp_server(self, ip, mac, vlan, netmask, first_ip, dhcp_path):
1087 """
1088 Write into the dhcp conf file a rule to assign a fixed ip to a specific MAC address
1089 :param ip: IP address assigned to a VM
1090 :param mac: VM vnic mac to be matched with the IP received
1091 :param vlan: Segmentation id
1092 :param netmask: netmask value
1093 :param dhcp_path: dhcp conf file path that lives in the namespace side
1094 :return: True if success
1095 """
1096
1097 if self.test:
1098 return True
1099
1100 dhcp_namespace = str(vlan) + '-dnsmasq'
1101 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1102 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1103
1104 if not ip:
1105 return False
1106 try:
1107
1108 ns_interface = str(vlan) + '-vethDO'
1109 command = 'sudo ip netns exec ' + dhcp_namespace + ' cat /sys/class/net/{}/address'.format(ns_interface)
1110 self.logger.debug("command: " + command)
1111 (_, stdout, _) = self.ssh_conn.exec_command(command)
1112 iface_listen_mac = stdout.read()
1113
1114 if iface_listen_mac > 0:
1115 command = 'sudo ip netns exec ' + dhcp_namespace + ' cat {} | grep {}'.format(dhcp_hostsdir, dhcp_hostsdir)
1116 self.logger.debug("command: " + command)
1117 (_, stdout, _) = self.ssh_conn.exec_command(command)
1118 content = stdout.read()
1119 if content > 0:
1120 ip_data = iface_listen_mac.upper().replace('\n', '') + ',' + first_ip
1121 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1122
1123 command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo bash -ec "echo ' + ip_data + ' >> ' + dhcp_hostsdir + '"'
1124 self.logger.debug("command: " + command)
1125 (_, stdout, _) = self.ssh_conn.exec_command(command)
1126 content = stdout.read()
1127
1128
1129 ip_data = mac.upper() + ',' + ip
1130
1131 command = 'sudo ip netns exec ' + dhcp_namespace + ' touch ' + dhcp_hostsdir
1132 self.logger.debug("command: " + command)
1133 (_, stdout, _) = self.ssh_conn.exec_command(command)
1134 content = stdout.read()
1135
1136 command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo bash -ec "echo ' + ip_data + ' >> ' + dhcp_hostsdir + '"'
1137
1138 self.logger.debug("command: " + command)
1139 (_, stdout, _) = self.ssh_conn.exec_command(command)
1140 content = stdout.read()
1141
1142 if len(content) == 0:
1143 return True
1144 else:
1145 return False
1146 except paramiko.ssh_exception.SSHException as e:
1147 self.logger.error("set_mac_dhcp_server ssh Exception: " + str(e))
1148 if "SSH session not active" in str(e):
1149 self.ssh_connect()
1150 return False
1151
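# Note: the lines appended above to the dnsmasq hostsdir file follow the dhcp-host
# "MAC,IP" format, e.g. (hypothetical values):
#   52:54:00:AA:BB:CC,10.0.0.12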
1152 def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
1153 """
1154 Delete from the dhcp conf file the ip assigned to a specific MAC address
1155
1156 :param ip: IP address assigned to a VM
1157 :param mac: VM vnic mac to be matched with the IP received
1158 :param vlan: Segmentation id
1159 :param dhcp_path: dhcp conf file path that lives in the namespace side
1160 :return:
1161 """
1162
1163 if self.test:
1164 return False
1165 try:
1166 dhcp_namespace = str(vlan) + '-dnsmasq'
1167 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1168 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1169
1170 if not ip:
1171 return False
1172
1173 ip_data = mac.upper() + ',' + ip
1174
1175 command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
1176 self.logger.debug("command: " + command)
1177 (_, stdout, _) = self.ssh_conn.exec_command(command)
1178 content = stdout.read()
1179
1180 if len(content) == 0:
1181 return True
1182 else:
1183 return False
1184
1185 except paramiko.ssh_exception.SSHException as e:
1186 self.logger.error("delete_mac_dhcp_server ssh Exception: " + str(e))
1187 if "SSH session not active" in str(e):
1188 self.ssh_connect()
1189 return False
1190
1191 def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway, dns_list=None, routes=None):
1192 """
1193 Launch a dnsmasq dhcp server for a net inside its namespace
1194 :param self:
1195 :param vlan: Segmentation id
1196 :param ip_range: IP dhcp range
1197 :param netmask: network netmask
1198 :param dhcp_path: dhcp conf file path that lives in the namespace side
1199 :param gateway: Gateway address for dhcp net
1200 :param dns_list: dns list for dhcp server
1201 :param routes: routes list for dhcp server
1202 :return: True if success
1203 """
1204
1205 if self.test:
1206 return True
1207 try:
1208 ns_interface = str(vlan) + '-vethDO'
1209 dhcp_namespace = str(vlan) + '-dnsmasq'
1210 dhcp_path = os.path.join(dhcp_path, dhcp_namespace, '')
1211 leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
1212 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
1213
1214
1215 dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask
1216
1217 command = 'sudo ip netns exec ' + dhcp_namespace + ' mkdir -p ' + dhcp_path
1218 self.logger.debug("command: " + command)
1219 (_, stdout, _) = self.ssh_conn.exec_command(command)
1220 content = stdout.read()
1221
1222 pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
1223 command = 'sudo ip netns exec ' + dhcp_namespace + ' cat ' + pid_path
1224 self.logger.debug("command: " + command)
1225 (_, stdout, _) = self.ssh_conn.exec_command(command)
1226 content = stdout.read()
1227
1228 # check if pid is running
1229 pid_status_path = content
1230 if content:
1231 command = "ps aux | awk '{print $2 }' | grep " + pid_status_path
1232 self.logger.debug("command: " + command)
1233 (_, stdout, _) = self.ssh_conn.exec_command(command)
1234 content = stdout.read()
1235
1236 gateway_option = ' --dhcp-option=3,' + gateway
1237
1238 dhcp_route_option = ''
1239 if routes:
1240 dhcp_route_option = ' --dhcp-option=121'
1241 for key, value in routes.iteritems():
1242 if 'default' == key:
1243 gateway_option = ' --dhcp-option=3,' + value
1244 else:
1245 dhcp_route_option += ',' + key + ',' + value
1246 dns_data = ''
1247 if dns_list:
1248 dns_data = ' --dhcp-option=6'
1249 for dns in dns_list:
1250 dns_data += ',' + dns
1251
1252 if not content:
1253 command = 'sudo ip netns exec ' + dhcp_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
1254 '--interface=' + ns_interface + \
1255 ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
1256 ' --dhcp-range ' + dhcp_range + \
1257 ' --pid-file=' + pid_file + \
1258 ' --dhcp-leasefile=' + leases_path + \
1259 ' --listen-address ' + ip_range[0] + \
1260 gateway_option + \
1261 dhcp_route_option + \
1262 dns_data
1263
1264 self.logger.debug("command: " + command)
1265 (_, stdout, _) = self.ssh_conn.exec_command(command)
1266 content = stdout.readline()
1267
1268 if len(content) == 0:
1269 return True
1270 else:
1271 return False
1272 except paramiko.ssh_exception.SSHException as e:
1273 self.logger.error("launch_dhcp_server ssh Exception: " + str(e))
1274 if "SSH session not active" in str(e):
1275 self.ssh_connect()
1276 return False
1277
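# Illustrative example of the dnsmasq command assembled above, for vlan 3000,
# dhcp_path '/var/lib/openvim', range 10.0.0.10-10.0.0.200/255.255.255.0 and
# gateway 10.0.0.1 (all values hypothetical; the code builds it as a single line,
# wrapped here for readability):
#   sudo ip netns exec 3000-dnsmasq /usr/sbin/dnsmasq --strict-order --except-interface=lo
#       --interface=3000-vethDO --bind-interfaces --dhcp-hostsdir=/var/lib/openvim/3000-dnsmasq/
#       --dhcp-range 10.0.0.10,10.0.0.200,255.255.255.0
#       --pid-file=/var/lib/openvim/3000-dnsmasq/dnsmasq.pid
#       --dhcp-leasefile=/var/lib/openvim/3000-dnsmasq/dnsmasq.leases
#       --listen-address 10.0.0.10 --dhcp-option=3,10.0.0.1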
1278 def delete_dhcp_interfaces(self, vlan):
1279 """
1280 Delete the dhcp namespace of a net and its veth interfaces
1281 :param vlan: network vlan id
1282 :return:
1283 """
1284
1285 if self.test:
1286 return True
1287 try:
1288 br_veth_name = str(vlan) + '-vethDO'
1289 ovs_veth_name = str(vlan) + '-vethOD'
1290 dhcp_namespace = str(vlan) + '-dnsmasq'
1291
1292 command = 'sudo ovs-vsctl del-port br-int ' + ovs_veth_name
1293 self.logger.debug("command: " + command)
1294 (_, stdout, _) = self.ssh_conn.exec_command(command)
1295 content = stdout.read()
1296
1297 command = 'sudo ip netns exec ' + dhcp_namespace + ' ip link set dev ' + br_veth_name + ' down'
1298 self.logger.debug("command: " + command)
1299 (_, stdout, _) = self.ssh_conn.exec_command(command)
1300 content = stdout.read()
1301
1302 command = 'sudo ip link set dev ' + dhcp_namespace + ' down'
1303 self.logger.debug("command: " + command)
1304 (_, stdout, _) = self.ssh_conn.exec_command(command)
1305 content = stdout.read()
1306
1307 command = 'sudo brctl delbr ' + dhcp_namespace
1308 self.logger.debug("command: " + command)
1309 (_, stdout, _) = self.ssh_conn.exec_command(command)
1310 content = stdout.read()
1311
1312 command = 'sudo ip netns del ' + dhcp_namespace
1313 self.logger.debug("command: " + command)
1314 (_, stdout, _) = self.ssh_conn.exec_command(command)
1315 content = stdout.read()
1316
1317 except paramiko.ssh_exception.SSHException as e:
1318 self.logger.error("delete_dhcp_interfaces ssh Exception: " + str(e))
1319 if "SSH session not active" in str(e):
1320 self.ssh_connect()
1321 return False
1322
1323 def create_dhcp_interfaces(self, vlan, ip_listen_address, netmask):
1324 """
1325 Create the dhcp namespace of a net and its veth interfaces towards OVS
1326 :param vlan: segmentation id
1327 :param ip_listen_address: Listen IP address for the dhcp service, on the tap interface living in the namespace side
1328 :param netmask: dhcp net netmask
1329 :return: True if success
1330 """
1331
1332 if self.test:
1333 return True
1334 try:
1335 ovs_veth_name = str(vlan) + '-vethOD'
1336 ns_veth = str(vlan) + '-vethDO'
1337 dhcp_namespace = str(vlan) + '-dnsmasq'
1338
1339 command = 'sudo ip netns add ' + dhcp_namespace
1340 self.logger.debug("command: " + command)
1341 (_, stdout, _) = self.ssh_conn.exec_command(command)
1342 content = stdout.read()
1343
1344 command = 'sudo ip link add ' + ns_veth + ' type veth peer name ' + ovs_veth_name
1345 self.logger.debug("command: " + command)
1346 (_, stdout, _) = self.ssh_conn.exec_command(command)
1347 content = stdout.read()
1348
1349 command = 'sudo ip link set ' + ns_veth + ' netns ' + dhcp_namespace
1350 self.logger.debug("command: " + command)
1351 (_, stdout, _) = self.ssh_conn.exec_command(command)
1352 content = stdout.read()
1353
1354 command = 'sudo ip netns exec ' + dhcp_namespace + ' ip link set dev ' + ns_veth + ' up'
1355 self.logger.debug("command: " + command)
1356 (_, stdout, _) = self.ssh_conn.exec_command(command)
1357 content = stdout.read()
1358
1359 command = 'sudo ovs-vsctl add-port br-int ' + ovs_veth_name + ' tag=' + str(vlan)
1360 self.logger.debug("command: " + command)
1361 (_, stdout, _) = self.ssh_conn.exec_command(command)
1362 content = stdout.read()
1363
1364 command = 'sudo ip link set dev ' + ovs_veth_name + ' up'
1365 self.logger.debug("command: " + command)
1366 (_, stdout, _) = self.ssh_conn.exec_command(command)
1367 content = stdout.read()
1368
1369 command = 'sudo ip netns exec ' + dhcp_namespace + ' ip link set dev lo up'
1370 self.logger.debug("command: " + command)
1371 (_, stdout, _) = self.ssh_conn.exec_command(command)
1372 content = stdout.read()
1373
1374 command = 'sudo ip netns exec ' + dhcp_namespace + ' ' + ' ifconfig ' + ns_veth \
1375 + ' ' + ip_listen_address + ' netmask ' + netmask
1376 self.logger.debug("command: " + command)
1377 (_, stdout, _) = self.ssh_conn.exec_command(command)
1378 content = stdout.read()
1379 if len(content) == 0:
1380 return True
1381 else:
1382 return False
1383 except paramiko.ssh_exception.SSHException as e:
1384 self.logger.error("create_dhcp_interfaces ssh Exception: " + str(e))
1385 if "SSH session not active" in str(e):
1386 self.ssh_connect()
1387 return False
1388
1389 def delete_qrouter_connection(self, vlan, link):
1390 """
1391 Delete the qrouter namespace with all the veth interfaces it needs
1392 :param vlan:
1393 :param link:
1394 :return:
1395 """
1396
1397 ns_qouter = str(vlan) + '-qrouter'
1398 qrouter_ovs_veth = str(vlan) + '-vethOQ'
1399 qrouter_ns_veth = str(vlan) + '-vethQO'
1400
1401 qrouter_br_veth = str(vlan) + '-vethBQ'
1402 qrouter_ns_router_veth = str(vlan) + '-vethQB'
1403
1404 # delete ovs veth to ovs br-int
1405 command = 'sudo ovs-vsctl del-port br-int {}'.format(qrouter_ovs_veth)
1406 self.logger.debug("command: " + command)
1407 (_, stdout, _) = self.ssh_conn.exec_command(command)
1408 content = stdout.read()
1409
1410 # down ns veth
1411 command = 'sudo ip netns exec {} ip link set dev {} down'.format(ns_qouter, qrouter_ns_veth)
1412 self.logger.debug("command: " + command)
1413 (_, stdout, _) = self.ssh_conn.exec_command(command)
1414 content = stdout.read()
1415
1416 # down br-side veth interface
1417 command = 'sudo ip link set dev {} down'.format(qrouter_br_veth)
1418 self.logger.debug("command: " + command)
1419 (_, stdout, _) = self.ssh_conn.exec_command(command)
1420 content = stdout.read()
1421
1422 # down ovs-side veth interface
1423 command = 'sudo ip link set dev {} down'.format(qrouter_ovs_veth)
1424 self.logger.debug("command: " + command)
1425 (_, stdout, _) = self.ssh_conn.exec_command(command)
1426 content = stdout.read()
1427
1428 # down the ns-side router veth interface
1429 command = 'sudo ip link set dev {} down'.format(qrouter_ns_router_veth)
1430 self.logger.debug("command: " + command)
1431 (_, stdout, _) = self.ssh_conn.exec_command(command)
1432 content = stdout.read()
1433
1434 # remove br veth interface from the user bridge
1435 command = 'sudo brctl delif {} {}'.format(link, qrouter_br_veth)
1436 self.logger.debug("command: " + command)
1437 (_, stdout, _) = self.ssh_conn.exec_command(command)
1438 content = stdout.read()
1439
1440
1441 # delete NS
1442 command = 'sudo ip netns del ' + ns_qouter
1443 self.logger.debug("command: " + command)
1444 (_, stdout, _) = self.ssh_conn.exec_command(command)
1445 content = stdout.read()
1446
1447 def create_qrouter_ovs_connection(self, vlan, gateway, dhcp_cidr):
1448 """
1449 Create the qrouter namespace with all the veth interfaces needed between the NS and OVS
1450 :param vlan:
1451 :param gateway:
1452 :return:
1453 """
1454
1455 ns_qouter = str(vlan) + '-qrouter'
1456 qrouter_ovs_veth = str(vlan) + '-vethOQ'
1457 qrouter_ns_veth = str(vlan) + '-vethQO'
1458
1459 # Create NS
1460 command = 'sudo ip netns add ' + ns_qouter
1461 self.logger.debug("command: " + command)
1462 (_, stdout, _) = self.ssh_conn.exec_command(command)
1463 content = stdout.read()
1464
1465 # Create veth pair
1466 command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_ns_veth, qrouter_ovs_veth)
1467 self.logger.debug("command: " + command)
1468 (_, stdout, _) = self.ssh_conn.exec_command(command)
1469 content = stdout.read()
1470
1471 # up ovs veth interface
1472 command = 'sudo ip link set dev {} up'.format(qrouter_ovs_veth)
1473 self.logger.debug("command: " + command)
1474 (_, stdout, _) = self.ssh_conn.exec_command(command)
1475 content = stdout.read()
1476
1477 # add ovs veth to ovs br-int
1478 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(qrouter_ovs_veth, vlan)
1479 self.logger.debug("command: " + command)
1480 (_, stdout, _) = self.ssh_conn.exec_command(command)
1481 content = stdout.read()
1482
1483 # add veth to ns
1484 command = 'sudo ip link set {} netns {}'.format(qrouter_ns_veth, ns_qouter)
1485 self.logger.debug("command: " + command)
1486 (_, stdout, _) = self.ssh_conn.exec_command(command)
1487 content = stdout.read()
1488
1489 # up ns loopback
1490 command = 'sudo ip netns exec {} ip link set dev lo up'.format(ns_qouter)
1491 self.logger.debug("command: " + command)
1492 (_, stdout, _) = self.ssh_conn.exec_command(command)
1493 content = stdout.read()
1494
1495 # up ns veth
1496 command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_veth)
1497 self.logger.debug("command: " + command)
1498 (_, stdout, _) = self.ssh_conn.exec_command(command)
1499 content = stdout.read()
1500
1501 from netaddr import IPNetwork
1502 ip_tools = IPNetwork(dhcp_cidr)
1503 cidr_len = ip_tools.prefixlen
1504
1505 # set gw to ns veth
1506 command = 'sudo ip netns exec {} ip address add {}/{} dev {}'.format(ns_qouter, gateway, cidr_len, qrouter_ns_veth)
1507 self.logger.debug("command: " + command)
1508 (_, stdout, _) = self.ssh_conn.exec_command(command)
1509 content = stdout.read()
1510
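# Naming convention used by these namespace/OVS helpers, with vlan as the segmentation id:
#   '<vlan>-qrouter' namespace for routing/NAT, '<vlan>-dnsmasq' namespace for dhcp,
#   '<vlan>-vethOQ' / '<vlan>-vethQO' veth pair between OVS br-int and the qrouter namespace,
#   '<vlan>-vethQB' / '<vlan>-vethBQ' veth pair between the qrouter namespace and the user bridge,
#   '<vlan>-vethOD' / '<vlan>-vethDO' veth pair between OVS br-int and the dnsmasq namespace,
#   '<vlan>-vethOB' / '<vlan>-vethBO' veth pair between OVS br-int and a linked linux bridge.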
1511 def add_ns_routes(self, vlan, routes):
1512
1513 for key, value in routes.iteritems():
1514 ns_qouter = str(vlan) + '-qrouter'
1515 qrouter_ns_router_veth = str(vlan) + '-vethQB'
1516 # up ns veth
1517 if key == 'default':
1518 command = 'sudo ip netns exec {} ip route add {} via {} '.format(ns_qouter, key, value)
1519 else:
1520 command = 'sudo ip netns exec {} ip route add {} via {} dev {}'.format(ns_qouter, key, value,
1521 qrouter_ns_router_veth)
1522 self.logger.debug("command: " + command)
1523 (_, stdout, _) = self.ssh_conn.exec_command(command)
1524 content = stdout.read()
1525
1526 def create_qrouter_br_connection(self, vlan, cidr, link):
1527 """
1528 Create veth interfaces between user bridge (link) and OVS
1529 :param vlan:
1530 :param link:
1531 :return:
1532 """
1533
1534 ns_qouter = str(vlan) + '-qrouter'
1535 qrouter_ns_router_veth = str(vlan) + '-vethQB'
1536 qrouter_br_veth = str(vlan) + '-vethBQ'
1537
1538 # Create veth pair
1539 command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_br_veth, qrouter_ns_router_veth)
1540 self.logger.debug("command: " + command)
1541 (_, stdout, _) = self.ssh_conn.exec_command(command)
1542 content = stdout.read()
1543
1544 # up ovs veth interface
1545 command = 'sudo ip link set dev {} up'.format(qrouter_br_veth)
1546 self.logger.debug("command: " + command)
1547 (_, stdout, _) = self.ssh_conn.exec_command(command)
1548 content = stdout.read()
1549
1550 # add veth to ns
1551 command = 'sudo ip link set {} netns {}'.format(qrouter_ns_router_veth, ns_qouter)
1552 self.logger.debug("command: " + command)
1553 (_, stdout, _) = self.ssh_conn.exec_command(command)
1554 content = stdout.read()
1555
1556 # up ns veth
1557 command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_router_veth)
1558 self.logger.debug("command: " + command)
1559 (_, stdout, _) = self.ssh_conn.exec_command(command)
1560 content = stdout.read()
1561
1562 command = 'sudo ip netns exec {} ip address add {} dev {}'.format(ns_qouter, link['nat'], qrouter_ns_router_veth)
1563 self.logger.debug("command: " + command)
1564 (_, stdout, _) = self.ssh_conn.exec_command(command)
1565 content = stdout.read()
1566
1567 command = 'sudo brctl show | grep {}'.format(link['iface'])
1568 self.logger.debug("command: " + command)
1569 (_, stdout, _) = self.ssh_conn.exec_command(command)
1570 content = stdout.read()
1571
1572 if content > '':
1573 # up ns veth
1574 command = 'sudo brctl addif {} {}'.format(link['iface'], qrouter_br_veth)
1575 self.logger.debug("command: " + command)
1576 (_, stdout, _) = self.ssh_conn.exec_command(command)
1577 content = stdout.read()
1578
1579 # up ns veth
1580 command = 'sudo ip netns exec {} iptables -t nat -A POSTROUTING -o {} -s {} -d {} -j MASQUERADE' \
1581 .format(ns_qouter, qrouter_ns_router_veth, link['nat'], cidr)
1582 self.logger.debug("command: " + command)
1583 (_, stdout, _) = self.ssh_conn.exec_command(command)
1584 content = stdout.read()
1585
1586
1587 else:
1588             self.logger.error('Bridge {} given by user does not exist'.format(link['iface']))
1589
1590
1591
1592 def create_link_bridge_to_ovs(self, vlan, link):
1593 """
1594         Create a veth pair to connect a linux bridge (link) with the tenant net at the OVS integration bridge
1595         :param vlan: segmentation id, used as OVS port tag and to name the veth pair
1596         :return: True if success
1597 """
1598 if self.test:
1599 return True
1600 try:
1601
1602 br_tap_name = str(vlan) + '-vethBO'
1603 br_ovs_name = str(vlan) + '-vethOB'
1604
1605             # check that the link given is an existing linux bridge
1606 command = 'sudo brctl show | grep {}'.format(link)
1607 self.logger.debug("command: " + command)
1608 (_, stdout, _) = self.ssh_conn.exec_command(command)
1609 content = stdout.read()
1610
1611             if content:
1612 command = 'sudo ip link add {} type veth peer name {}'.format(br_tap_name, br_ovs_name)
1613 self.logger.debug("command: " + command)
1614 (_, stdout, _) = self.ssh_conn.exec_command(command)
1615 content = stdout.read()
1616
1617 command = 'sudo ip link set dev {} up'.format(br_tap_name)
1618 self.logger.debug("command: " + command)
1619 (_, stdout, _) = self.ssh_conn.exec_command(command)
1620 content = stdout.read()
1621
1622 command = 'sudo ip link set dev {} up'.format(br_ovs_name)
1623 self.logger.debug("command: " + command)
1624 (_, stdout, _) = self.ssh_conn.exec_command(command)
1625 content = stdout.read()
1626
1627 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(br_ovs_name, str(vlan))
1628 self.logger.debug("command: " + command)
1629 (_, stdout, _) = self.ssh_conn.exec_command(command)
1630 content = stdout.read()
1631
1632                 command = 'sudo brctl addif {} {}'.format(link, br_tap_name)
1633 self.logger.debug("command: " + command)
1634 (_, stdout, _) = self.ssh_conn.exec_command(command)
1635 content = stdout.read()
1636
1637 if len(content) == 0:
1638 return True
1639 else:
1640 return False
1641 else:
1642 self.logger.error('Link is not present, please check {}'.format(link))
1643 return False
1644 except paramiko.ssh_exception.SSHException as e:
1645             self.logger.error("create_link_bridge_to_ovs ssh Exception: " + str(e))
1646 if "SSH session not active" in str(e):
1647 self.ssh_connect()
1648 return False
1649
1650 def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
1651 """
1652         Create a vxlan tunnel between two computes with an OVS installed. STP is also active at port level
1653         :param vxlan_interface: vxlan interface name.
1654         :param remote_ip: tunnel endpoint remote compute ip.
1655 :return:
1656 """
1657 if self.test or not self.connectivity:
1658 return True
1659 try:
1660 command = 'sudo ovs-vsctl add-port br-int ' + vxlan_interface + \
1661 ' -- set Interface ' + vxlan_interface + ' type=vxlan options:remote_ip=' + remote_ip + \
1662 ' -- set Port ' + vxlan_interface + ' other_config:stp-path-cost=10'
1663 self.logger.debug("command: " + command)
1664 (_, stdout, _) = self.ssh_conn.exec_command(command)
1665 content = stdout.read()
1666 # print content
1667 if len(content) == 0:
1668 return True
1669 else:
1670 return False
1671 except paramiko.ssh_exception.SSHException as e:
1672 self.logger.error("create_ovs_vxlan_tunnel ssh Exception: " + str(e))
1673 if "SSH session not active" in str(e):
1674 self.ssh_connect()
1675 return False
1676
1677 def delete_ovs_vxlan_tunnel(self, vxlan_interface):
1678 """
1679         Delete a vxlan tunnel port from an OVS bridge.
1680         :param vxlan_interface: vxlan port name to be deleted.
1681 :return: True if success.
1682 """
1683 if self.test or not self.connectivity:
1684 return True
1685 try:
1686 command = 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
1687 self.logger.debug("command: " + command)
1688 (_, stdout, _) = self.ssh_conn.exec_command(command)
1689 content = stdout.read()
1690 # print content
1691 if len(content) == 0:
1692 return True
1693 else:
1694 return False
1695 except paramiko.ssh_exception.SSHException as e:
1696 self.logger.error("delete_ovs_vxlan_tunnel ssh Exception: " + str(e))
1697 if "SSH session not active" in str(e):
1698 self.ssh_connect()
1699 return False
1700
1701 def delete_ovs_bridge(self):
1702 """
1703         Delete an OVS bridge from the compute.
1704 :return: True if success
1705 """
1706 if self.test or not self.connectivity:
1707 return True
1708 try:
1709 command = 'sudo ovs-vsctl del-br br-int'
1710 self.logger.debug("command: " + command)
1711 (_, stdout, _) = self.ssh_conn.exec_command(command)
1712 content = stdout.read()
1713 if len(content) == 0:
1714 return True
1715 else:
1716 return False
1717 except paramiko.ssh_exception.SSHException as e:
1718 self.logger.error("delete_ovs_bridge ssh Exception: " + str(e))
1719 if "SSH session not active" in str(e):
1720 self.ssh_connect()
1721 return False
1722
1723 def get_file_info(self, path):
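        """
        Get information of a file at the remote host using 'ls -lL'
        :param path: file path at the remote host
        :return: None if the file does not exist, otherwise the 'ls' output split into fields
        """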
1724 command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1725 self.logger.debug("command: " + command)
1726 (_, stdout, _) = self.ssh_conn.exec_command(command)
1727 content = stdout.read()
1728 if len(content) == 0:
1729 return None # file does not exist
1730 else:
1731             return content.split(" ") # (permissions, links, owner, group, size, date, file)
1732
1733 def qemu_get_info(self, path):
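        """
        Obtain 'qemu-img info' of a remote file, parsed as a dict (the output is yaml compatible)
        :param path: image path at the remote host
        :return: dict with the qemu-img fields; raises SSHException on error
        """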
1734 command = 'qemu-img info ' + path
1735 self.logger.debug("command: " + command)
1736 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
1737 content = stdout.read()
1738 if len(content) == 0:
1739 error = stderr.read()
1740             self.logger.error("qemu_get_info error " + error)
1741 raise paramiko.ssh_exception.SSHException("Error getting qemu_info: " + error)
1742 else:
1743 try:
1744 return yaml.load(content)
1745 except yaml.YAMLError as exc:
1746 text = ""
1747 if hasattr(exc, 'problem_mark'):
1748 mark = exc.problem_mark
1749 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
1750                 self.logger.error("qemu_get_info yaml format Exception " + text)
1751 raise paramiko.ssh_exception.SSHException("Error getting qemu_info yaml format" + text)
1752
1753 def qemu_change_backing(self, inc_file, new_backing_file):
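        """
        Rebase an incremental qcow2 file onto a new backing file ('qemu-img rebase -u')
        :param inc_file: incremental file path at the remote host
        :param new_backing_file: new backing file path
        :return: 0 if success, -1 on error
        """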
1754 command = 'qemu-img rebase -u -b ' + new_backing_file + ' ' + inc_file
1755 self.logger.debug("command: " + command)
1756 (_, _, stderr) = self.ssh_conn.exec_command(command)
1757 content = stderr.read()
1758 if len(content) == 0:
1759 return 0
1760 else:
1761 self.logger.error("qemu_change_backing error: " + content)
1762 return -1
1763
1764 def qemu_create_empty_disk(self, dev):
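        """
        Create an empty qcow2 disk at the remote host with 'qemu-img create'
        :param dev: dict with at least 'source file' (target path) and 'image_size' (size in GB)
        :return: 0 if success, -1 on error
        """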
1765
1766         if not dev or 'source file' not in dev or 'image_size' not in dev:
1767 self.logger.error("qemu_create_empty_disk error: missing image parameter")
1768 return -1
1769
1770 empty_disk_path = dev['source file']
1771
1772 command = 'qemu-img create -f qcow2 ' + empty_disk_path + ' ' + str(dev['image_size']) + 'G'
1773 self.logger.debug("command: " + command)
1774 (_, _, stderr) = self.ssh_conn.exec_command(command)
1775 content = stderr.read()
1776 if len(content) == 0:
1777 return 0
1778 else:
1779 self.logger.error("qemu_create_empty_disk error: " + content)
1780 return -1
1781
1782 def get_notused_filename(self, proposed_name, suffix=''):
1783 '''Look for a non existing file_name in the host
1784 proposed_name: proposed file name, includes path
1785         suffix: suffix to be added to the name, before the extension
1786 '''
1787 extension = proposed_name.rfind(".")
1788 slash = proposed_name.rfind("/")
1789 if extension < 0 or extension < slash: # no extension
1790 extension = len(proposed_name)
1791 target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
1792 info = self.get_file_info(target_name)
1793 if info is None:
1794 return target_name
1795
1796 index=0
1797 while info is not None:
1798 target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
1799 index+=1
1800 info = self.get_file_info(target_name)
1801 return target_name
1802
1803 def get_notused_path(self, proposed_path, suffix=''):
1804 '''Look for a non existing path at database for images
1805 proposed_path: proposed file name, includes path
1806         suffix: suffix to be added to the name, before the extension
1807 '''
1808 extension = proposed_path.rfind(".")
1809 if extension < 0:
1810 extension = len(proposed_path)
1811         suffix = suffix if suffix is not None else ''
1812         target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
1813 index=0
1814 while True:
1815 r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
1816 if r<=0:
1817 return target_path
1818 target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
1819 index+=1
1820
1821
1822 def delete_file(self, file_name):
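        """Delete a file at the remote host; raises SSHException on error"""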
1823 command = 'rm -f '+file_name
1824 self.logger.debug("command: " + command)
1825 (_, _, stderr) = self.ssh_conn.exec_command(command)
1826 error_msg = stderr.read()
1827 if len(error_msg) > 0:
1828 raise paramiko.ssh_exception.SSHException("Error deleting file: " + error_msg)
1829
1830 def copy_file(self, source, destination, perserve_time=True):
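        """
        Copy a file at the remote host: download with wget if 'source' is an http url, otherwise use cp.
        Raises SSHException on error.
        """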
1831 if source[0:4]=="http":
1832 command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1833 dst=destination, src=source, dst_result=destination + ".result" )
1834 else:
1835 command = 'cp --no-preserve=mode'
1836 if perserve_time:
1837 command += ' --preserve=timestamps'
1838 command += " '{}' '{}'".format(source, destination)
1839 self.logger.debug("command: " + command)
1840 (_, _, stderr) = self.ssh_conn.exec_command(command)
1841 error_msg = stderr.read()
1842 if len(error_msg) > 0:
1843 raise paramiko.ssh_exception.SSHException("Error copying image to local host: " + error_msg)
1844
1845 def copy_remote_file(self, remote_file, use_incremental):
1846 ''' Copy a file from the repository to local folder and recursively
1847 copy the backing files in case the remote file is incremental
1848             Reads and/or modifies self.localinfo['files'], which contains the
1849             unmodified copies of images in the local path
1850 params:
1851 remote_file: path of remote file
1852 use_incremental: None (leave the decision to this function), True, False
1853 return:
1854 local_file: name of local file
1855              qemu_info: dict with qemu information of local file
1856 use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
1857 '''
1858
1859 use_incremental_out = use_incremental
1860 new_backing_file = None
1861 local_file = None
1862 file_from_local = True
1863
1864 #in case incremental use is not decided, take the decision depending on the image
1865 #avoid the use of incremental if this image is already incremental
1866 if remote_file[0:4] == "http":
1867 file_from_local = False
1868 if file_from_local:
1869 qemu_remote_info = self.qemu_get_info(remote_file)
1870 if use_incremental_out==None:
1871 use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)
1872         #copy recursively the backing files
1873 if file_from_local and 'backing file' in qemu_remote_info:
1874 new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)
1875
1876 #check if remote file is present locally
1877 if use_incremental_out and remote_file in self.localinfo['files']:
1878 local_file = self.localinfo['files'][remote_file]
1879 local_file_info = self.get_file_info(local_file)
1880 if file_from_local:
1881 remote_file_info = self.get_file_info(remote_file)
1882 if local_file_info == None:
1883 local_file = None
1884 elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
1885 #local copy of file not valid because date or size are different.
1886 #TODO DELETE local file if this file is not used by any active virtual machine
1887 try:
1888 self.delete_file(local_file)
1889 del self.localinfo['files'][remote_file]
1890 except Exception:
1891 pass
1892 local_file = None
1893             else: #check that the local file has the same backing file, or there is no backing file at all
1894 qemu_info = self.qemu_get_info(local_file)
1895 if new_backing_file != qemu_info.get('backing file'):
1896 local_file = None
1897
1898
1899 if local_file == None: #copy the file
1900 img_name= remote_file.split('/') [-1]
1901 img_local = self.image_path + '/' + img_name
1902 local_file = self.get_notused_filename(img_local)
1903 self.copy_file(remote_file, local_file, use_incremental_out)
1904
1905 if use_incremental_out:
1906 self.localinfo['files'][remote_file] = local_file
1907 if new_backing_file:
1908 self.qemu_change_backing(local_file, new_backing_file)
1909 qemu_info = self.qemu_get_info(local_file)
1910
1911 return local_file, qemu_info, use_incremental_out
1912
1913 def launch_server(self, conn, server, rebuild=False, domain=None):
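        """
        Launch a server: copy the needed images to the host and create the libvirt domain
        :param conn: libvirt connection to the host
        :param server: dict with the instance data (uuid, image_id, metadata, ...)
        :param rebuild: if True, delete the previous incremental disk files and create them again
        :param domain: already existing libvirt domain; if provided and not rebuilding, it is just resumed
        :return: (0, 'Success') or (<0, error_text)
        """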
1914 if self.test:
1915             time.sleep(random.randint(20,150)) #sleep a random time to make it a bit more real
1916 return 0, 'Success'
1917
1918 server_id = server['uuid']
1919 paused = server.get('paused','no')
1920 try:
1921 if domain!=None and rebuild==False:
1922 domain.resume()
1923 #self.server_status[server_id] = 'ACTIVE'
1924 return 0, 'Success'
1925
1926 self.db_lock.acquire()
1927 result, server_data = self.db.get_instance(server_id)
1928 self.db_lock.release()
1929 if result <= 0:
1930 self.logger.error("launch_server ERROR getting server from DB %d %s", result, server_data)
1931 return result, server_data
1932
1933 #0: get image metadata
1934 server_metadata = server.get('metadata', {})
1935 use_incremental = None
1936
1937 if "use_incremental" in server_metadata:
1938 use_incremental = False if server_metadata["use_incremental"] == "no" else True
1939
1940 server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
1941 if rebuild:
1942 #delete previous incremental files
1943 for file_ in server_host_files.values():
1944 self.delete_file(file_['source file'] )
1945 server_host_files={}
1946
1947             #1: obtain additional devices (disks)
1948 #Put as first device the main disk
1949 devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
1950 if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
1951 devices += server_data['extended']['devices']
1952 empty_path = None
1953 for dev in devices:
1954 image_id = dev.get('image_id')
1955 if not image_id:
1956 import uuid
1957 uuid_empty = str(uuid.uuid4())
1958 empty_path = self.empty_image_path + uuid_empty + '.qcow2' # local path for empty disk
1959
1960 dev['source file'] = empty_path
1961 dev['file format'] = 'qcow2'
1962 self.qemu_create_empty_disk(dev)
1963 server_host_files[uuid_empty] = {'source file': empty_path,
1964 'file format': dev['file format']}
1965
1966 continue
1967 else:
1968 self.db_lock.acquire()
1969 result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
1970 WHERE={'uuid': image_id})
1971 self.db_lock.release()
1972 if result <= 0:
1973                         error_text = "ERROR {} {} when getting image {}".format(result, content, dev['image_id'])
1974 self.logger.error("launch_server " + error_text)
1975 return -1, error_text
1976 if content[0]['metadata'] is not None:
1977 dev['metadata'] = json.loads(content[0]['metadata'])
1978 else:
1979 dev['metadata'] = {}
1980
1981 if image_id in server_host_files:
1982 dev['source file'] = server_host_files[image_id]['source file'] #local path
1983 dev['file format'] = server_host_files[image_id]['file format'] # raw or qcow2
1984 continue
1985
1986 #2: copy image to host
1987 if image_id:
1988 remote_file = content[0]['path']
1989 else:
1990 remote_file = empty_path
1991 use_incremental_image = use_incremental
1992 if dev['metadata'].get("use_incremental") == "no":
1993 use_incremental_image = False
1994 local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)
1995
1996 #create incremental image
1997 if use_incremental_image:
1998 local_file_inc = self.get_notused_filename(local_file, '.inc')
1999 command = 'qemu-img create -f qcow2 '+local_file_inc+ ' -o backing_file='+ local_file
2000 self.logger.debug("command: " + command)
2001 (_, _, stderr) = self.ssh_conn.exec_command(command)
2002 error_msg = stderr.read()
2003 if len(error_msg) > 0:
2004 raise paramiko.ssh_exception.SSHException("Error creating incremental file: " + error_msg)
2005 local_file = local_file_inc
2006 qemu_info = {'file format':'qcow2'}
2007
2008 server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}
2009
2010 dev['source file'] = local_file
2011 dev['file format'] = qemu_info['file format']
2012
2013 self.localinfo['server_files'][ server['uuid'] ] = server_host_files
2014 self.localinfo_dirty = True
2015
2016 #3 Create XML
2017 result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
2018 if result <0:
2019 self.logger.error("create xml server error: " + xml)
2020 return -2, xml
2021 self.logger.debug("create xml: " + xml)
2022 atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
2023 #4 Start the domain
2024 if not rebuild: #ensures that any pending destroying server is done
2025 self.server_forceoff(True)
2026 #self.logger.debug("launching instance " + xml)
2027 conn.createXML(xml, atribute)
2028 #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
2029
2030 return 0, 'Success'
2031
2032 except paramiko.ssh_exception.SSHException as e:
2033 text = e.args[0]
2034 self.logger.error("launch_server id='%s' ssh Exception: %s", server_id, text)
2035 if "SSH session not active" in text:
2036 self.ssh_connect()
2037 except host_thread.lvirt_module.libvirtError as e:
2038 text = e.get_error_message()
2039 self.logger.error("launch_server id='%s' libvirt Exception: %s", server_id, text)
2040 except Exception as e:
2041 text = str(e)
2042 self.logger.error("launch_server id='%s' Exception: %s", server_id, text)
2043 return -1, text
2044
2045 def update_servers_status(self):
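        """Poll libvirt for the state of the known domains and update self.server_status and the instances table"""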
2046 # # virDomainState
2047 # VIR_DOMAIN_NOSTATE = 0
2048 # VIR_DOMAIN_RUNNING = 1
2049 # VIR_DOMAIN_BLOCKED = 2
2050 # VIR_DOMAIN_PAUSED = 3
2051 # VIR_DOMAIN_SHUTDOWN = 4
2052 # VIR_DOMAIN_SHUTOFF = 5
2053 # VIR_DOMAIN_CRASHED = 6
2054 # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended
2055
2056 if self.test or len(self.server_status)==0:
2057 return
2058
2059 try:
2060 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2061 domains= conn.listAllDomains()
2062 domain_dict={}
2063 for domain in domains:
2064                 uuid = domain.UUIDString()
2065 libvirt_status = domain.state()
2066 #print libvirt_status
2067 if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
2068 new_status = "ACTIVE"
2069 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
2070 new_status = "PAUSED"
2071 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
2072 new_status = "INACTIVE"
2073 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
2074 new_status = "ERROR"
2075 else:
2076 new_status = None
2077 domain_dict[uuid] = new_status
2078 conn.close()
2079 except host_thread.lvirt_module.libvirtError as e:
2080 self.logger.error("get_state() Exception " + e.get_error_message())
2081 return
2082
2083 for server_id, current_status in self.server_status.iteritems():
2084 new_status = None
2085 if server_id in domain_dict:
2086 new_status = domain_dict[server_id]
2087 else:
2088 new_status = "INACTIVE"
2089
2090 if new_status == None or new_status == current_status:
2091 continue
2092 if new_status == 'INACTIVE' and current_status == 'ERROR':
2093 continue #keep ERROR status, because obviously this machine is not running
2094 #change status
2095 self.logger.debug("server id='%s' status change from '%s' to '%s'", server_id, current_status, new_status)
2096 STATUS={'progress':100, 'status':new_status}
2097 if new_status == 'ERROR':
2098 STATUS['last_error'] = 'machine has crashed'
2099 self.db_lock.acquire()
2100 r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
2101 self.db_lock.release()
2102 if r>=0:
2103 self.server_status[server_id] = new_status
2104
2105 def action_on_server(self, req, last_retry=True):
2106 '''Perform an action on a req
2107 Attributes:
2108 req: dictionary that contain:
2109 server properties: 'uuid','name','tenant_id','status'
2110 action: 'action'
2111 host properties: 'user', 'ip_name'
2112 return (error, text)
2113 0: No error. VM is updated to new state,
2114 -1: Invalid action, as trying to pause a PAUSED VM
2115 -2: Error accessing host
2116                 -3: VM not present
2117 -4: Error at DB access
2118 -5: Error while trying to perform action. VM is updated to ERROR
2119 '''
2120 server_id = req['uuid']
2121 conn = None
2122 new_status = None
2123 old_status = req['status']
2124 last_error = None
2125
2126 if self.test:
2127 if 'terminate' in req['action']:
2128 new_status = 'deleted'
2129 elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
2130 if req['status']!='ERROR':
2131 time.sleep(5)
2132 new_status = 'INACTIVE'
2133 elif 'start' in req['action'] and req['status']!='ERROR':
2134 new_status = 'ACTIVE'
2135 elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE':
2136 new_status = 'ACTIVE'
2137 elif 'pause' in req['action'] and req['status']!='ERROR':
2138 new_status = 'PAUSED'
2139 elif 'reboot' in req['action'] and req['status']!='ERROR':
2140 new_status = 'ACTIVE'
2141 elif 'rebuild' in req['action']:
2142 time.sleep(random.randint(20,150))
2143 new_status = 'ACTIVE'
2144 elif 'createImage' in req['action']:
2145 time.sleep(5)
2146 self.create_image(None, req)
2147 else:
2148 try:
2149 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2150 try:
2151 dom = conn.lookupByUUIDString(server_id)
2152 except host_thread.lvirt_module.libvirtError as e:
2153 text = e.get_error_message()
2154 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
2155 dom = None
2156 else:
2157 self.logger.error("action_on_server id='%s' libvirt exception: %s", server_id, text)
2158 raise e
2159
2160 if 'forceOff' in req['action']:
2161 if dom == None:
2162 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2163 else:
2164 try:
2165 self.logger.debug("sending DESTROY to server id='%s'", server_id)
2166 dom.destroy()
2167 except Exception as e:
2168 if "domain is not running" not in e.get_error_message():
2169 self.logger.error("action_on_server id='%s' Exception while sending force off: %s",
2170 server_id, e.get_error_message())
2171 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
2172 new_status = 'ERROR'
2173
2174 elif 'terminate' in req['action']:
2175 if dom == None:
2176 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2177 new_status = 'deleted'
2178 else:
2179 try:
2180 if req['action']['terminate'] == 'force':
2181 self.logger.debug("sending DESTROY to server id='%s'", server_id)
2182 dom.destroy()
2183 new_status = 'deleted'
2184 else:
2185 self.logger.debug("sending SHUTDOWN to server id='%s'", server_id)
2186 dom.shutdown()
2187 self.pending_terminate_server.append( (time.time()+10,server_id) )
2188 except Exception as e:
2189 self.logger.error("action_on_server id='%s' Exception while destroy: %s",
2190 server_id, e.get_error_message())
2191 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
2192 new_status = 'ERROR'
2193 if "domain is not running" in e.get_error_message():
2194 try:
2195 dom.undefine()
2196 new_status = 'deleted'
2197 except Exception:
2198 self.logger.error("action_on_server id='%s' Exception while undefine: %s",
2199 server_id, e.get_error_message())
2200                                     last_error = 'action_on_server Exception2 while undefine: ' + e.get_error_message()
2201 #Exception: 'virDomainDetachDevice() failed'
2202 if new_status=='deleted':
2203 if server_id in self.server_status:
2204 del self.server_status[server_id]
2205 if req['uuid'] in self.localinfo['server_files']:
2206 for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
2207 try:
2208 self.delete_file(file_['source file'])
2209 except Exception:
2210 pass
2211 del self.localinfo['server_files'][ req['uuid'] ]
2212 self.localinfo_dirty = True
2213
2214 elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
2215 try:
2216 if dom == None:
2217 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2218 else:
2219 dom.shutdown()
2220 # new_status = 'INACTIVE'
2221 #TODO: check status for changing at database
2222 except Exception as e:
2223 new_status = 'ERROR'
2224 self.logger.error("action_on_server id='%s' Exception while shutdown: %s",
2225 server_id, e.get_error_message())
2226 last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()
2227
2228 elif 'rebuild' in req['action']:
2229 if dom != None:
2230 dom.destroy()
2231 r = self.launch_server(conn, req, True, None)
2232 if r[0] <0:
2233 new_status = 'ERROR'
2234 last_error = r[1]
2235 else:
2236 new_status = 'ACTIVE'
2237 elif 'start' in req['action']:
2238                     # The instance is only created in the DB but not yet as a libvirt domain; it needs to be created
2239 rebuild = True if req['action']['start'] == 'rebuild' else False
2240 r = self.launch_server(conn, req, rebuild, dom)
2241 if r[0] <0:
2242 new_status = 'ERROR'
2243 last_error = r[1]
2244 else:
2245 new_status = 'ACTIVE'
2246
2247 elif 'resume' in req['action']:
2248 try:
2249 if dom == None:
2250 pass
2251 else:
2252 dom.resume()
2253 # new_status = 'ACTIVE'
2254 except Exception as e:
2255 self.logger.error("action_on_server id='%s' Exception while resume: %s",
2256 server_id, e.get_error_message())
2257
2258 elif 'pause' in req['action']:
2259 try:
2260 if dom == None:
2261 pass
2262 else:
2263 dom.suspend()
2264 # new_status = 'PAUSED'
2265 except Exception as e:
2266 self.logger.error("action_on_server id='%s' Exception while pause: %s",
2267 server_id, e.get_error_message())
2268
2269 elif 'reboot' in req['action']:
2270 try:
2271 if dom == None:
2272 pass
2273 else:
2274 dom.reboot()
2275 self.logger.debug("action_on_server id='%s' reboot:", server_id)
2276 #new_status = 'ACTIVE'
2277 except Exception as e:
2278 self.logger.error("action_on_server id='%s' Exception while reboot: %s",
2279 server_id, e.get_error_message())
2280 elif 'createImage' in req['action']:
2281 self.create_image(dom, req)
2282
2283
2284 conn.close()
2285 except host_thread.lvirt_module.libvirtError as e:
2286 if conn is not None: conn.close()
2287 text = e.get_error_message()
2288 new_status = "ERROR"
2289 last_error = text
2290 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
2291 self.logger.debug("action_on_server id='%s' Exception removed from host", server_id)
2292 else:
2293 self.logger.error("action_on_server id='%s' Exception %s", server_id, text)
2294 #end of if self.test
2295 if new_status == None:
2296 return 1
2297
2298 self.logger.debug("action_on_server id='%s' new status=%s %s",server_id, new_status, last_error)
2299 UPDATE = {'progress':100, 'status':new_status}
2300
2301 if new_status=='ERROR':
2302 if not last_retry: #if there will be another retry do not update database
2303 return -1
2304 elif 'terminate' in req['action']:
2305 #PUT a log in the database
2306 self.logger.error("PANIC deleting server id='%s' %s", server_id, last_error)
2307 self.db_lock.acquire()
2308 self.db.new_row('logs',
2309 {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
2310 'description':'PANIC deleting server from host '+self.name+': '+last_error}
2311 )
2312 self.db_lock.release()
2313 if server_id in self.server_status:
2314 del self.server_status[server_id]
2315 return -1
2316 else:
2317 UPDATE['last_error'] = last_error
2318 if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
2319 self.db_lock.acquire()
2320 self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
2321 self.server_status[server_id] = new_status
2322 self.db_lock.release()
2323 if new_status == 'ERROR':
2324 return -1
2325 return 1
2326
2327
2328 def restore_iface(self, name, mac, lib_conn=None):
2329         ''' make an ifdown, ifup to restore the default parameters of an interface
2330 Params:
2331 mac: mac address of the interface
2332 lib_conn: connection to the libvirt, if None a new connection is created
2333 Return 0,None if ok, -1,text if fails
2334 '''
2335 conn=None
2336 ret = 0
2337 error_text=None
2338 if self.test:
2339 self.logger.debug("restore_iface '%s' %s", name, mac)
2340 return 0, None
2341 try:
2342 if not lib_conn:
2343 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2344 else:
2345 conn = lib_conn
2346
2347 #wait to the pending VM deletion
2348 #TODO.Revise self.server_forceoff(True)
2349
2350 iface = conn.interfaceLookupByMACString(mac)
2351 if iface.isActive():
2352 iface.destroy()
2353 iface.create()
2354 self.logger.debug("restore_iface '%s' %s", name, mac)
2355 except host_thread.lvirt_module.libvirtError as e:
2356 error_text = e.get_error_message()
2357 self.logger.error("restore_iface '%s' '%s' libvirt exception: %s", name, mac, error_text)
2358 ret=-1
2359 finally:
2360 if lib_conn is None and conn is not None:
2361 conn.close()
2362 return ret, error_text
2363
2364
2365 def create_image(self,dom, req):
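        """
        Create an image from a server disk: copy the disk file to the requested path and
        update the 'images' table with the result
        :param dom: libvirt domain of the server (not used by this method)
        :param req: action request; req['action']['createImage'] describes the source disk and destination
        """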
2366 if self.test:
2367 if 'path' in req['action']['createImage']:
2368 file_dst = req['action']['createImage']['path']
2369 else:
2370 createImage=req['action']['createImage']
2371 img_name= createImage['source']['path']
2372 index=img_name.rfind('/')
2373 file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
2374 image_status='ACTIVE'
2375 else:
2376 for retry in (0,1):
2377 try:
2378 server_id = req['uuid']
2379 createImage=req['action']['createImage']
2380 file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
2381 if 'path' in req['action']['createImage']:
2382 file_dst = req['action']['createImage']['path']
2383 else:
2384 img_name= createImage['source']['path']
2385 index=img_name.rfind('/')
2386 file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')
2387
2388 self.copy_file(file_orig, file_dst)
2389 qemu_info = self.qemu_get_info(file_orig)
2390 if 'backing file' in qemu_info:
2391 for k,v in self.localinfo['files'].items():
2392 if v==qemu_info['backing file']:
2393 self.qemu_change_backing(file_dst, k)
2394 break
2395 image_status='ACTIVE'
2396 break
2397 except paramiko.ssh_exception.SSHException as e:
2398 image_status='ERROR'
2399 error_text = e.args[0]
2400 self.logger.error("create_image id='%s' ssh Exception: %s", server_id, error_text)
2401 if "SSH session not active" in error_text and retry==0:
2402 self.ssh_connect()
2403 except Exception as e:
2404 image_status='ERROR'
2405 error_text = str(e)
2406 self.logger.error("create_image id='%s' Exception: %s", server_id, error_text)
2407
2408 #TODO insert a last_error at database
2409 self.db_lock.acquire()
2410 self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
2411 {'uuid':req['new_image']['uuid']}, log=True)
2412 self.db_lock.release()
2413
2414 def edit_iface(self, port_id, old_net, new_net):
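        """
        Move a SR-IOV (VF) interface from one network to another by detaching and re-attaching it
        :param port_id: uuid of the port at the 'ports' table
        :param old_net: current network; if set, the interface is detached from the running domain
        :param new_net: new network; if set, the interface is re-attached with the new vlan tag
        """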
2415         # This action implies detaching and re-attaching the interface to apply the proper parameters
2416 if self.test:
2417 time.sleep(1)
2418 else:
2419 #get iface details
2420 self.db_lock.acquire()
2421 r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
2422 WHERE={'port_id': port_id})
2423 self.db_lock.release()
2424 if r<0:
2425 self.logger.error("edit_iface %s DDBB error: %s", port_id, c)
2426 return
2427 elif r==0:
2428 self.logger.error("edit_iface %s port not found", port_id)
2429 return
2430 port=c[0]
2431 if port["model"]!="VF":
2432 self.logger.error("edit_iface %s ERROR model must be VF", port_id)
2433 return
2434 #create xml detach file
2435 xml=[]
2436 self.xml_level = 2
2437 xml.append("<interface type='hostdev' managed='yes'>")
2438 xml.append(" <mac address='" +port['mac']+ "'/>")
2439 xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
2440 xml.append('</interface>')
2441
2442
2443 try:
2444 conn=None
2445 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2446 dom = conn.lookupByUUIDString(port["instance_id"])
2447 if old_net:
2448 text="\n".join(xml)
2449 self.logger.debug("edit_iface detaching SRIOV interface " + text)
2450 dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
2451 if new_net:
2452 xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
2453 self.xml_level = 1
2454 xml.append(self.pci2xml(port.get('vpci',None)) )
2455 xml.append('</interface>')
2456 text="\n".join(xml)
2457 self.logger.debug("edit_iface attaching SRIOV interface " + text)
2458 dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
2459
2460 except host_thread.lvirt_module.libvirtError as e:
2461 text = e.get_error_message()
2462 self.logger.error("edit_iface %s libvirt exception: %s", port["instance_id"], text)
2463
2464 finally:
2465 if conn is not None: conn.close()
2466
2467
2468 def create_server(server, db, db_lock, only_of_ports):
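    """
    Compute the resources (numa, cores, dataplane ports, bridge interfaces) needed by a server and
    reserve them at the database
    :param server: dict with the server description (flavor, image_id, networks, ...)
    :param db, db_lock: database object and the lock to use it in exclusion
    :param only_of_ports: forwarded to db.get_numas to restrict the dataplane ports that can be used
    :return: (0, resources) if success, (<0, error_text) otherwise
    """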
2469 extended = server.get('extended', None)
2470 requirements={}
2471 requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
2472 requirements['ram'] = server['flavor'].get('ram', 0)
2473 if requirements['ram']== None:
2474 requirements['ram'] = 0
2475 requirements['vcpus'] = server['flavor'].get('vcpus', 0)
2476 if requirements['vcpus']== None:
2477 requirements['vcpus'] = 0
2478 #If extended is not defined get requirements from flavor
2479 if extended is None:
2480 #If extended is defined in flavor convert to dictionary and use it
2481 if 'extended' in server['flavor'] and server['flavor']['extended'] != None:
2482 json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
2483 extended = json.loads(json_acceptable_string)
2484 else:
2485 extended = None
2486 #print json.dumps(extended, indent=4)
2487
2488     #For simplicity only single-NUMA VMs are supported in the initial implementation
2489 if extended != None:
2490 numas = extended.get('numas', [])
2491 if len(numas)>1:
2492 return (-2, "Multi-NUMA VMs are not supported yet")
2493 #elif len(numas)<1:
2494 # return (-1, "At least one numa must be specified")
2495
2496         #a for loop is used in order to be ready for multi-NUMA VMs
2497 request = []
2498 for numa in numas:
2499 numa_req = {}
2500 numa_req['memory'] = numa.get('memory', 0)
2501 if 'cores' in numa:
2502 numa_req['proc_req_nb'] = numa['cores'] #number of cores or threads to be reserved
2503 numa_req['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
2504 numa_req['proc_req_list'] = numa.get('cores-id', None) #list of ids to be assigned to the cores or threads
2505 elif 'paired-threads' in numa:
2506 numa_req['proc_req_nb'] = numa['paired-threads']
2507 numa_req['proc_req_type'] = 'paired-threads'
2508 numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
2509 elif 'threads' in numa:
2510 numa_req['proc_req_nb'] = numa['threads']
2511 numa_req['proc_req_type'] = 'threads'
2512 numa_req['proc_req_list'] = numa.get('threads-id', None)
2513 else:
2514 numa_req['proc_req_nb'] = 0 # by default
2515 numa_req['proc_req_type'] = 'threads'
2516
2517
2518
2519 #Generate a list of sriov and another for physical interfaces
2520 interfaces = numa.get('interfaces', [])
2521 sriov_list = []
2522 port_list = []
2523 for iface in interfaces:
2524 iface['bandwidth'] = int(iface['bandwidth'])
2525 if iface['dedicated'][:3]=='yes':
2526 port_list.append(iface)
2527 else:
2528 sriov_list.append(iface)
2529
2530             #Save lists ordered from higher to lower bandwidth requirements
2531 numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
2532 numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)
2533
2534
2535 request.append(numa_req)
2536
2537 # print "----------\n"+json.dumps(request[0], indent=4)
2538 # print '----------\n\n'
2539
2540 #Search in db for an appropriate numa for each requested numa
2541 #at the moment multi-NUMA VMs are not supported
2542 if len(request)>0:
2543 requirements['numa'].update(request[0])
2544 if requirements['numa']['memory']>0:
2545             requirements['ram']=0 #For the moment, asking for both hugepage and non-hugepage memory is not supported
2546 elif requirements['ram']==0:
2547             return (-1, "Memory information not set neither at extended field nor at ram")
2548 if requirements['numa']['proc_req_nb']>0:
2549             requirements['vcpus']=0 #For the moment, asking for both isolated and non-isolated cpus is not supported
2550 elif requirements['vcpus']==0:
2551             return (-1, "Processor information not set neither at extended field nor at vcpus")
2552
2553
2554 db_lock.acquire()
2555 result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
2556 db_lock.release()
2557
2558 if result == -1:
2559 return (-1, content)
2560
2561 numa_id = content['numa_id']
2562 host_id = content['host_id']
2563
2564 #obtain threads_id and calculate pinning
2565 cpu_pinning = []
2566 reserved_threads=[]
2567 if requirements['numa']['proc_req_nb']>0:
2568 db_lock.acquire()
2569 result, content = db.get_table(FROM='resources_core',
2570 SELECT=('id','core_id','thread_id'),
2571 WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} )
2572 db_lock.release()
2573 if result <= 0:
2574 #print content
2575 return -1, content
2576
2577 #convert rows to a dictionary indexed by core_id
2578 cores_dict = {}
2579 for row in content:
2580 if not row['core_id'] in cores_dict:
2581 cores_dict[row['core_id']] = []
2582 cores_dict[row['core_id']].append([row['thread_id'],row['id']])
2583
2584 #In case full cores are requested
2585 paired = 'N'
2586 if requirements['numa']['proc_req_type'] == 'cores':
2587 #Get/create the list of the vcpu_ids
2588 vcpu_id_list = requirements['numa']['proc_req_list']
2589 if vcpu_id_list == None:
2590 vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
2591
2592 for threads in cores_dict.itervalues():
2593 #we need full cores
2594 if len(threads) != 2:
2595 continue
2596
2597 #set pinning for the first thread
2598 cpu_pinning.append( [ vcpu_id_list.pop(0), threads[0][0], threads[0][1] ] )
2599
2600                 #reserve the second thread so it is not used
2601 reserved_threads.append(threads[1][1])
2602
2603 if len(vcpu_id_list) == 0:
2604 break
2605
2606 #In case paired threads are requested
2607 elif requirements['numa']['proc_req_type'] == 'paired-threads':
2608 paired = 'Y'
2609 #Get/create the list of the vcpu_ids
2610 if requirements['numa']['proc_req_list'] != None:
2611 vcpu_id_list = []
2612 for pair in requirements['numa']['proc_req_list']:
2613 if len(pair)!=2:
2614                         return -1, "Field paired-threads-id not properly specified"
2616 vcpu_id_list.append(pair[0])
2617 vcpu_id_list.append(pair[1])
2618 else:
2619 vcpu_id_list = range(0,2*int(requirements['numa']['proc_req_nb']))
2620
2621 for threads in cores_dict.itervalues():
2622 #we need full cores
2623 if len(threads) != 2:
2624 continue
2625 #set pinning for the first thread
2626 cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
2627
2628 #set pinning for the second thread
2629 cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
2630
2631 if len(vcpu_id_list) == 0:
2632 break
2633
2634 #In case normal threads are requested
2635 elif requirements['numa']['proc_req_type'] == 'threads':
2636 #Get/create the list of the vcpu_ids
2637 vcpu_id_list = requirements['numa']['proc_req_list']
2638 if vcpu_id_list == None:
2639 vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
2640
2641 for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
2642 threads = cores_dict[threads_index]
2643 #set pinning for the first thread
2644 cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
2645
2646 #if exists, set pinning for the second thread
2647 if len(threads) == 2 and len(vcpu_id_list) != 0:
2648 cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
2649
2650 if len(vcpu_id_list) == 0:
2651 break
2652
2653 #Get the source pci addresses for the selected numa
2654 used_sriov_ports = []
2655 for port in requirements['numa']['sriov_list']:
2656 db_lock.acquire()
2657 result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
2658 db_lock.release()
2659 if result <= 0:
2660 #print content
2661 return -1, content
2662 for row in content:
2663 if row['id'] in used_sriov_ports or row['id']==port['port_id']:
2664 continue
2665 port['pci'] = row['pci']
2666 if 'mac_address' not in port:
2667 port['mac_address'] = row['mac']
2668 del port['mac']
2669 port['port_id']=row['id']
2670 port['Mbps_used'] = port['bandwidth']
2671 used_sriov_ports.append(row['id'])
2672 break
2673
2674 for port in requirements['numa']['port_list']:
2675 port['Mbps_used'] = None
2676 if port['dedicated'] != "yes:sriov":
2677 port['mac_address'] = port['mac']
2678 del port['mac']
2679 continue
2680 db_lock.acquire()
2681 result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
2682 db_lock.release()
2683 if result <= 0:
2684 #print content
2685 return -1, content
2686 port['Mbps_used'] = content[0]['Mbps']
2687 for row in content:
2688 if row['id'] in used_sriov_ports or row['id']==port['port_id']:
2689 continue
2690 port['pci'] = row['pci']
2691 if 'mac_address' not in port:
2692 port['mac_address'] = row['mac'] # mac cannot be set to passthrough ports
2693 del port['mac']
2694 port['port_id']=row['id']
2695 used_sriov_ports.append(row['id'])
2696 break
2697
2698 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2699 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2700
2701 server['host_id'] = host_id
2702
2703 #Generate dictionary for saving in db the instance resources
2704 resources = {}
2705 resources['bridged-ifaces'] = []
2706
2707 numa_dict = {}
2708 numa_dict['interfaces'] = []
2709
2710 numa_dict['interfaces'] += requirements['numa']['port_list']
2711 numa_dict['interfaces'] += requirements['numa']['sriov_list']
2712
2713 #Check bridge information
2714 unified_dataplane_iface=[]
2715 unified_dataplane_iface += requirements['numa']['port_list']
2716 unified_dataplane_iface += requirements['numa']['sriov_list']
2717
2718 for control_iface in server.get('networks', []):
2719 control_iface['net_id']=control_iface.pop('uuid')
2720         #Get the bridge name
2721 db_lock.acquire()
2722 result, content = db.get_table(FROM='nets',
2723 SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp','dhcp_first_ip',
2724 'dhcp_last_ip', 'cidr', 'gateway_ip', 'dns', 'links', 'routes'),
2725 WHERE={'uuid': control_iface['net_id']})
2726 db_lock.release()
2727 if result < 0:
2728 pass
2729 elif result==0:
2730             return -1, "Error at field networks: no network found with uuid %s" % control_iface['net_id']
2731 else:
2732 network=content[0]
2733 if control_iface.get("type", 'virtual') == 'virtual':
2734 if network['type']!='bridge_data' and network['type']!='bridge_man':
2735                     return -1, "Error at field networks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
2736 resources['bridged-ifaces'].append(control_iface)
2737 if network.get("provider") and network["provider"][0:3] == "OVS":
2738 control_iface["type"] = "instance:ovs"
2739 else:
2740 control_iface["type"] = "instance:bridge"
2741 if network.get("vlan"):
2742 control_iface["vlan"] = network["vlan"]
2743
2744 if network.get("enable_dhcp") == 'true':
2745 control_iface["enable_dhcp"] = network.get("enable_dhcp")
2746 control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
2747 control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
2748 control_iface["cidr"] = network["cidr"]
2749
2750 if network.get("dns"):
2751 control_iface["dns"] = yaml.safe_load(network.get("dns"))
2752 if network.get("links"):
2753 control_iface["links"] = yaml.safe_load(network.get("links"))
2754 if network.get("routes"):
2755 control_iface["routes"] = yaml.safe_load(network.get("routes"))
2756 else:
2757 if network['type']!='data' and network['type']!='ptp':
2758                     return -1, "Error at field networks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
2759                 #dataplane interface, look for it in the numa tree and assign this network
2760 iface_found=False
2761 for dataplane_iface in numa_dict['interfaces']:
2762 if dataplane_iface['name'] == control_iface.get("name"):
2763 if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
2764 (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
2765 (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared") :
2766                             return -1, "Error at field networks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2767 (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
2768 dataplane_iface['uuid'] = control_iface['net_id']
2769 if dataplane_iface['dedicated'] == "no":
2770 dataplane_iface['vlan'] = network['vlan']
2771 if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
2772 dataplane_iface['mac_address'] = control_iface.get("mac_address")
2773 if control_iface.get("vpci"):
2774 dataplane_iface['vpci'] = control_iface.get("vpci")
2775 iface_found=True
2776 break
2777 if not iface_found:
2778                 return -1, "Error at field networks: interface name %s from network not found at flavor" % control_iface.get("name")
2779
2780 resources['host_id'] = host_id
2781 resources['image_id'] = server['image_id']
2782 resources['flavor_id'] = server['flavor_id']
2783 resources['tenant_id'] = server['tenant_id']
2784 resources['ram'] = requirements['ram']
2785 resources['vcpus'] = requirements['vcpus']
2786 resources['status'] = 'CREATING'
2787
2788 if 'description' in server: resources['description'] = server['description']
2789 if 'name' in server: resources['name'] = server['name']
2790
2791 resources['extended'] = {} #optional
2792 resources['extended']['numas'] = []
2793 numa_dict['numa_id'] = numa_id
2794 numa_dict['memory'] = requirements['numa']['memory']
2795 numa_dict['cores'] = []
2796
2797 for core in cpu_pinning:
2798 numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
2799 for core in reserved_threads:
2800 numa_dict['cores'].append({'id': core})
2801 resources['extended']['numas'].append(numa_dict)
2802 if extended!=None and 'devices' in extended: #TODO allow extra devices without numa
2803 resources['extended']['devices'] = extended['devices']
2804
2805
2806 # '===================================={'
2807 #print json.dumps(resources, indent=4)
2808 #print '====================================}'
2809
2810 return 0, resources
2811