# Unify ssh_command: allow remote ssh with paramiko and localhost with subprocess.
# [osm/openvim.git] / osm_openvim / host_thread.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
'''
This is a thread that interacts with the host and libvirt to manage VMs.
One thread will be launched per host.
'''
28 __author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__ = "$10-jul-2014 12:07:15$"
30
31 import json
32 import yaml
33 import threading
34 import time
35 import Queue
36 import paramiko
37 import subprocess
38 # import libvirt
39 import imp
40 import random
41 import os
42 import logging
43 from jsonschema import validate as js_v, exceptions as js_e
44 from vim_schema import localinfo_schema, hostinfo_schema
45
class RunCommandException(Exception):
    """Raised by host_thread.run_command() when a local or remote command fails,
    or when the underlying subprocess/ssh session cannot be used."""
    pass
48
49 class host_thread(threading.Thread):
50 lvirt_module = None
51
    def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
                 develop_bridge_iface, password=None, keyfile=None, logger_name=None, debug=None):
        """Init a thread to communicate with compute node or ovs_controller.
        :param host_id: host identity
        :param name: name of the thread
        :param host: host ip or name to manage and user
        :param user, password, keyfile: user and credentials to connect to host
        :param db, db_lock': database class and lock to use it in exclusion
        :param test: when True the thread runs in test mode and never contacts the host
        :param image_path: directory on the host where VM image files are stored
        :param version: openvim version string
        :param develop_mode: when True skip hugepages/cpu-pinning requirements
        :param develop_bridge_iface: bridge interface used for dataplane ifaces in develop mode
        :param logger_name: explicit logger name; defaults to "openvim.host.<name>"
        :param debug: logging level name (e.g. "DEBUG") to set on this thread's logger
        """
        threading.Thread.__init__(self)
        self.name = name
        self.host = host
        self.user = user
        self.db = db
        self.db_lock = db_lock
        self.test = test
        self.password = password
        self.keyfile = keyfile
        self.localinfo_dirty = False

        # lazily import python-libvirt once per process and cache it on the class,
        # so test mode does not require the library to be installed
        if not test and not host_thread.lvirt_module:
            try:
                module_info = imp.find_module("libvirt")
                host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
            except (IOError, ImportError) as e:
                raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))
        if logger_name:
            self.logger_name = logger_name
        else:
            self.logger_name = "openvim.host."+name
        self.logger = logging.getLogger(self.logger_name)
        if debug:
            self.logger.setLevel(getattr(logging, debug))


        self.develop_mode = develop_mode
        self.develop_bridge_iface = develop_bridge_iface
        self.image_path = image_path
        self.empty_image_path = image_path
        self.host_id = host_id
        self.version = version

        self.xml_level = 0  # current indentation depth used by tab()/inc_tab()/dec_tab()
        # self.pending ={}

        self.server_status = {} #dictionary with pairs server_uuid:server_status
        self.pending_terminate_server =[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
        self.next_update_server_status = 0 #time when must be check servers status

        self.hostinfo = None

        self.queueLock = threading.Lock()
        self.taskQueue = Queue.Queue(2000)
        self.ssh_conn = None
        # pending keep_session command state for run_command(); see run_command() docstring
        self.run_command_session = None
        self.error = None
        # 'localhost' selects the subprocess path in run_command(); anything else uses ssh
        self.localhost = True if host == 'localhost' else False
        self.lvirt_conn_uri = "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
            user=self.user, host=self.host)
        if keyfile:
            self.lvirt_conn_uri += "&keyfile=" + keyfile
        self.remote_ip = None
        self.local_ip = None
115
    def run_command(self, command, keep_session=False):
        """Run a command passed as a str on a localhost or at remote machine.
        :param command: text with the command to execute.
        :param keep_session: if True it returns a <stdin> for sending input with '<stdin>.write("text\n")'.
            A command with keep_session=True MUST be followed by a command with keep_session=False in order to
            close the session and get the output
        :return: the output of the command if 'keep_session=False' or the <stdin> object if 'keep_session=True'
        :raises: RunCommandException if command fails
        """
        if self.run_command_session and keep_session:
            raise RunCommandException("Internal error. A command with keep_session=True must be followed by another "
                                      "command with keep_session=False to close session")
        try:
            if self.localhost:
                if self.run_command_session:
                    # a previous keep_session command is pending: close it and collect output
                    p = self.run_command_session
                    self.run_command_session = None
                    (output, outerror) = p.communicate()
                    returncode = p.returncode
                    p.stdin.close()
                elif keep_session:
                    # open a local session; caller will write to the returned stdin
                    p = subprocess.Popen(('bash', "-c", command), stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                    self.run_command_session = p
                    return p.stdin
                else:
                    # simple one-shot local command; check_output raises CalledProcessError on rc!=0
                    output = subprocess.check_output(('bash', "-c", command))
                    returncode = 0
            else:
                if self.run_command_session:
                    # close the pending ssh session; shutdown_write signals EOF to the remote command
                    (i, o, e) = self.run_command_session
                    self.run_command_session = None
                    i.channel.shutdown_write()
                else:
                    if not self.ssh_conn:
                        self.ssh_connect()
                    (i, o, e) = self.ssh_conn.exec_command(command, timeout=10)
                    if keep_session:
                        self.run_command_session = (i, o, e)
                        return i
                returncode = o.channel.recv_exit_status()
                output = o.read()
                outerror = e.read()
            if returncode != 0:
                text = "run_command='{}' Error='{}'".format(command, outerror)
                self.logger.error(text)
                raise RunCommandException(text)

            self.logger.debug("run_command='{}' result='{}'".format(command, output))
            return output

        except RunCommandException:
            raise
        except subprocess.CalledProcessError as e:
            text = "run_command Exception '{}' '{}'".format(str(e), e.output)
        except (paramiko.ssh_exception.SSHException, Exception) as e:
            text = "run_command='{}' Exception='{}'".format(command, str(e))
        # reached only from the two except clauses above (the success path returns):
        # drop the connection and session so the next call reconnects cleanly
        self.ssh_conn = None
        self.run_command_session = None
        raise RunCommandException(text)
176
177 def ssh_connect(self):
178 try:
179 # Connect SSH
180 self.ssh_conn = paramiko.SSHClient()
181 self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
182 self.ssh_conn.load_system_host_keys()
183 self.ssh_conn.connect(self.host, username=self.user, password=self.password, key_filename=self.keyfile,
184 timeout=10) # auth_timeout=10)
185 self.remote_ip = self.ssh_conn.get_transport().sock.getpeername()[0]
186 self.local_ip = self.ssh_conn.get_transport().sock.getsockname()[0]
187 except (paramiko.ssh_exception.SSHException, Exception) as e:
188 text = 'ssh connect Exception: {}'.format(e)
189 self.ssh_conn = None
190 self.error = text
191 raise
192
193 def check_connectivity(self):
194 if not self.test:
195 # TODO change to run_command
196 try:
197 if not self.ssh_conn:
198 self.ssh_connect()
199
200 command = 'sudo brctl show'
201 (_, stdout, stderr) = self.ssh_conn.exec_command(command, timeout=10)
202 content = stderr.read()
203 if len(content) > 0:
204 self.connectivity = False
205 self.logger.error("ssh conection error")
206 except paramiko.ssh_exception.SSHException as e:
207 text = e.args[0]
208 self.connectivity = False
209 self.logger.error("ssh_connect ssh Exception: " + text)
210 raise paramiko.ssh_exception.SSHException("ssh error conection")
211 except Exception as e:
212 self.connectivity = False
213 raise paramiko.ssh_exception.SSHException("ssh error conection")
214
215 def load_localinfo(self):
216 if not self.test:
217 try:
218 self.run_command('sudo mkdir -p ' + self.image_path)
219 result = self.run_command('cat {}/.openvim.yaml'.format(self.image_path))
220 self.localinfo = yaml.load(result)
221 js_v(self.localinfo, localinfo_schema)
222 self.localinfo_dirty = False
223 if 'server_files' not in self.localinfo:
224 self.localinfo['server_files'] = {}
225 self.logger.debug("localinfo loaded from host")
226 return
227 except RunCommandException as e:
228 self.logger.error("load_localinfo Exception: " + str(e))
229 except host_thread.lvirt_module.libvirtError as e:
230 text = e.get_error_message()
231 self.logger.error("load_localinfo libvirt Exception: " + text)
232 except yaml.YAMLError as exc:
233 text = ""
234 if hasattr(exc, 'problem_mark'):
235 mark = exc.problem_mark
236 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
237 self.logger.error("load_localinfo yaml format Exception " + text)
238 except js_e.ValidationError as e:
239 text = ""
240 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
241 self.logger.error("load_localinfo format Exception: %s %s", text, str(e))
242 except Exception as e:
243 text = str(e)
244 self.logger.error("load_localinfo Exception: " + text)
245
246 # not loaded, insert a default data and force saving by activating dirty flag
247 self.localinfo = {'files':{}, 'server_files':{} }
248 # self.localinfo_dirty=True
249 self.localinfo_dirty=False
250
251 def load_hostinfo(self):
252 if self.test:
253 return
254 try:
255 result = self.run_command('cat {}/hostinfo.yaml'.format(self.image_path))
256 self.hostinfo = yaml.load(result)
257 js_v(self.hostinfo, hostinfo_schema)
258 self.logger.debug("hostinfo load from host " + str(self.hostinfo))
259 return
260 except RunCommandException as e:
261 self.logger.error("load_hostinfo ssh Exception: " + str(e))
262 except host_thread.lvirt_module.libvirtError as e:
263 text = e.get_error_message()
264 self.logger.error("load_hostinfo libvirt Exception: " + text)
265 except yaml.YAMLError as exc:
266 text = ""
267 if hasattr(exc, 'problem_mark'):
268 mark = exc.problem_mark
269 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
270 self.logger.error("load_hostinfo yaml format Exception " + text)
271 except js_e.ValidationError as e:
272 text = ""
273 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
274 self.logger.error("load_hostinfo format Exception: %s %s", text, str(e))
275 except Exception as e:
276 text = str(e)
277 self.logger.error("load_hostinfo Exception: " + text)
278
279 #not loaded, insert a default data
280 self.hostinfo = None
281
    def save_localinfo(self, tries=3):
        """Write self.localinfo as yaml to {image_path}/.openvim.yaml on the host,
        retrying up to 'tries' times; clears self.localinfo_dirty on success.
        :param tries: number of attempts before giving up
        """
        if self.test:
            self.localinfo_dirty = False
            return

        while tries>=0:
            tries-=1

            try:
                command = 'cat > {}/.openvim.yaml'.format(self.image_path)
                # open a keep_session command and stream the yaml through its stdin;
                # the second run_command() call closes the session (see run_command docstring)
                in_stream = self.run_command(command, keep_session=True)
                yaml.safe_dump(self.localinfo, in_stream, explicit_start=True, indent=4, default_flow_style=False,
                               tags=False, encoding='utf-8', allow_unicode=True)
                result = self.run_command(command, keep_session=False)   # to end session

                self.localinfo_dirty = False
                break   # while tries

            except RunCommandException as e:
                self.logger.error("save_localinfo ssh Exception: " + str(e))
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                self.logger.error("save_localinfo libvirt Exception: " + text)
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                self.logger.error("save_localinfo yaml format Exception " + text)
            except Exception as e:
                text = str(e)
                self.logger.error("save_localinfo Exception: " + text)
314
315 def load_servers_from_db(self):
316 self.db_lock.acquire()
317 r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
318 self.db_lock.release()
319
320 self.server_status = {}
321 if r<0:
322 self.logger.error("Error getting data from database: " + c)
323 return
324 for server in c:
325 self.server_status[ server['uuid'] ] = server['status']
326
327 #convert from old version to new one
328 if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
329 server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
330 if server_files_dict['source file'][-5:] == 'qcow2':
331 server_files_dict['file format'] = 'qcow2'
332
333 self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
334 if 'inc_files' in self.localinfo:
335 del self.localinfo['inc_files']
336 self.localinfo_dirty = True
337
338 def delete_unused_files(self):
339 '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
340 Deletes unused entries at self.loacalinfo and the corresponding local files.
341 The only reason for this mismatch is the manual deletion of instances (VM) at database
342 '''
343 if self.test:
344 return
345 for uuid,images in self.localinfo['server_files'].items():
346 if uuid not in self.server_status:
347 for localfile in images.values():
348 try:
349 self.logger.debug("deleting file '%s' of unused server '%s'", localfile['source file'], uuid)
350 self.delete_file(localfile['source file'])
351 except RunCommandException as e:
352 self.logger.error("Exception deleting file '%s': %s", localfile['source file'], str(e))
353 del self.localinfo['server_files'][uuid]
354 self.localinfo_dirty = True
355
356 def insert_task(self, task, *aditional):
357 try:
358 self.queueLock.acquire()
359 task = self.taskQueue.put( (task,) + aditional, timeout=5)
360 self.queueLock.release()
361 return 1, None
362 except Queue.Full:
363 return -1, "timeout inserting a task over host " + self.name
364
    def run(self):
        """Thread main loop: reload host/local info, then consume tasks from taskQueue.
        While idle it saves dirty localinfo, refreshes server status every 5 seconds and
        processes pending forced terminations. A 'reload' task restarts the outer loop;
        an 'exit' task terminates the thread."""
        while True:
            self.load_localinfo()
            self.load_hostinfo()
            self.load_servers_from_db()
            self.delete_unused_files()
            while True:
                try:
                    self.queueLock.acquire()
                    if not self.taskQueue.empty():
                        task = self.taskQueue.get()
                    else:
                        task = None
                    self.queueLock.release()

                    if task is None:
                        # no pending task: housekeeping, then sleep if nothing to do
                        now=time.time()
                        if self.localinfo_dirty:
                            self.save_localinfo()
                        elif self.next_update_server_status < now:
                            self.update_servers_status()
                            self.next_update_server_status = now + 5
                        elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
                            self.server_forceoff()
                        else:
                            time.sleep(1)
                        continue

                    if task[0] == 'instance':
                        self.logger.debug("processing task instance " + str(task[1]['action']))
                        retry = 0
                        while retry < 2:
                            retry += 1
                            # second argument True on the last retry
                            r = self.action_on_server(task[1], retry==2)
                            if r >= 0:
                                break
                    elif task[0] == 'image':
                        pass
                    elif task[0] == 'exit':
                        self.logger.debug("processing task exit")
                        self.terminate()
                        return 0
                    elif task[0] == 'reload':
                        self.logger.debug("processing task reload terminating and relaunching")
                        self.terminate()
                        break
                    elif task[0] == 'edit-iface':
                        self.logger.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
                            task[1], task[2], task[3]))
                        self.edit_iface(task[1], task[2], task[3])
                    elif task[0] == 'restore-iface':
                        self.logger.debug("processing task restore-iface={} mac={}".format(task[1], task[2]))
                        self.restore_iface(task[1], task[2])
                    elif task[0] == 'new-ovsbridge':
                        self.logger.debug("Creating compute OVS bridge")
                        self.create_ovs_bridge()
                    elif task[0] == 'new-vxlan':
                        self.logger.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task[1], task[2]))
                        self.create_ovs_vxlan_tunnel(task[1], task[2])
                    elif task[0] == 'del-ovsbridge':
                        self.logger.debug("Deleting OVS bridge")
                        self.delete_ovs_bridge()
                    elif task[0] == 'del-vxlan':
                        self.logger.debug("Deleting vxlan {} tunnel".format(task[1]))
                        self.delete_ovs_vxlan_tunnel(task[1])
                    elif task[0] == 'create-ovs-bridge-port':
                        self.logger.debug("Adding port ovim-{} to OVS bridge".format(task[1]))
                        self.create_ovs_bridge_port(task[1])
                    elif task[0] == 'del-ovs-port':
                        self.logger.debug("Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2]))
                        self.delete_bridge_port_attached_to_ovs(task[1], task[2])
                    else:
                        self.logger.debug("unknown task " + str(task))

                except Exception as e:
                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
441
442 def server_forceoff(self, wait_until_finished=False):
443 while len(self.pending_terminate_server)>0:
444 now = time.time()
445 if self.pending_terminate_server[0][0]>now:
446 if wait_until_finished:
447 time.sleep(1)
448 continue
449 else:
450 return
451 req={'uuid':self.pending_terminate_server[0][1],
452 'action':{'terminate':'force'},
453 'status': None
454 }
455 self.action_on_server(req)
456 self.pending_terminate_server.pop(0)
457
458 def terminate(self):
459 try:
460 self.server_forceoff(True)
461 if self.localinfo_dirty:
462 self.save_localinfo()
463 if not self.test:
464 self.ssh_conn.close()
465 except Exception as e:
466 text = str(e)
467 self.logger.error("terminate Exception: " + text)
468 self.logger.debug("exit from host_thread")
469
470 def get_local_iface_name(self, generic_name):
471 if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
472 return self.hostinfo["iface_names"][generic_name]
473 return generic_name
474
475 def create_xml_server(self, server, dev_list, server_metadata={}):
476 """Function that implements the generation of the VM XML definition.
477 Additional devices are in dev_list list
478 The main disk is upon dev_list[0]"""
479
480 #get if operating system is Windows
481 windows_os = False
482 os_type = server_metadata.get('os_type', None)
483 if os_type == None and 'metadata' in dev_list[0]:
484 os_type = dev_list[0]['metadata'].get('os_type', None)
485 if os_type != None and os_type.lower() == "windows":
486 windows_os = True
487 #get type of hard disk bus
488 bus_ide = True if windows_os else False
489 bus = server_metadata.get('bus', None)
490 if bus == None and 'metadata' in dev_list[0]:
491 bus = dev_list[0]['metadata'].get('bus', None)
492 if bus != None:
493 bus_ide = True if bus=='ide' else False
494
495 self.xml_level = 0
496
497 text = "<domain type='kvm'>"
498 #get topology
499 topo = server_metadata.get('topology', None)
500 if topo == None and 'metadata' in dev_list[0]:
501 topo = dev_list[0]['metadata'].get('topology', None)
502 #name
503 name = server.get('name', '')[:28] + "_" + server['uuid'][:28] #qemu impose a length limit of 59 chars or not start. Using 58
504 text += self.inc_tab() + "<name>" + name+ "</name>"
505 #uuid
506 text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>"
507
508 numa={}
509 if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
510 numa = server['extended']['numas'][0]
511 #memory
512 use_huge = False
513 memory = int(numa.get('memory',0))*1024*1024 #in KiB
514 if memory==0:
515 memory = int(server['ram'])*1024;
516 else:
517 if not self.develop_mode:
518 use_huge = True
519 if memory==0:
520 return -1, 'No memory assigned to instance'
521 memory = str(memory)
522 text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>"
523 text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
524 if use_huge:
525 text += self.tab()+'<memoryBacking>'+ \
526 self.inc_tab() + '<hugepages/>'+ \
527 self.dec_tab()+ '</memoryBacking>'
528
529 #cpu
530 use_cpu_pinning=False
531 vcpus = int(server.get("vcpus",0))
532 cpu_pinning = []
533 if 'cores-source' in numa:
534 use_cpu_pinning=True
535 for index in range(0, len(numa['cores-source'])):
536 cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
537 vcpus += 1
538 if 'threads-source' in numa:
539 use_cpu_pinning=True
540 for index in range(0, len(numa['threads-source'])):
541 cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
542 vcpus += 1
543 if 'paired-threads-source' in numa:
544 use_cpu_pinning=True
545 for index in range(0, len(numa['paired-threads-source'])):
546 cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
547 cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
548 vcpus += 2
549
550 if use_cpu_pinning and not self.develop_mode:
551 text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
552 self.tab()+'<cputune>'
553 self.xml_level += 1
554 for i in range(0, len(cpu_pinning)):
555 text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
556 text += self.dec_tab()+'</cputune>'+ \
557 self.tab() + '<numatune>' +\
558 self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
559 self.dec_tab() + '</numatune>'
560 else:
561 if vcpus==0:
562 return -1, "Instance without number of cpus"
563 text += self.tab()+"<vcpu>" + str(vcpus) + "</vcpu>"
564
565 #boot
566 boot_cdrom = False
567 for dev in dev_list:
568 if dev['type']=='cdrom' :
569 boot_cdrom = True
570 break
571 text += self.tab()+ '<os>' + \
572 self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
573 if boot_cdrom:
574 text += self.tab() + "<boot dev='cdrom'/>"
575 text += self.tab() + "<boot dev='hd'/>" + \
576 self.dec_tab()+'</os>'
577 #features
578 text += self.tab()+'<features>'+\
579 self.inc_tab()+'<acpi/>' +\
580 self.tab()+'<apic/>' +\
581 self.tab()+'<pae/>'+ \
582 self.dec_tab() +'</features>'
583 if topo == "oneSocket:hyperthreading":
584 if vcpus % 2 != 0:
585 return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
586 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % (vcpus/2)
587 elif windows_os or topo == "oneSocket":
588 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
589 else:
590 text += self.tab() + "<cpu mode='host-model'></cpu>"
591 text += self.tab() + "<clock offset='utc'/>" +\
592 self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
593 self.tab() + "<on_reboot>restart</on_reboot>" + \
594 self.tab() + "<on_crash>restart</on_crash>"
595 text += self.tab() + "<devices>" + \
596 self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
597 self.tab() + "<serial type='pty'>" +\
598 self.inc_tab() + "<target port='0'/>" + \
599 self.dec_tab() + "</serial>" +\
600 self.tab() + "<console type='pty'>" + \
601 self.inc_tab()+ "<target type='serial' port='0'/>" + \
602 self.dec_tab()+'</console>'
603 if windows_os:
604 text += self.tab() + "<controller type='usb' index='0'/>" + \
605 self.tab() + "<controller type='ide' index='0'/>" + \
606 self.tab() + "<input type='mouse' bus='ps2'/>" + \
607 self.tab() + "<sound model='ich6'/>" + \
608 self.tab() + "<video>" + \
609 self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
610 self.dec_tab() + "</video>" + \
611 self.tab() + "<memballoon model='virtio'/>" + \
612 self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
613
614 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
615 #> self.dec_tab()+'</hostdev>\n' +\
616 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
617 if windows_os:
618 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
619 else:
620 #If image contains 'GRAPH' include graphics
621 #if 'GRAPH' in image:
622 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
623 self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
624 self.dec_tab() + "</graphics>"
625
626 vd_index = 'a'
627 for dev in dev_list:
628 bus_ide_dev = bus_ide
629 if dev['type']=='cdrom' or dev['type']=='disk':
630 if dev['type']=='cdrom':
631 bus_ide_dev = True
632 text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
633 if 'file format' in dev:
634 text += self.inc_tab() + "<driver name='qemu' type='" +dev['file format']+ "' cache='writethrough'/>"
635 if 'source file' in dev:
636 text += self.tab() + "<source file='" +dev['source file']+ "'/>"
637 #elif v['type'] == 'block':
638 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
639 #else:
640 # return -1, 'Unknown disk type ' + v['type']
641 vpci = dev.get('vpci',None)
642 if vpci == None and 'metadata' in dev:
643 vpci = dev['metadata'].get('vpci',None)
644 text += self.pci2xml(vpci)
645
646 if bus_ide_dev:
647 text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>" #TODO allows several type of disks
648 else:
649 text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>"
650 text += self.dec_tab() + '</disk>'
651 vd_index = chr(ord(vd_index)+1)
652 elif dev['type']=='xml':
653 dev_text = dev['xml']
654 if 'vpci' in dev:
655 dev_text = dev_text.replace('__vpci__', dev['vpci'])
656 if 'source file' in dev:
657 dev_text = dev_text.replace('__file__', dev['source file'])
658 if 'file format' in dev:
659 dev_text = dev_text.replace('__format__', dev['source file'])
660 if '__dev__' in dev_text:
661 dev_text = dev_text.replace('__dev__', vd_index)
662 vd_index = chr(ord(vd_index)+1)
663 text += dev_text
664 else:
665 return -1, 'Unknown device type ' + dev['type']
666
667 net_nb=0
668 bridge_interfaces = server.get('networks', [])
669 for v in bridge_interfaces:
670 #Get the brifge name
671 self.db_lock.acquire()
672 result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
673 self.db_lock.release()
674 if result <= 0:
675 self.logger.error("create_xml_server ERROR %d getting nets %s", result, content)
676 return -1, content
677 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
678 #I know it is not secure
679 #for v in sorted(desc['network interfaces'].itervalues()):
680 model = v.get("model", None)
681 if content[0]['provider']=='default':
682 text += self.tab() + "<interface type='network'>" + \
683 self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
684 elif content[0]['provider'][0:7]=='macvtap':
685 text += self.tab()+"<interface type='direct'>" + \
686 self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
687 self.tab() + "<target dev='macvtap0'/>"
688 if windows_os:
689 text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
690 elif model==None:
691 model = "virtio"
692 elif content[0]['provider'][0:6]=='bridge':
693 text += self.tab() + "<interface type='bridge'>" + \
694 self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
695 if windows_os:
696 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
697 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
698 elif model==None:
699 model = "virtio"
700 elif content[0]['provider'][0:3] == "OVS":
701 vlan = content[0]['provider'].replace('OVS:', '')
702 text += self.tab() + "<interface type='bridge'>" + \
703 self.inc_tab() + "<source bridge='ovim-" + str(vlan) + "'/>"
704 else:
705 return -1, 'Unknown Bridge net provider ' + content[0]['provider']
706 if model!=None:
707 text += self.tab() + "<model type='" +model+ "'/>"
708 if v.get('mac_address', None) != None:
709 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
710 text += self.pci2xml(v.get('vpci',None))
711 text += self.dec_tab()+'</interface>'
712
713 net_nb += 1
714
715 interfaces = numa.get('interfaces', [])
716
717 net_nb=0
718 for v in interfaces:
719 if self.develop_mode: #map these interfaces to bridges
720 text += self.tab() + "<interface type='bridge'>" + \
721 self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
722 if windows_os:
723 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
724 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
725 else:
726 text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
727 if v.get('mac_address', None) != None:
728 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
729 text += self.pci2xml(v.get('vpci',None))
730 text += self.dec_tab()+'</interface>'
731 continue
732
733 if v['dedicated'] == 'yes': #passthrought
734 text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
735 self.inc_tab() + "<source>"
736 self.inc_tab()
737 text += self.pci2xml(v['source'])
738 text += self.dec_tab()+'</source>'
739 text += self.pci2xml(v.get('vpci',None))
740 if windows_os:
741 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
742 text += self.dec_tab()+'</hostdev>'
743 net_nb += 1
744 else: #sriov_interfaces
745 #skip not connected interfaces
746 if v.get("net_id") == None:
747 continue
748 text += self.tab() + "<interface type='hostdev' managed='yes'>"
749 self.inc_tab()
750 if v.get('mac_address', None) != None:
751 text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
752 text+= self.tab()+'<source>'
753 self.inc_tab()
754 text += self.pci2xml(v['source'])
755 text += self.dec_tab()+'</source>'
756 if v.get('vlan',None) != None:
757 text += self.tab() + "<vlan> <tag id='" + str(v['vlan']) + "'/> </vlan>"
758 text += self.pci2xml(v.get('vpci',None))
759 if windows_os:
760 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
761 text += self.dec_tab()+'</interface>'
762
763
764 text += self.dec_tab()+'</devices>'+\
765 self.dec_tab()+'</domain>'
766 return 0, text
767
768 def pci2xml(self, pci):
769 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
770 alows an empty pci text'''
771 if pci is None:
772 return ""
773 first_part = pci.split(':')
774 second_part = first_part[2].split('.')
775 return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
776 "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
777 "' function='0x" + second_part[1] + "'/>"
778
779 def tab(self):
780 """Return indentation according to xml_level"""
781 return "\n" + (' '*self.xml_level)
782
783 def inc_tab(self):
784 """Increment and return indentation according to xml_level"""
785 self.xml_level += 1
786 return self.tab()
787
788 def dec_tab(self):
789 """Decrement and return indentation according to xml_level"""
790 self.xml_level -= 1
791 return self.tab()
792
793 def create_ovs_bridge(self):
794 """
795 Create a bridge in compute OVS to allocate VMs
796 :return: True if success
797 """
798 if self.test or not self.connectivity:
799 return True
800
801
802 try:
803 self.run_command('sudo', 'ovs-vsctl', '--may-exist', 'add-br', 'br-int', '--', 'set', 'Bridge', 'br-int',
804 'stp_enable=true')
805 return True
806 except RunCommandException as e:
807 self.logger.error("create_ovs_bridge ssh Exception: " + str(e))
808 if "SSH session not active" in str(e):
809 self.ssh_connect()
810 return False
811
812 def delete_port_to_ovs_bridge(self, vlan, net_uuid):
813 """
814 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
815 :param vlan: vlan port id
816 :param net_uuid: network id
817 :return:
818 """
819
820 if self.test or not self.connectivity:
821 return True
822 try:
823 port_name = 'ovim-' + str(vlan)
824 command = 'sudo ovs-vsctl del-port br-int ' + port_name
825 self.run_command(command)
826 return True
827 except RunCommandException as e:
828 self.logger.error("delete_port_to_ovs_bridge ssh Exception: " + str(e))
829 return False
830
    def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
        """
        Delete the dnsmasq dhcp server process living in the per-vlan namespace.
        :param vlan: segmentation id
        :param net_uuid: network uuid
        :param dhcp_path: configuration file path that lives on the namespace side
        :return: False on ssh error; None otherwise (the success path has no explicit
            return -- NOTE(review): callers treating the result as a boolean will see
            success as falsy; confirm before relying on the return value)
        """
        if self.test or not self.connectivity:
            return True
        # nothing to do while some port of the net is still attached
        if not self.is_dhcp_port_free(vlan, net_uuid):
            return True
        try:
            dhcp_namespace = str(vlan) + '-dnsmasq'
            dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
            pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

            # read the dnsmasq pid from inside the namespace
            command = 'sudo ip netns exec ' + dhcp_namespace + ' cat ' + pid_file
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # kill the process; 'content' is the pid text read above (not validated)
            command = 'sudo ip netns exec ' + dhcp_namespace + ' kill -9 ' + content
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # if len(content) == 0:
            #     return True
            # else:
            #     return False
        except paramiko.ssh_exception.SSHException as e:
            self.logger.error("delete_dhcp_server ssh Exception: " + str(e))
            if "SSH session not active" in str(e):
                self.ssh_connect()
            return False
867
868 def is_dhcp_port_free(self, host_id, net_uuid):
869 """
870 Check if any port attached to the a net in a vxlan mesh across computes nodes
871 :param host_id: host id
872 :param net_uuid: network id
873 :return: True if is not free
874 """
875 self.db_lock.acquire()
876 result, content = self.db.get_table(
877 FROM='ports',
878 WHERE={'type': 'instance:ovs', 'net_id': net_uuid}
879 )
880 self.db_lock.release()
881
882 if len(content) > 0:
883 return False
884 else:
885 return True
886
887 def is_port_free(self, host_id, net_uuid):
888 """
889 Check if there not ovs ports of a network in a compute host.
890 :param host_id: host id
891 :param net_uuid: network id
892 :return: True if is not free
893 """
894
895 self.db_lock.acquire()
896 result, content = self.db.get_table(
897 FROM='ports as p join instances as i on p.instance_id=i.uuid',
898 WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
899 )
900 self.db_lock.release()
901
902 if len(content) > 0:
903 return False
904 else:
905 return True
906
907 def add_port_to_ovs_bridge(self, vlan):
908 """
909 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
910 :param vlan: vlan port id
911 :return: True if success
912 """
913
914 if self.test:
915 return True
916 try:
917 port_name = 'ovim-' + str(vlan)
918 command = 'sudo ovs-vsctl add-port br-int ' + port_name + ' tag=' + str(vlan)
919 self.logger.debug("command: " + command)
920 (_, stdout, _) = self.ssh_conn.exec_command(command)
921 content = stdout.read()
922 if len(content) == 0:
923 return True
924 else:
925 return False
926 except paramiko.ssh_exception.SSHException as e:
927 self.logger.error("add_port_to_ovs_bridge ssh Exception: " + str(e))
928 if "SSH session not active" in str(e):
929 self.ssh_connect()
930 return False
931
932 def delete_dhcp_port(self, vlan, net_uuid):
933 """
934 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
935 :param vlan: segmentation id
936 :param net_uuid: network id
937 :return: True if success
938 """
939
940 if self.test:
941 return True
942
943 if not self.is_dhcp_port_free(vlan, net_uuid):
944 return True
945 self.delete_dhcp_interfaces(vlan)
946 return True
947
948 def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
949 """
950 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
951 :param vlan:
952 :param net_uuid:
953 :return: True if success
954 """
955 if self.test:
956 return
957
958 if not self.is_port_free(vlan, net_uuid):
959 return True
960 self.delete_port_to_ovs_bridge(vlan, net_uuid)
961 self.delete_linux_bridge(vlan)
962 return True
963
964 def delete_linux_bridge(self, vlan):
965 """
966 Delete a linux bridge in a scpecific compute.
967 :param vlan: vlan port id
968 :return: True if success
969 """
970
971 if self.test:
972 return True
973 try:
974 port_name = 'ovim-' + str(vlan)
975 command = 'sudo ip link set dev ovim-' + str(vlan) + ' down'
976 self.logger.debug("command: " + command)
977 (_, stdout, _) = self.ssh_conn.exec_command(command)
978 # content = stdout.read()
979 #
980 # if len(content) != 0:
981 # return False
982 command = 'sudo ifconfig ' + port_name + ' down && sudo brctl delbr ' + port_name
983 self.logger.debug("command: " + command)
984 (_, stdout, _) = self.ssh_conn.exec_command(command)
985 content = stdout.read()
986 if len(content) == 0:
987 return True
988 else:
989 return False
990 except paramiko.ssh_exception.SSHException as e:
991 self.logger.error("delete_linux_bridge ssh Exception: " + str(e))
992 if "SSH session not active" in str(e):
993 self.ssh_connect()
994 return False
995
996 def remove_link_bridge_to_ovs(self, vlan, link):
997 """
998 Delete a linux provider net connection to tenatn net
999 :param vlan: vlan port id
1000 :param link: link name
1001 :return: True if success
1002 """
1003
1004 if self.test:
1005 return True
1006 try:
1007 br_tap_name = str(vlan) + '-vethBO'
1008 br_ovs_name = str(vlan) + '-vethOB'
1009
1010 # Delete ovs veth pair
1011 command = 'sudo ip link set dev {} down'.format(br_ovs_name)
1012 self.logger.debug("command: " + command)
1013 (_, stdout, _) = self.ssh_conn.exec_command(command)
1014 content = stdout.read()
1015
1016 command = 'sudo ovs-vsctl del-port br-int {}'.format(br_ovs_name)
1017 self.logger.debug("command: " + command)
1018 (_, stdout, _) = self.ssh_conn.exec_command(command)
1019 content = stdout.read()
1020
1021 # Delete br veth pair
1022 command = 'sudo ip link set dev {} down'.format(br_tap_name)
1023 self.logger.debug("command: " + command)
1024 (_, stdout, _) = self.ssh_conn.exec_command(command)
1025 content = stdout.read()
1026
1027 # Delete br veth interface form bridge
1028 command = 'sudo brctl delif {} {}'.format(link, br_tap_name)
1029 self.logger.debug("command: " + command)
1030 (_, stdout, _) = self.ssh_conn.exec_command(command)
1031 content = stdout.read()
1032
1033 # Delete br veth pair
1034 command = 'sudo ip link set dev {} down'.format(link)
1035 self.logger.debug("command: " + command)
1036 (_, stdout, _) = self.ssh_conn.exec_command(command)
1037 content = stdout.read()
1038
1039 if len(content) == 0:
1040 return True
1041 else:
1042 return False
1043 except paramiko.ssh_exception.SSHException as e:
1044 self.logger.error("delete_linux_bridge ssh Exception: " + str(e))
1045 if "SSH session not active" in str(e):
1046 self.ssh_connect()
1047 return False
1048
1049 def create_ovs_bridge_port(self, vlan):
1050 """
1051 Generate a linux bridge and attache the port to a OVS bridge
1052 :param vlan: vlan port id
1053 :return:
1054 """
1055 if self.test:
1056 return
1057 self.create_linux_bridge(vlan)
1058 self.add_port_to_ovs_bridge(vlan)
1059
    def create_linux_bridge(self, vlan):
        """
        Create the linux bridge 'ovim-<vlan>' on the compute host with STP active.

        Runs four commands over the paramiko ssh session; intermediate outputs
        are read but ignored, only the last command ('ip link ... up') decides
        the return value.
        :param vlan: network vlan id
        :return: True if the last command produced no output, False otherwise
        """

        if self.test:
            return True
        try:
            port_name = 'ovim-' + str(vlan)
            # look up the bridge first; the result is currently ignored
            command = 'sudo brctl show | grep ' + port_name
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # if exist nothing to create
            # if len(content) == 0:
            #     return False

            command = 'sudo brctl addbr ' + port_name
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # if len(content) == 0:
            #     return True
            # else:
            #     return False

            # enable spanning tree on the new bridge
            command = 'sudo brctl stp ' + port_name + ' on'
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # if len(content) == 0:
            #     return True
            # else:
            #     return False
            command = 'sudo ip link set dev ' + port_name + ' up'
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            if len(content) == 0:
                return True
            else:
                return False
        except paramiko.ssh_exception.SSHException as e:
            self.logger.error("create_linux_bridge ssh Exception: " + str(e))
            if "SSH session not active" in str(e):
                # reconnect so the next operation can retry with a fresh session
                self.ssh_connect()
            return False
1113
    def set_mac_dhcp_server(self, ip, mac, vlan, netmask, first_ip, dhcp_path):
        """
        Append to the dnsmasq hosts file a 'MAC,IP' rule so a VM vnic gets a
        fixed IP; also registers the namespace veth MAC with the range's first IP.
        :param ip: IP address assigned to a VM
        :param mac: VM vnic mac to be matched with the IP received
        :param vlan: segmentation id
        :param netmask: netmask value (not used in the body)
        :param first_ip: first IP of the range, reserved for the namespace interface
        :param dhcp_path: dhcp conf file path that lives in namespace side
        :return: True if the last echo produced no output, False otherwise
        """

        if self.test:
            return True

        dhcp_namespace = str(vlan) + '-dnsmasq'
        dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
        dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)

        if not ip:
            return False
        try:

            # MAC of the veth interface listening inside the namespace
            ns_interface = str(vlan) + '-vethDO'
            command = 'sudo ip netns exec ' + dhcp_namespace + ' cat /sys/class/net/{}/address'.format(ns_interface)
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            iface_listen_mac = stdout.read()

            # NOTE(review): python2 str-vs-int comparison -- any string (even empty)
            # compares > 0, so this branch is effectively always taken
            if iface_listen_mac > 0:
                # NOTE(review): both format arguments are dhcp_hostsdir, i.e. this
                # greps the hosts file for its own path; looks like a copy-paste
                # slip (probably meant to grep for the interface MAC) -- confirm
                # before changing
                command = 'sudo ip netns exec ' + dhcp_namespace + ' cat {} | grep {}'.format(dhcp_hostsdir, dhcp_hostsdir)
                self.logger.debug("command: " + command)
                (_, stdout, _) = self.ssh_conn.exec_command(command)
                content = stdout.read()
                if content > 0:
                    # reserve first_ip for the namespace interface MAC
                    ip_data = iface_listen_mac.upper().replace('\n', '') + ',' + first_ip
                    dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)

                    command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo bash -ec "echo ' + ip_data + ' >> ' + dhcp_hostsdir + '"'
                    self.logger.debug("command: " + command)
                    (_, stdout, _) = self.ssh_conn.exec_command(command)
                    content = stdout.read()


            # append the VM's 'MAC,IP' rule to the hosts file
            ip_data = mac.upper() + ',' + ip

            command = 'sudo ip netns exec ' + dhcp_namespace + ' touch ' + dhcp_hostsdir
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo bash -ec "echo ' + ip_data + ' >> ' + dhcp_hostsdir + '"'

            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            if len(content) == 0:
                return True
            else:
                return False
        except paramiko.ssh_exception.SSHException as e:
            self.logger.error("set_mac_dhcp_server ssh Exception: " + str(e))
            if "SSH session not active" in str(e):
                self.ssh_connect()
            return False
1179
1180 def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
1181 """
1182 Delete into dhcp conf file the ip assigned to a specific MAC address
1183
1184 :param ip: IP address asigned to a VM
1185 :param mac: VM vnic mac to be macthed with the IP received
1186 :param vlan: Segmentation id
1187 :param dhcp_path: dhcp conf file path that live in namespace side
1188 :return:
1189 """
1190
1191 if self.test:
1192 return False
1193 try:
1194 dhcp_namespace = str(vlan) + '-dnsmasq'
1195 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1196 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1197
1198 if not ip:
1199 return False
1200
1201 ip_data = mac.upper() + ',' + ip
1202
1203 command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
1204 self.logger.debug("command: " + command)
1205 (_, stdout, _) = self.ssh_conn.exec_command(command)
1206 content = stdout.read()
1207
1208 if len(content) == 0:
1209 return True
1210 else:
1211 return False
1212
1213 except paramiko.ssh_exception.SSHException as e:
1214 self.logger.error("set_mac_dhcp_server ssh Exception: " + str(e))
1215 if "SSH session not active" in str(e):
1216 self.ssh_connect()
1217 return False
1218
    def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway, dns_list=None, routes=None):
        """
        Launch a dnsmasq instance inside the net's namespace to serve DHCP.

        If a recorded pid is found and still running, dnsmasq is not relaunched;
        NOTE(review): in that case the function falls through and returns None
        (only the launch path returns True/False) -- confirm callers tolerate it.
        :param vlan: segmentation id
        :param ip_range: 2-item list with first and last IP of the dhcp range
        :param netmask: network netmask
        :param dhcp_path: dhcp conf file path that lives in namespace side
        :param gateway: gateway address for the dhcp net (option 3)
        :param dns_list: optional dns server list (option 6)
        :param routes: optional dict of static routes (option 121); the 'default'
                       key overrides the gateway option
        :return: True if dnsmasq was launched silently, False on error/exception
        """

        if self.test:
            return True
        try:
            ns_interface = str(vlan) + '-vethDO'
            dhcp_namespace = str(vlan) + '-dnsmasq'
            dhcp_path = os.path.join(dhcp_path, dhcp_namespace, '')
            leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
            pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')


            dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask

            # make sure the config directory exists inside the namespace
            command = 'sudo ip netns exec ' + dhcp_namespace + ' mkdir -p ' + dhcp_path
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # read a previously recorded pid, if any
            pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
            command = 'sudo ip netns exec ' + dhcp_namespace + ' cat ' + pid_path
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # check if pid is runing
            pid_status_path = content
            if content:
                command = "ps aux | awk '{print $2 }' | grep " + pid_status_path
                self.logger.debug("command: " + command)
                (_, stdout, _) = self.ssh_conn.exec_command(command)
                content = stdout.read()

            gateway_option = ' --dhcp-option=3,' + gateway

            dhcp_route_option = ''
            if routes:
                dhcp_route_option = ' --dhcp-option=121'
                for key, value in routes.iteritems():
                    if 'default' == key:
                        # the default route is advertised via option 3, not 121
                        gateway_option = ' --dhcp-option=3,' + value
                    else:
                        dhcp_route_option += ',' + key + ',' + value
            dns_data = ''
            if dns_list:
                dns_data = ' --dhcp-option=6'
                for dns in dns_list:
                    dns_data += ',' + dns

            # launch only when no running pid was found
            if not content:
                command = 'sudo ip netns exec ' + dhcp_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
                          '--interface=' + ns_interface + \
                          ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
                          ' --dhcp-range ' + dhcp_range + \
                          ' --pid-file=' + pid_file + \
                          ' --dhcp-leasefile=' + leases_path + \
                          ' --listen-address ' + ip_range[0] + \
                          gateway_option + \
                          dhcp_route_option + \
                          dns_data

                self.logger.debug("command: " + command)
                (_, stdout, _) = self.ssh_conn.exec_command(command)
                content = stdout.readline()

                if len(content) == 0:
                    return True
                else:
                    return False
        except paramiko.ssh_exception.SSHException as e:
            self.logger.error("launch_dhcp_server ssh Exception: " + str(e))
            if "SSH session not active" in str(e):
                self.ssh_connect()
            return False
1305
    def delete_dhcp_interfaces(self, vlan):
        """
        Tear down the dhcp plumbing of a net: detach the OVS veth, bring the
        namespace interfaces down and delete the dnsmasq namespace.

        Command outputs are read but never checked.
        NOTE(review): the success path falls through and returns None; only the
        SSHException path returns False -- confirm callers only use truthiness.
        :param vlan: network vlan id
        :return: None on success, False on SSHException
        """

        if self.test:
            return True
        try:
            br_veth_name = str(vlan) + '-vethDO'
            ovs_veth_name = str(vlan) + '-vethOD'
            dhcp_namespace = str(vlan) + '-dnsmasq'

            # detach the veth from the OVS integration bridge
            command = 'sudo ovs-vsctl del-port br-int ' + ovs_veth_name
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # bring down the namespace-side veth
            command = 'sudo ip netns exec ' + dhcp_namespace + ' ip link set dev ' + br_veth_name + ' down'
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            command = 'sudo ip link set dev ' + dhcp_namespace + ' down'
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            command = 'sudo brctl delbr ' + dhcp_namespace
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # finally remove the namespace itself
            command = 'sudo ip netns del ' + dhcp_namespace
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

        except paramiko.ssh_exception.SSHException as e:
            self.logger.error("delete_dhcp_interfaces ssh Exception: " + str(e))
            if "SSH session not active" in str(e):
                self.ssh_connect()
            return False
1350
    def create_dhcp_interfaces(self, vlan, ip_listen_address, netmask):
        """
        Create the dhcp plumbing of a net: a dnsmasq namespace, a veth pair
        between the namespace and OVS, and configure the listen address.

        Intermediate command outputs are ignored; only the last command
        (ifconfig inside the namespace) decides the return value.
        :param vlan: segmentation id (also used as OVS tag)
        :param ip_listen_address: listen IP for the dhcp service, assigned to the
                                  namespace-side veth
        :param netmask: dhcp net netmask
        :return: True if success
        """

        if self.test:
            return True
        try:
            ovs_veth_name = str(vlan) + '-vethOD'
            ns_veth = str(vlan) + '-vethDO'
            dhcp_namespace = str(vlan) + '-dnsmasq'

            command = 'sudo ip netns add ' + dhcp_namespace
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # veth pair: one end stays on the host (OVS side), one moves into the ns
            command = 'sudo ip link add ' + ns_veth + ' type veth peer name ' + ovs_veth_name
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            command = 'sudo ip link set ' + ns_veth + ' netns ' + dhcp_namespace
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            command = 'sudo ip netns exec ' + dhcp_namespace + ' ip link set dev ' + ns_veth + ' up'
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # attach the host-side end to br-int with the net's vlan tag
            command = 'sudo ovs-vsctl add-port br-int ' + ovs_veth_name + ' tag=' + str(vlan)
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            command = 'sudo ip link set dev ' + ovs_veth_name + ' up'
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            command = 'sudo ip netns exec ' + dhcp_namespace + ' ip link set dev lo up'
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # assign the dhcp listen address to the namespace-side veth
            command = 'sudo ip netns exec ' + dhcp_namespace + ' ' + ' ifconfig ' + ns_veth \
                      + ' ' + ip_listen_address + ' netmask ' + netmask
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()
            if len(content) == 0:
                return True
            else:
                return False
        except paramiko.ssh_exception.SSHException as e:
            self.logger.error("create_dhcp_interfaces ssh Exception: " + str(e))
            if "SSH session not active" in str(e):
                self.ssh_connect()
            return False
1416
    def delete_qrouter_connection(self, vlan, link):
        """
        Delete the qrouter namespace of a net together with all its veth pairs
        (OVS side and user-bridge side).

        NOTE(review): unlike the sibling methods there is no self.test guard and
        no try/except around the ssh calls; command outputs are read but never
        checked and the function returns None -- confirm this is intentional.
        :param vlan: segmentation id
        :param link: user linux bridge name the qrouter was attached to
        :return: None
        """

        ns_qouter = str(vlan) + '-qrouter'
        qrouter_ovs_veth = str(vlan) + '-vethOQ'
        qrouter_ns_veth = str(vlan) + '-vethQO'

        qrouter_br_veth = str(vlan) + '-vethBQ'
        qrouter_ns_router_veth = str(vlan) + '-vethQB'

        # delete ovs veth to ovs br-int
        command = 'sudo ovs-vsctl del-port br-int {}'.format(qrouter_ovs_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # down ns veth
        command = 'sudo ip netns exec {} ip link set dev {} down'.format(ns_qouter, qrouter_ns_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # down ovs veth interface
        command = 'sudo ip link set dev {} down'.format(qrouter_br_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # down br veth interface
        command = 'sudo ip link set dev {} down'.format(qrouter_ovs_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # down br veth interface
        command = 'sudo ip link set dev {} down'.format(qrouter_ns_router_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # detach the veth from the user bridge
        command = 'sudo brctl delif {} {}'.format(link, qrouter_br_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()


        # delete NS
        command = 'sudo ip netns del ' + ns_qouter
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
1474
    def create_qrouter_ovs_connection(self, vlan, gateway, dhcp_cidr):
        """
        Create the qrouter namespace of a net and the veth pair connecting it
        to the OVS integration bridge; the gateway address is set on the
        namespace-side veth.

        Command outputs are read but never checked and the function returns None.
        :param vlan: segmentation id (also used as OVS tag)
        :param gateway: gateway IP to assign inside the namespace
        :param dhcp_cidr: net CIDR, used only to derive the prefix length
        :return: None
        """

        ns_qouter = str(vlan) + '-qrouter'
        qrouter_ovs_veth = str(vlan) + '-vethOQ'
        qrouter_ns_veth = str(vlan) + '-vethQO'

        # Create NS
        command = 'sudo ip netns add ' + ns_qouter
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # Create pait veth
        command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_ns_veth, qrouter_ovs_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # up ovs veth interface
        command = 'sudo ip link set dev {} up'.format(qrouter_ovs_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # add ovs veth to ovs br-int
        command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(qrouter_ovs_veth, vlan)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # add veth to ns
        command = 'sudo ip link set {} netns {}'.format(qrouter_ns_veth, ns_qouter)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # up ns loopback
        command = 'sudo ip netns exec {} ip link set dev lo up'.format(ns_qouter)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # up ns veth
        command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # third-party import deferred to call time (netaddr is only needed here)
        from netaddr import IPNetwork
        ip_tools = IPNetwork(dhcp_cidr)
        cidr_len = ip_tools.prefixlen

        # set gw to ns veth
        command = 'sudo ip netns exec {} ip address add {}/{} dev {}'.format(ns_qouter, gateway, cidr_len, qrouter_ns_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()
1538
    def add_ns_routes(self, vlan, routes):
        """
        Add static routes inside the qrouter namespace of a net.
        The 'default' route omits the device argument; all others are bound to
        the namespace-to-bridge veth. Outputs are read but not checked.
        :param vlan: segmentation id
        :param routes: dict mapping destination CIDR (or 'default') to next hop
        :return: None
        """

        for key, value in routes.iteritems():
            ns_qouter = str(vlan) + '-qrouter'
            qrouter_ns_router_veth = str(vlan) + '-vethQB'
            # up ns veth
            if key == 'default':
                command = 'sudo ip netns exec {} ip route add {} via {} '.format(ns_qouter, key, value)
            else:
                command = 'sudo ip netns exec {} ip route add {} via {} dev {}'.format(ns_qouter, key, value,
                                                                                       qrouter_ns_router_veth)
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()
1553
    def create_qrouter_br_connection(self, vlan, cidr, link):
        """
        Create the veth pair between the user bridge (link['iface']) and the
        qrouter namespace, assign the NAT address and install the MASQUERADE
        rule for the net CIDR. Returns None; outputs are not checked.
        :param vlan: segmentation id
        :param cidr: tenant net CIDR (destination of the MASQUERADE rule)
        :param link: dict with 'iface' (user bridge name) and 'nat' (address)
        :return: None
        """

        ns_qouter = str(vlan) + '-qrouter'
        qrouter_ns_router_veth = str(vlan) + '-vethQB'
        qrouter_br_veth = str(vlan) + '-vethBQ'

        # Create pait veth
        command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_br_veth, qrouter_ns_router_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # up ovs veth interface
        command = 'sudo ip link set dev {} up'.format(qrouter_br_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # add veth to ns
        command = 'sudo ip link set {} netns {}'.format(qrouter_ns_router_veth, ns_qouter)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # up ns veth
        command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_router_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        command = 'sudo ip netns exec {} ip address add {} dev {}'.format(ns_qouter, link['nat'], qrouter_ns_router_veth)
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        # check that the user-supplied bridge exists before attaching
        command = 'sudo brctl show | grep {}'.format(link['iface'])
        self.logger.debug("command: " + command)
        (_, stdout, _) = self.ssh_conn.exec_command(command)
        content = stdout.read()

        if content > '':
            # up ns veth
            command = 'sudo brctl addif {} {}'.format(link['iface'], qrouter_br_veth)
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            # up ns veth
            command = 'sudo ip netns exec {} iptables -t nat -A POSTROUTING -o {} -s {} -d {} -j MASQUERADE' \
                .format(ns_qouter, qrouter_ns_router_veth, link['nat'], cidr)
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()


        else:
            # NOTE(review): the message reports qrouter_br_veth but the check was
            # on link['iface'] -- likely the wrong variable; confirm before fixing
            self.logger.error('Bridge {} given by user not exist'.format(qrouter_br_veth))
1619
    def create_link_bridge_to_ovs(self, vlan, link):
        """
        Connect a user linux bridge with a tenant net: create a veth pair, attach
        one end to the bridge and the other to br-int with the net's vlan tag.
        Only the output of the last command decides the return value.
        :param vlan: segmentation id
        :param link: linux bridge name given by the user
        :return: True if success
        """
        if self.test:
            return True
        try:

            br_tap_name = str(vlan) + '-vethBO'
            br_ovs_name = str(vlan) + '-vethOB'

            # is a bridge or a interface
            command = 'sudo brctl show | grep {}'.format(link)
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()

            if content > '':
                command = 'sudo ip link add {} type veth peer name {}'.format(br_tap_name, br_ovs_name)
                self.logger.debug("command: " + command)
                (_, stdout, _) = self.ssh_conn.exec_command(command)
                content = stdout.read()

                command = 'sudo ip link set dev {} up'.format(br_tap_name)
                self.logger.debug("command: " + command)
                (_, stdout, _) = self.ssh_conn.exec_command(command)
                content = stdout.read()

                command = 'sudo ip link set dev {} up'.format(br_ovs_name)
                self.logger.debug("command: " + command)
                (_, stdout, _) = self.ssh_conn.exec_command(command)
                content = stdout.read()

                command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(br_ovs_name, str(vlan))
                self.logger.debug("command: " + command)
                (_, stdout, _) = self.ssh_conn.exec_command(command)
                content = stdout.read()

                command = 'sudo brctl addif ' + link + ' {}'.format(br_tap_name)
                self.logger.debug("command: " + command)
                (_, stdout, _) = self.ssh_conn.exec_command(command)
                content = stdout.read()

                if len(content) == 0:
                    return True
                else:
                    return False
            else:
                self.logger.error('Link is not present, please check {}'.format(link))
                return False
        except paramiko.ssh_exception.SSHException as e:
            # NOTE(review): log label says "create_dhcp_interfaces" -- copy-paste
            # from a sibling method; kept as-is here (runtime string)
            self.logger.error("create_dhcp_interfaces ssh Exception: " + str(e))
            if "SSH session not active" in str(e):
                self.ssh_connect()
            return False
1677
    def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
        """
        Create a vxlan tunnel port on br-int towards another compute node, with
        an increased STP path cost on the port.
        :param vxlan_interface: vxlan interface (port) name
        :param remote_ip: tunnel endpoint remote compute ip; the literal
                          'localhost' is translated to self.local_ip
        :return: True if success; None when both endpoints would be localhost
        """
        if self.test or not self.connectivity:
            return True
        if remote_ip == 'localhost':
            if self.localhost:
                return   # TODO: Cannot create a vxlan between localhost and localhost
            remote_ip = self.local_ip
        try:
            command = 'sudo ovs-vsctl add-port br-int ' + vxlan_interface + \
                      ' -- set Interface ' + vxlan_interface + ' type=vxlan options:remote_ip=' + remote_ip + \
                      ' -- set Port ' + vxlan_interface + ' other_config:stp-path-cost=10'
            self.logger.debug("command: " + command)
            (_, stdout, _) = self.ssh_conn.exec_command(command)
            content = stdout.read()
            # print content
            if len(content) == 0:
                return True
            else:
                return False
        except paramiko.ssh_exception.SSHException as e:
            self.logger.error("create_ovs_vxlan_tunnel ssh Exception: " + str(e))
            if "SSH session not active" in str(e):
                self.ssh_connect()
            return False
1708
1709 def delete_ovs_vxlan_tunnel(self, vxlan_interface):
1710 """
1711 Delete a vlxan tunnel port from a OVS brdige.
1712 :param vxlan_interface: vlxan name to be delete it.
1713 :return: True if success.
1714 """
1715 if self.test or not self.connectivity:
1716 return True
1717 try:
1718 command = 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
1719 self.logger.debug("command: " + command)
1720 (_, stdout, _) = self.ssh_conn.exec_command(command)
1721 content = stdout.read()
1722 # print content
1723 if len(content) == 0:
1724 return True
1725 else:
1726 return False
1727 except paramiko.ssh_exception.SSHException as e:
1728 self.logger.error("delete_ovs_vxlan_tunnel ssh Exception: " + str(e))
1729 if "SSH session not active" in str(e):
1730 self.ssh_connect()
1731 return False
1732
1733 def delete_ovs_bridge(self):
1734 """
1735 Delete a OVS bridge from a compute.
1736 :return: True if success
1737 """
1738 if self.test or not self.connectivity:
1739 return True
1740 try:
1741 command = 'sudo ovs-vsctl del-br br-int'
1742 self.logger.debug("command: " + command)
1743 (_, stdout, _) = self.ssh_conn.exec_command(command)
1744 content = stdout.read()
1745 if len(content) == 0:
1746 return True
1747 else:
1748 return False
1749 except paramiko.ssh_exception.SSHException as e:
1750 self.logger.error("delete_ovs_bridge ssh Exception: " + str(e))
1751 if "SSH session not active" in str(e):
1752 self.ssh_connect()
1753 return False
1754
1755 def get_file_info(self, path):
1756 command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1757 try:
1758 content = self.run_command(command)
1759 return content.split(" ") # (permission, 1, owner, group, size, date, file)
1760 except RunCommandException as e:
1761 return None # file does not exist
1762
1763 def qemu_get_info(self, path):
1764 command = 'qemu-img info ' + path
1765 content = self.run_command(command)
1766 try:
1767 return yaml.load(content)
1768 except yaml.YAMLError as exc:
1769 text = ""
1770 if hasattr(exc, 'problem_mark'):
1771 mark = exc.problem_mark
1772 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
1773 self.logger.error("get_qemu_info yaml format Exception " + text)
1774 raise RunCommandException("Error getting qemu_info yaml format" + text)
1775
1776 def qemu_change_backing(self, inc_file, new_backing_file):
1777 command = 'qemu-img rebase -u -b {} {}'.format(new_backing_file, inc_file)
1778 try:
1779 self.run_command(command)
1780 return 0
1781 except RunCommandException as e:
1782 self.logger.error("qemu_change_backing error: " + str(e))
1783 return -1
1784
1785 def qemu_create_empty_disk(self, dev):
1786
1787 if not dev and 'source' not in dev and 'file format' not in dev and 'image_size' not in dev:
1788 self.logger.error("qemu_create_empty_disk error: missing image parameter")
1789 return -1
1790
1791 empty_disk_path = dev['source file']
1792
1793 command = 'qemu-img create -f qcow2 {} {}G'.format(empty_disk_path, dev['image_size'])
1794 try:
1795 self.run_command(command)
1796 return 0
1797 except RunCommandException as e:
1798 self.logger.error("qemu_create_empty_disk error: " + str(e))
1799 return -1
1800
1801 def get_notused_filename(self, proposed_name, suffix=''):
1802 '''Look for a non existing file_name in the host
1803 proposed_name: proposed file name, includes path
1804 suffix: suffix to be added to the name, before the extention
1805 '''
1806 extension = proposed_name.rfind(".")
1807 slash = proposed_name.rfind("/")
1808 if extension < 0 or extension < slash: # no extension
1809 extension = len(proposed_name)
1810 target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
1811 info = self.get_file_info(target_name)
1812 if info is None:
1813 return target_name
1814
1815 index=0
1816 while info is not None:
1817 target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
1818 index+=1
1819 info = self.get_file_info(target_name)
1820 return target_name
1821
1822 def get_notused_path(self, proposed_path, suffix=''):
1823 '''Look for a non existing path at database for images
1824 proposed_path: proposed file name, includes path
1825 suffix: suffix to be added to the name, before the extention
1826 '''
1827 extension = proposed_path.rfind(".")
1828 if extension < 0:
1829 extension = len(proposed_path)
1830 if suffix != None:
1831 target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
1832 index=0
1833 while True:
1834 r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
1835 if r<=0:
1836 return target_path
1837 target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
1838 index+=1
1839
1840
1841 def delete_file(self, file_name):
1842 command = 'rm -f ' + file_name
1843 self.run_command(command)
1844
1845 def copy_file(self, source, destination, perserve_time=True):
1846 if source[0:4]=="http":
1847 command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1848 dst=destination, src=source, dst_result=destination + ".result" )
1849 else:
1850 command = 'cp --no-preserve=mode'
1851 if perserve_time:
1852 command += ' --preserve=timestamps'
1853 command += " '{}' '{}'".format(source, destination)
1854 self.run_command(command)
1855
    def copy_remote_file(self, remote_file, use_incremental):
        '''Copy a file from the repository to local folder and recursively
        copy the backing files in case the remote file is incremental.
        Read and/or modified self.localinfo['files'] that contain the
        unmodified copies of images in the local path.
        params:
            remote_file: path (or http url) of remote file
            use_incremental: None (leave the decision to this function), True, False
        return:
            local_file: name of local file
            qemu_info: dict with qemu information of local file
            use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
        '''

        use_incremental_out = use_incremental
        new_backing_file = None
        local_file = None
        file_from_local = True

        #in case incremental use is not decided, take the decision depending on the image
        #avoid the use of incremental if this image is already incremental
        # http sources cannot be inspected locally with qemu-img
        if remote_file[0:4] == "http":
            file_from_local = False
        if file_from_local:
            qemu_remote_info = self.qemu_get_info(remote_file)
        if use_incremental_out==None:
            use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)
        #copy recursivelly the backing files
        if file_from_local and 'backing file' in qemu_remote_info:
            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

        #check if remote file is present locally
        if use_incremental_out and remote_file in self.localinfo['files']:
            local_file = self.localinfo['files'][remote_file]
            local_file_info = self.get_file_info(local_file)
            if file_from_local:
                remote_file_info = self.get_file_info(remote_file)
            if local_file_info == None:
                local_file = None
            elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
                # fields [4] and [5] of get_file_info() are size and date
                #local copy of file not valid because date or size are different.
                #TODO DELETE local file if this file is not used by any active virtual machine
                try:
                    self.delete_file(local_file)
                    del self.localinfo['files'][remote_file]
                except Exception:
                    pass
                local_file = None
            else: #check that the local file has the same backing file, or there are not backing at all
                qemu_info = self.qemu_get_info(local_file)
                if new_backing_file != qemu_info.get('backing file'):
                    local_file = None


        if local_file == None: #copy the file
            img_name= remote_file.split('/') [-1]
            img_local = self.image_path + '/' + img_name
            local_file = self.get_notused_filename(img_local)
            # third argument (perserve_time) is use_incremental_out: timestamps
            # must be preserved so the size/date check above stays valid for
            # files registered for reuse
            self.copy_file(remote_file, local_file, use_incremental_out)

            if use_incremental_out:
                # register the unmodified copy so future launches can reuse it
                self.localinfo['files'][remote_file] = local_file
            if new_backing_file:
                self.qemu_change_backing(local_file, new_backing_file)
            qemu_info = self.qemu_get_info(local_file)

        return local_file, qemu_info, use_incremental_out
1923
1924 def launch_server(self, conn, server, rebuild=False, domain=None):
1925 if self.test:
1926 time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real
1927 return 0, 'Success'
1928
1929 server_id = server['uuid']
1930 paused = server.get('paused','no')
1931 try:
1932 if domain!=None and rebuild==False:
1933 domain.resume()
1934 #self.server_status[server_id] = 'ACTIVE'
1935 return 0, 'Success'
1936
1937 self.db_lock.acquire()
1938 result, server_data = self.db.get_instance(server_id)
1939 self.db_lock.release()
1940 if result <= 0:
1941 self.logger.error("launch_server ERROR getting server from DB %d %s", result, server_data)
1942 return result, server_data
1943
1944 #0: get image metadata
1945 server_metadata = server.get('metadata', {})
1946 use_incremental = None
1947
1948 if "use_incremental" in server_metadata:
1949 use_incremental = False if server_metadata["use_incremental"] == "no" else True
1950
1951 server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
1952 if rebuild:
1953 #delete previous incremental files
1954 for file_ in server_host_files.values():
1955 self.delete_file(file_['source file'] )
1956 server_host_files={}
1957
1958 #1: obtain aditional devices (disks)
1959 #Put as first device the main disk
1960 devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
1961 if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
1962 devices += server_data['extended']['devices']
1963 empty_path = None
1964 for dev in devices:
1965 image_id = dev.get('image_id')
1966 if not image_id:
1967 import uuid
1968 uuid_empty = str(uuid.uuid4())
1969 empty_path = self.empty_image_path + uuid_empty + '.qcow2' # local path for empty disk
1970
1971 dev['source file'] = empty_path
1972 dev['file format'] = 'qcow2'
1973 self.qemu_create_empty_disk(dev)
1974 server_host_files[uuid_empty] = {'source file': empty_path,
1975 'file format': dev['file format']}
1976
1977 continue
1978 else:
1979 self.db_lock.acquire()
1980 result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
1981 WHERE={'uuid': image_id})
1982 self.db_lock.release()
1983 if result <= 0:
1984 error_text = "ERROR", result, content, "when getting image", dev['image_id']
1985 self.logger.error("launch_server " + error_text)
1986 return -1, error_text
1987 if content[0]['metadata'] is not None:
1988 dev['metadata'] = json.loads(content[0]['metadata'])
1989 else:
1990 dev['metadata'] = {}
1991
1992 if image_id in server_host_files:
1993 dev['source file'] = server_host_files[image_id]['source file'] #local path
1994 dev['file format'] = server_host_files[image_id]['file format'] # raw or qcow2
1995 continue
1996
1997 #2: copy image to host
1998 if image_id:
1999 remote_file = content[0]['path']
2000 else:
2001 remote_file = empty_path
2002 use_incremental_image = use_incremental
2003 if dev['metadata'].get("use_incremental") == "no":
2004 use_incremental_image = False
2005 local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)
2006
2007 #create incremental image
2008 if use_incremental_image:
2009 local_file_inc = self.get_notused_filename(local_file, '.inc')
2010 command = 'qemu-img create -f qcow2 {} -o backing_file={}'.format(local_file_inc, local_file)
2011 self.run_command(command)
2012 local_file = local_file_inc
2013 qemu_info = {'file format': 'qcow2'}
2014
2015 server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}
2016
2017 dev['source file'] = local_file
2018 dev['file format'] = qemu_info['file format']
2019
2020 self.localinfo['server_files'][ server['uuid'] ] = server_host_files
2021 self.localinfo_dirty = True
2022
2023 #3 Create XML
2024 result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
2025 if result <0:
2026 self.logger.error("create xml server error: " + xml)
2027 return -2, xml
2028 self.logger.debug("create xml: " + xml)
2029 atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
2030 #4 Start the domain
2031 if not rebuild: #ensures that any pending destroying server is done
2032 self.server_forceoff(True)
2033 #self.logger.debug("launching instance " + xml)
2034 conn.createXML(xml, atribute)
2035 #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
2036
2037 return 0, 'Success'
2038
2039 except paramiko.ssh_exception.SSHException as e:
2040 text = e.args[0]
2041 self.logger.error("launch_server id='%s' ssh Exception: %s", server_id, text)
2042 if "SSH session not active" in text:
2043 self.ssh_connect()
2044 except host_thread.lvirt_module.libvirtError as e:
2045 text = e.get_error_message()
2046 self.logger.error("launch_server id='%s' libvirt Exception: %s", server_id, text)
2047 except Exception as e:
2048 text = str(e)
2049 self.logger.error("launch_server id='%s' Exception: %s", server_id, text)
2050 return -1, text
2051
    def update_servers_status(self):
        """Poll libvirt for the state of every known domain and update the
        'instances' table (and self.server_status) when a state changes.
        Does nothing in test mode or when no server is tracked.
        """
        # # virDomainState
        # VIR_DOMAIN_NOSTATE = 0
        # VIR_DOMAIN_RUNNING = 1
        # VIR_DOMAIN_BLOCKED = 2
        # VIR_DOMAIN_PAUSED = 3
        # VIR_DOMAIN_SHUTDOWN = 4
        # VIR_DOMAIN_SHUTOFF = 5
        # VIR_DOMAIN_CRASHED = 6
        # VIR_DOMAIN_PMSUSPENDED = 7   #TODO suspended

        if self.test or len(self.server_status)==0:
            return

        try:
            # map every libvirt domain uuid to an openvim status string
            conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
            domains= conn.listAllDomains()
            domain_dict={}
            for domain in domains:
                uuid = domain.UUIDString() ;
                libvirt_status = domain.state()
                #print libvirt_status
                if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
                    new_status = "ACTIVE"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
                    new_status = "PAUSED"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
                    new_status = "INACTIVE"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
                    new_status = "ERROR"
                else:
                    new_status = None
                domain_dict[uuid] = new_status
            conn.close()
        except host_thread.lvirt_module.libvirtError as e:
            self.logger.error("get_state() Exception " + e.get_error_message())
            return

        for server_id, current_status in self.server_status.iteritems():
            new_status = None
            if server_id in domain_dict:
                new_status = domain_dict[server_id]
            else:
                # the domain disappeared from libvirt: consider it stopped
                new_status = "INACTIVE"

            if new_status == None or new_status == current_status:
                continue
            if new_status == 'INACTIVE' and current_status == 'ERROR':
                continue #keep ERROR status, because obviously this machine is not running
            #change status
            self.logger.debug("server id='%s' status change from '%s' to '%s'", server_id, current_status, new_status)
            STATUS={'progress':100, 'status':new_status}
            if new_status == 'ERROR':
                STATUS['last_error'] = 'machine has crashed'
            self.db_lock.acquire()
            r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
            self.db_lock.release()
            if r>=0:
                # only cache the new status if the DB update succeeded
                self.server_status[server_id] = new_status
2111
2112 def action_on_server(self, req, last_retry=True):
2113 '''Perform an action on a req
2114 Attributes:
2115 req: dictionary that contain:
2116 server properties: 'uuid','name','tenant_id','status'
2117 action: 'action'
2118 host properties: 'user', 'ip_name'
2119 return (error, text)
2120 0: No error. VM is updated to new state,
2121 -1: Invalid action, as trying to pause a PAUSED VM
2122 -2: Error accessing host
2123 -3: VM nor present
2124 -4: Error at DB access
2125 -5: Error while trying to perform action. VM is updated to ERROR
2126 '''
2127 server_id = req['uuid']
2128 conn = None
2129 new_status = None
2130 old_status = req['status']
2131 last_error = None
2132
2133 if self.test:
2134 if 'terminate' in req['action']:
2135 new_status = 'deleted'
2136 elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
2137 if req['status']!='ERROR':
2138 time.sleep(5)
2139 new_status = 'INACTIVE'
2140 elif 'start' in req['action'] and req['status']!='ERROR':
2141 new_status = 'ACTIVE'
2142 elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE':
2143 new_status = 'ACTIVE'
2144 elif 'pause' in req['action'] and req['status']!='ERROR':
2145 new_status = 'PAUSED'
2146 elif 'reboot' in req['action'] and req['status']!='ERROR':
2147 new_status = 'ACTIVE'
2148 elif 'rebuild' in req['action']:
2149 time.sleep(random.randint(20,150))
2150 new_status = 'ACTIVE'
2151 elif 'createImage' in req['action']:
2152 time.sleep(5)
2153 self.create_image(None, req)
2154 else:
2155 try:
2156 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2157 try:
2158 dom = conn.lookupByUUIDString(server_id)
2159 except host_thread.lvirt_module.libvirtError as e:
2160 text = e.get_error_message()
2161 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
2162 dom = None
2163 else:
2164 self.logger.error("action_on_server id='%s' libvirt exception: %s", server_id, text)
2165 raise e
2166
2167 if 'forceOff' in req['action']:
2168 if dom == None:
2169 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2170 else:
2171 try:
2172 self.logger.debug("sending DESTROY to server id='%s'", server_id)
2173 dom.destroy()
2174 except Exception as e:
2175 if "domain is not running" not in e.get_error_message():
2176 self.logger.error("action_on_server id='%s' Exception while sending force off: %s",
2177 server_id, e.get_error_message())
2178 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
2179 new_status = 'ERROR'
2180
2181 elif 'terminate' in req['action']:
2182 if dom == None:
2183 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2184 new_status = 'deleted'
2185 else:
2186 try:
2187 if req['action']['terminate'] == 'force':
2188 self.logger.debug("sending DESTROY to server id='%s'", server_id)
2189 dom.destroy()
2190 new_status = 'deleted'
2191 else:
2192 self.logger.debug("sending SHUTDOWN to server id='%s'", server_id)
2193 dom.shutdown()
2194 self.pending_terminate_server.append( (time.time()+10,server_id) )
2195 except Exception as e:
2196 self.logger.error("action_on_server id='%s' Exception while destroy: %s",
2197 server_id, e.get_error_message())
2198 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
2199 new_status = 'ERROR'
2200 if "domain is not running" in e.get_error_message():
2201 try:
2202 dom.undefine()
2203 new_status = 'deleted'
2204 except Exception:
2205 self.logger.error("action_on_server id='%s' Exception while undefine: %s",
2206 server_id, e.get_error_message())
2207 last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
2208 #Exception: 'virDomainDetachDevice() failed'
2209 if new_status=='deleted':
2210 if server_id in self.server_status:
2211 del self.server_status[server_id]
2212 if req['uuid'] in self.localinfo['server_files']:
2213 for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
2214 try:
2215 self.delete_file(file_['source file'])
2216 except Exception:
2217 pass
2218 del self.localinfo['server_files'][ req['uuid'] ]
2219 self.localinfo_dirty = True
2220
2221 elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
2222 try:
2223 if dom == None:
2224 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2225 else:
2226 dom.shutdown()
2227 # new_status = 'INACTIVE'
2228 #TODO: check status for changing at database
2229 except Exception as e:
2230 new_status = 'ERROR'
2231 self.logger.error("action_on_server id='%s' Exception while shutdown: %s",
2232 server_id, e.get_error_message())
2233 last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()
2234
2235 elif 'rebuild' in req['action']:
2236 if dom != None:
2237 dom.destroy()
2238 r = self.launch_server(conn, req, True, None)
2239 if r[0] <0:
2240 new_status = 'ERROR'
2241 last_error = r[1]
2242 else:
2243 new_status = 'ACTIVE'
2244 elif 'start' in req['action']:
2245 # The instance is only create in DB but not yet at libvirt domain, needs to be create
2246 rebuild = True if req['action']['start'] == 'rebuild' else False
2247 r = self.launch_server(conn, req, rebuild, dom)
2248 if r[0] <0:
2249 new_status = 'ERROR'
2250 last_error = r[1]
2251 else:
2252 new_status = 'ACTIVE'
2253
2254 elif 'resume' in req['action']:
2255 try:
2256 if dom == None:
2257 pass
2258 else:
2259 dom.resume()
2260 # new_status = 'ACTIVE'
2261 except Exception as e:
2262 self.logger.error("action_on_server id='%s' Exception while resume: %s",
2263 server_id, e.get_error_message())
2264
2265 elif 'pause' in req['action']:
2266 try:
2267 if dom == None:
2268 pass
2269 else:
2270 dom.suspend()
2271 # new_status = 'PAUSED'
2272 except Exception as e:
2273 self.logger.error("action_on_server id='%s' Exception while pause: %s",
2274 server_id, e.get_error_message())
2275
2276 elif 'reboot' in req['action']:
2277 try:
2278 if dom == None:
2279 pass
2280 else:
2281 dom.reboot()
2282 self.logger.debug("action_on_server id='%s' reboot:", server_id)
2283 #new_status = 'ACTIVE'
2284 except Exception as e:
2285 self.logger.error("action_on_server id='%s' Exception while reboot: %s",
2286 server_id, e.get_error_message())
2287 elif 'createImage' in req['action']:
2288 self.create_image(dom, req)
2289
2290
2291 conn.close()
2292 except host_thread.lvirt_module.libvirtError as e:
2293 if conn is not None: conn.close()
2294 text = e.get_error_message()
2295 new_status = "ERROR"
2296 last_error = text
2297 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
2298 self.logger.debug("action_on_server id='%s' Exception removed from host", server_id)
2299 else:
2300 self.logger.error("action_on_server id='%s' Exception %s", server_id, text)
2301 #end of if self.test
2302 if new_status == None:
2303 return 1
2304
2305 self.logger.debug("action_on_server id='%s' new status=%s %s",server_id, new_status, last_error)
2306 UPDATE = {'progress':100, 'status':new_status}
2307
2308 if new_status=='ERROR':
2309 if not last_retry: #if there will be another retry do not update database
2310 return -1
2311 elif 'terminate' in req['action']:
2312 #PUT a log in the database
2313 self.logger.error("PANIC deleting server id='%s' %s", server_id, last_error)
2314 self.db_lock.acquire()
2315 self.db.new_row('logs',
2316 {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
2317 'description':'PANIC deleting server from host '+self.name+': '+last_error}
2318 )
2319 self.db_lock.release()
2320 if server_id in self.server_status:
2321 del self.server_status[server_id]
2322 return -1
2323 else:
2324 UPDATE['last_error'] = last_error
2325 if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
2326 self.db_lock.acquire()
2327 self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
2328 self.server_status[server_id] = new_status
2329 self.db_lock.release()
2330 if new_status == 'ERROR':
2331 return -1
2332 return 1
2333
2334
2335 def restore_iface(self, name, mac, lib_conn=None):
2336 ''' make an ifdown, ifup to restore default parameter of na interface
2337 Params:
2338 mac: mac address of the interface
2339 lib_conn: connection to the libvirt, if None a new connection is created
2340 Return 0,None if ok, -1,text if fails
2341 '''
2342 conn=None
2343 ret = 0
2344 error_text=None
2345 if self.test:
2346 self.logger.debug("restore_iface '%s' %s", name, mac)
2347 return 0, None
2348 try:
2349 if not lib_conn:
2350 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2351 else:
2352 conn = lib_conn
2353
2354 #wait to the pending VM deletion
2355 #TODO.Revise self.server_forceoff(True)
2356
2357 iface = conn.interfaceLookupByMACString(mac)
2358 if iface.isActive():
2359 iface.destroy()
2360 iface.create()
2361 self.logger.debug("restore_iface '%s' %s", name, mac)
2362 except host_thread.lvirt_module.libvirtError as e:
2363 error_text = e.get_error_message()
2364 self.logger.error("restore_iface '%s' '%s' libvirt exception: %s", name, mac, error_text)
2365 ret=-1
2366 finally:
2367 if lib_conn is None and conn is not None:
2368 conn.close()
2369 return ret, error_text
2370
2371
    def create_image(self,dom, req):
        """Create a new image from a server disk (req['action']['createImage'])
        by copying the current disk file, then register the result at the
        'images' table (status ACTIVE on success, ERROR on failure).

        :param dom: libvirt domain of the server (unused here; kept for the
            caller's uniform action signature)
        :param req: request dict with 'uuid', 'action'/'createImage' and
            'new_image'/'uuid' of the image row to update
        """
        if self.test:
            # test mode: only compute the destination path, no file is copied
            if 'path' in req['action']['createImage']:
                file_dst = req['action']['createImage']['path']
            else:
                createImage=req['action']['createImage']
                img_name= createImage['source']['path']
                index=img_name.rfind('/')
                file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
            image_status='ACTIVE'
        else:
            # retry once after reconnecting if the SSH session went down
            for retry in (0,1):
                try:
                    server_id = req['uuid']
                    createImage=req['action']['createImage']
                    file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
                    if 'path' in req['action']['createImage']:
                        file_dst = req['action']['createImage']['path']
                    else:
                        img_name= createImage['source']['path']
                        index=img_name.rfind('/')
                        file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')

                    self.copy_file(file_orig, file_dst)
                    qemu_info = self.qemu_get_info(file_orig)
                    if 'backing file' in qemu_info:
                        # rebase the copy onto the original repository file
                        # (the key of localinfo['files'] is the remote path)
                        for k,v in self.localinfo['files'].items():
                            if v==qemu_info['backing file']:
                                self.qemu_change_backing(file_dst, k)
                                break
                    image_status='ACTIVE'
                    break
                except paramiko.ssh_exception.SSHException as e:
                    image_status='ERROR'
                    error_text = e.args[0]
                    self.logger.error("create_image id='%s' ssh Exception: %s", server_id, error_text)
                    if "SSH session not active" in error_text and retry==0:
                        self.ssh_connect()
                except Exception as e:
                    image_status='ERROR'
                    error_text = str(e)
                    self.logger.error("create_image id='%s' Exception: %s", server_id, error_text)

        #TODO insert a last_error at database
        self.db_lock.acquire()
        self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
                {'uuid':req['new_image']['uuid']}, log=True)
        self.db_lock.release()
2420
    def edit_iface(self, port_id, old_net, new_net):
        """Move a SR-IOV (VF) interface between networks by detaching and
        re-attaching it to the running domain with the proper vlan tag.

        :param port_id: uuid of the port at the 'ports' table
        :param old_net: truthy when the interface must be detached first
        :param new_net: truthy when the interface must be (re)attached
        """
        #This action imply remove and insert interface to put proper parameters
        if self.test:
            time.sleep(1)
        else:
        #get iface details
            self.db_lock.acquire()
            r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
                                    WHERE={'port_id': port_id})
            self.db_lock.release()
            if r<0:
                self.logger.error("edit_iface %s DDBB error: %s", port_id, c)
                return
            elif r==0:
                self.logger.error("edit_iface %s port not found", port_id)
                return
            port=c[0]
            if port["model"]!="VF":
                # only SR-IOV virtual functions can be hot detached/attached here
                self.logger.error("edit_iface %s ERROR model must be VF", port_id)
                return
            #create xml detach file
            xml=[]
            self.xml_level = 2
            xml.append("<interface type='hostdev' managed='yes'>")
            xml.append(" <mac address='" +port['mac']+ "'/>")
            xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
            xml.append('</interface>')


            try:
                conn=None
                conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
                dom = conn.lookupByUUIDString(port["instance_id"])
                if old_net:
                    text="\n".join(xml)
                    self.logger.debug("edit_iface detaching SRIOV interface " + text)
                    dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
                if new_net:
                    # reuse the xml, replacing the closing tag with the vlan
                    # element before closing the interface again
                    xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
                    self.xml_level = 1
                    xml.append(self.pci2xml(port.get('vpci',None)) )
                    xml.append('</interface>')
                    text="\n".join(xml)
                    self.logger.debug("edit_iface attaching SRIOV interface " + text)
                    dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)

            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                self.logger.error("edit_iface %s libvirt exception: %s", port["instance_id"], text)

            finally:
                if conn is not None: conn.close()
2473
2474
2475 def create_server(server, db, db_lock, only_of_ports):
2476 extended = server.get('extended', None)
2477 requirements={}
2478 requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
2479 requirements['ram'] = server['flavor'].get('ram', 0)
2480 if requirements['ram']== None:
2481 requirements['ram'] = 0
2482 requirements['vcpus'] = server['flavor'].get('vcpus', 0)
2483 if requirements['vcpus']== None:
2484 requirements['vcpus'] = 0
2485 #If extended is not defined get requirements from flavor
2486 if extended is None:
2487 #If extended is defined in flavor convert to dictionary and use it
2488 if 'extended' in server['flavor'] and server['flavor']['extended'] != None:
2489 json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
2490 extended = json.loads(json_acceptable_string)
2491 else:
2492 extended = None
2493 #print json.dumps(extended, indent=4)
2494
2495 #For simplicity only one numa VM are supported in the initial implementation
2496 if extended != None:
2497 numas = extended.get('numas', [])
2498 if len(numas)>1:
2499 return (-2, "Multi-NUMA VMs are not supported yet")
2500 #elif len(numas)<1:
2501 # return (-1, "At least one numa must be specified")
2502
2503 #a for loop is used in order to be ready to multi-NUMA VMs
2504 request = []
2505 for numa in numas:
2506 numa_req = {}
2507 numa_req['memory'] = numa.get('memory', 0)
2508 if 'cores' in numa:
2509 numa_req['proc_req_nb'] = numa['cores'] #number of cores or threads to be reserved
2510 numa_req['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
2511 numa_req['proc_req_list'] = numa.get('cores-id', None) #list of ids to be assigned to the cores or threads
2512 elif 'paired-threads' in numa:
2513 numa_req['proc_req_nb'] = numa['paired-threads']
2514 numa_req['proc_req_type'] = 'paired-threads'
2515 numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
2516 elif 'threads' in numa:
2517 numa_req['proc_req_nb'] = numa['threads']
2518 numa_req['proc_req_type'] = 'threads'
2519 numa_req['proc_req_list'] = numa.get('threads-id', None)
2520 else:
2521 numa_req['proc_req_nb'] = 0 # by default
2522 numa_req['proc_req_type'] = 'threads'
2523
2524
2525
2526 #Generate a list of sriov and another for physical interfaces
2527 interfaces = numa.get('interfaces', [])
2528 sriov_list = []
2529 port_list = []
2530 for iface in interfaces:
2531 iface['bandwidth'] = int(iface['bandwidth'])
2532 if iface['dedicated'][:3]=='yes':
2533 port_list.append(iface)
2534 else:
2535 sriov_list.append(iface)
2536
2537 #Save lists ordered from more restrictive to less bw requirements
2538 numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
2539 numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)
2540
2541
2542 request.append(numa_req)
2543
2544 # print "----------\n"+json.dumps(request[0], indent=4)
2545 # print '----------\n\n'
2546
2547 #Search in db for an appropriate numa for each requested numa
        # NOTE(review): this is the tail of a larger function whose header is
        # above this chunk; `requirements`, `request`, `server`, `extended`,
        # `db`, `db_lock` and `only_of_ports` are bound earlier in it.
        # At the moment multi-NUMA VMs are not supported: only the first
        # requested numa (if any) is taken into account.
        if len(request)>0:
            requirements['numa'].update(request[0])
    # Hugepage (numa) memory and generic ram are mutually exclusive; same for
    # pinned (numa) processors vs generic vcpus: requesting the numa variant
    # zeroes the generic one, and at least one of each pair must be set.
    if requirements['numa']['memory']>0:
        requirements['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
    elif requirements['ram']==0:
        return (-1, "Memory information not set neither at extended field not at ram")
    if requirements['numa']['proc_req_nb']>0:
        requirements['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
    elif requirements['vcpus']==0:
        return (-1, "Processor information not set neither at extended field not at vcpus")


    # Ask the database for a host/numa able to fit the requirements, optionally
    # restricted to a concrete host and to hosts reaching the external ports.
    db_lock.acquire()
    result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
    db_lock.release()

    if result == -1:
        return (-1, content)

    numa_id = content['numa_id']
    host_id = content['host_id']

    #obtain threads_id and calculate pinning
    cpu_pinning = []        # entries are [vcpu_id, physical thread_id, resources_core row id]
    reserved_threads=[]     # resources_core row ids blocked so the sibling thread stays unused
    if requirements['numa']['proc_req_nb']>0:
        # Fetch the free (no instance assigned) healthy threads of the chosen numa.
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_core',
                                       SELECT=('id','core_id','thread_id'),
                                       WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} )
        db_lock.release()
        if result <= 0:
            #print content
            return -1, content

        #convert rows to a dictionary indexed by core_id
        cores_dict = {}
        for row in content:
            if not row['core_id'] in cores_dict:
                cores_dict[row['core_id']] = []
            cores_dict[row['core_id']].append([row['thread_id'],row['id']])

        #In case full cores are requested
        paired = 'N'
        if requirements['numa']['proc_req_type'] == 'cores':
            #Get/create the list of the vcpu_ids
            vcpu_id_list = requirements['numa']['proc_req_list']
            if vcpu_id_list == None:
                vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))

            for threads in cores_dict.itervalues():
                #we need full cores: skip cores where only one thread is free
                if len(threads) != 2:
                    continue

                #set pinning for the first thread
                cpu_pinning.append( [ vcpu_id_list.pop(0), threads[0][0], threads[0][1] ] )

                #reserve so it is not used the second thread
                reserved_threads.append(threads[1][1])

                if len(vcpu_id_list) == 0:
                    break

        #In case paired threads are requested
        elif requirements['numa']['proc_req_type'] == 'paired-threads':
            paired = 'Y'
            #Get/create the list of the vcpu_ids
            if requirements['numa']['proc_req_list'] != None:
                # Flatten the requested [vcpu_a, vcpu_b] pairs into a single list.
                vcpu_id_list = []
                for pair in requirements['numa']['proc_req_list']:
                    if len(pair)!=2:
                        return -1, "Field paired-threads-id not properly specified"
                        return  # NOTE(review): dead code — the line above already returns
                    vcpu_id_list.append(pair[0])
                    vcpu_id_list.append(pair[1])
            else:
                vcpu_id_list = range(0,2*int(requirements['numa']['proc_req_nb']))

            for threads in cores_dict.itervalues():
                #we need full cores
                if len(threads) != 2:
                    continue
                #set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                #set pinning for the second thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])

                if len(vcpu_id_list) == 0:
                    break

        #In case normal threads are requested
        elif requirements['numa']['proc_req_type'] == 'threads':
            #Get/create the list of the vcpu_ids
            vcpu_id_list = requirements['numa']['proc_req_list']
            if vcpu_id_list == None:
                vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))

            # Visit cores with fewer free threads first so half-used cores are
            # filled before breaking into fully-free ones.
            for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
                threads = cores_dict[threads_index]
                #set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                #if exists, set pinning for the second thread
                if len(threads) == 2 and len(vcpu_id_list) != 0:
                    cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])

                if len(vcpu_id_list) == 0:
                    break

    #Get the source pci addresses for the selected numa
    used_sriov_ports = []
    for port in requirements['numa']['sriov_list']:
        # Pick a free VF (Mbps_used == 0, no port assigned) under the requested root port.
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
        db_lock.release()
        if result <= 0:
            #print content
            return -1, content
        for row in content:
            # Skip rows already taken by a previous sriov request or matching the root itself.
            if row['id'] in used_sriov_ports or row['id']==port['port_id']:
                continue
            port['pci'] = row['pci']
            if 'mac_address' not in port:
                port['mac_address'] = row['mac']
            del port['mac']
            port['port_id']=row['id']
            port['Mbps_used'] = port['bandwidth']
            used_sriov_ports.append(row['id'])
            break

    # Same assignment for passthrough (dedicated) ports; "yes:sriov" entries are
    # treated like SR-IOV VFs, anything else keeps its mac and is skipped.
    for port in requirements['numa']['port_list']:
        port['Mbps_used'] = None
        if port['dedicated'] != "yes:sriov":
            port['mac_address'] = port['mac']
            del port['mac']
            continue
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
        db_lock.release()
        if result <= 0:
            #print content
            return -1, content
        # Reserve the full port bandwidth so the other VFs of this root stay unused.
        port['Mbps_used'] = content[0]['Mbps']
        for row in content:
            if row['id'] in used_sriov_ports or row['id']==port['port_id']:
                continue
            port['pci'] = row['pci']
            if 'mac_address' not in port:
                port['mac_address'] = row['mac'] # mac cannot be set to passthrough ports
            del port['mac']
            port['port_id']=row['id']
            used_sriov_ports.append(row['id'])
            break

    # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
    # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)

    server['host_id'] = host_id

    #Generate dictionary for saving in db the instance resources
    resources = {}
    resources['bridged-ifaces'] = []

    numa_dict = {}
    numa_dict['interfaces'] = []

    numa_dict['interfaces'] += requirements['numa']['port_list']
    numa_dict['interfaces'] += requirements['numa']['sriov_list']

    #Check bridge information
    unified_dataplane_iface=[]
    unified_dataplane_iface += requirements['numa']['port_list']
    unified_dataplane_iface += requirements['numa']['sriov_list']

    for control_iface in server.get('networks', []):
        control_iface['net_id']=control_iface.pop('uuid')
        #Get the brifge name
        # NOTE(review): several error strings below misspell "networks" as
        # "netwoks" ("wit", "asign" likewise) — fixing them would change
        # runtime messages, so only flagged here.
        db_lock.acquire()
        result, content = db.get_table(FROM='nets',
                                       SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp','dhcp_first_ip',
                                               'dhcp_last_ip', 'cidr', 'gateway_ip', 'dns', 'links', 'routes'),
                                       WHERE={'uuid': control_iface['net_id']})
        db_lock.release()
        if result < 0:
            pass  # NOTE(review): DB errors are silently ignored for this interface
        elif result==0:
            return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface['net_id']
        else:
            network=content[0]
            if control_iface.get("type", 'virtual') == 'virtual':
                # Virtual (control) interface: must land on a bridge network.
                if network['type']!='bridge_data' and network['type']!='bridge_man':
                    return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
                resources['bridged-ifaces'].append(control_iface)
                if network.get("provider") and network["provider"][0:3] == "OVS":
                    control_iface["type"] = "instance:ovs"
                else:
                    control_iface["type"] = "instance:bridge"
                if network.get("vlan"):
                    control_iface["vlan"] = network["vlan"]

                # Propagate DHCP/DNS/routing settings from the network to the iface.
                if network.get("enable_dhcp") == 'true':
                    control_iface["enable_dhcp"] = network.get("enable_dhcp")
                    control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
                    control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
                    control_iface["cidr"] = network["cidr"]

                if network.get("dns"):
                    control_iface["dns"] = yaml.safe_load(network.get("dns"))
                if network.get("links"):
                    control_iface["links"] = yaml.safe_load(network.get("links"))
                if network.get("routes"):
                    control_iface["routes"] = yaml.safe_load(network.get("routes"))
            else:
                if network['type']!='data' and network['type']!='ptp':
                    return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
                #dataplane interface, look for it in the numa tree and asign this network
                iface_found=False
                for dataplane_iface in numa_dict['interfaces']:
                    if dataplane_iface['name'] == control_iface.get("name"):
                        # The flavor's dedication mode must agree with the network's
                        # interface type (PF=yes, VF=no, VFnotShared=yes:sriov).
                        if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
                            (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
                            (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared") :
                            return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
                                (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
                        dataplane_iface['uuid'] = control_iface['net_id']
                        if dataplane_iface['dedicated'] == "no":
                            dataplane_iface['vlan'] = network['vlan']
                        if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
                            dataplane_iface['mac_address'] = control_iface.get("mac_address")
                        if control_iface.get("vpci"):
                            dataplane_iface['vpci'] = control_iface.get("vpci")
                        iface_found=True
                        break
                if not iface_found:
                    return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface.get("name")

    # Assemble the instance-resources dictionary returned to the caller (saved in db).
    resources['host_id'] = host_id
    resources['image_id'] = server['image_id']
    resources['flavor_id'] = server['flavor_id']
    resources['tenant_id'] = server['tenant_id']
    resources['ram'] = requirements['ram']
    resources['vcpus'] = requirements['vcpus']
    resources['status'] = 'CREATING'

    if 'description' in server: resources['description'] = server['description']
    if 'name' in server: resources['name'] = server['name']

    resources['extended'] = {} #optional
    resources['extended']['numas'] = []
    numa_dict['numa_id'] = numa_id
    numa_dict['memory'] = requirements['numa']['memory']
    numa_dict['cores'] = []

    # Pinned vcpus carry their vthread and pairing mode; reserved sibling
    # threads are recorded with no vthread so they stay blocked.
    for core in cpu_pinning:
        numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
    for core in reserved_threads:
        numa_dict['cores'].append({'id': core})
    resources['extended']['numas'].append(numa_dict)
    if extended!=None and 'devices' in extended:   #TODO allow extra devices without numa
        resources['extended']['devices'] = extended['devices']


    # '===================================={'
    #print json.dumps(resources, indent=4)
    #print '====================================}'

    return 0, resources
2818