78be1c93965d3d4de502dfbd8ecece530d1abe3b
[osm/openvim.git] / osm_openvim / host_thread.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
24 '''
25 This is thread that interact with the host and the libvirt to manage VM
26 One thread will be launched per host
27 '''
28 __author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__ = "$10-jul-2014 12:07:15$"
30
31 import json
32 import yaml
33 import threading
34 import time
35 import Queue
36 import paramiko
37 import subprocess
38 # import libvirt
39 import imp
40 import random
41 import os
42 import logging
43 from jsonschema import validate as js_v, exceptions as js_e
44 from vim_schema import localinfo_schema, hostinfo_schema
45
class RunCommandException(Exception):
    """Raised when a local or remote command executed by host_thread fails."""
48
49 class host_thread(threading.Thread):
50 lvirt_module = None
51
    def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
                 develop_bridge_iface, password=None, keyfile = None, logger_name=None, debug=None):
        """Init a thread to communicate with compute node or ovs_controller.
        :param host_id: host identity
        :param name: name of the thread
        :param host: host ip or name to manage and user
        :param user, password, keyfile: user and credentials to connect to host
        :param db, db_lock': database class and lock to use it in exclusion
        :param test: when True the thread runs in simulation mode (no ssh/libvirt access)
        :param image_path: directory on the host where VM images and localinfo are stored
        :param develop_mode: relax resource constraints (no hugepages/pinning) for development
        :param debug: optional logging level name (e.g. "DEBUG") applied to this thread's logger
        """
        threading.Thread.__init__(self)
        self.name = name
        self.host = host
        self.user = user
        self.db = db
        self.db_lock = db_lock
        self.test = test
        self.password = password
        self.keyfile = keyfile
        self.localinfo_dirty = False   # True when localinfo must be flushed back to the host
        self.connectivity = True       # set False by check_connectivity() when the host is unreachable

        # libvirt is imported lazily and only once (class attribute) so that test mode
        # works on machines without python-libvirt installed
        if not test and not host_thread.lvirt_module:
            try:
                module_info = imp.find_module("libvirt")
                host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
            except (IOError, ImportError) as e:
                raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))
        if logger_name:
            self.logger_name = logger_name
        else:
            self.logger_name = "openvim.host."+name
        self.logger = logging.getLogger(self.logger_name)
        if debug:
            self.logger.setLevel(getattr(logging, debug))


        self.develop_mode = develop_mode
        self.develop_bridge_iface = develop_bridge_iface
        self.image_path = image_path
        self.empty_image_path = image_path
        self.host_id = host_id
        self.version = version

        self.xml_level = 0   # current indentation depth used by tab()/inc_tab()/dec_tab()
        # self.pending ={}

        self.server_status = {} #dictionary with pairs server_uuid:server_status
        self.pending_terminate_server =[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
        self.next_update_server_status = 0 #time when must be check servers status

        self.hostinfo = None

        self.queueLock = threading.Lock()
        self.taskQueue = Queue.Queue(2000)
        self.ssh_conn = None
        self.run_command_session = None   # open keep_session handle (Popen or paramiko (i,o,e) tuple)
        self.error = None
        self.localhost = True if host == 'localhost' else False
        # libvirt connection URI for remote qemu over ssh
        self.lvirt_conn_uri = "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
            user=self.user, host=self.host)
        if keyfile:
            self.lvirt_conn_uri += "&keyfile=" + keyfile
        self.remote_ip = None
        self.local_ip = None
116
117 def run_command(self, command, keep_session=False, ignore_exit_status=False):
118 """Run a command passed as a str on a localhost or at remote machine.
119 :param command: text with the command to execute.
120 :param keep_session: if True it returns a <stdin> for sending input with '<stdin>.write("text\n")'.
121 A command with keep_session=True MUST be followed by a command with keep_session=False in order to
122 close the session and get the output
123 :param ignore_exit_status: Return stdout and not raise an exepction in case of error.
124 :return: the output of the command if 'keep_session=False' or the <stdin> object if 'keep_session=True'
125 :raises: RunCommandException if command fails
126 """
127 if self.run_command_session and keep_session:
128 raise RunCommandException("Internal error. A command with keep_session=True must be followed by another "
129 "command with keep_session=False to close session")
130 try:
131 if self.localhost:
132 if self.run_command_session:
133 p = self.run_command_session
134 self.run_command_session = None
135 (output, outerror) = p.communicate()
136 returncode = p.returncode
137 p.stdin.close()
138 elif keep_session:
139 p = subprocess.Popen(('bash', "-c", command), stdin=subprocess.PIPE, stdout=subprocess.PIPE,
140 stderr=subprocess.PIPE)
141 self.run_command_session = p
142 return p.stdin
143 else:
144 if not ignore_exit_status:
145 output = subprocess.check_output(('bash', "-c", command))
146 returncode = 0
147 else:
148 out = None
149 p = subprocess.Popen(('bash', "-c", command), stdout=subprocess.PIPE)
150 out, err = p.communicate()
151 return out
152 else:
153 if self.run_command_session:
154 (i, o, e) = self.run_command_session
155 self.run_command_session = None
156 i.channel.shutdown_write()
157 else:
158 if not self.ssh_conn:
159 self.ssh_connect()
160 (i, o, e) = self.ssh_conn.exec_command(command, timeout=10)
161 if keep_session:
162 self.run_command_session = (i, o, e)
163 return i
164 returncode = o.channel.recv_exit_status()
165 output = o.read()
166 outerror = e.read()
167 if returncode != 0 and not ignore_exit_status:
168 text = "run_command='{}' Error='{}'".format(command, outerror)
169 self.logger.error(text)
170 raise RunCommandException(text)
171
172 self.logger.debug("run_command='{}' result='{}'".format(command, output))
173 return output
174
175 except RunCommandException:
176 raise
177 except subprocess.CalledProcessError as e:
178 text = "run_command Exception '{}' '{}'".format(str(e), e.output)
179 except (paramiko.ssh_exception.SSHException, Exception) as e:
180 text = "run_command='{}' Exception='{}'".format(command, str(e))
181 self.ssh_conn = None
182 self.run_command_session = None
183 raise RunCommandException(text)
184
185 def ssh_connect(self):
186 try:
187 # Connect SSH
188 self.ssh_conn = paramiko.SSHClient()
189 self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
190 self.ssh_conn.load_system_host_keys()
191 self.ssh_conn.connect(self.host, username=self.user, password=self.password, key_filename=self.keyfile,
192 timeout=10) # auth_timeout=10)
193 self.remote_ip = self.ssh_conn.get_transport().sock.getpeername()[0]
194 self.local_ip = self.ssh_conn.get_transport().sock.getsockname()[0]
195 except (paramiko.ssh_exception.SSHException, Exception) as e:
196 text = 'ssh connect Exception: {}'.format(e)
197 self.ssh_conn = None
198 self.error = text
199 raise
200
201 def check_connectivity(self):
202 if not self.test:
203 try:
204 command = 'sudo brctl show'
205 self.run_command(command)
206 except RunCommandException as e:
207 self.connectivity = False
208 self.logger.error("check_connectivity Exception: " + str(e))
209
210 def load_localinfo(self):
211 if not self.test:
212 try:
213 self.run_command('sudo mkdir -p ' + self.image_path)
214 result = self.run_command('cat {}/.openvim.yaml'.format(self.image_path))
215 self.localinfo = yaml.load(result)
216 js_v(self.localinfo, localinfo_schema)
217 self.localinfo_dirty = False
218 if 'server_files' not in self.localinfo:
219 self.localinfo['server_files'] = {}
220 self.logger.debug("localinfo loaded from host")
221 return
222 except RunCommandException as e:
223 self.logger.error("load_localinfo Exception: " + str(e))
224 except host_thread.lvirt_module.libvirtError as e:
225 text = e.get_error_message()
226 self.logger.error("load_localinfo libvirt Exception: " + text)
227 except yaml.YAMLError as exc:
228 text = ""
229 if hasattr(exc, 'problem_mark'):
230 mark = exc.problem_mark
231 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
232 self.logger.error("load_localinfo yaml format Exception " + text)
233 except js_e.ValidationError as e:
234 text = ""
235 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
236 self.logger.error("load_localinfo format Exception: %s %s", text, str(e))
237 except Exception as e:
238 text = str(e)
239 self.logger.error("load_localinfo Exception: " + text)
240
241 # not loaded, insert a default data and force saving by activating dirty flag
242 self.localinfo = {'files':{}, 'server_files':{} }
243 # self.localinfo_dirty=True
244 self.localinfo_dirty=False
245
246 def load_hostinfo(self):
247 if self.test:
248 return
249 try:
250 result = self.run_command('cat {}/hostinfo.yaml'.format(self.image_path))
251 self.hostinfo = yaml.load(result)
252 js_v(self.hostinfo, hostinfo_schema)
253 self.logger.debug("hostinfo load from host " + str(self.hostinfo))
254 return
255 except RunCommandException as e:
256 self.logger.error("load_hostinfo ssh Exception: " + str(e))
257 except host_thread.lvirt_module.libvirtError as e:
258 text = e.get_error_message()
259 self.logger.error("load_hostinfo libvirt Exception: " + text)
260 except yaml.YAMLError as exc:
261 text = ""
262 if hasattr(exc, 'problem_mark'):
263 mark = exc.problem_mark
264 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
265 self.logger.error("load_hostinfo yaml format Exception " + text)
266 except js_e.ValidationError as e:
267 text = ""
268 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
269 self.logger.error("load_hostinfo format Exception: %s %s", text, str(e))
270 except Exception as e:
271 text = str(e)
272 self.logger.error("load_hostinfo Exception: " + text)
273
274 #not loaded, insert a default data
275 self.hostinfo = None
276
    def save_localinfo(self, tries=3):
        """Persist self.localinfo as YAML to <image_path>/.openvim.yaml on the host.

        Retries up to 'tries' times; clears the dirty flag on success.
        Uses the run_command keep_session protocol: the first call opens a
        'cat > file' stdin stream, yaml is dumped into it, and the second call
        closes the session (flushing the file).
        """
        if self.test:
            self.localinfo_dirty = False
            return

        while tries>=0:
            tries-=1

            try:
                command = 'cat > {}/.openvim.yaml'.format(self.image_path)
                in_stream = self.run_command(command, keep_session=True)
                yaml.safe_dump(self.localinfo, in_stream, explicit_start=True, indent=4, default_flow_style=False,
                               tags=False, encoding='utf-8', allow_unicode=True)
                result = self.run_command(command, keep_session=False) # to end session

                self.localinfo_dirty = False
                break #while tries

            except RunCommandException as e:
                self.logger.error("save_localinfo ssh Exception: " + str(e))
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                self.logger.error("save_localinfo libvirt Exception: " + text)
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                self.logger.error("save_localinfo yaml format Exception " + text)
            except Exception as e:
                text = str(e)
                self.logger.error("save_localinfo Exception: " + text)
309
    def load_servers_from_db(self):
        """Load the instances assigned to this host from database into self.server_status.

        Also migrates the legacy localinfo 'inc_files' layout into 'server_files',
        marking localinfo dirty so the converted format gets persisted.
        """
        self.db_lock.acquire()
        r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
        self.db_lock.release()

        self.server_status = {}
        if r<0:
            self.logger.error("Error getting data from database: " + c)
            return
        for server in c:
            self.server_status[ server['uuid'] ] = server['status']

            #convert from old version to new one
            if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
                server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
                # file format is guessed from the filename suffix
                if server_files_dict['source file'][-5:] == 'qcow2':
                    server_files_dict['file format'] = 'qcow2'

                self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
                if 'inc_files' in self.localinfo:
                    del self.localinfo['inc_files']
                    self.localinfo_dirty = True
332
333 def delete_unused_files(self):
334 '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
335 Deletes unused entries at self.loacalinfo and the corresponding local files.
336 The only reason for this mismatch is the manual deletion of instances (VM) at database
337 '''
338 if self.test:
339 return
340 for uuid,images in self.localinfo['server_files'].items():
341 if uuid not in self.server_status:
342 for localfile in images.values():
343 try:
344 self.logger.debug("deleting file '%s' of unused server '%s'", localfile['source file'], uuid)
345 self.delete_file(localfile['source file'])
346 except RunCommandException as e:
347 self.logger.error("Exception deleting file '%s': %s", localfile['source file'], str(e))
348 del self.localinfo['server_files'][uuid]
349 self.localinfo_dirty = True
350
351 def insert_task(self, task, *aditional):
352 try:
353 self.queueLock.acquire()
354 task = self.taskQueue.put( (task,) + aditional, timeout=5)
355 self.queueLock.release()
356 return 1, None
357 except Queue.Full:
358 return -1, "timeout inserting a task over host " + self.name
359
    def run(self):
        """Thread main loop.

        Outer loop: (re)load host and database state; re-entered on a 'reload' task.
        Inner loop: consume tasks from taskQueue. When idle it flushes dirty
        localinfo, refreshes server status every 5 seconds, and force-stops servers
        whose termination grace time expired; otherwise it sleeps 1 second.
        """
        while True:
            self.load_localinfo()
            self.load_hostinfo()
            self.load_servers_from_db()
            self.delete_unused_files()
            while True:
                try:
                    self.queueLock.acquire()
                    if not self.taskQueue.empty():
                        task = self.taskQueue.get()
                    else:
                        task = None
                    self.queueLock.release()

                    if task is None:
                        # idle housekeeping: only one action per iteration, by priority
                        now=time.time()
                        if self.localinfo_dirty:
                            self.save_localinfo()
                        elif self.next_update_server_status < now:
                            self.update_servers_status()
                            self.next_update_server_status = now + 5
                        elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
                            self.server_forceoff()
                        else:
                            time.sleep(1)
                        continue

                    if task[0] == 'instance':
                        self.logger.debug("processing task instance " + str(task[1]['action']))
                        retry = 0
                        # retry once; the second attempt (retry==2) is flagged to action_on_server
                        while retry < 2:
                            retry += 1
                            r = self.action_on_server(task[1], retry==2)
                            if r >= 0:
                                break
                    elif task[0] == 'image':
                        pass
                    elif task[0] == 'exit':
                        self.logger.debug("processing task exit")
                        self.terminate()
                        return 0
                    elif task[0] == 'reload':
                        # break the inner loop so the outer loop reloads all state
                        self.logger.debug("processing task reload terminating and relaunching")
                        self.terminate()
                        break
                    elif task[0] == 'edit-iface':
                        self.logger.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
                            task[1], task[2], task[3]))
                        self.edit_iface(task[1], task[2], task[3])
                    elif task[0] == 'restore-iface':
                        self.logger.debug("processing task restore-iface={} mac={}".format(task[1], task[2]))
                        self.restore_iface(task[1], task[2])
                    elif task[0] == 'new-ovsbridge':
                        self.logger.debug("Creating compute OVS bridge")
                        self.create_ovs_bridge()
                    elif task[0] == 'new-vxlan':
                        self.logger.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task[1], task[2]))
                        self.create_ovs_vxlan_tunnel(task[1], task[2])
                    elif task[0] == 'del-ovsbridge':
                        self.logger.debug("Deleting OVS bridge")
                        self.delete_ovs_bridge()
                    elif task[0] == 'del-vxlan':
                        self.logger.debug("Deleting vxlan {} tunnel".format(task[1]))
                        self.delete_ovs_vxlan_tunnel(task[1])
                    elif task[0] == 'create-ovs-bridge-port':
                        self.logger.debug("Adding port ovim-{} to OVS bridge".format(task[1]))
                        self.create_ovs_bridge_port(task[1])
                    elif task[0] == 'del-ovs-port':
                        self.logger.debug("Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2]))
                        self.delete_bridge_port_attached_to_ovs(task[1], task[2])
                    else:
                        self.logger.debug("unknown task " + str(task))

                except Exception as e:
                    # the loop must survive any task failure
                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
436
437 def server_forceoff(self, wait_until_finished=False):
438 while len(self.pending_terminate_server)>0:
439 now = time.time()
440 if self.pending_terminate_server[0][0]>now:
441 if wait_until_finished:
442 time.sleep(1)
443 continue
444 else:
445 return
446 req={'uuid':self.pending_terminate_server[0][1],
447 'action':{'terminate':'force'},
448 'status': None
449 }
450 self.action_on_server(req)
451 self.pending_terminate_server.pop(0)
452
453 def terminate(self):
454 try:
455 self.server_forceoff(True)
456 if self.localinfo_dirty:
457 self.save_localinfo()
458 if not self.test:
459 self.ssh_conn.close()
460 except Exception as e:
461 text = str(e)
462 self.logger.error("terminate Exception: " + text)
463 self.logger.debug("exit from host_thread")
464
465 def get_local_iface_name(self, generic_name):
466 if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
467 return self.hostinfo["iface_names"][generic_name]
468 return generic_name
469
470 def create_xml_server(self, server, dev_list, server_metadata={}):
471 """Function that implements the generation of the VM XML definition.
472 Additional devices are in dev_list list
473 The main disk is upon dev_list[0]"""
474
475 #get if operating system is Windows
476 windows_os = False
477 os_type = server_metadata.get('os_type', None)
478 if os_type == None and 'metadata' in dev_list[0]:
479 os_type = dev_list[0]['metadata'].get('os_type', None)
480 if os_type != None and os_type.lower() == "windows":
481 windows_os = True
482 #get type of hard disk bus
483 bus_ide = True if windows_os else False
484 bus = server_metadata.get('bus', None)
485 if bus == None and 'metadata' in dev_list[0]:
486 bus = dev_list[0]['metadata'].get('bus', None)
487 if bus != None:
488 bus_ide = True if bus=='ide' else False
489
490 self.xml_level = 0
491
492 text = "<domain type='kvm'>"
493 #get topology
494 topo = server_metadata.get('topology', None)
495 if topo == None and 'metadata' in dev_list[0]:
496 topo = dev_list[0]['metadata'].get('topology', None)
497 #name
498 name = server.get('name', '')[:28] + "_" + server['uuid'][:28] #qemu impose a length limit of 59 chars or not start. Using 58
499 text += self.inc_tab() + "<name>" + name+ "</name>"
500 #uuid
501 text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>"
502
503 numa={}
504 if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
505 numa = server['extended']['numas'][0]
506 #memory
507 use_huge = False
508 memory = int(numa.get('memory',0))*1024*1024 #in KiB
509 if memory==0:
510 memory = int(server['ram'])*1024;
511 else:
512 if not self.develop_mode:
513 use_huge = True
514 if memory==0:
515 return -1, 'No memory assigned to instance'
516 memory = str(memory)
517 text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>"
518 text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
519 if use_huge:
520 text += self.tab()+'<memoryBacking>'+ \
521 self.inc_tab() + '<hugepages/>'+ \
522 self.dec_tab()+ '</memoryBacking>'
523
524 #cpu
525 use_cpu_pinning=False
526 vcpus = int(server.get("vcpus",0))
527 cpu_pinning = []
528 if 'cores-source' in numa:
529 use_cpu_pinning=True
530 for index in range(0, len(numa['cores-source'])):
531 cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
532 vcpus += 1
533 if 'threads-source' in numa:
534 use_cpu_pinning=True
535 for index in range(0, len(numa['threads-source'])):
536 cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
537 vcpus += 1
538 if 'paired-threads-source' in numa:
539 use_cpu_pinning=True
540 for index in range(0, len(numa['paired-threads-source'])):
541 cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
542 cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
543 vcpus += 2
544
545 if use_cpu_pinning and not self.develop_mode:
546 text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
547 self.tab()+'<cputune>'
548 self.xml_level += 1
549 for i in range(0, len(cpu_pinning)):
550 text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
551 text += self.dec_tab()+'</cputune>'+ \
552 self.tab() + '<numatune>' +\
553 self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
554 self.dec_tab() + '</numatune>'
555 else:
556 if vcpus==0:
557 return -1, "Instance without number of cpus"
558 text += self.tab()+"<vcpu>" + str(vcpus) + "</vcpu>"
559
560 #boot
561 boot_cdrom = False
562 for dev in dev_list:
563 if dev['type']=='cdrom' :
564 boot_cdrom = True
565 break
566 text += self.tab()+ '<os>' + \
567 self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
568 if boot_cdrom:
569 text += self.tab() + "<boot dev='cdrom'/>"
570 text += self.tab() + "<boot dev='hd'/>" + \
571 self.dec_tab()+'</os>'
572 #features
573 text += self.tab()+'<features>'+\
574 self.inc_tab()+'<acpi/>' +\
575 self.tab()+'<apic/>' +\
576 self.tab()+'<pae/>'+ \
577 self.dec_tab() +'</features>'
578 if topo == "oneSocket:hyperthreading":
579 if vcpus % 2 != 0:
580 return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
581 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % (vcpus/2)
582 elif windows_os or topo == "oneSocket":
583 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
584 else:
585 text += self.tab() + "<cpu mode='host-model'></cpu>"
586 text += self.tab() + "<clock offset='utc'/>" +\
587 self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
588 self.tab() + "<on_reboot>restart</on_reboot>" + \
589 self.tab() + "<on_crash>restart</on_crash>"
590 text += self.tab() + "<devices>" + \
591 self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
592 self.tab() + "<serial type='pty'>" +\
593 self.inc_tab() + "<target port='0'/>" + \
594 self.dec_tab() + "</serial>" +\
595 self.tab() + "<console type='pty'>" + \
596 self.inc_tab()+ "<target type='serial' port='0'/>" + \
597 self.dec_tab()+'</console>'
598 if windows_os:
599 text += self.tab() + "<controller type='usb' index='0'/>" + \
600 self.tab() + "<controller type='ide' index='0'/>" + \
601 self.tab() + "<input type='mouse' bus='ps2'/>" + \
602 self.tab() + "<sound model='ich6'/>" + \
603 self.tab() + "<video>" + \
604 self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
605 self.dec_tab() + "</video>" + \
606 self.tab() + "<memballoon model='virtio'/>" + \
607 self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
608
609 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
610 #> self.dec_tab()+'</hostdev>\n' +\
611 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
612 if windows_os:
613 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
614 else:
615 #If image contains 'GRAPH' include graphics
616 #if 'GRAPH' in image:
617 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
618 self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
619 self.dec_tab() + "</graphics>"
620
621 vd_index = 'a'
622 for dev in dev_list:
623 bus_ide_dev = bus_ide
624 if dev['type']=='cdrom' or dev['type']=='disk':
625 if dev['type']=='cdrom':
626 bus_ide_dev = True
627 text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
628 if 'file format' in dev:
629 text += self.inc_tab() + "<driver name='qemu' type='" +dev['file format']+ "' cache='writethrough'/>"
630 if 'source file' in dev:
631 text += self.tab() + "<source file='" +dev['source file']+ "'/>"
632 #elif v['type'] == 'block':
633 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
634 #else:
635 # return -1, 'Unknown disk type ' + v['type']
636 vpci = dev.get('vpci',None)
637 if vpci == None and 'metadata' in dev:
638 vpci = dev['metadata'].get('vpci',None)
639 text += self.pci2xml(vpci)
640
641 if bus_ide_dev:
642 text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>" #TODO allows several type of disks
643 else:
644 text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>"
645 text += self.dec_tab() + '</disk>'
646 vd_index = chr(ord(vd_index)+1)
647 elif dev['type']=='xml':
648 dev_text = dev['xml']
649 if 'vpci' in dev:
650 dev_text = dev_text.replace('__vpci__', dev['vpci'])
651 if 'source file' in dev:
652 dev_text = dev_text.replace('__file__', dev['source file'])
653 if 'file format' in dev:
654 dev_text = dev_text.replace('__format__', dev['source file'])
655 if '__dev__' in dev_text:
656 dev_text = dev_text.replace('__dev__', vd_index)
657 vd_index = chr(ord(vd_index)+1)
658 text += dev_text
659 else:
660 return -1, 'Unknown device type ' + dev['type']
661
662 net_nb=0
663 bridge_interfaces = server.get('networks', [])
664 for v in bridge_interfaces:
665 #Get the brifge name
666 self.db_lock.acquire()
667 result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
668 self.db_lock.release()
669 if result <= 0:
670 self.logger.error("create_xml_server ERROR %d getting nets %s", result, content)
671 return -1, content
672 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
673 #I know it is not secure
674 #for v in sorted(desc['network interfaces'].itervalues()):
675 model = v.get("model", None)
676 if content[0]['provider']=='default':
677 text += self.tab() + "<interface type='network'>" + \
678 self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
679 elif content[0]['provider'][0:7]=='macvtap':
680 text += self.tab()+"<interface type='direct'>" + \
681 self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
682 self.tab() + "<target dev='macvtap0'/>"
683 if windows_os:
684 text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
685 elif model==None:
686 model = "virtio"
687 elif content[0]['provider'][0:6]=='bridge':
688 text += self.tab() + "<interface type='bridge'>" + \
689 self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
690 if windows_os:
691 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
692 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
693 elif model==None:
694 model = "virtio"
695 elif content[0]['provider'][0:3] == "OVS":
696 vlan = content[0]['provider'].replace('OVS:', '')
697 text += self.tab() + "<interface type='bridge'>" + \
698 self.inc_tab() + "<source bridge='ovim-" + str(vlan) + "'/>"
699 else:
700 return -1, 'Unknown Bridge net provider ' + content[0]['provider']
701 if model!=None:
702 text += self.tab() + "<model type='" +model+ "'/>"
703 if v.get('mac_address', None) != None:
704 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
705 text += self.pci2xml(v.get('vpci',None))
706 text += self.dec_tab()+'</interface>'
707
708 net_nb += 1
709
710 interfaces = numa.get('interfaces', [])
711
712 net_nb=0
713 for v in interfaces:
714 if self.develop_mode: #map these interfaces to bridges
715 text += self.tab() + "<interface type='bridge'>" + \
716 self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
717 if windows_os:
718 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
719 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
720 else:
721 text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
722 if v.get('mac_address', None) != None:
723 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
724 text += self.pci2xml(v.get('vpci',None))
725 text += self.dec_tab()+'</interface>'
726 continue
727
728 if v['dedicated'] == 'yes': #passthrought
729 text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
730 self.inc_tab() + "<source>"
731 self.inc_tab()
732 text += self.pci2xml(v['source'])
733 text += self.dec_tab()+'</source>'
734 text += self.pci2xml(v.get('vpci',None))
735 if windows_os:
736 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
737 text += self.dec_tab()+'</hostdev>'
738 net_nb += 1
739 else: #sriov_interfaces
740 #skip not connected interfaces
741 if v.get("net_id") == None:
742 continue
743 text += self.tab() + "<interface type='hostdev' managed='yes'>"
744 self.inc_tab()
745 if v.get('mac_address', None) != None:
746 text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
747 text+= self.tab()+'<source>'
748 self.inc_tab()
749 text += self.pci2xml(v['source'])
750 text += self.dec_tab()+'</source>'
751 if v.get('vlan',None) != None:
752 text += self.tab() + "<vlan> <tag id='" + str(v['vlan']) + "'/> </vlan>"
753 text += self.pci2xml(v.get('vpci',None))
754 if windows_os:
755 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
756 text += self.dec_tab()+'</interface>'
757
758
759 text += self.dec_tab()+'</devices>'+\
760 self.dec_tab()+'</domain>'
761 return 0, text
762
763 def pci2xml(self, pci):
764 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
765 alows an empty pci text'''
766 if pci is None:
767 return ""
768 first_part = pci.split(':')
769 second_part = first_part[2].split('.')
770 return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
771 "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
772 "' function='0x" + second_part[1] + "'/>"
773
774 def tab(self):
775 """Return indentation according to xml_level"""
776 return "\n" + (' '*self.xml_level)
777
778 def inc_tab(self):
779 """Increment and return indentation according to xml_level"""
780 self.xml_level += 1
781 return self.tab()
782
783 def dec_tab(self):
784 """Decrement and return indentation according to xml_level"""
785 self.xml_level -= 1
786 return self.tab()
787
788 def create_ovs_bridge(self):
789 """
790 Create a bridge in compute OVS to allocate VMs
791 :return: True if success
792 """
793 if self.test or not self.connectivity:
794 return True
795
796 try:
797 self.run_command('sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true')
798
799 return True
800 except RunCommandException as e:
801 self.logger.error("create_ovs_bridge ssh Exception: " + str(e))
802 if "SSH session not active" in str(e):
803 self.ssh_connect()
804 return False
805
806 def delete_port_to_ovs_bridge(self, vlan, net_uuid):
807 """
808 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
809 :param vlan: vlan port id
810 :param net_uuid: network id
811 :return:
812 """
813
814 if self.test or not self.connectivity:
815 return True
816 try:
817 port_name = 'ovim-{}'.format(str(vlan))
818 command = 'sudo ovs-vsctl del-port br-int {}'.format(port_name)
819 self.run_command(command)
820 return True
821 except RunCommandException as e:
822 self.logger.error("delete_port_to_ovs_bridge ssh Exception: {}".format(str(e)))
823 return False
824
825 def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
826 """
827 Delete dhcp server process lining in namespace
828 :param vlan: segmentation id
829 :param net_uuid: network uuid
830 :param dhcp_path: conf fiel path that live in namespace side
831 :return:
832 """
833 if self.test or not self.connectivity:
834 return True
835 if not self.is_dhcp_port_free(vlan, net_uuid):
836 return True
837 try:
838 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
839 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
840 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
841
842 command = 'sudo ip netns exec {} cat {}'.format(dhcp_namespace, pid_file)
843 content = self.run_command(command, ignore_exit_status=True)
844 dns_pid = content.replace('\n', '')
845 command = 'sudo ip netns exec {} kill -9 {}'.format(dhcp_namespace, dns_pid)
846 self.run_command(command, ignore_exit_status=True)
847
848 except RunCommandException as e:
849 self.logger.error("delete_dhcp_server ssh Exception: " + str(e))
850 return False
851
852 def is_dhcp_port_free(self, host_id, net_uuid):
853 """
854 Check if any port attached to the a net in a vxlan mesh across computes nodes
855 :param host_id: host id
856 :param net_uuid: network id
857 :return: True if is not free
858 """
859 self.db_lock.acquire()
860 result, content = self.db.get_table(
861 FROM='ports',
862 WHERE={'type': 'instance:ovs', 'net_id': net_uuid}
863 )
864 self.db_lock.release()
865
866 if len(content) > 0:
867 return False
868 else:
869 return True
870
871 def is_port_free(self, host_id, net_uuid):
872 """
873 Check if there not ovs ports of a network in a compute host.
874 :param host_id: host id
875 :param net_uuid: network id
876 :return: True if is not free
877 """
878
879 self.db_lock.acquire()
880 result, content = self.db.get_table(
881 FROM='ports as p join instances as i on p.instance_id=i.uuid',
882 WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
883 )
884 self.db_lock.release()
885
886 if len(content) > 0:
887 return False
888 else:
889 return True
890
891 def add_port_to_ovs_bridge(self, vlan):
892 """
893 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
894 :param vlan: vlan port id
895 :return: True if success
896 """
897
898 if self.test:
899 return True
900 try:
901 port_name = 'ovim-{}'.format(str(vlan))
902 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(port_name, str(vlan))
903 self.run_command(command)
904 return True
905 except RunCommandException as e:
906 self.logger.error("add_port_to_ovs_bridge Exception: " + str(e))
907 return False
908
909 def delete_dhcp_port(self, vlan, net_uuid, dhcp_path):
910 """
911 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
912 :param vlan: segmentation id
913 :param net_uuid: network id
914 :return: True if success
915 """
916
917 if self.test:
918 return True
919
920 if not self.is_dhcp_port_free(vlan, net_uuid):
921 return True
922 self.delete_dhcp_interfaces(vlan, dhcp_path)
923 return True
924
925 def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
926 """
927 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
928 :param vlan:
929 :param net_uuid:
930 :return: True if success
931 """
932 if self.test:
933 return
934
935 if not self.is_port_free(vlan, net_uuid):
936 return True
937 self.delete_port_to_ovs_bridge(vlan, net_uuid)
938 self.delete_linux_bridge(vlan)
939 return True
940
941 def delete_linux_bridge(self, vlan):
942 """
943 Delete a linux bridge in a scpecific compute.
944 :param vlan: vlan port id
945 :return: True if success
946 """
947
948 if self.test:
949 return True
950 try:
951 port_name = 'ovim-{}'.format(str(vlan))
952 command = 'sudo ip link set dev ovim-{} down'.format(str(vlan))
953 self.run_command(command)
954
955 command = 'sudo ip link delete {} && sudo brctl delbr {}'.format(port_name, port_name)
956 self.run_command(command)
957 return True
958 except RunCommandException as e:
959 self.logger.error("delete_linux_bridge Exception: {}".format(str(e)))
960 return False
961
962 def remove_link_bridge_to_ovs(self, vlan, link):
963 """
964 Delete a linux provider net connection to tenatn net
965 :param vlan: vlan port id
966 :param link: link name
967 :return: True if success
968 """
969
970 if self.test:
971 return True
972 try:
973 br_tap_name = '{}-vethBO'.format(str(vlan))
974 br_ovs_name = '{}-vethOB'.format(str(vlan))
975
976 # Delete ovs veth pair
977 command = 'sudo ip link set dev {} down'.format(br_ovs_name)
978 self.run_command(command, ignore_exit_status=True)
979
980 command = 'sudo ovs-vsctl del-port br-int {}'.format(br_ovs_name)
981 self.run_command(command)
982
983 # Delete br veth pair
984 command = 'sudo ip link set dev {} down'.format(br_tap_name)
985 self.run_command(command, ignore_exit_status=True)
986
987 # Delete br veth interface form bridge
988 command = 'sudo brctl delif {} {}'.format(link, br_tap_name)
989 self.run_command(command)
990
991 # Delete br veth pair
992 command = 'sudo ip link set dev {} down'.format(link)
993 self.run_command(command, ignore_exit_status=True)
994
995 return True
996 except RunCommandException as e:
997 self.logger.error("remove_link_bridge_to_ovs Exception: {}".format(str(e)))
998 return False
999
1000 def create_ovs_bridge_port(self, vlan):
1001 """
1002 Generate a linux bridge and attache the port to a OVS bridge
1003 :param vlan: vlan port id
1004 :return:
1005 """
1006 if self.test:
1007 return
1008 self.create_linux_bridge(vlan)
1009 self.add_port_to_ovs_bridge(vlan)
1010
1011 def create_linux_bridge(self, vlan):
1012 """
1013 Create a linux bridge with STP active
1014 :param vlan: netowrk vlan id
1015 :return:
1016 """
1017
1018 if self.test:
1019 return True
1020 try:
1021 port_name = 'ovim-{}'.format(str(vlan))
1022 command = 'sudo brctl show | grep {}'.format(port_name)
1023 result = self.run_command(command, ignore_exit_status=True)
1024 if not result:
1025 command = 'sudo brctl addbr {}'.format(port_name)
1026 self.run_command(command)
1027
1028 command = 'sudo brctl stp {} on'.format(port_name)
1029 self.run_command(command)
1030
1031 command = 'sudo ip link set dev {} up'.format(port_name)
1032 self.run_command(command)
1033 return True
1034 except RunCommandException as e:
1035 self.logger.error("create_linux_bridge ssh Exception: {}".format(str(e)))
1036 return False
1037
    def set_mac_dhcp_server(self, ip, mac, vlan, netmask, first_ip, dhcp_path):
        """
        Write into the dhcp hosts file a rule that assigns a fixed IP to a specific MAC.

        :param ip: IP address assigned to a VM
        :param mac: VM vnic MAC to be matched with the IP received
        :param vlan: Segmentation id
        :param netmask: netmask value (not used by this method -- TODO confirm caller expectation)
        :param first_ip: IP reserved for the interface the dhcp server listens on
        :param dhcp_path: dhcp conf file base path that lives in the namespace side
        :return: True if the bindings were written, False otherwise
        """

        if self.test:
            return True

        dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
        dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
        # NOTE(review): the namespace name is appended twice (<path>/<ns>/<ns>);
        # presumably the inner one is the hosts file name -- confirm against
        # launch_dhcp_server's --dhcp-hostsdir layout.
        dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
        ns_interface = '{}-vethDO'.format(str(vlan))

        if not ip:
            return False
        try:
            # Read the MAC of the veth dnsmasq listens on, from inside the namespace.
            command = 'sudo ip netns exec {} cat /sys/class/net/{}/address'.format(dhcp_namespace, ns_interface)
            iface_listen_mac = self.run_command(command, ignore_exit_status=True)

            # NOTE(review): string-vs-int comparison; on python2 any non-empty
            # string compares greater than 0, so this effectively tests "the
            # command printed something" -- confirm before porting to python3.
            if iface_listen_mac > 0:
                command = 'sudo ip netns exec {} cat {} | grep -i {}'.format(dhcp_namespace,
                                                                             dhcp_hostsdir,
                                                                             iface_listen_mac)
                content = self.run_command(command, ignore_exit_status=True)
                if content == '':
                    # First write: pin the listen-interface MAC to the reserved first_ip.
                    ip_data = iface_listen_mac.upper().replace('\n', '') + ',' + first_ip
                    dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)

                    command = 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace,
                                                                                           ip_data,
                                                                                           dhcp_hostsdir)
                    self.run_command(command)

                    # Then append the VM MAC -> IP binding.
                    ip_data = mac.upper() + ',' + ip
                    command = 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace,
                                                                                           ip_data,
                                                                                           dhcp_hostsdir)
                    self.run_command(command, ignore_exit_status=False)

                    return True

            # An entry already exists (or the interface MAC could not be read).
            return False
        except RunCommandException as e:
            self.logger.error("set_mac_dhcp_server ssh Exception: " + str(e))
            return False
1089
1090 def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
1091 """
1092 Delete into dhcp conf file the ip assigned to a specific MAC address
1093
1094 :param ip: IP address asigned to a VM
1095 :param mac: VM vnic mac to be macthed with the IP received
1096 :param vlan: Segmentation id
1097 :param dhcp_path: dhcp conf file path that live in namespace side
1098 :return:
1099 """
1100
1101 if self.test:
1102 return False
1103 try:
1104 dhcp_namespace = str(vlan) + '-dnsmasq'
1105 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1106 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1107
1108 if not ip:
1109 return False
1110
1111 ip_data = mac.upper() + ',' + ip
1112
1113 command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
1114 self.run_command(command)
1115
1116 return True
1117 except RunCommandException as e:
1118 self.logger.error("delete_mac_dhcp_server Exception: " + str(e))
1119 return False
1120
1121 def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway, dns_list=None, routes=None):
1122 """
1123 Generate a linux bridge and attache the port to a OVS bridge
1124 :param self:
1125 :param vlan: Segmentation id
1126 :param ip_range: IP dhcp range
1127 :param netmask: network netmask
1128 :param dhcp_path: dhcp conf file path that live in namespace side
1129 :param gateway: Gateway address for dhcp net
1130 :param dns_list: dns list for dhcp server
1131 :param routes: routes list for dhcp server
1132 :return: True if success
1133 """
1134
1135 if self.test:
1136 return True
1137 try:
1138 ns_interface = str(vlan) + '-vethDO'
1139 dhcp_namespace = str(vlan) + '-dnsmasq'
1140 dhcp_path = os.path.join(dhcp_path, dhcp_namespace, '')
1141 leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
1142 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
1143
1144 dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask
1145
1146 command = 'sudo ip netns exec {} mkdir -p {}'.format(dhcp_namespace, dhcp_path)
1147 self.run_command(command)
1148
1149 # check if dnsmasq process is running
1150 dnsmasq_is_runing = False
1151 pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
1152 command = 'sudo ip netns exec ' + dhcp_namespace + ' ls ' + pid_path
1153 content = self.run_command(command, ignore_exit_status=True)
1154
1155 # check if pid is runing
1156 if content:
1157 pid_path = content.replace('\n', '')
1158 command = "ps aux | awk '{print $2 }' | grep {}" + pid_path
1159 dnsmasq_is_runing = self.run_command(command, ignore_exit_status=True)
1160
1161 gateway_option = ' --dhcp-option=3,' + gateway
1162
1163 dhcp_route_option = ''
1164 if routes:
1165 dhcp_route_option = ' --dhcp-option=121'
1166 for key, value in routes.iteritems():
1167 if 'default' == key:
1168 gateway_option = ' --dhcp-option=3,' + value
1169 else:
1170 dhcp_route_option += ',' + key + ',' + value
1171 dns_data = ''
1172 if dns_list:
1173 dns_data = ' --dhcp-option=6'
1174 for dns in dns_list:
1175 dns_data += ',' + dns
1176
1177 if not dnsmasq_is_runing:
1178 command = 'sudo ip netns exec ' + dhcp_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
1179 '--interface=' + ns_interface + \
1180 ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
1181 ' --dhcp-range ' + dhcp_range + \
1182 ' --pid-file=' + pid_file + \
1183 ' --dhcp-leasefile=' + leases_path + \
1184 ' --listen-address ' + ip_range[0] + \
1185 gateway_option + \
1186 dhcp_route_option + \
1187 dns_data
1188
1189 self.run_command(command)
1190 return True
1191 except RunCommandException as e:
1192 self.logger.error("launch_dhcp_server ssh Exception: " + str(e))
1193 return False
1194
1195 def delete_dhcp_interfaces(self, vlan, dhcp_path):
1196 """
1197 Delete a linux dnsmasq bridge and namespace
1198 :param vlan: netowrk vlan id
1199 :param dhcp_path:
1200 :return:
1201 """
1202 if self.test:
1203 return True
1204 try:
1205 br_veth_name ='{}-vethDO'.format(str(vlan))
1206 ovs_veth_name = '{}-vethOD'.format(str(vlan))
1207 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
1208
1209 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1210 command = 'sudo ovs-vsctl del-port br-int {}'.format(ovs_veth_name)
1211 self.run_command(command, ignore_exit_status=True) # to end session
1212
1213 command = 'sudo ip link set dev {} down'.format(ovs_veth_name)
1214 self.run_command(command, ignore_exit_status=True) # to end session
1215
1216 command = 'sudo ip link delete {} '.format(ovs_veth_name)
1217 self.run_command(command, ignore_exit_status=True)
1218
1219 command = 'sudo ip netns exec {} ip link set dev {} down'.format(dhcp_namespace, br_veth_name)
1220 self.run_command(command, ignore_exit_status=True)
1221
1222 command = 'sudo rm -rf {}'.format(dhcp_path)
1223 self.run_command(command)
1224
1225 command = 'sudo ip netns del {}'.format(dhcp_namespace)
1226 self.run_command(command)
1227
1228 return True
1229 except RunCommandException as e:
1230 self.logger.error("delete_dhcp_interfaces ssh Exception: {}".format(str(e)))
1231 return False
1232
1233 def create_dhcp_interfaces(self, vlan, ip_listen_address, netmask):
1234 """
1235 Create a linux bridge with STP active
1236 :param vlan: segmentation id
1237 :param ip_listen_address: Listen Ip address for the dhcp service, the tap interface living in namesapce side
1238 :param netmask: dhcp net CIDR
1239 :return: True if success
1240 """
1241 if self.test:
1242 return True
1243 try:
1244 ovs_veth_name = '{}-vethOD'.format(str(vlan))
1245 ns_veth = '{}-vethDO'.format(str(vlan))
1246 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
1247
1248 command = 'sudo ip netns add {}'.format(dhcp_namespace)
1249 self.run_command(command)
1250
1251 command = 'sudo ip link add {} type veth peer name {}'.format(ns_veth, ovs_veth_name)
1252 self.run_command(command)
1253
1254 command = 'sudo ip link set {} netns {}'.format(ns_veth, dhcp_namespace)
1255 self.run_command(command)
1256
1257 command = 'sudo ip netns exec {} ip link set dev {} up'.format(dhcp_namespace, ns_veth)
1258 self.run_command(command)
1259
1260 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(ovs_veth_name, str(vlan))
1261 self.run_command(command, ignore_exit_status=True)
1262
1263 command = 'sudo ip link set dev {} up'.format(ovs_veth_name)
1264 self.run_command(command)
1265
1266 command = 'sudo ip netns exec {} ip link set dev lo up'.format(dhcp_namespace)
1267 self.run_command(command)
1268
1269 command = 'sudo ip netns exec {} ifconfig {} {} netmask {}'.format(dhcp_namespace,
1270 ns_veth,
1271 ip_listen_address,
1272 netmask)
1273 self.run_command(command)
1274 return True
1275 except RunCommandException as e:
1276 self.logger.error("create_dhcp_interfaces ssh Exception: {}".format(str(e)))
1277 return False
1278
    def delete_qrouter_connection(self, vlan, link):
        """
        Delete the qrouter namespace and all the veth interfaces wired to it.

        :param vlan: segmentation id used to derive interface/namespace names
        :param link: user bridge name the qrouter was attached to
        :return: True on success, False on command error
        """

        if self.test:
            return True
        try:
            ns_qouter = '{}-qrouter'.format(str(vlan))
            qrouter_ovs_veth = '{}-vethOQ'.format(str(vlan))
            qrouter_ns_veth = '{}-vethQO'.format(str(vlan))
            qrouter_br_veth = '{}-vethBQ'.format(str(vlan))
            qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))

            # Detach the OVS end from br-int (best effort).
            command = 'sudo ovs-vsctl del-port br-int {}'.format(qrouter_ovs_veth)
            self.run_command(command, ignore_exit_status=True)

            # Bring down and delete the namespace-side veth, then the namespace.
            command = 'sudo ip netns exec {} ip link set dev {} down'.format(ns_qouter, qrouter_ns_veth)
            self.run_command(command, ignore_exit_status=True)

            command = 'sudo ip netns exec {} ip link delete {} '.format(ns_qouter, qrouter_ns_veth)
            self.run_command(command, ignore_exit_status=True)

            command = 'sudo ip netns del ' + ns_qouter
            self.run_command(command)

            # Bring down the remaining veth ends in the default namespace.
            command = 'sudo ip link set dev {} down'.format(qrouter_br_veth)
            self.run_command(command, ignore_exit_status=True)

            command = 'sudo ip link set dev {} down'.format(qrouter_ovs_veth)
            self.run_command(command, ignore_exit_status=True)

            # NOTE(review): .format(link, qrouter_ovs_veth) passes two args to a
            # single '{}', so this deletes `link` and silently ignores
            # qrouter_ovs_veth -- likely a copy-paste bug; confirm intended target.
            command = 'sudo ip link delete {} '.format(link, qrouter_ovs_veth)
            self.run_command(command, ignore_exit_status=True)

            command = 'sudo ip link set dev {} down'.format(qrouter_ns_router_veth)
            self.run_command(command, ignore_exit_status=True)

            # NOTE(review): same two-args-one-placeholder pattern as above --
            # qrouter_ns_router_veth is never passed to `ip link delete`.
            command = 'sudo ip link delete {} '.format(link, qrouter_ns_router_veth)
            self.run_command(command, ignore_exit_status=True)

            # Detach the bridge-side veth from the user bridge (must succeed).
            command = 'sudo brctl delif {} {}'.format(link, qrouter_br_veth)
            self.run_command(command)

            # delete NS
            return True
        except RunCommandException as e:
            self.logger.error("delete_qrouter_connection ssh Exception: {}".format(str(e)))
            return False
1338
1339 def create_qrouter_ovs_connection(self, vlan, gateway, dhcp_cidr):
1340 """
1341 Create qrouter Namesapce with all veth interfaces need it between NS and OVS
1342 :param vlan:
1343 :param gateway:
1344 :return:
1345 """
1346
1347 if self.test:
1348 return True
1349
1350 try:
1351 ns_qouter = '{}-qrouter'.format(str(vlan))
1352 qrouter_ovs_veth ='{}-vethOQ'.format(str(vlan))
1353 qrouter_ns_veth = '{}-vethQO'.format(str(vlan))
1354
1355 # Create NS
1356 command = 'sudo ip netns add {}'.format(ns_qouter)
1357 self.run_command(command)
1358
1359 # Create pait veth
1360 command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_ns_veth, qrouter_ovs_veth)
1361 self.run_command(command, ignore_exit_status=True)
1362
1363 # up ovs veth interface
1364 command = 'sudo ip link set dev {} up'.format(qrouter_ovs_veth)
1365 self.run_command(command)
1366
1367 # add ovs veth to ovs br-int
1368 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(qrouter_ovs_veth, vlan)
1369 self.run_command(command)
1370
1371 # add veth to ns
1372 command = 'sudo ip link set {} netns {}'.format(qrouter_ns_veth, ns_qouter)
1373 self.run_command(command)
1374
1375 # up ns loopback
1376 command = 'sudo ip netns exec {} ip link set dev lo up'.format(ns_qouter)
1377 self.run_command(command)
1378
1379 # up ns veth
1380 command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_veth)
1381 self.run_command(command)
1382
1383 from netaddr import IPNetwork
1384 ip_tools = IPNetwork(dhcp_cidr)
1385 cidr_len = ip_tools.prefixlen
1386
1387 # set gw to ns veth
1388 command = 'sudo ip netns exec {} ip address add {}/{} dev {}'.format(ns_qouter, gateway, cidr_len, qrouter_ns_veth)
1389 self.run_command(command)
1390
1391 return True
1392
1393 except RunCommandException as e:
1394 self.logger.error("Create_dhcp_interfaces ssh Exception: {}".format(str(e)))
1395 return False
1396
1397 def add_ns_routes(self, vlan, routes):
1398 """
1399
1400 :param vlan:
1401 :param routes:
1402 :return:
1403 """
1404
1405 if self.test:
1406 return True
1407
1408 try:
1409 ns_qouter = '{}-qrouter'.format(str(vlan))
1410 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1411
1412 for key, value in routes.iteritems():
1413 # up ns veth
1414 if key == 'default':
1415 command = 'sudo ip netns exec {} ip route add {} via {} '.format(ns_qouter, key, value)
1416 else:
1417 command = 'sudo ip netns exec {} ip route add {} via {} dev {}'.format(ns_qouter, key, value,
1418 qrouter_ns_router_veth)
1419
1420 self.run_command(command)
1421
1422 return True
1423
1424 except RunCommandException as e:
1425 self.logger.error("add_ns_routes, error adding routes to namesapce, {}".format(str(e)))
1426 return False
1427
1428 def create_qrouter_br_connection(self, vlan, cidr, link):
1429 """
1430 Create veth interfaces between user bridge (link) and OVS
1431 :param vlan:
1432 :param link:
1433 :return:
1434 """
1435
1436 if self.test:
1437 return True
1438
1439 try:
1440 ns_qouter = '{}-qrouter'.format(str(vlan))
1441 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1442 qrouter_br_veth = '{}-vethBQ'.format(str(vlan))
1443
1444 command = 'sudo brctl show | grep {}'.format(link['iface'])
1445 content = self.run_command(command, ignore_exit_status=True)
1446
1447 if content > '':
1448 # Create pait veth
1449 command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_br_veth, qrouter_ns_router_veth)
1450 self.run_command(command)
1451
1452 # up ovs veth interface
1453 command = 'sudo ip link set dev {} up'.format(qrouter_br_veth)
1454 self.run_command(command)
1455
1456 # add veth to ns
1457 command = 'sudo ip link set {} netns {}'.format(qrouter_ns_router_veth, ns_qouter)
1458 self.run_command(command)
1459
1460 # up ns veth
1461 command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_router_veth)
1462 self.run_command(command)
1463
1464 command = 'sudo ip netns exec {} ip address add {} dev {}'.format(ns_qouter,
1465 link['nat'],
1466 qrouter_ns_router_veth)
1467 self.run_command(command)
1468
1469 # up ns veth
1470 command = 'sudo brctl addif {} {}'.format(link['iface'], qrouter_br_veth)
1471 self.run_command(command)
1472
1473 # up ns veth
1474 command = 'sudo ip netns exec {} iptables -t nat -A POSTROUTING -o {} -s {} -d {} -j MASQUERADE' \
1475 .format(ns_qouter, qrouter_ns_router_veth, link['nat'], cidr)
1476 self.run_command(command)
1477
1478 return True
1479 else:
1480
1481 self.logger.error('create_qrouter_br_connection, Bridge {} given by user not exist'.format(qrouter_br_veth))
1482 return False
1483
1484 except RunCommandException as e:
1485 self.logger.error("Error creating qrouter, {}".format(str(e)))
1486 return False
1487
1488 def create_link_bridge_to_ovs(self, vlan, link):
1489 """
1490 Create interfaces to connect a linux bridge with tenant net
1491 :param vlan: segmentation id
1492 :return: True if success
1493 """
1494 if self.test:
1495 return True
1496 try:
1497
1498 br_tap_name = '{}-vethBO'.format(str(vlan))
1499 br_ovs_name = '{}-vethOB'.format(str(vlan))
1500
1501 # is a bridge or a interface
1502 command = 'sudo brctl show | grep {}'.format(link)
1503 content = self.run_command(command, ignore_exit_status=True)
1504 if content > '':
1505 command = 'sudo ip link add {} type veth peer name {}'.format(br_tap_name, br_ovs_name)
1506 self.run_command(command)
1507
1508 command = 'sudo ip link set dev {} up'.format(br_tap_name)
1509 self.run_command(command)
1510
1511 command = 'sudo ip link set dev {} up'.format(br_ovs_name)
1512 self.run_command(command)
1513
1514 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(br_ovs_name, str(vlan))
1515 self.run_command(command)
1516
1517 command = 'sudo brctl addif ' + link + ' {}'.format(br_tap_name)
1518 self.run_command(command)
1519 return True
1520 else:
1521 self.logger.error('Link is not present, please check {}'.format(link))
1522 return False
1523
1524 except RunCommandException as e:
1525 self.logger.error("create_link_bridge_to_ovs, Error creating link to ovs, {}".format(str(e)))
1526 return False
1527
1528 def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
1529 """
1530 Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
1531 :param vxlan_interface: vlxan inteface name.
1532 :param remote_ip: tunnel endpoint remote compute ip.
1533 :return:
1534 """
1535 if self.test or not self.connectivity:
1536 return True
1537 if remote_ip == 'localhost':
1538 if self.localhost:
1539 return True # TODO: Cannot create a vxlan between localhost and localhost
1540 remote_ip = self.local_ip
1541 try:
1542
1543 command = 'sudo ovs-vsctl add-port br-int {} -- set Interface {} type=vxlan options:remote_ip={} ' \
1544 '-- set Port {} other_config:stp-path-cost=10'.format(vxlan_interface,
1545 vxlan_interface,
1546 remote_ip,
1547 vxlan_interface)
1548 self.run_command(command)
1549 return True
1550 except RunCommandException as e:
1551 self.logger.error("create_ovs_vxlan_tunnel, error creating vxlan tunnel, {}".format(str(e)))
1552 return False
1553
1554 def delete_ovs_vxlan_tunnel(self, vxlan_interface):
1555 """
1556 Delete a vlxan tunnel port from a OVS brdige.
1557 :param vxlan_interface: vlxan name to be delete it.
1558 :return: True if success.
1559 """
1560 if self.test or not self.connectivity:
1561 return True
1562 try:
1563 command = 'sudo ovs-vsctl del-port br-int {}'.format(vxlan_interface)
1564 self.run_command(command)
1565 return True
1566 except RunCommandException as e:
1567 self.logger.error("delete_ovs_vxlan_tunnel, error deleting vxlan tunenl, {}".format(str(e)))
1568 return False
1569
1570 def delete_ovs_bridge(self):
1571 """
1572 Delete a OVS bridge from a compute.
1573 :return: True if success
1574 """
1575 if self.test or not self.connectivity:
1576 return True
1577 try:
1578 command = 'sudo ovs-vsctl del-br br-int'
1579 self.run_command(command)
1580 return True
1581 except RunCommandException as e:
1582 self.logger.error("delete_ovs_bridge ssh Exception: {}".format(str(e)))
1583 return False
1584
1585 def get_file_info(self, path):
1586 command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1587 try:
1588 content = self.run_command(command)
1589 return content.split(" ") # (permission, 1, owner, group, size, date, file)
1590 except RunCommandException as e:
1591 return None # file does not exist
1592
1593 def qemu_get_info(self, path):
1594 command = 'qemu-img info ' + path
1595 content = self.run_command(command)
1596 try:
1597 return yaml.load(content)
1598 except yaml.YAMLError as exc:
1599 text = ""
1600 if hasattr(exc, 'problem_mark'):
1601 mark = exc.problem_mark
1602 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
1603 self.logger.error("get_qemu_info yaml format Exception " + text)
1604 raise RunCommandException("Error getting qemu_info yaml format" + text)
1605
1606 def qemu_change_backing(self, inc_file, new_backing_file):
1607 command = 'qemu-img rebase -u -b {} {}'.format(new_backing_file, inc_file)
1608 try:
1609 self.run_command(command)
1610 return 0
1611 except RunCommandException as e:
1612 self.logger.error("qemu_change_backing error: " + str(e))
1613 return -1
1614
1615 def qemu_create_empty_disk(self, dev):
1616
1617 if not dev and 'source' not in dev and 'file format' not in dev and 'image_size' not in dev:
1618 self.logger.error("qemu_create_empty_disk error: missing image parameter")
1619 return -1
1620
1621 empty_disk_path = dev['source file']
1622
1623 command = 'qemu-img create -f qcow2 {} {}G'.format(empty_disk_path, dev['image_size'])
1624 try:
1625 self.run_command(command)
1626 return 0
1627 except RunCommandException as e:
1628 self.logger.error("qemu_create_empty_disk error: " + str(e))
1629 return -1
1630
1631 def get_notused_filename(self, proposed_name, suffix=''):
1632 '''Look for a non existing file_name in the host
1633 proposed_name: proposed file name, includes path
1634 suffix: suffix to be added to the name, before the extention
1635 '''
1636 extension = proposed_name.rfind(".")
1637 slash = proposed_name.rfind("/")
1638 if extension < 0 or extension < slash: # no extension
1639 extension = len(proposed_name)
1640 target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
1641 info = self.get_file_info(target_name)
1642 if info is None:
1643 return target_name
1644
1645 index=0
1646 while info is not None:
1647 target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
1648 index+=1
1649 info = self.get_file_info(target_name)
1650 return target_name
1651
1652 def get_notused_path(self, proposed_path, suffix=''):
1653 '''Look for a non existing path at database for images
1654 proposed_path: proposed file name, includes path
1655 suffix: suffix to be added to the name, before the extention
1656 '''
1657 extension = proposed_path.rfind(".")
1658 if extension < 0:
1659 extension = len(proposed_path)
1660 if suffix != None:
1661 target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
1662 index=0
1663 while True:
1664 r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
1665 if r<=0:
1666 return target_path
1667 target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
1668 index+=1
1669
1670
1671 def delete_file(self, file_name):
1672 command = 'rm -f ' + file_name
1673 self.run_command(command)
1674
1675 def copy_file(self, source, destination, perserve_time=True):
1676 if source[0:4]=="http":
1677 command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1678 dst=destination, src=source, dst_result=destination + ".result" )
1679 else:
1680 command = 'cp --no-preserve=mode'
1681 if perserve_time:
1682 command += ' --preserve=timestamps'
1683 command += " '{}' '{}'".format(source, destination)
1684 self.run_command(command)
1685
    def copy_remote_file(self, remote_file, use_incremental):
        ''' Copy a file from the repository to the local folder and recursively
        copy the backing files in case the remote file is incremental.
        Reads and/or modifies self.localinfo['files'], which maps remote paths to
        the unmodified local copies of images in the local path.
        params:
            remote_file: path of remote file
            use_incremental: None (leave the decision to this function), True, False
        return:
            local_file: name of local file
            qemu_info: dict with qemu information of local file
            use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
        '''

        use_incremental_out = use_incremental
        new_backing_file = None
        local_file = None
        file_from_local = True

        #in case incremental use is not decided, take the decision depending on the image
        #avoid the use of incremental if this image is already incremental
        # http sources cannot be inspected with qemu-img, treat them as opaque
        if remote_file[0:4] == "http":
            file_from_local = False
        if file_from_local:
            qemu_remote_info = self.qemu_get_info(remote_file)
        if use_incremental_out==None:
            use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)
        #copy recursivelly the backing files
        if file_from_local and 'backing file' in qemu_remote_info:
            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

        #check if remote file is present locally
        if use_incremental_out and remote_file in self.localinfo['files']:
            local_file = self.localinfo['files'][remote_file]
            local_file_info = self.get_file_info(local_file)
            if file_from_local:
                remote_file_info = self.get_file_info(remote_file)
            if local_file_info == None:
                # cached entry points at a file that no longer exists
                local_file = None
            elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
                #local copy of file not valid because date or size are different.
                #TODO DELETE local file if this file is not used by any active virtual machine
                try:
                    self.delete_file(local_file)
                    del self.localinfo['files'][remote_file]
                except Exception:
                    pass
                local_file = None
            else: #check that the local file has the same backing file, or there are not backing at all
                qemu_info = self.qemu_get_info(local_file)
                if new_backing_file != qemu_info.get('backing file'):
                    local_file = None


        if local_file == None: #copy the file
            img_name= remote_file.split('/') [-1]
            img_local = self.image_path + '/' + img_name
            local_file = self.get_notused_filename(img_local)
            self.copy_file(remote_file, local_file, use_incremental_out)

            if use_incremental_out:
                # remember the unmodified copy so the next call can reuse it
                self.localinfo['files'][remote_file] = local_file
            if new_backing_file:
                self.qemu_change_backing(local_file, new_backing_file)
            qemu_info = self.qemu_get_info(local_file)

        return local_file, qemu_info, use_incremental_out
1753
1754 def launch_server(self, conn, server, rebuild=False, domain=None):
1755 if self.test:
1756 time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real
1757 return 0, 'Success'
1758
1759 server_id = server['uuid']
1760 paused = server.get('paused','no')
1761 try:
1762 if domain!=None and rebuild==False:
1763 domain.resume()
1764 #self.server_status[server_id] = 'ACTIVE'
1765 return 0, 'Success'
1766
1767 self.db_lock.acquire()
1768 result, server_data = self.db.get_instance(server_id)
1769 self.db_lock.release()
1770 if result <= 0:
1771 self.logger.error("launch_server ERROR getting server from DB %d %s", result, server_data)
1772 return result, server_data
1773
1774 #0: get image metadata
1775 server_metadata = server.get('metadata', {})
1776 use_incremental = None
1777
1778 if "use_incremental" in server_metadata:
1779 use_incremental = False if server_metadata["use_incremental"] == "no" else True
1780
1781 server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
1782 if rebuild:
1783 #delete previous incremental files
1784 for file_ in server_host_files.values():
1785 self.delete_file(file_['source file'] )
1786 server_host_files={}
1787
1788 #1: obtain aditional devices (disks)
1789 #Put as first device the main disk
1790 devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
1791 if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
1792 devices += server_data['extended']['devices']
1793 empty_path = None
1794 for dev in devices:
1795 image_id = dev.get('image_id')
1796 if not image_id:
1797 import uuid
1798 uuid_empty = str(uuid.uuid4())
1799 empty_path = self.empty_image_path + uuid_empty + '.qcow2' # local path for empty disk
1800
1801 dev['source file'] = empty_path
1802 dev['file format'] = 'qcow2'
1803 self.qemu_create_empty_disk(dev)
1804 server_host_files[uuid_empty] = {'source file': empty_path,
1805 'file format': dev['file format']}
1806
1807 continue
1808 else:
1809 self.db_lock.acquire()
1810 result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
1811 WHERE={'uuid': image_id})
1812 self.db_lock.release()
1813 if result <= 0:
1814 error_text = "ERROR", result, content, "when getting image", dev['image_id']
1815 self.logger.error("launch_server " + error_text)
1816 return -1, error_text
1817 if content[0]['metadata'] is not None:
1818 dev['metadata'] = json.loads(content[0]['metadata'])
1819 else:
1820 dev['metadata'] = {}
1821
1822 if image_id in server_host_files:
1823 dev['source file'] = server_host_files[image_id]['source file'] #local path
1824 dev['file format'] = server_host_files[image_id]['file format'] # raw or qcow2
1825 continue
1826
1827 #2: copy image to host
1828 if image_id:
1829 remote_file = content[0]['path']
1830 else:
1831 remote_file = empty_path
1832 use_incremental_image = use_incremental
1833 if dev['metadata'].get("use_incremental") == "no":
1834 use_incremental_image = False
1835 local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)
1836
1837 #create incremental image
1838 if use_incremental_image:
1839 local_file_inc = self.get_notused_filename(local_file, '.inc')
1840 command = 'qemu-img create -f qcow2 {} -o backing_file={}'.format(local_file_inc, local_file)
1841 self.run_command(command)
1842 local_file = local_file_inc
1843 qemu_info = {'file format': 'qcow2'}
1844
1845 server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}
1846
1847 dev['source file'] = local_file
1848 dev['file format'] = qemu_info['file format']
1849
1850 self.localinfo['server_files'][ server['uuid'] ] = server_host_files
1851 self.localinfo_dirty = True
1852
1853 #3 Create XML
1854 result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
1855 if result <0:
1856 self.logger.error("create xml server error: " + xml)
1857 return -2, xml
1858 self.logger.debug("create xml: " + xml)
1859 atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
1860 #4 Start the domain
1861 if not rebuild: #ensures that any pending destroying server is done
1862 self.server_forceoff(True)
1863 #self.logger.debug("launching instance " + xml)
1864 conn.createXML(xml, atribute)
1865 #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
1866
1867 return 0, 'Success'
1868
1869 except paramiko.ssh_exception.SSHException as e:
1870 text = e.args[0]
1871 self.logger.error("launch_server id='%s' ssh Exception: %s", server_id, text)
1872 if "SSH session not active" in text:
1873 self.ssh_connect()
1874 except host_thread.lvirt_module.libvirtError as e:
1875 text = e.get_error_message()
1876 self.logger.error("launch_server id='%s' libvirt Exception: %s", server_id, text)
1877 except Exception as e:
1878 text = str(e)
1879 self.logger.error("launch_server id='%s' Exception: %s", server_id, text)
1880 return -1, text
1881
1882 def update_servers_status(self):
1883 # # virDomainState
1884 # VIR_DOMAIN_NOSTATE = 0
1885 # VIR_DOMAIN_RUNNING = 1
1886 # VIR_DOMAIN_BLOCKED = 2
1887 # VIR_DOMAIN_PAUSED = 3
1888 # VIR_DOMAIN_SHUTDOWN = 4
1889 # VIR_DOMAIN_SHUTOFF = 5
1890 # VIR_DOMAIN_CRASHED = 6
1891 # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended
1892
1893 if self.test or len(self.server_status)==0:
1894 return
1895
1896 try:
1897 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
1898 domains= conn.listAllDomains()
1899 domain_dict={}
1900 for domain in domains:
1901 uuid = domain.UUIDString() ;
1902 libvirt_status = domain.state()
1903 #print libvirt_status
1904 if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
1905 new_status = "ACTIVE"
1906 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
1907 new_status = "PAUSED"
1908 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
1909 new_status = "INACTIVE"
1910 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
1911 new_status = "ERROR"
1912 else:
1913 new_status = None
1914 domain_dict[uuid] = new_status
1915 conn.close()
1916 except host_thread.lvirt_module.libvirtError as e:
1917 self.logger.error("get_state() Exception " + e.get_error_message())
1918 return
1919
1920 for server_id, current_status in self.server_status.iteritems():
1921 new_status = None
1922 if server_id in domain_dict:
1923 new_status = domain_dict[server_id]
1924 else:
1925 new_status = "INACTIVE"
1926
1927 if new_status == None or new_status == current_status:
1928 continue
1929 if new_status == 'INACTIVE' and current_status == 'ERROR':
1930 continue #keep ERROR status, because obviously this machine is not running
1931 #change status
1932 self.logger.debug("server id='%s' status change from '%s' to '%s'", server_id, current_status, new_status)
1933 STATUS={'progress':100, 'status':new_status}
1934 if new_status == 'ERROR':
1935 STATUS['last_error'] = 'machine has crashed'
1936 self.db_lock.acquire()
1937 r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
1938 self.db_lock.release()
1939 if r>=0:
1940 self.server_status[server_id] = new_status
1941
1942 def action_on_server(self, req, last_retry=True):
1943 '''Perform an action on a req
1944 Attributes:
1945 req: dictionary that contain:
1946 server properties: 'uuid','name','tenant_id','status'
1947 action: 'action'
1948 host properties: 'user', 'ip_name'
1949 return (error, text)
1950 0: No error. VM is updated to new state,
1951 -1: Invalid action, as trying to pause a PAUSED VM
1952 -2: Error accessing host
1953 -3: VM nor present
1954 -4: Error at DB access
1955 -5: Error while trying to perform action. VM is updated to ERROR
1956 '''
1957 server_id = req['uuid']
1958 conn = None
1959 new_status = None
1960 old_status = req['status']
1961 last_error = None
1962
1963 if self.test:
1964 if 'terminate' in req['action']:
1965 new_status = 'deleted'
1966 elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
1967 if req['status']!='ERROR':
1968 time.sleep(5)
1969 new_status = 'INACTIVE'
1970 elif 'start' in req['action'] and req['status']!='ERROR':
1971 new_status = 'ACTIVE'
1972 elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE':
1973 new_status = 'ACTIVE'
1974 elif 'pause' in req['action'] and req['status']!='ERROR':
1975 new_status = 'PAUSED'
1976 elif 'reboot' in req['action'] and req['status']!='ERROR':
1977 new_status = 'ACTIVE'
1978 elif 'rebuild' in req['action']:
1979 time.sleep(random.randint(20,150))
1980 new_status = 'ACTIVE'
1981 elif 'createImage' in req['action']:
1982 time.sleep(5)
1983 self.create_image(None, req)
1984 else:
1985 try:
1986 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
1987 try:
1988 dom = conn.lookupByUUIDString(server_id)
1989 except host_thread.lvirt_module.libvirtError as e:
1990 text = e.get_error_message()
1991 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
1992 dom = None
1993 else:
1994 self.logger.error("action_on_server id='%s' libvirt exception: %s", server_id, text)
1995 raise e
1996
1997 if 'forceOff' in req['action']:
1998 if dom == None:
1999 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2000 else:
2001 try:
2002 self.logger.debug("sending DESTROY to server id='%s'", server_id)
2003 dom.destroy()
2004 except Exception as e:
2005 if "domain is not running" not in e.get_error_message():
2006 self.logger.error("action_on_server id='%s' Exception while sending force off: %s",
2007 server_id, e.get_error_message())
2008 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
2009 new_status = 'ERROR'
2010
2011 elif 'terminate' in req['action']:
2012 if dom == None:
2013 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2014 new_status = 'deleted'
2015 else:
2016 try:
2017 if req['action']['terminate'] == 'force':
2018 self.logger.debug("sending DESTROY to server id='%s'", server_id)
2019 dom.destroy()
2020 new_status = 'deleted'
2021 else:
2022 self.logger.debug("sending SHUTDOWN to server id='%s'", server_id)
2023 dom.shutdown()
2024 self.pending_terminate_server.append( (time.time()+10,server_id) )
2025 except Exception as e:
2026 self.logger.error("action_on_server id='%s' Exception while destroy: %s",
2027 server_id, e.get_error_message())
2028 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
2029 new_status = 'ERROR'
2030 if "domain is not running" in e.get_error_message():
2031 try:
2032 dom.undefine()
2033 new_status = 'deleted'
2034 except Exception:
2035 self.logger.error("action_on_server id='%s' Exception while undefine: %s",
2036 server_id, e.get_error_message())
2037 last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
2038 #Exception: 'virDomainDetachDevice() failed'
2039 if new_status=='deleted':
2040 if server_id in self.server_status:
2041 del self.server_status[server_id]
2042 if req['uuid'] in self.localinfo['server_files']:
2043 for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
2044 try:
2045 self.delete_file(file_['source file'])
2046 except Exception:
2047 pass
2048 del self.localinfo['server_files'][ req['uuid'] ]
2049 self.localinfo_dirty = True
2050
2051 elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
2052 try:
2053 if dom == None:
2054 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2055 else:
2056 dom.shutdown()
2057 # new_status = 'INACTIVE'
2058 #TODO: check status for changing at database
2059 except Exception as e:
2060 new_status = 'ERROR'
2061 self.logger.error("action_on_server id='%s' Exception while shutdown: %s",
2062 server_id, e.get_error_message())
2063 last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()
2064
2065 elif 'rebuild' in req['action']:
2066 if dom != None:
2067 dom.destroy()
2068 r = self.launch_server(conn, req, True, None)
2069 if r[0] <0:
2070 new_status = 'ERROR'
2071 last_error = r[1]
2072 else:
2073 new_status = 'ACTIVE'
2074 elif 'start' in req['action']:
2075 # The instance is only create in DB but not yet at libvirt domain, needs to be create
2076 rebuild = True if req['action']['start'] == 'rebuild' else False
2077 r = self.launch_server(conn, req, rebuild, dom)
2078 if r[0] <0:
2079 new_status = 'ERROR'
2080 last_error = r[1]
2081 else:
2082 new_status = 'ACTIVE'
2083
2084 elif 'resume' in req['action']:
2085 try:
2086 if dom == None:
2087 pass
2088 else:
2089 dom.resume()
2090 # new_status = 'ACTIVE'
2091 except Exception as e:
2092 self.logger.error("action_on_server id='%s' Exception while resume: %s",
2093 server_id, e.get_error_message())
2094
2095 elif 'pause' in req['action']:
2096 try:
2097 if dom == None:
2098 pass
2099 else:
2100 dom.suspend()
2101 # new_status = 'PAUSED'
2102 except Exception as e:
2103 self.logger.error("action_on_server id='%s' Exception while pause: %s",
2104 server_id, e.get_error_message())
2105
2106 elif 'reboot' in req['action']:
2107 try:
2108 if dom == None:
2109 pass
2110 else:
2111 dom.reboot()
2112 self.logger.debug("action_on_server id='%s' reboot:", server_id)
2113 #new_status = 'ACTIVE'
2114 except Exception as e:
2115 self.logger.error("action_on_server id='%s' Exception while reboot: %s",
2116 server_id, e.get_error_message())
2117 elif 'createImage' in req['action']:
2118 self.create_image(dom, req)
2119
2120
2121 conn.close()
2122 except host_thread.lvirt_module.libvirtError as e:
2123 if conn is not None: conn.close()
2124 text = e.get_error_message()
2125 new_status = "ERROR"
2126 last_error = text
2127 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
2128 self.logger.debug("action_on_server id='%s' Exception removed from host", server_id)
2129 else:
2130 self.logger.error("action_on_server id='%s' Exception %s", server_id, text)
2131 #end of if self.test
2132 if new_status == None:
2133 return 1
2134
2135 self.logger.debug("action_on_server id='%s' new status=%s %s",server_id, new_status, last_error)
2136 UPDATE = {'progress':100, 'status':new_status}
2137
2138 if new_status=='ERROR':
2139 if not last_retry: #if there will be another retry do not update database
2140 return -1
2141 elif 'terminate' in req['action']:
2142 #PUT a log in the database
2143 self.logger.error("PANIC deleting server id='%s' %s", server_id, last_error)
2144 self.db_lock.acquire()
2145 self.db.new_row('logs',
2146 {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
2147 'description':'PANIC deleting server from host '+self.name+': '+last_error}
2148 )
2149 self.db_lock.release()
2150 if server_id in self.server_status:
2151 del self.server_status[server_id]
2152 return -1
2153 else:
2154 UPDATE['last_error'] = last_error
2155 if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
2156 self.db_lock.acquire()
2157 self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
2158 self.server_status[server_id] = new_status
2159 self.db_lock.release()
2160 if new_status == 'ERROR':
2161 return -1
2162 return 1
2163
2164
2165 def restore_iface(self, name, mac, lib_conn=None):
2166 ''' make an ifdown, ifup to restore default parameter of na interface
2167 Params:
2168 mac: mac address of the interface
2169 lib_conn: connection to the libvirt, if None a new connection is created
2170 Return 0,None if ok, -1,text if fails
2171 '''
2172 conn=None
2173 ret = 0
2174 error_text=None
2175 if self.test:
2176 self.logger.debug("restore_iface '%s' %s", name, mac)
2177 return 0, None
2178 try:
2179 if not lib_conn:
2180 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2181 else:
2182 conn = lib_conn
2183
2184 #wait to the pending VM deletion
2185 #TODO.Revise self.server_forceoff(True)
2186
2187 iface = conn.interfaceLookupByMACString(mac)
2188 if iface.isActive():
2189 iface.destroy()
2190 iface.create()
2191 self.logger.debug("restore_iface '%s' %s", name, mac)
2192 except host_thread.lvirt_module.libvirtError as e:
2193 error_text = e.get_error_message()
2194 self.logger.error("restore_iface '%s' '%s' libvirt exception: %s", name, mac, error_text)
2195 ret=-1
2196 finally:
2197 if lib_conn is None and conn is not None:
2198 conn.close()
2199 return ret, error_text
2200
2201
2202 def create_image(self,dom, req):
2203 if self.test:
2204 if 'path' in req['action']['createImage']:
2205 file_dst = req['action']['createImage']['path']
2206 else:
2207 createImage=req['action']['createImage']
2208 img_name= createImage['source']['path']
2209 index=img_name.rfind('/')
2210 file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
2211 image_status='ACTIVE'
2212 else:
2213 for retry in (0,1):
2214 try:
2215 server_id = req['uuid']
2216 createImage=req['action']['createImage']
2217 file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
2218 if 'path' in req['action']['createImage']:
2219 file_dst = req['action']['createImage']['path']
2220 else:
2221 img_name= createImage['source']['path']
2222 index=img_name.rfind('/')
2223 file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')
2224
2225 self.copy_file(file_orig, file_dst)
2226 qemu_info = self.qemu_get_info(file_orig)
2227 if 'backing file' in qemu_info:
2228 for k,v in self.localinfo['files'].items():
2229 if v==qemu_info['backing file']:
2230 self.qemu_change_backing(file_dst, k)
2231 break
2232 image_status='ACTIVE'
2233 break
2234 except paramiko.ssh_exception.SSHException as e:
2235 image_status='ERROR'
2236 error_text = e.args[0]
2237 self.logger.error("create_image id='%s' ssh Exception: %s", server_id, error_text)
2238 if "SSH session not active" in error_text and retry==0:
2239 self.ssh_connect()
2240 except Exception as e:
2241 image_status='ERROR'
2242 error_text = str(e)
2243 self.logger.error("create_image id='%s' Exception: %s", server_id, error_text)
2244
2245 #TODO insert a last_error at database
2246 self.db_lock.acquire()
2247 self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
2248 {'uuid':req['new_image']['uuid']}, log=True)
2249 self.db_lock.release()
2250
2251 def edit_iface(self, port_id, old_net, new_net):
2252 #This action imply remove and insert interface to put proper parameters
2253 if self.test:
2254 time.sleep(1)
2255 else:
2256 #get iface details
2257 self.db_lock.acquire()
2258 r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
2259 WHERE={'port_id': port_id})
2260 self.db_lock.release()
2261 if r<0:
2262 self.logger.error("edit_iface %s DDBB error: %s", port_id, c)
2263 return
2264 elif r==0:
2265 self.logger.error("edit_iface %s port not found", port_id)
2266 return
2267 port=c[0]
2268 if port["model"]!="VF":
2269 self.logger.error("edit_iface %s ERROR model must be VF", port_id)
2270 return
2271 #create xml detach file
2272 xml=[]
2273 self.xml_level = 2
2274 xml.append("<interface type='hostdev' managed='yes'>")
2275 xml.append(" <mac address='" +port['mac']+ "'/>")
2276 xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
2277 xml.append('</interface>')
2278
2279
2280 try:
2281 conn=None
2282 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2283 dom = conn.lookupByUUIDString(port["instance_id"])
2284 if old_net:
2285 text="\n".join(xml)
2286 self.logger.debug("edit_iface detaching SRIOV interface " + text)
2287 dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
2288 if new_net:
2289 xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
2290 self.xml_level = 1
2291 xml.append(self.pci2xml(port.get('vpci',None)) )
2292 xml.append('</interface>')
2293 text="\n".join(xml)
2294 self.logger.debug("edit_iface attaching SRIOV interface " + text)
2295 dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
2296
2297 except host_thread.lvirt_module.libvirtError as e:
2298 text = e.get_error_message()
2299 self.logger.error("edit_iface %s libvirt exception: %s", port["instance_id"], text)
2300
2301 finally:
2302 if conn is not None: conn.close()
2303
2304
2305 def create_server(server, db, db_lock, only_of_ports):
2306 extended = server.get('extended', None)
2307 requirements={}
2308 requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
2309 requirements['ram'] = server['flavor'].get('ram', 0)
2310 if requirements['ram']== None:
2311 requirements['ram'] = 0
2312 requirements['vcpus'] = server['flavor'].get('vcpus', 0)
2313 if requirements['vcpus']== None:
2314 requirements['vcpus'] = 0
2315 #If extended is not defined get requirements from flavor
2316 if extended is None:
2317 #If extended is defined in flavor convert to dictionary and use it
2318 if 'extended' in server['flavor'] and server['flavor']['extended'] != None:
2319 json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
2320 extended = json.loads(json_acceptable_string)
2321 else:
2322 extended = None
2323 #print json.dumps(extended, indent=4)
2324
2325 #For simplicity only one numa VM are supported in the initial implementation
2326 if extended != None:
2327 numas = extended.get('numas', [])
2328 if len(numas)>1:
2329 return (-2, "Multi-NUMA VMs are not supported yet")
2330 #elif len(numas)<1:
2331 # return (-1, "At least one numa must be specified")
2332
2333 #a for loop is used in order to be ready to multi-NUMA VMs
2334 request = []
2335 for numa in numas:
2336 numa_req = {}
2337 numa_req['memory'] = numa.get('memory', 0)
2338 if 'cores' in numa:
2339 numa_req['proc_req_nb'] = numa['cores'] #number of cores or threads to be reserved
2340 numa_req['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
2341 numa_req['proc_req_list'] = numa.get('cores-id', None) #list of ids to be assigned to the cores or threads
2342 elif 'paired-threads' in numa:
2343 numa_req['proc_req_nb'] = numa['paired-threads']
2344 numa_req['proc_req_type'] = 'paired-threads'
2345 numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
2346 elif 'threads' in numa:
2347 numa_req['proc_req_nb'] = numa['threads']
2348 numa_req['proc_req_type'] = 'threads'
2349 numa_req['proc_req_list'] = numa.get('threads-id', None)
2350 else:
2351 numa_req['proc_req_nb'] = 0 # by default
2352 numa_req['proc_req_type'] = 'threads'
2353
2354
2355
2356 #Generate a list of sriov and another for physical interfaces
2357 interfaces = numa.get('interfaces', [])
2358 sriov_list = []
2359 port_list = []
2360 for iface in interfaces:
2361 iface['bandwidth'] = int(iface['bandwidth'])
2362 if iface['dedicated'][:3]=='yes':
2363 port_list.append(iface)
2364 else:
2365 sriov_list.append(iface)
2366
2367 #Save lists ordered from more restrictive to less bw requirements
2368 numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
2369 numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)
2370
2371
2372 request.append(numa_req)
2373
2374 # print "----------\n"+json.dumps(request[0], indent=4)
2375 # print '----------\n\n'
2376
2377 #Search in db for an appropriate numa for each requested numa
2378 #at the moment multi-NUMA VMs are not supported
2379 if len(request)>0:
2380 requirements['numa'].update(request[0])
2381 if requirements['numa']['memory']>0:
2382 requirements['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2383 elif requirements['ram']==0:
2384 return (-1, "Memory information not set neither at extended field not at ram")
2385 if requirements['numa']['proc_req_nb']>0:
2386 requirements['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2387 elif requirements['vcpus']==0:
2388 return (-1, "Processor information not set neither at extended field not at vcpus")
2389
2390
2391 db_lock.acquire()
2392 result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
2393 db_lock.release()
2394
2395 if result == -1:
2396 return (-1, content)
2397
2398 numa_id = content['numa_id']
2399 host_id = content['host_id']
2400
2401 #obtain threads_id and calculate pinning
2402 cpu_pinning = []
2403 reserved_threads=[]
2404 if requirements['numa']['proc_req_nb']>0:
2405 db_lock.acquire()
2406 result, content = db.get_table(FROM='resources_core',
2407 SELECT=('id','core_id','thread_id'),
2408 WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} )
2409 db_lock.release()
2410 if result <= 0:
2411 #print content
2412 return -1, content
2413
2414 #convert rows to a dictionary indexed by core_id
2415 cores_dict = {}
2416 for row in content:
2417 if not row['core_id'] in cores_dict:
2418 cores_dict[row['core_id']] = []
2419 cores_dict[row['core_id']].append([row['thread_id'],row['id']])
2420
2421 #In case full cores are requested
2422 paired = 'N'
2423 if requirements['numa']['proc_req_type'] == 'cores':
2424 #Get/create the list of the vcpu_ids
2425 vcpu_id_list = requirements['numa']['proc_req_list']
2426 if vcpu_id_list == None:
2427 vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
2428
2429 for threads in cores_dict.itervalues():
2430 #we need full cores
2431 if len(threads) != 2:
2432 continue
2433
2434 #set pinning for the first thread
2435 cpu_pinning.append( [ vcpu_id_list.pop(0), threads[0][0], threads[0][1] ] )
2436
2437 #reserve so it is not used the second thread
2438 reserved_threads.append(threads[1][1])
2439
2440 if len(vcpu_id_list) == 0:
2441 break
2442
2443 #In case paired threads are requested
2444 elif requirements['numa']['proc_req_type'] == 'paired-threads':
2445 paired = 'Y'
2446 #Get/create the list of the vcpu_ids
2447 if requirements['numa']['proc_req_list'] != None:
2448 vcpu_id_list = []
2449 for pair in requirements['numa']['proc_req_list']:
2450 if len(pair)!=2:
2451 return -1, "Field paired-threads-id not properly specified"
2452 return
2453 vcpu_id_list.append(pair[0])
2454 vcpu_id_list.append(pair[1])
2455 else:
2456 vcpu_id_list = range(0,2*int(requirements['numa']['proc_req_nb']))
2457
2458 for threads in cores_dict.itervalues():
2459 #we need full cores
2460 if len(threads) != 2:
2461 continue
2462 #set pinning for the first thread
2463 cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
2464
2465 #set pinning for the second thread
2466 cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
2467
2468 if len(vcpu_id_list) == 0:
2469 break
2470
2471 #In case normal threads are requested
2472 elif requirements['numa']['proc_req_type'] == 'threads':
2473 #Get/create the list of the vcpu_ids
2474 vcpu_id_list = requirements['numa']['proc_req_list']
2475 if vcpu_id_list == None:
2476 vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
2477
2478 for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
2479 threads = cores_dict[threads_index]
2480 #set pinning for the first thread
2481 cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
2482
2483 #if exists, set pinning for the second thread
2484 if len(threads) == 2 and len(vcpu_id_list) != 0:
2485 cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
2486
2487 if len(vcpu_id_list) == 0:
2488 break
2489
2490 #Get the source pci addresses for the selected numa
2491 used_sriov_ports = []
2492 for port in requirements['numa']['sriov_list']:
2493 db_lock.acquire()
2494 result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
2495 db_lock.release()
2496 if result <= 0:
2497 #print content
2498 return -1, content
2499 for row in content:
2500 if row['id'] in used_sriov_ports or row['id']==port['port_id']:
2501 continue
2502 port['pci'] = row['pci']
2503 if 'mac_address' not in port:
2504 port['mac_address'] = row['mac']
2505 del port['mac']
2506 port['port_id']=row['id']
2507 port['Mbps_used'] = port['bandwidth']
2508 used_sriov_ports.append(row['id'])
2509 break
2510
2511 for port in requirements['numa']['port_list']:
2512 port['Mbps_used'] = None
2513 if port['dedicated'] != "yes:sriov":
2514 port['mac_address'] = port['mac']
2515 del port['mac']
2516 continue
2517 db_lock.acquire()
2518 result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
2519 db_lock.release()
2520 if result <= 0:
2521 #print content
2522 return -1, content
2523 port['Mbps_used'] = content[0]['Mbps']
2524 for row in content:
2525 if row['id'] in used_sriov_ports or row['id']==port['port_id']:
2526 continue
2527 port['pci'] = row['pci']
2528 if 'mac_address' not in port:
2529 port['mac_address'] = row['mac'] # mac cannot be set to passthrough ports
2530 del port['mac']
2531 port['port_id']=row['id']
2532 used_sriov_ports.append(row['id'])
2533 break
2534
2535 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2536 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2537
2538 server['host_id'] = host_id
2539
2540 #Generate dictionary for saving in db the instance resources
2541 resources = {}
2542 resources['bridged-ifaces'] = []
2543
2544 numa_dict = {}
2545 numa_dict['interfaces'] = []
2546
2547 numa_dict['interfaces'] += requirements['numa']['port_list']
2548 numa_dict['interfaces'] += requirements['numa']['sriov_list']
2549
2550 #Check bridge information
2551 unified_dataplane_iface=[]
2552 unified_dataplane_iface += requirements['numa']['port_list']
2553 unified_dataplane_iface += requirements['numa']['sriov_list']
2554
2555 for control_iface in server.get('networks', []):
2556 control_iface['net_id']=control_iface.pop('uuid')
2557 #Get the brifge name
2558 db_lock.acquire()
2559 result, content = db.get_table(FROM='nets',
2560 SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp','dhcp_first_ip',
2561 'dhcp_last_ip', 'cidr', 'gateway_ip', 'dns', 'links', 'routes'),
2562 WHERE={'uuid': control_iface['net_id']})
2563 db_lock.release()
2564 if result < 0:
2565 pass
2566 elif result==0:
2567 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface['net_id']
2568 else:
2569 network=content[0]
2570 if control_iface.get("type", 'virtual') == 'virtual':
2571 if network['type']!='bridge_data' and network['type']!='bridge_man':
2572 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
2573 resources['bridged-ifaces'].append(control_iface)
2574 if network.get("provider") and network["provider"][0:3] == "OVS":
2575 control_iface["type"] = "instance:ovs"
2576 else:
2577 control_iface["type"] = "instance:bridge"
2578 if network.get("vlan"):
2579 control_iface["vlan"] = network["vlan"]
2580
2581 if network.get("enable_dhcp") == 'true':
2582 control_iface["enable_dhcp"] = network.get("enable_dhcp")
2583 control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
2584 control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
2585 control_iface["cidr"] = network["cidr"]
2586
2587 if network.get("dns"):
2588 control_iface["dns"] = yaml.safe_load(network.get("dns"))
2589 if network.get("links"):
2590 control_iface["links"] = yaml.safe_load(network.get("links"))
2591 if network.get("routes"):
2592 control_iface["routes"] = yaml.safe_load(network.get("routes"))
2593 else:
2594 if network['type']!='data' and network['type']!='ptp':
2595 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
2596 #dataplane interface, look for it in the numa tree and asign this network
2597 iface_found=False
2598 for dataplane_iface in numa_dict['interfaces']:
2599 if dataplane_iface['name'] == control_iface.get("name"):
2600 if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
2601 (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
2602 (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared") :
2603 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2604 (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
2605 dataplane_iface['uuid'] = control_iface['net_id']
2606 if dataplane_iface['dedicated'] == "no":
2607 dataplane_iface['vlan'] = network['vlan']
2608 if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
2609 dataplane_iface['mac_address'] = control_iface.get("mac_address")
2610 if control_iface.get("vpci"):
2611 dataplane_iface['vpci'] = control_iface.get("vpci")
2612 iface_found=True
2613 break
2614 if not iface_found:
2615 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface.get("name")
2616
2617 resources['host_id'] = host_id
2618 resources['image_id'] = server['image_id']
2619 resources['flavor_id'] = server['flavor_id']
2620 resources['tenant_id'] = server['tenant_id']
2621 resources['ram'] = requirements['ram']
2622 resources['vcpus'] = requirements['vcpus']
2623 resources['status'] = 'CREATING'
2624
2625 if 'description' in server: resources['description'] = server['description']
2626 if 'name' in server: resources['name'] = server['name']
2627
2628 resources['extended'] = {} #optional
2629 resources['extended']['numas'] = []
2630 numa_dict['numa_id'] = numa_id
2631 numa_dict['memory'] = requirements['numa']['memory']
2632 numa_dict['cores'] = []
2633
2634 for core in cpu_pinning:
2635 numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
2636 for core in reserved_threads:
2637 numa_dict['cores'].append({'id': core})
2638 resources['extended']['numas'].append(numa_dict)
2639 if extended!=None and 'devices' in extended: #TODO allow extra devices without numa
2640 resources['extended']['devices'] = extended['devices']
2641
2642
2643 # '===================================={'
2644 #print json.dumps(resources, indent=4)
2645 #print '====================================}'
2646
2647 return 0, resources
2648