Refactor ssh commands
[osm/openvim.git] / osm_openvim / host_thread.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
'''
This is a thread that interacts with the host and with libvirt to manage VMs.
One thread is launched per host.
'''
28 __author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__ = "$10-jul-2014 12:07:15$"
30
31 import json
32 import yaml
33 import threading
34 import time
35 import Queue
36 import paramiko
37 import subprocess
38 # import libvirt
39 import imp
40 import random
41 import os
42 import logging
43 from jsonschema import validate as js_v, exceptions as js_e
44 from vim_schema import localinfo_schema, hostinfo_schema
45
class RunCommandException(Exception):
    """Raised by host_thread.run_command when a local or remote command fails
    (non-zero exit status or ssh/session error) and the failure is not ignored."""
    pass
48
49 class host_thread(threading.Thread):
50 lvirt_module = None
51
    def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
                 develop_bridge_iface, password=None, keyfile=None, logger_name=None, debug=None):
        """Init a thread to communicate with compute node or ovs_controller.
        :param host_id: host identity
        :param name: name of the thread
        :param host: host ip or name to manage and user
        :param user, password, keyfile: user and credentials to connect to host
        :param db, db_lock': database class and lock to use it in exclusion
        """
        threading.Thread.__init__(self)
        self.name = name
        self.host = host
        self.user = user
        self.db = db
        self.db_lock = db_lock
        self.test = test                      # test mode: never touch the real host
        self.password = password
        self.keyfile = keyfile
        self.localinfo_dirty = False          # True when localinfo must be re-persisted to the host
        self.connectivity = True              # cleared by check_connectivity() on ssh/command failure

        # lazy-load the libvirt python binding once per process (stored as a class attribute)
        if not test and not host_thread.lvirt_module:
            try:
                module_info = imp.find_module("libvirt")
                host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
            except (IOError, ImportError) as e:
                raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))
        if logger_name:
            self.logger_name = logger_name
        else:
            self.logger_name = "openvim.host."+name
        self.logger = logging.getLogger(self.logger_name)
        if debug:
            # debug is a logging level name, e.g. "DEBUG" or "INFO"
            self.logger.setLevel(getattr(logging, debug))


        self.develop_mode = develop_mode
        self.develop_bridge_iface = develop_bridge_iface
        self.image_path = image_path
        self.empty_image_path = image_path
        self.host_id = host_id
        self.version = version

        self.xml_level = 0                    # current indentation depth used by tab()/inc_tab()/dec_tab()

        self.server_status = {}               # dictionary with pairs server_uuid:server_status
        self.pending_terminate_server = []    # list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
        self.next_update_server_status = 0    # time when must be check servers status

        self.hostinfo = None

        self.queueLock = threading.Lock()
        self.taskQueue = Queue.Queue(2000)
        self.ssh_conn = None
        self.run_command_session = None       # in-flight keep_session command, see run_command()
        self.error = None
        self.localhost = True if host == 'localhost' else False
        # libvirt connection URI over ssh
        self.lvirt_conn_uri = "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
            user=self.user, host=self.host)
        if keyfile:
            self.lvirt_conn_uri += "&keyfile=" + keyfile
        self.remote_ip = None                 # filled by ssh_connect()
        self.local_ip = None                  # filled by ssh_connect()
116
    def run_command(self, command, keep_session=False, ignore_exit_status=False):
        """Run a command passed as a str on a localhost or at remote machine.
        :param command: text with the command to execute.
        :param keep_session: if True it returns a <stdin> for sending input with '<stdin>.write("text\n")'.
            A command with keep_session=True MUST be followed by a command with keep_session=False in order to
            close the session and get the output
        :param ignore_exit_status: Return stdout and not raise an exception in case of error.
        :return: the output of the command if 'keep_session=False' or the <stdin> object if 'keep_session=True'
        :raises: RunCommandException if command fails
        """
        # two consecutive keep_session=True calls would leak the first open session
        if self.run_command_session and keep_session:
            raise RunCommandException("Internal error. A command with keep_session=True must be followed by another "
                                      "command with keep_session=False to close session")
        try:
            if self.localhost:
                if self.run_command_session:
                    # close the previously opened local session and collect its output
                    p = self.run_command_session
                    self.run_command_session = None
                    (output, outerror) = p.communicate()
                    returncode = p.returncode
                    p.stdin.close()
                elif keep_session:
                    # open a local session: the caller writes the command input to the returned stdin
                    p = subprocess.Popen(('bash', "-c", command), stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                    self.run_command_session = p
                    return p.stdin
                else:
                    if not ignore_exit_status:
                        # raises CalledProcessError on non-zero exit, caught below
                        output = subprocess.check_output(('bash', "-c", command))
                        returncode = 0
                    else:
                        out = None
                        p = subprocess.Popen(('bash', "-c", command), stdout=subprocess.PIPE)
                        out, err = p.communicate()
                        return out
            else:
                if self.run_command_session:
                    # close the previously opened ssh session and collect its output
                    (i, o, e) = self.run_command_session
                    self.run_command_session = None
                    i.channel.shutdown_write()
                else:
                    if not self.ssh_conn:
                        self.ssh_connect()
                    (i, o, e) = self.ssh_conn.exec_command(command, timeout=10)
                    if keep_session:
                        self.run_command_session = (i, o, e)
                        return i
                returncode = o.channel.recv_exit_status()
                output = o.read()
                outerror = e.read()
            if returncode != 0 and not ignore_exit_status:
                text = "run_command='{}' Error='{}'".format(command, outerror)
                self.logger.error(text)
                raise RunCommandException(text)

            self.logger.debug("run_command='{}' result='{}'".format(command, output))
            return output

        except RunCommandException:
            raise
        except subprocess.CalledProcessError as e:
            text = "run_command Exception '{}' '{}'".format(str(e), e.output)
        except (paramiko.ssh_exception.SSHException, Exception) as e:
            text = "run_command='{}' Exception='{}'".format(command, str(e))
        # shared failure epilogue: drop connection/session state and surface a single exception type
        self.ssh_conn = None
        self.run_command_session = None
        raise RunCommandException(text)
184
185 def ssh_connect(self):
186 try:
187 # Connect SSH
188 self.ssh_conn = paramiko.SSHClient()
189 self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
190 self.ssh_conn.load_system_host_keys()
191 self.ssh_conn.connect(self.host, username=self.user, password=self.password, key_filename=self.keyfile,
192 timeout=10) # auth_timeout=10)
193 self.remote_ip = self.ssh_conn.get_transport().sock.getpeername()[0]
194 self.local_ip = self.ssh_conn.get_transport().sock.getsockname()[0]
195 except (paramiko.ssh_exception.SSHException, Exception) as e:
196 text = 'ssh connect Exception: {}'.format(e)
197 self.ssh_conn = None
198 self.error = text
199 raise
200
201 def check_connectivity(self):
202 if not self.test:
203 try:
204 command = 'sudo brctl show'
205 self.run_command(command)
206 except RunCommandException as e:
207 self.connectivity = False
208 self.logger.error("check_connectivity Exception: " + str(e))
209
210 def load_localinfo(self):
211 if not self.test:
212 try:
213 self.run_command('sudo mkdir -p ' + self.image_path)
214 result = self.run_command('cat {}/.openvim.yaml'.format(self.image_path))
215 self.localinfo = yaml.load(result)
216 js_v(self.localinfo, localinfo_schema)
217 self.localinfo_dirty = False
218 if 'server_files' not in self.localinfo:
219 self.localinfo['server_files'] = {}
220 self.logger.debug("localinfo loaded from host")
221 return
222 except RunCommandException as e:
223 self.logger.error("load_localinfo Exception: " + str(e))
224 except host_thread.lvirt_module.libvirtError as e:
225 text = e.get_error_message()
226 self.logger.error("load_localinfo libvirt Exception: " + text)
227 except yaml.YAMLError as exc:
228 text = ""
229 if hasattr(exc, 'problem_mark'):
230 mark = exc.problem_mark
231 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
232 self.logger.error("load_localinfo yaml format Exception " + text)
233 except js_e.ValidationError as e:
234 text = ""
235 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
236 self.logger.error("load_localinfo format Exception: %s %s", text, str(e))
237 except Exception as e:
238 text = str(e)
239 self.logger.error("load_localinfo Exception: " + text)
240
241 # not loaded, insert a default data and force saving by activating dirty flag
242 self.localinfo = {'files':{}, 'server_files':{} }
243 # self.localinfo_dirty=True
244 self.localinfo_dirty=False
245
246 def load_hostinfo(self):
247 if self.test:
248 return
249 try:
250 result = self.run_command('cat {}/hostinfo.yaml'.format(self.image_path))
251 self.hostinfo = yaml.load(result)
252 js_v(self.hostinfo, hostinfo_schema)
253 self.logger.debug("hostinfo load from host " + str(self.hostinfo))
254 return
255 except RunCommandException as e:
256 self.logger.error("load_hostinfo ssh Exception: " + str(e))
257 except host_thread.lvirt_module.libvirtError as e:
258 text = e.get_error_message()
259 self.logger.error("load_hostinfo libvirt Exception: " + text)
260 except yaml.YAMLError as exc:
261 text = ""
262 if hasattr(exc, 'problem_mark'):
263 mark = exc.problem_mark
264 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
265 self.logger.error("load_hostinfo yaml format Exception " + text)
266 except js_e.ValidationError as e:
267 text = ""
268 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
269 self.logger.error("load_hostinfo format Exception: %s %s", text, str(e))
270 except Exception as e:
271 text = str(e)
272 self.logger.error("load_hostinfo Exception: " + text)
273
274 #not loaded, insert a default data
275 self.hostinfo = None
276
277 def save_localinfo(self, tries=3):
278 if self.test:
279 self.localinfo_dirty = False
280 return
281
282 while tries>=0:
283 tries-=1
284
285 try:
286 command = 'cat > {}/.openvim.yaml'.format(self.image_path)
287 in_stream = self.run_command(command, keep_session=True)
288 yaml.safe_dump(self.localinfo, in_stream, explicit_start=True, indent=4, default_flow_style=False,
289 tags=False, encoding='utf-8', allow_unicode=True)
290 result = self.run_command(command, keep_session=False) # to end session
291
292 self.localinfo_dirty = False
293 break #while tries
294
295 except RunCommandException as e:
296 self.logger.error("save_localinfo ssh Exception: " + str(e))
297 except host_thread.lvirt_module.libvirtError as e:
298 text = e.get_error_message()
299 self.logger.error("save_localinfo libvirt Exception: " + text)
300 except yaml.YAMLError as exc:
301 text = ""
302 if hasattr(exc, 'problem_mark'):
303 mark = exc.problem_mark
304 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
305 self.logger.error("save_localinfo yaml format Exception " + text)
306 except Exception as e:
307 text = str(e)
308 self.logger.error("save_localinfo Exception: " + text)
309
310 def load_servers_from_db(self):
311 self.db_lock.acquire()
312 r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
313 self.db_lock.release()
314
315 self.server_status = {}
316 if r<0:
317 self.logger.error("Error getting data from database: " + c)
318 return
319 for server in c:
320 self.server_status[ server['uuid'] ] = server['status']
321
322 #convert from old version to new one
323 if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
324 server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
325 if server_files_dict['source file'][-5:] == 'qcow2':
326 server_files_dict['file format'] = 'qcow2'
327
328 self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
329 if 'inc_files' in self.localinfo:
330 del self.localinfo['inc_files']
331 self.localinfo_dirty = True
332
333 def delete_unused_files(self):
334 '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
335 Deletes unused entries at self.loacalinfo and the corresponding local files.
336 The only reason for this mismatch is the manual deletion of instances (VM) at database
337 '''
338 if self.test:
339 return
340 for uuid,images in self.localinfo['server_files'].items():
341 if uuid not in self.server_status:
342 for localfile in images.values():
343 try:
344 self.logger.debug("deleting file '%s' of unused server '%s'", localfile['source file'], uuid)
345 self.delete_file(localfile['source file'])
346 except RunCommandException as e:
347 self.logger.error("Exception deleting file '%s': %s", localfile['source file'], str(e))
348 del self.localinfo['server_files'][uuid]
349 self.localinfo_dirty = True
350
351 def insert_task(self, task, *aditional):
352 try:
353 self.queueLock.acquire()
354 task = self.taskQueue.put( (task,) + aditional, timeout=5)
355 self.queueLock.release()
356 return 1, None
357 except Queue.Full:
358 return -1, "timeout inserting a task over host " + self.name
359
    def run(self):
        """Thread main loop: (re)load host state, then consume tasks from taskQueue.
        When idle it flushes dirty localinfo, refreshes server status every 5 seconds
        and processes pending forced terminations. Returns only on the 'exit' task;
        the 'reload' task restarts the outer load/consume cycle."""
        while True:
            self.load_localinfo()
            self.load_hostinfo()
            self.load_servers_from_db()
            self.delete_unused_files()
            while True:
                try:
                    self.queueLock.acquire()
                    if not self.taskQueue.empty():
                        task = self.taskQueue.get()
                    else:
                        task = None
                    self.queueLock.release()

                    if task is None:
                        # no pending task: background housekeeping, one action per pass
                        now=time.time()
                        if self.localinfo_dirty:
                            self.save_localinfo()
                        elif self.next_update_server_status < now:
                            self.update_servers_status()
                            self.next_update_server_status = now + 5
                        elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
                            self.server_forceoff()
                        else:
                            time.sleep(1)
                        continue

                    if task[0] == 'instance':
                        self.logger.debug("processing task instance " + str(task[1]['action']))
                        retry = 0
                        while retry < 2:
                            retry += 1
                            # second attempt (retry==2) is flagged as the last one
                            r = self.action_on_server(task[1], retry==2)
                            if r >= 0:
                                break
                    elif task[0] == 'image':
                        pass
                    elif task[0] == 'exit':
                        self.logger.debug("processing task exit")
                        self.terminate()
                        return 0
                    elif task[0] == 'reload':
                        self.logger.debug("processing task reload terminating and relaunching")
                        self.terminate()
                        break
                    elif task[0] == 'edit-iface':
                        self.logger.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
                            task[1], task[2], task[3]))
                        self.edit_iface(task[1], task[2], task[3])
                    elif task[0] == 'restore-iface':
                        self.logger.debug("processing task restore-iface={} mac={}".format(task[1], task[2]))
                        self.restore_iface(task[1], task[2])
                    elif task[0] == 'new-ovsbridge':
                        self.logger.debug("Creating compute OVS bridge")
                        self.create_ovs_bridge()
                    elif task[0] == 'new-vxlan':
                        self.logger.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task[1], task[2]))
                        self.create_ovs_vxlan_tunnel(task[1], task[2])
                    elif task[0] == 'del-ovsbridge':
                        self.logger.debug("Deleting OVS bridge")
                        self.delete_ovs_bridge()
                    elif task[0] == 'del-vxlan':
                        self.logger.debug("Deleting vxlan {} tunnel".format(task[1]))
                        self.delete_ovs_vxlan_tunnel(task[1])
                    elif task[0] == 'create-ovs-bridge-port':
                        self.logger.debug("Adding port ovim-{} to OVS bridge".format(task[1]))
                        self.create_ovs_bridge_port(task[1])
                    elif task[0] == 'del-ovs-port':
                        self.logger.debug("Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2]))
                        self.delete_bridge_port_attached_to_ovs(task[1], task[2])
                    else:
                        self.logger.debug("unknown task " + str(task))

                except Exception as e:
                    # catch-all so one failing task never kills the host thread
                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
436
437 def server_forceoff(self, wait_until_finished=False):
438 while len(self.pending_terminate_server)>0:
439 now = time.time()
440 if self.pending_terminate_server[0][0]>now:
441 if wait_until_finished:
442 time.sleep(1)
443 continue
444 else:
445 return
446 req={'uuid':self.pending_terminate_server[0][1],
447 'action':{'terminate':'force'},
448 'status': None
449 }
450 self.action_on_server(req)
451 self.pending_terminate_server.pop(0)
452
453 def terminate(self):
454 try:
455 self.server_forceoff(True)
456 if self.localinfo_dirty:
457 self.save_localinfo()
458 if not self.test:
459 self.ssh_conn.close()
460 except Exception as e:
461 text = str(e)
462 self.logger.error("terminate Exception: " + text)
463 self.logger.debug("exit from host_thread")
464
465 def get_local_iface_name(self, generic_name):
466 if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
467 return self.hostinfo["iface_names"][generic_name]
468 return generic_name
469
470 def create_xml_server(self, server, dev_list, server_metadata={}):
471 """Function that implements the generation of the VM XML definition.
472 Additional devices are in dev_list list
473 The main disk is upon dev_list[0]"""
474
475 #get if operating system is Windows
476 windows_os = False
477 os_type = server_metadata.get('os_type', None)
478 if os_type == None and 'metadata' in dev_list[0]:
479 os_type = dev_list[0]['metadata'].get('os_type', None)
480 if os_type != None and os_type.lower() == "windows":
481 windows_os = True
482 #get type of hard disk bus
483 bus_ide = True if windows_os else False
484 bus = server_metadata.get('bus', None)
485 if bus == None and 'metadata' in dev_list[0]:
486 bus = dev_list[0]['metadata'].get('bus', None)
487 if bus != None:
488 bus_ide = True if bus=='ide' else False
489
490 self.xml_level = 0
491
492 text = "<domain type='kvm'>"
493 #get topology
494 topo = server_metadata.get('topology', None)
495 if topo == None and 'metadata' in dev_list[0]:
496 topo = dev_list[0]['metadata'].get('topology', None)
497 #name
498 name = server.get('name', '')[:28] + "_" + server['uuid'][:28] #qemu impose a length limit of 59 chars or not start. Using 58
499 text += self.inc_tab() + "<name>" + name+ "</name>"
500 #uuid
501 text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>"
502
503 numa={}
504 if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
505 numa = server['extended']['numas'][0]
506 #memory
507 use_huge = False
508 memory = int(numa.get('memory',0))*1024*1024 #in KiB
509 if memory==0:
510 memory = int(server['ram'])*1024;
511 else:
512 if not self.develop_mode:
513 use_huge = True
514 if memory==0:
515 return -1, 'No memory assigned to instance'
516 memory = str(memory)
517 text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>"
518 text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
519 if use_huge:
520 text += self.tab()+'<memoryBacking>'+ \
521 self.inc_tab() + '<hugepages/>'+ \
522 self.dec_tab()+ '</memoryBacking>'
523
524 #cpu
525 use_cpu_pinning=False
526 vcpus = int(server.get("vcpus",0))
527 cpu_pinning = []
528 if 'cores-source' in numa:
529 use_cpu_pinning=True
530 for index in range(0, len(numa['cores-source'])):
531 cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
532 vcpus += 1
533 if 'threads-source' in numa:
534 use_cpu_pinning=True
535 for index in range(0, len(numa['threads-source'])):
536 cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
537 vcpus += 1
538 if 'paired-threads-source' in numa:
539 use_cpu_pinning=True
540 for index in range(0, len(numa['paired-threads-source'])):
541 cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
542 cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
543 vcpus += 2
544
545 if use_cpu_pinning and not self.develop_mode:
546 text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
547 self.tab()+'<cputune>'
548 self.xml_level += 1
549 for i in range(0, len(cpu_pinning)):
550 text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
551 text += self.dec_tab()+'</cputune>'+ \
552 self.tab() + '<numatune>' +\
553 self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
554 self.dec_tab() + '</numatune>'
555 else:
556 if vcpus==0:
557 return -1, "Instance without number of cpus"
558 text += self.tab()+"<vcpu>" + str(vcpus) + "</vcpu>"
559
560 #boot
561 boot_cdrom = False
562 for dev in dev_list:
563 if dev['type']=='cdrom' :
564 boot_cdrom = True
565 break
566 text += self.tab()+ '<os>' + \
567 self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
568 if boot_cdrom:
569 text += self.tab() + "<boot dev='cdrom'/>"
570 text += self.tab() + "<boot dev='hd'/>" + \
571 self.dec_tab()+'</os>'
572 #features
573 text += self.tab()+'<features>'+\
574 self.inc_tab()+'<acpi/>' +\
575 self.tab()+'<apic/>' +\
576 self.tab()+'<pae/>'+ \
577 self.dec_tab() +'</features>'
578 if topo == "oneSocket:hyperthreading":
579 if vcpus % 2 != 0:
580 return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
581 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % (vcpus/2)
582 elif windows_os or topo == "oneSocket":
583 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
584 else:
585 text += self.tab() + "<cpu mode='host-model'></cpu>"
586 text += self.tab() + "<clock offset='utc'/>" +\
587 self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
588 self.tab() + "<on_reboot>restart</on_reboot>" + \
589 self.tab() + "<on_crash>restart</on_crash>"
590 text += self.tab() + "<devices>" + \
591 self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
592 self.tab() + "<serial type='pty'>" +\
593 self.inc_tab() + "<target port='0'/>" + \
594 self.dec_tab() + "</serial>" +\
595 self.tab() + "<console type='pty'>" + \
596 self.inc_tab()+ "<target type='serial' port='0'/>" + \
597 self.dec_tab()+'</console>'
598 if windows_os:
599 text += self.tab() + "<controller type='usb' index='0'/>" + \
600 self.tab() + "<controller type='ide' index='0'/>" + \
601 self.tab() + "<input type='mouse' bus='ps2'/>" + \
602 self.tab() + "<sound model='ich6'/>" + \
603 self.tab() + "<video>" + \
604 self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
605 self.dec_tab() + "</video>" + \
606 self.tab() + "<memballoon model='virtio'/>" + \
607 self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
608
609 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
610 #> self.dec_tab()+'</hostdev>\n' +\
611 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
612 if windows_os:
613 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
614 else:
615 #If image contains 'GRAPH' include graphics
616 #if 'GRAPH' in image:
617 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
618 self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
619 self.dec_tab() + "</graphics>"
620
621 vd_index = 'a'
622 for dev in dev_list:
623 bus_ide_dev = bus_ide
624 if dev['type']=='cdrom' or dev['type']=='disk':
625 if dev['type']=='cdrom':
626 bus_ide_dev = True
627 text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
628 if 'file format' in dev:
629 text += self.inc_tab() + "<driver name='qemu' type='" +dev['file format']+ "' cache='writethrough'/>"
630 if 'source file' in dev:
631 text += self.tab() + "<source file='" +dev['source file']+ "'/>"
632 #elif v['type'] == 'block':
633 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
634 #else:
635 # return -1, 'Unknown disk type ' + v['type']
636 vpci = dev.get('vpci',None)
637 if vpci == None and 'metadata' in dev:
638 vpci = dev['metadata'].get('vpci',None)
639 text += self.pci2xml(vpci)
640
641 if bus_ide_dev:
642 text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>" #TODO allows several type of disks
643 else:
644 text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>"
645 text += self.dec_tab() + '</disk>'
646 vd_index = chr(ord(vd_index)+1)
647 elif dev['type']=='xml':
648 dev_text = dev['xml']
649 if 'vpci' in dev:
650 dev_text = dev_text.replace('__vpci__', dev['vpci'])
651 if 'source file' in dev:
652 dev_text = dev_text.replace('__file__', dev['source file'])
653 if 'file format' in dev:
654 dev_text = dev_text.replace('__format__', dev['source file'])
655 if '__dev__' in dev_text:
656 dev_text = dev_text.replace('__dev__', vd_index)
657 vd_index = chr(ord(vd_index)+1)
658 text += dev_text
659 else:
660 return -1, 'Unknown device type ' + dev['type']
661
662 net_nb=0
663 bridge_interfaces = server.get('networks', [])
664 for v in bridge_interfaces:
665 #Get the brifge name
666 self.db_lock.acquire()
667 result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
668 self.db_lock.release()
669 if result <= 0:
670 self.logger.error("create_xml_server ERROR %d getting nets %s", result, content)
671 return -1, content
672 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
673 #I know it is not secure
674 #for v in sorted(desc['network interfaces'].itervalues()):
675 model = v.get("model", None)
676 if content[0]['provider']=='default':
677 text += self.tab() + "<interface type='network'>" + \
678 self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
679 elif content[0]['provider'][0:7]=='macvtap':
680 text += self.tab()+"<interface type='direct'>" + \
681 self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
682 self.tab() + "<target dev='macvtap0'/>"
683 if windows_os:
684 text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
685 elif model==None:
686 model = "virtio"
687 elif content[0]['provider'][0:6]=='bridge':
688 text += self.tab() + "<interface type='bridge'>" + \
689 self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
690 if windows_os:
691 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
692 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
693 elif model==None:
694 model = "virtio"
695 elif content[0]['provider'][0:3] == "OVS":
696 vlan = content[0]['provider'].replace('OVS:', '')
697 text += self.tab() + "<interface type='bridge'>" + \
698 self.inc_tab() + "<source bridge='ovim-" + str(vlan) + "'/>"
699 else:
700 return -1, 'Unknown Bridge net provider ' + content[0]['provider']
701 if model!=None:
702 text += self.tab() + "<model type='" +model+ "'/>"
703 if v.get('mac_address', None) != None:
704 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
705 text += self.pci2xml(v.get('vpci',None))
706 text += self.dec_tab()+'</interface>'
707
708 net_nb += 1
709
710 interfaces = numa.get('interfaces', [])
711
712 net_nb=0
713 for v in interfaces:
714 if self.develop_mode: #map these interfaces to bridges
715 text += self.tab() + "<interface type='bridge'>" + \
716 self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
717 if windows_os:
718 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
719 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
720 else:
721 text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
722 if v.get('mac_address', None) != None:
723 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
724 text += self.pci2xml(v.get('vpci',None))
725 text += self.dec_tab()+'</interface>'
726 continue
727
728 if v['dedicated'] == 'yes': #passthrought
729 text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
730 self.inc_tab() + "<source>"
731 self.inc_tab()
732 text += self.pci2xml(v['source'])
733 text += self.dec_tab()+'</source>'
734 text += self.pci2xml(v.get('vpci',None))
735 if windows_os:
736 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
737 text += self.dec_tab()+'</hostdev>'
738 net_nb += 1
739 else: #sriov_interfaces
740 #skip not connected interfaces
741 if v.get("net_id") == None:
742 continue
743 text += self.tab() + "<interface type='hostdev' managed='yes'>"
744 self.inc_tab()
745 if v.get('mac_address', None) != None:
746 text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
747 text+= self.tab()+'<source>'
748 self.inc_tab()
749 text += self.pci2xml(v['source'])
750 text += self.dec_tab()+'</source>'
751 if v.get('vlan',None) != None:
752 text += self.tab() + "<vlan> <tag id='" + str(v['vlan']) + "'/> </vlan>"
753 text += self.pci2xml(v.get('vpci',None))
754 if windows_os:
755 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
756 text += self.dec_tab()+'</interface>'
757
758
759 text += self.dec_tab()+'</devices>'+\
760 self.dec_tab()+'</domain>'
761 return 0, text
762
763 def pci2xml(self, pci):
764 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
765 alows an empty pci text'''
766 if pci is None:
767 return ""
768 first_part = pci.split(':')
769 second_part = first_part[2].split('.')
770 return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
771 "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
772 "' function='0x" + second_part[1] + "'/>"
773
774 def tab(self):
775 """Return indentation according to xml_level"""
776 return "\n" + (' '*self.xml_level)
777
778 def inc_tab(self):
779 """Increment and return indentation according to xml_level"""
780 self.xml_level += 1
781 return self.tab()
782
783 def dec_tab(self):
784 """Decrement and return indentation according to xml_level"""
785 self.xml_level -= 1
786 return self.tab()
787
788 def create_ovs_bridge(self):
789 """
790 Create a bridge in compute OVS to allocate VMs
791 :return: True if success
792 """
793 if self.test or not self.connectivity:
794 return True
795
796 try:
797 self.run_command('sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true')
798
799 return True
800 except RunCommandException as e:
801 self.logger.error("create_ovs_bridge ssh Exception: " + str(e))
802 if "SSH session not active" in str(e):
803 self.ssh_connect()
804 return False
805
806 def delete_port_to_ovs_bridge(self, vlan, net_uuid):
807 """
808 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
809 :param vlan: vlan port id
810 :param net_uuid: network id
811 :return:
812 """
813
814 if self.test or not self.connectivity:
815 return True
816 try:
817 port_name = 'ovim-{}'.format(str(vlan))
818 command = 'sudo ovs-vsctl del-port br-int {}'.format(port_name)
819 self.run_command(command)
820 return True
821 except RunCommandException as e:
822 self.logger.error("delete_port_to_ovs_bridge ssh Exception: {}".format(str(e)))
823 return False
824
825 def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
826 """
827 Delete dhcp server process lining in namespace
828 :param vlan: segmentation id
829 :param net_uuid: network uuid
830 :param dhcp_path: conf fiel path that live in namespace side
831 :return:
832 """
833 if self.test or not self.connectivity:
834 return True
835 if not self.is_dhcp_port_free(vlan, net_uuid):
836 return True
837 try:
838 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
839 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
840 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
841
842 command = 'sudo ip netns exec {} cat {}'.format(dhcp_namespace, pid_file)
843 content = self.run_command(command, ignore_exit_status=True)
844 dns_pid = content.replace('\n', '')
845 command = 'sudo ip netns exec {} kill -9 '.format(dhcp_namespace, dns_pid)
846 self.run_command(command, ignore_exit_status=True)
847
848 except RunCommandException as e:
849 self.logger.error("delete_dhcp_server ssh Exception: " + str(e))
850 return False
851
852 def is_dhcp_port_free(self, host_id, net_uuid):
853 """
854 Check if any port attached to the a net in a vxlan mesh across computes nodes
855 :param host_id: host id
856 :param net_uuid: network id
857 :return: True if is not free
858 """
859 self.db_lock.acquire()
860 result, content = self.db.get_table(
861 FROM='ports',
862 WHERE={'type': 'instance:ovs', 'net_id': net_uuid}
863 )
864 self.db_lock.release()
865
866 if len(content) > 0:
867 return False
868 else:
869 return True
870
871 def is_port_free(self, host_id, net_uuid):
872 """
873 Check if there not ovs ports of a network in a compute host.
874 :param host_id: host id
875 :param net_uuid: network id
876 :return: True if is not free
877 """
878
879 self.db_lock.acquire()
880 result, content = self.db.get_table(
881 FROM='ports as p join instances as i on p.instance_id=i.uuid',
882 WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
883 )
884 self.db_lock.release()
885
886 if len(content) > 0:
887 return False
888 else:
889 return True
890
891 def add_port_to_ovs_bridge(self, vlan):
892 """
893 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
894 :param vlan: vlan port id
895 :return: True if success
896 """
897
898 if self.test:
899 return True
900 try:
901 port_name = 'ovim-{}'.format(str(vlan))
902 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(port_name, str(vlan))
903 self.run_command(command)
904 return True
905 except RunCommandException as e:
906 self.logger.error("add_port_to_ovs_bridge Exception: " + str(e))
907 return False
908
909 def delete_dhcp_port(self, vlan, net_uuid, dhcp_path):
910 """
911 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
912 :param vlan: segmentation id
913 :param net_uuid: network id
914 :return: True if success
915 """
916
917 if self.test:
918 return True
919
920 if not self.is_dhcp_port_free(vlan, net_uuid):
921 return True
922 self.delete_dhcp_interfaces(vlan, dhcp_path)
923 return True
924
925 def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
926 """
927 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
928 :param vlan:
929 :param net_uuid:
930 :return: True if success
931 """
932 if self.test:
933 return
934
935 if not self.is_port_free(vlan, net_uuid):
936 return True
937 self.delete_port_to_ovs_bridge(vlan, net_uuid)
938 self.delete_linux_bridge(vlan)
939 return True
940
941 def delete_linux_bridge(self, vlan):
942 """
943 Delete a linux bridge in a scpecific compute.
944 :param vlan: vlan port id
945 :return: True if success
946 """
947
948 if self.test:
949 return True
950 try:
951 port_name = 'ovim-{}'.format(str(vlan))
952 command = 'sudo ip link set dev ovim-{} down'.format(str(vlan))
953 self.run_command(command)
954
955 command = 'sudo ifconfig {} down && sudo brctl delbr {}'.format(port_name, port_name)
956 self.run_command(command)
957 return True
958 except RunCommandException as e:
959 self.logger.error("delete_linux_bridge Exception: {}".format(str(e)))
960 return False
961
962 def remove_link_bridge_to_ovs(self, vlan, link):
963 """
964 Delete a linux provider net connection to tenatn net
965 :param vlan: vlan port id
966 :param link: link name
967 :return: True if success
968 """
969
970 if self.test:
971 return True
972 try:
973 br_tap_name = '{}-vethBO'.format(str(vlan))
974 br_ovs_name = '{}-vethOB'.format(str(vlan))
975
976 # Delete ovs veth pair
977 command = 'sudo ip link set dev {} down'.format(br_ovs_name)
978 self.run_command(command)
979
980 command = 'sudo ovs-vsctl del-port br-int {}'.format(br_ovs_name)
981 self.run_command(command)
982
983 # Delete br veth pair
984 command = 'sudo ip link set dev {} down'.format(br_tap_name)
985 self.run_command(command)
986
987 # Delete br veth interface form bridge
988 command = 'sudo brctl delif {} {}'.format(link, br_tap_name)
989 self.run_command(command)
990
991 # Delete br veth pair
992 command = 'sudo ip link set dev {} down'.format(link)
993 self.run_command(command)
994
995 return True
996 except RunCommandException as e:
997 self.logger.error("remove_link_bridge_to_ovs Exception: {}".format(str(e)))
998 return False
999
1000 def create_ovs_bridge_port(self, vlan):
1001 """
1002 Generate a linux bridge and attache the port to a OVS bridge
1003 :param vlan: vlan port id
1004 :return:
1005 """
1006 if self.test:
1007 return
1008 self.create_linux_bridge(vlan)
1009 self.add_port_to_ovs_bridge(vlan)
1010
1011 def create_linux_bridge(self, vlan):
1012 """
1013 Create a linux bridge with STP active
1014 :param vlan: netowrk vlan id
1015 :return:
1016 """
1017
1018 if self.test:
1019 return True
1020 try:
1021 port_name = 'ovim-{}'.format(str(vlan))
1022 command = 'sudo brctl show | grep {}'.format(port_name)
1023 result = self.run_command(command, ignore_exit_status=True)
1024 if not result:
1025 command = 'sudo brctl addbr {}'.format(port_name)
1026 self.run_command(command)
1027
1028 command = 'sudo brctl stp {} on'.format(port_name)
1029 self.run_command(command)
1030
1031 command = 'sudo ip link set dev {} up'.format(port_name)
1032 self.run_command(command)
1033 return True
1034 except RunCommandException as e:
1035 self.logger.error("create_linux_bridge ssh Exception: {}".format(str(e)))
1036 return False
1037
1038 def set_mac_dhcp_server(self, ip, mac, vlan, netmask, first_ip, dhcp_path):
1039 """
1040 Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
1041 :param ip: IP address asigned to a VM
1042 :param mac: VM vnic mac to be macthed with the IP received
1043 :param vlan: Segmentation id
1044 :param netmask: netmask value
1045 :param path: dhcp conf file path that live in namespace side
1046 :return: True if success
1047 """
1048
1049 if self.test:
1050 return True
1051
1052 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
1053 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1054 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1055 ns_interface = '{}-vethDO'.format(str(vlan))
1056
1057 if not ip:
1058 return False
1059 try:
1060 command = 'sudo ip netns exec {} cat /sys/class/net/{}/address'.format(dhcp_namespace, ns_interface)
1061 iface_listen_mac = self.run_command(command, ignore_exit_status=True)
1062
1063 if iface_listen_mac > 0:
1064 command = 'sudo ip netns exec {} cat {} | grep -i {}'.format(dhcp_namespace,
1065 dhcp_hostsdir,
1066 iface_listen_mac)
1067 content = self.run_command(command, ignore_exit_status=True)
1068 if content == '':
1069 ip_data = iface_listen_mac.upper().replace('\n', '') + ',' + first_ip
1070 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1071
1072 command = 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace,
1073 ip_data,
1074 dhcp_hostsdir)
1075 self.run_command(command)
1076
1077 ip_data = mac.upper() + ',' + ip
1078 command = 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace,
1079 ip_data,
1080 dhcp_hostsdir)
1081 self.run_command(command, ignore_exit_status=False)
1082
1083 return True
1084
1085 return False
1086 except RunCommandException as e:
1087 self.logger.error("set_mac_dhcp_server ssh Exception: " + str(e))
1088 return False
1089
1090 def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
1091 """
1092 Delete into dhcp conf file the ip assigned to a specific MAC address
1093
1094 :param ip: IP address asigned to a VM
1095 :param mac: VM vnic mac to be macthed with the IP received
1096 :param vlan: Segmentation id
1097 :param dhcp_path: dhcp conf file path that live in namespace side
1098 :return:
1099 """
1100
1101 if self.test:
1102 return False
1103 try:
1104 dhcp_namespace = str(vlan) + '-dnsmasq'
1105 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1106 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1107
1108 if not ip:
1109 return False
1110
1111 ip_data = mac.upper() + ',' + ip
1112
1113 command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
1114 self.run_command(command)
1115
1116 return True
1117 except RunCommandException as e:
1118 self.logger.error("delete_mac_dhcp_server Exception: " + str(e))
1119 return False
1120
1121 def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway, dns_list=None, routes=None):
1122 """
1123 Generate a linux bridge and attache the port to a OVS bridge
1124 :param self:
1125 :param vlan: Segmentation id
1126 :param ip_range: IP dhcp range
1127 :param netmask: network netmask
1128 :param dhcp_path: dhcp conf file path that live in namespace side
1129 :param gateway: Gateway address for dhcp net
1130 :param dns_list: dns list for dhcp server
1131 :param routes: routes list for dhcp server
1132 :return: True if success
1133 """
1134
1135 if self.test:
1136 return True
1137 try:
1138 ns_interface = str(vlan) + '-vethDO'
1139 dhcp_namespace = str(vlan) + '-dnsmasq'
1140 dhcp_path = os.path.join(dhcp_path, dhcp_namespace, '')
1141 leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
1142 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
1143
1144 dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask
1145
1146 command = 'sudo ip netns exec {} mkdir -p {}'.format(dhcp_namespace, dhcp_path)
1147 self.run_command(command)
1148
1149 # check if dnsmasq process is running
1150 dnsmasq_is_runing = False
1151 pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
1152 command = 'sudo ip netns exec ' + dhcp_namespace + ' ls ' + pid_path
1153 content = self.run_command(command, ignore_exit_status=True)
1154
1155 # check if pid is runing
1156 if content:
1157 pid_path = content.replace('\n', '')
1158 command = "ps aux | awk '{print $2 }' | grep {}" + pid_path
1159 dnsmasq_is_runing = self.run_command(command, ignore_exit_status=True)
1160
1161 gateway_option = ' --dhcp-option=3,' + gateway
1162
1163 dhcp_route_option = ''
1164 if routes:
1165 dhcp_route_option = ' --dhcp-option=121'
1166 for key, value in routes.iteritems():
1167 if 'default' == key:
1168 gateway_option = ' --dhcp-option=3,' + value
1169 else:
1170 dhcp_route_option += ',' + key + ',' + value
1171 dns_data = ''
1172 if dns_list:
1173 dns_data = ' --dhcp-option=6'
1174 for dns in dns_list:
1175 dns_data += ',' + dns
1176
1177 if not dnsmasq_is_runing:
1178 command = 'sudo ip netns exec ' + dhcp_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
1179 '--interface=' + ns_interface + \
1180 ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
1181 ' --dhcp-range ' + dhcp_range + \
1182 ' --pid-file=' + pid_file + \
1183 ' --dhcp-leasefile=' + leases_path + \
1184 ' --listen-address ' + ip_range[0] + \
1185 gateway_option + \
1186 dhcp_route_option + \
1187 dns_data
1188
1189 self.run_command(command)
1190 return True
1191 except RunCommandException as e:
1192 self.logger.error("launch_dhcp_server ssh Exception: " + str(e))
1193 return False
1194
1195 def delete_dhcp_interfaces(self, vlan, dhcp_path):
1196 """
1197 Delete a linux dnsmasq bridge and namespace
1198 :param vlan: netowrk vlan id
1199 :param dhcp_path:
1200 :return:
1201 """
1202 if self.test:
1203 return True
1204 try:
1205 br_veth_name ='{}-vethDO'.format(str(vlan))
1206 ovs_veth_name = '{}-vethOD'.format(str(vlan))
1207 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
1208
1209 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1210 command = 'sudo ovs-vsctl del-port br-int {}'.format(ovs_veth_name)
1211 self.run_command(command, ignore_exit_status=True) # to end session
1212
1213 command = 'sudo ip link set dev {} down'.format(ovs_veth_name)
1214 self.run_command(command, ignore_exit_status=True) # to end session
1215
1216 command = 'sudo ip netns exec {} ip link set dev {} down'.format(dhcp_namespace, br_veth_name)
1217 self.run_command(command, ignore_exit_status=True)
1218
1219 command = 'sudo rm -rf {}'.format(dhcp_path)
1220 self.run_command(command)
1221
1222 command = 'sudo ip netns del {}'.format(dhcp_namespace)
1223 self.run_command(command)
1224
1225 return True
1226 except RunCommandException as e:
1227 self.logger.error("delete_dhcp_interfaces ssh Exception: {}".format(str(e)))
1228 return False
1229
1230 def create_dhcp_interfaces(self, vlan, ip_listen_address, netmask):
1231 """
1232 Create a linux bridge with STP active
1233 :param vlan: segmentation id
1234 :param ip_listen_address: Listen Ip address for the dhcp service, the tap interface living in namesapce side
1235 :param netmask: dhcp net CIDR
1236 :return: True if success
1237 """
1238 if self.test:
1239 return True
1240 try:
1241 ovs_veth_name = '{}-vethOD'.format(str(vlan))
1242 ns_veth = '{}-vethDO'.format(str(vlan))
1243 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
1244
1245 command = 'sudo ip netns add {}'.format(dhcp_namespace)
1246 self.run_command(command)
1247
1248 command = 'sudo ip link add {} type veth peer name {}'.format(ns_veth, ovs_veth_name)
1249 self.run_command(command)
1250
1251 command = 'sudo ip link set {} netns {}'.format(ns_veth, dhcp_namespace)
1252 self.run_command(command)
1253
1254 command = 'sudo ip netns exec {} ip link set dev {} up'.format(dhcp_namespace, ns_veth)
1255 self.run_command(command)
1256
1257 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(ovs_veth_name, str(vlan))
1258 self.run_command(command, ignore_exit_status=True)
1259
1260 command = 'sudo ip link set dev {} up'.format(ovs_veth_name)
1261 self.run_command(command)
1262
1263 command = 'sudo ip netns exec {} ip link set dev lo up'.format(dhcp_namespace)
1264 self.run_command(command)
1265
1266 command = 'sudo ip netns exec {} ifconfig {} {} netmask {}'.format(dhcp_namespace,
1267 ns_veth,
1268 ip_listen_address,
1269 netmask)
1270 self.run_command(command)
1271 return True
1272 except RunCommandException as e:
1273 self.logger.error("create_dhcp_interfaces ssh Exception: {}".format(str(e)))
1274 return False
1275
1276 def delete_qrouter_connection(self, vlan, link):
1277 """
1278 Delete qrouter Namesapce with all veth interfaces need it
1279 :param vlan:
1280 :param link:
1281 :return:
1282 """
1283
1284 if self.test:
1285 return True
1286 try:
1287 ns_qouter = '{}-qrouter'.format(str(vlan))
1288 qrouter_ovs_veth = '{}-vethOQ'.format(str(vlan))
1289 qrouter_ns_veth = '{}-vethQO'.format(str(vlan))
1290 qrouter_br_veth = '{}-vethBQ'.format(str(vlan))
1291 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1292
1293 command = 'sudo ovs-vsctl del-port br-int {}'.format(qrouter_ovs_veth)
1294 self.run_command(command)
1295
1296 # down ns veth
1297 command = 'sudo ip netns exec {} ip link set dev {} down'.format(ns_qouter, qrouter_ns_veth)
1298 self.run_command(command)
1299
1300 command = 'sudo ip netns del ' + ns_qouter
1301 self.run_command(command)
1302
1303 # down ovs veth interface
1304 command = 'sudo ip link set dev {} down'.format(qrouter_br_veth)
1305 self.run_command(command)
1306
1307 # down br veth interface
1308 command = 'sudo ip link set dev {} down'.format(qrouter_ovs_veth)
1309 self.run_command(command)
1310
1311 # down br veth interface
1312 command = 'sudo ip link set dev {} down'.format(qrouter_ns_router_veth)
1313 self.run_command(command)
1314
1315 # down br veth interface
1316 command = 'sudo brctl delif {} {}'.format(link, qrouter_br_veth)
1317 self.run_command(command)
1318
1319 # delete NS
1320 return True
1321 except RunCommandException as e:
1322 self.logger.error("delete_qrouter_connection ssh Exception: {}".format(str(e)))
1323 return False
1324
1325 def create_qrouter_ovs_connection(self, vlan, gateway, dhcp_cidr):
1326 """
1327 Create qrouter Namesapce with all veth interfaces need it between NS and OVS
1328 :param vlan:
1329 :param gateway:
1330 :return:
1331 """
1332
1333 if self.test:
1334 return True
1335
1336 try:
1337 ns_qouter = '{}-qrouter'.format(str(vlan))
1338 qrouter_ovs_veth ='{}-vethOQ'.format(str(vlan))
1339 qrouter_ns_veth = '{}-vethQO'.format(str(vlan))
1340
1341 # Create NS
1342 command = 'sudo ip netns add {}'.format(ns_qouter)
1343 self.run_command(command)
1344
1345 # Create pait veth
1346 command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_ns_veth, qrouter_ovs_veth)
1347 self.run_command(command)
1348
1349 # up ovs veth interface
1350 command = 'sudo ip link set dev {} up'.format(qrouter_ovs_veth)
1351 self.run_command(command)
1352
1353 # add ovs veth to ovs br-int
1354 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(qrouter_ovs_veth, vlan)
1355 self.run_command(command)
1356
1357 # add veth to ns
1358 command = 'sudo ip link set {} netns {}'.format(qrouter_ns_veth, ns_qouter)
1359 self.run_command(command)
1360
1361 # up ns loopback
1362 command = 'sudo ip netns exec {} ip link set dev lo up'.format(ns_qouter)
1363 self.run_command(command)
1364
1365 # up ns veth
1366 command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_veth)
1367 self.run_command(command)
1368
1369 from netaddr import IPNetwork
1370 ip_tools = IPNetwork(dhcp_cidr)
1371 cidr_len = ip_tools.prefixlen
1372
1373 # set gw to ns veth
1374 command = 'sudo ip netns exec {} ip address add {}/{} dev {}'.format(ns_qouter, gateway, cidr_len, qrouter_ns_veth)
1375 self.run_command(command)
1376
1377 return True
1378
1379 except RunCommandException as e:
1380 self.logger.error("Create_dhcp_interfaces ssh Exception: {}".format(str(e)))
1381 return False
1382
1383 def add_ns_routes(self, vlan, routes):
1384 """
1385
1386 :param vlan:
1387 :param routes:
1388 :return:
1389 """
1390
1391 if self.test:
1392 return True
1393
1394 try:
1395 ns_qouter = '{}-qrouter'.format(str(vlan))
1396 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1397
1398 for key, value in routes.iteritems():
1399 # up ns veth
1400 if key == 'default':
1401 command = 'sudo ip netns exec {} ip route add {} via {} '.format(ns_qouter, key, value)
1402 else:
1403 command = 'sudo ip netns exec {} ip route add {} via {} dev {}'.format(ns_qouter, key, value,
1404 qrouter_ns_router_veth)
1405
1406 self.run_command(command)
1407
1408 return True
1409
1410 except RunCommandException as e:
1411 self.logger.error("add_ns_routes, error adding routes to namesapce, {}".format(str(e)))
1412 return False
1413
1414 def create_qrouter_br_connection(self, vlan, cidr, link):
1415 """
1416 Create veth interfaces between user bridge (link) and OVS
1417 :param vlan:
1418 :param link:
1419 :return:
1420 """
1421
1422 if self.test:
1423 return True
1424
1425 try:
1426 ns_qouter = '{}-qrouter'.format(str(vlan))
1427 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1428 qrouter_br_veth = '{}-vethBQ'.format(str(vlan))
1429
1430 command = 'sudo brctl show | grep {}'.format(link['iface'])
1431 content = self.run_command(command, ignore_exit_status=True)
1432
1433 if content > '':
1434 # Create pait veth
1435 command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_br_veth, qrouter_ns_router_veth)
1436 self.run_command(command)
1437
1438 # up ovs veth interface
1439 command = 'sudo ip link set dev {} up'.format(qrouter_br_veth)
1440 self.run_command(command)
1441
1442 # add veth to ns
1443 command = 'sudo ip link set {} netns {}'.format(qrouter_ns_router_veth, ns_qouter)
1444 self.run_command(command)
1445
1446 # up ns veth
1447 command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_router_veth)
1448 self.run_command(command)
1449
1450 command = 'sudo ip netns exec {} ip address add {} dev {}'.format(ns_qouter,
1451 link['nat'],
1452 qrouter_ns_router_veth)
1453 self.run_command(command)
1454
1455 # up ns veth
1456 command = 'sudo brctl addif {} {}'.format(link['iface'], qrouter_br_veth)
1457 self.run_command(command)
1458
1459 # up ns veth
1460 command = 'sudo ip netns exec {} iptables -t nat -A POSTROUTING -o {} -s {} -d {} -j MASQUERADE' \
1461 .format(ns_qouter, qrouter_ns_router_veth, link['nat'], cidr)
1462 self.run_command(command)
1463
1464 return True
1465 else:
1466
1467 self.logger.error('create_qrouter_br_connection, Bridge {} given by user not exist'.format(qrouter_br_veth))
1468 return False
1469
1470 except RunCommandException as e:
1471 self.logger.error("Error creating qrouter, {}".format(str(e)))
1472 return False
1473
1474 def create_link_bridge_to_ovs(self, vlan, link):
1475 """
1476 Create interfaces to connect a linux bridge with tenant net
1477 :param vlan: segmentation id
1478 :return: True if success
1479 """
1480 if self.test:
1481 return True
1482 try:
1483
1484 br_tap_name = '{}-vethBO'.format(str(vlan))
1485 br_ovs_name = '{}-vethOB'.format(str(vlan))
1486
1487 # is a bridge or a interface
1488 command = 'sudo brctl show | grep {}'.format(link)
1489 content = self.run_command(command, ignore_exit_status=True)
1490 if content > '':
1491 command = 'sudo ip link add {} type veth peer name {}'.format(br_tap_name, br_ovs_name)
1492 self.run_command(command)
1493
1494 command = 'sudo ip link set dev {} up'.format(br_tap_name)
1495 self.run_command(command)
1496
1497 command = 'sudo ip link set dev {} up'.format(br_ovs_name)
1498 self.run_command(command)
1499
1500 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(br_ovs_name, str(vlan))
1501 self.run_command(command)
1502
1503 command = 'sudo brctl addif ' + link + ' {}'.format(br_tap_name)
1504 self.run_command(command)
1505 return True
1506 else:
1507 self.logger.error('Link is not present, please check {}'.format(link))
1508 return False
1509
1510 except RunCommandException as e:
1511 self.logger.error("create_link_bridge_to_ovs, Error creating link to ovs, {}".format(str(e)))
1512 return False
1513
1514 def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
1515 """
1516 Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
1517 :param vxlan_interface: vlxan inteface name.
1518 :param remote_ip: tunnel endpoint remote compute ip.
1519 :return:
1520 """
1521 if self.test or not self.connectivity:
1522 return True
1523 if remote_ip == 'localhost':
1524 if self.localhost:
1525 return True # TODO: Cannot create a vxlan between localhost and localhost
1526 remote_ip = self.local_ip
1527 try:
1528
1529 command = 'sudo ovs-vsctl add-port br-int {} -- set Interface {} type=vxlan options:remote_ip={} ' \
1530 '-- set Port {} other_config:stp-path-cost=10'.format(vxlan_interface,
1531 vxlan_interface,
1532 remote_ip,
1533 vxlan_interface)
1534 self.run_command(command)
1535 return True
1536 except RunCommandException as e:
1537 self.logger.error("create_ovs_vxlan_tunnel, error creating vxlan tunnel, {}".format(str(e)))
1538 return False
1539
1540 def delete_ovs_vxlan_tunnel(self, vxlan_interface):
1541 """
1542 Delete a vlxan tunnel port from a OVS brdige.
1543 :param vxlan_interface: vlxan name to be delete it.
1544 :return: True if success.
1545 """
1546 if self.test or not self.connectivity:
1547 return True
1548 try:
1549 command = 'sudo ovs-vsctl del-port br-int {}'.format(vxlan_interface)
1550 self.run_command(command)
1551 return True
1552 except RunCommandException as e:
1553 self.logger.error("delete_ovs_vxlan_tunnel, error deleting vxlan tunenl, {}".format(str(e)))
1554 return False
1555
1556 def delete_ovs_bridge(self):
1557 """
1558 Delete a OVS bridge from a compute.
1559 :return: True if success
1560 """
1561 if self.test or not self.connectivity:
1562 return True
1563 try:
1564 command = 'sudo ovs-vsctl del-br br-int'
1565 self.run_command(command)
1566 return True
1567 except RunCommandException as e:
1568 self.logger.error("delete_ovs_bridge ssh Exception: {}".format(str(e)))
1569 return False
1570
1571 def get_file_info(self, path):
1572 command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1573 try:
1574 content = self.run_command(command)
1575 return content.split(" ") # (permission, 1, owner, group, size, date, file)
1576 except RunCommandException as e:
1577 return None # file does not exist
1578
1579 def qemu_get_info(self, path):
1580 command = 'qemu-img info ' + path
1581 content = self.run_command(command)
1582 try:
1583 return yaml.load(content)
1584 except yaml.YAMLError as exc:
1585 text = ""
1586 if hasattr(exc, 'problem_mark'):
1587 mark = exc.problem_mark
1588 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
1589 self.logger.error("get_qemu_info yaml format Exception " + text)
1590 raise RunCommandException("Error getting qemu_info yaml format" + text)
1591
1592 def qemu_change_backing(self, inc_file, new_backing_file):
1593 command = 'qemu-img rebase -u -b {} {}'.format(new_backing_file, inc_file)
1594 try:
1595 self.run_command(command)
1596 return 0
1597 except RunCommandException as e:
1598 self.logger.error("qemu_change_backing error: " + str(e))
1599 return -1
1600
1601 def qemu_create_empty_disk(self, dev):
1602
1603 if not dev and 'source' not in dev and 'file format' not in dev and 'image_size' not in dev:
1604 self.logger.error("qemu_create_empty_disk error: missing image parameter")
1605 return -1
1606
1607 empty_disk_path = dev['source file']
1608
1609 command = 'qemu-img create -f qcow2 {} {}G'.format(empty_disk_path, dev['image_size'])
1610 try:
1611 self.run_command(command)
1612 return 0
1613 except RunCommandException as e:
1614 self.logger.error("qemu_create_empty_disk error: " + str(e))
1615 return -1
1616
1617 def get_notused_filename(self, proposed_name, suffix=''):
1618 '''Look for a non existing file_name in the host
1619 proposed_name: proposed file name, includes path
1620 suffix: suffix to be added to the name, before the extention
1621 '''
1622 extension = proposed_name.rfind(".")
1623 slash = proposed_name.rfind("/")
1624 if extension < 0 or extension < slash: # no extension
1625 extension = len(proposed_name)
1626 target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
1627 info = self.get_file_info(target_name)
1628 if info is None:
1629 return target_name
1630
1631 index=0
1632 while info is not None:
1633 target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
1634 index+=1
1635 info = self.get_file_info(target_name)
1636 return target_name
1637
1638 def get_notused_path(self, proposed_path, suffix=''):
1639 '''Look for a non existing path at database for images
1640 proposed_path: proposed file name, includes path
1641 suffix: suffix to be added to the name, before the extention
1642 '''
1643 extension = proposed_path.rfind(".")
1644 if extension < 0:
1645 extension = len(proposed_path)
1646 if suffix != None:
1647 target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
1648 index=0
1649 while True:
1650 r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
1651 if r<=0:
1652 return target_path
1653 target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
1654 index+=1
1655
1656
1657 def delete_file(self, file_name):
1658 command = 'rm -f ' + file_name
1659 self.run_command(command)
1660
1661 def copy_file(self, source, destination, perserve_time=True):
1662 if source[0:4]=="http":
1663 command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1664 dst=destination, src=source, dst_result=destination + ".result" )
1665 else:
1666 command = 'cp --no-preserve=mode'
1667 if perserve_time:
1668 command += ' --preserve=timestamps'
1669 command += " '{}' '{}'".format(source, destination)
1670 self.run_command(command)
1671
    def copy_remote_file(self, remote_file, use_incremental):
        ''' Copy a file from the repository to local folder and recursively
            copy the backing files in case the remote file is incremental
            Read and/or modified self.localinfo['files'] that contain the
            unmodified copies of images in the local path
            params:
                remote_file: path of remote file
                use_incremental: None (leave the decision to this function), True, False
            return:
                local_file: name of local file
                qemu_info: dict with quemu information of local file
                use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
        '''

        use_incremental_out = use_incremental
        new_backing_file = None
        local_file = None
        # http sources cannot be inspected with qemu-img here, so they are never
        # treated as "local" repository files
        file_from_local = True

        #in case incremental use is not decided, take the decision depending on the image
        #avoid the use of incremental if this image is already incremental
        if remote_file[0:4] == "http":
            file_from_local = False
        if file_from_local:
            qemu_remote_info = self.qemu_get_info(remote_file)
        # note: the short-circuit on file_from_local keeps qemu_remote_info from being
        # read when it was never assigned (http case)
        if use_incremental_out==None:
            use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)
        #copy recursivelly the backing files
        if file_from_local and 'backing file' in qemu_remote_info:
            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

        #check if remote file is present locally
        if use_incremental_out and remote_file in self.localinfo['files']:
            local_file = self.localinfo['files'][remote_file]
            local_file_info = self.get_file_info(local_file)
            if file_from_local:
                remote_file_info = self.get_file_info(remote_file)
            if local_file_info == None:
                # cached copy vanished from disk; fall through and re-copy
                local_file = None
            elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
                #local copy of file not valid because date or size are different.
                #TODO DELETE local file if this file is not used by any active virtual machine
                try:
                    self.delete_file(local_file)
                    del self.localinfo['files'][remote_file]
                except Exception:
                    pass
                local_file = None
            else: #check that the local file has the same backing file, or there are not backing at all
                qemu_info = self.qemu_get_info(local_file)
                if new_backing_file != qemu_info.get('backing file'):
                    local_file = None


        if local_file == None: #copy the file
            img_name= remote_file.split('/') [-1]
            img_local = self.image_path + '/' + img_name
            # pick a name that does not collide with an existing local image
            local_file = self.get_notused_filename(img_local)
            self.copy_file(remote_file, local_file, use_incremental_out)

            # remember the copy so future launches can reuse it as a backing file
            if use_incremental_out:
                self.localinfo['files'][remote_file] = local_file
            if new_backing_file:
                self.qemu_change_backing(local_file, new_backing_file)
            qemu_info = self.qemu_get_info(local_file)

        return local_file, qemu_info, use_incremental_out
1739
1740 def launch_server(self, conn, server, rebuild=False, domain=None):
1741 if self.test:
1742 time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real
1743 return 0, 'Success'
1744
1745 server_id = server['uuid']
1746 paused = server.get('paused','no')
1747 try:
1748 if domain!=None and rebuild==False:
1749 domain.resume()
1750 #self.server_status[server_id] = 'ACTIVE'
1751 return 0, 'Success'
1752
1753 self.db_lock.acquire()
1754 result, server_data = self.db.get_instance(server_id)
1755 self.db_lock.release()
1756 if result <= 0:
1757 self.logger.error("launch_server ERROR getting server from DB %d %s", result, server_data)
1758 return result, server_data
1759
1760 #0: get image metadata
1761 server_metadata = server.get('metadata', {})
1762 use_incremental = None
1763
1764 if "use_incremental" in server_metadata:
1765 use_incremental = False if server_metadata["use_incremental"] == "no" else True
1766
1767 server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
1768 if rebuild:
1769 #delete previous incremental files
1770 for file_ in server_host_files.values():
1771 self.delete_file(file_['source file'] )
1772 server_host_files={}
1773
1774 #1: obtain aditional devices (disks)
1775 #Put as first device the main disk
1776 devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
1777 if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
1778 devices += server_data['extended']['devices']
1779 empty_path = None
1780 for dev in devices:
1781 image_id = dev.get('image_id')
1782 if not image_id:
1783 import uuid
1784 uuid_empty = str(uuid.uuid4())
1785 empty_path = self.empty_image_path + uuid_empty + '.qcow2' # local path for empty disk
1786
1787 dev['source file'] = empty_path
1788 dev['file format'] = 'qcow2'
1789 self.qemu_create_empty_disk(dev)
1790 server_host_files[uuid_empty] = {'source file': empty_path,
1791 'file format': dev['file format']}
1792
1793 continue
1794 else:
1795 self.db_lock.acquire()
1796 result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
1797 WHERE={'uuid': image_id})
1798 self.db_lock.release()
1799 if result <= 0:
1800 error_text = "ERROR", result, content, "when getting image", dev['image_id']
1801 self.logger.error("launch_server " + error_text)
1802 return -1, error_text
1803 if content[0]['metadata'] is not None:
1804 dev['metadata'] = json.loads(content[0]['metadata'])
1805 else:
1806 dev['metadata'] = {}
1807
1808 if image_id in server_host_files:
1809 dev['source file'] = server_host_files[image_id]['source file'] #local path
1810 dev['file format'] = server_host_files[image_id]['file format'] # raw or qcow2
1811 continue
1812
1813 #2: copy image to host
1814 if image_id:
1815 remote_file = content[0]['path']
1816 else:
1817 remote_file = empty_path
1818 use_incremental_image = use_incremental
1819 if dev['metadata'].get("use_incremental") == "no":
1820 use_incremental_image = False
1821 local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)
1822
1823 #create incremental image
1824 if use_incremental_image:
1825 local_file_inc = self.get_notused_filename(local_file, '.inc')
1826 command = 'qemu-img create -f qcow2 {} -o backing_file={}'.format(local_file_inc, local_file)
1827 self.run_command(command)
1828 local_file = local_file_inc
1829 qemu_info = {'file format': 'qcow2'}
1830
1831 server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}
1832
1833 dev['source file'] = local_file
1834 dev['file format'] = qemu_info['file format']
1835
1836 self.localinfo['server_files'][ server['uuid'] ] = server_host_files
1837 self.localinfo_dirty = True
1838
1839 #3 Create XML
1840 result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
1841 if result <0:
1842 self.logger.error("create xml server error: " + xml)
1843 return -2, xml
1844 self.logger.debug("create xml: " + xml)
1845 atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
1846 #4 Start the domain
1847 if not rebuild: #ensures that any pending destroying server is done
1848 self.server_forceoff(True)
1849 #self.logger.debug("launching instance " + xml)
1850 conn.createXML(xml, atribute)
1851 #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
1852
1853 return 0, 'Success'
1854
1855 except paramiko.ssh_exception.SSHException as e:
1856 text = e.args[0]
1857 self.logger.error("launch_server id='%s' ssh Exception: %s", server_id, text)
1858 if "SSH session not active" in text:
1859 self.ssh_connect()
1860 except host_thread.lvirt_module.libvirtError as e:
1861 text = e.get_error_message()
1862 self.logger.error("launch_server id='%s' libvirt Exception: %s", server_id, text)
1863 except Exception as e:
1864 text = str(e)
1865 self.logger.error("launch_server id='%s' Exception: %s", server_id, text)
1866 return -1, text
1867
1868 def update_servers_status(self):
1869 # # virDomainState
1870 # VIR_DOMAIN_NOSTATE = 0
1871 # VIR_DOMAIN_RUNNING = 1
1872 # VIR_DOMAIN_BLOCKED = 2
1873 # VIR_DOMAIN_PAUSED = 3
1874 # VIR_DOMAIN_SHUTDOWN = 4
1875 # VIR_DOMAIN_SHUTOFF = 5
1876 # VIR_DOMAIN_CRASHED = 6
1877 # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended
1878
1879 if self.test or len(self.server_status)==0:
1880 return
1881
1882 try:
1883 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
1884 domains= conn.listAllDomains()
1885 domain_dict={}
1886 for domain in domains:
1887 uuid = domain.UUIDString() ;
1888 libvirt_status = domain.state()
1889 #print libvirt_status
1890 if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
1891 new_status = "ACTIVE"
1892 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
1893 new_status = "PAUSED"
1894 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
1895 new_status = "INACTIVE"
1896 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
1897 new_status = "ERROR"
1898 else:
1899 new_status = None
1900 domain_dict[uuid] = new_status
1901 conn.close()
1902 except host_thread.lvirt_module.libvirtError as e:
1903 self.logger.error("get_state() Exception " + e.get_error_message())
1904 return
1905
1906 for server_id, current_status in self.server_status.iteritems():
1907 new_status = None
1908 if server_id in domain_dict:
1909 new_status = domain_dict[server_id]
1910 else:
1911 new_status = "INACTIVE"
1912
1913 if new_status == None or new_status == current_status:
1914 continue
1915 if new_status == 'INACTIVE' and current_status == 'ERROR':
1916 continue #keep ERROR status, because obviously this machine is not running
1917 #change status
1918 self.logger.debug("server id='%s' status change from '%s' to '%s'", server_id, current_status, new_status)
1919 STATUS={'progress':100, 'status':new_status}
1920 if new_status == 'ERROR':
1921 STATUS['last_error'] = 'machine has crashed'
1922 self.db_lock.acquire()
1923 r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
1924 self.db_lock.release()
1925 if r>=0:
1926 self.server_status[server_id] = new_status
1927
1928 def action_on_server(self, req, last_retry=True):
1929 '''Perform an action on a req
1930 Attributes:
1931 req: dictionary that contain:
1932 server properties: 'uuid','name','tenant_id','status'
1933 action: 'action'
1934 host properties: 'user', 'ip_name'
1935 return (error, text)
1936 0: No error. VM is updated to new state,
1937 -1: Invalid action, as trying to pause a PAUSED VM
1938 -2: Error accessing host
1939 -3: VM nor present
1940 -4: Error at DB access
1941 -5: Error while trying to perform action. VM is updated to ERROR
1942 '''
1943 server_id = req['uuid']
1944 conn = None
1945 new_status = None
1946 old_status = req['status']
1947 last_error = None
1948
1949 if self.test:
1950 if 'terminate' in req['action']:
1951 new_status = 'deleted'
1952 elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
1953 if req['status']!='ERROR':
1954 time.sleep(5)
1955 new_status = 'INACTIVE'
1956 elif 'start' in req['action'] and req['status']!='ERROR':
1957 new_status = 'ACTIVE'
1958 elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE':
1959 new_status = 'ACTIVE'
1960 elif 'pause' in req['action'] and req['status']!='ERROR':
1961 new_status = 'PAUSED'
1962 elif 'reboot' in req['action'] and req['status']!='ERROR':
1963 new_status = 'ACTIVE'
1964 elif 'rebuild' in req['action']:
1965 time.sleep(random.randint(20,150))
1966 new_status = 'ACTIVE'
1967 elif 'createImage' in req['action']:
1968 time.sleep(5)
1969 self.create_image(None, req)
1970 else:
1971 try:
1972 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
1973 try:
1974 dom = conn.lookupByUUIDString(server_id)
1975 except host_thread.lvirt_module.libvirtError as e:
1976 text = e.get_error_message()
1977 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
1978 dom = None
1979 else:
1980 self.logger.error("action_on_server id='%s' libvirt exception: %s", server_id, text)
1981 raise e
1982
1983 if 'forceOff' in req['action']:
1984 if dom == None:
1985 self.logger.debug("action_on_server id='%s' domain not running", server_id)
1986 else:
1987 try:
1988 self.logger.debug("sending DESTROY to server id='%s'", server_id)
1989 dom.destroy()
1990 except Exception as e:
1991 if "domain is not running" not in e.get_error_message():
1992 self.logger.error("action_on_server id='%s' Exception while sending force off: %s",
1993 server_id, e.get_error_message())
1994 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
1995 new_status = 'ERROR'
1996
1997 elif 'terminate' in req['action']:
1998 if dom == None:
1999 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2000 new_status = 'deleted'
2001 else:
2002 try:
2003 if req['action']['terminate'] == 'force':
2004 self.logger.debug("sending DESTROY to server id='%s'", server_id)
2005 dom.destroy()
2006 new_status = 'deleted'
2007 else:
2008 self.logger.debug("sending SHUTDOWN to server id='%s'", server_id)
2009 dom.shutdown()
2010 self.pending_terminate_server.append( (time.time()+10,server_id) )
2011 except Exception as e:
2012 self.logger.error("action_on_server id='%s' Exception while destroy: %s",
2013 server_id, e.get_error_message())
2014 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
2015 new_status = 'ERROR'
2016 if "domain is not running" in e.get_error_message():
2017 try:
2018 dom.undefine()
2019 new_status = 'deleted'
2020 except Exception:
2021 self.logger.error("action_on_server id='%s' Exception while undefine: %s",
2022 server_id, e.get_error_message())
2023 last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
2024 #Exception: 'virDomainDetachDevice() failed'
2025 if new_status=='deleted':
2026 if server_id in self.server_status:
2027 del self.server_status[server_id]
2028 if req['uuid'] in self.localinfo['server_files']:
2029 for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
2030 try:
2031 self.delete_file(file_['source file'])
2032 except Exception:
2033 pass
2034 del self.localinfo['server_files'][ req['uuid'] ]
2035 self.localinfo_dirty = True
2036
2037 elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
2038 try:
2039 if dom == None:
2040 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2041 else:
2042 dom.shutdown()
2043 # new_status = 'INACTIVE'
2044 #TODO: check status for changing at database
2045 except Exception as e:
2046 new_status = 'ERROR'
2047 self.logger.error("action_on_server id='%s' Exception while shutdown: %s",
2048 server_id, e.get_error_message())
2049 last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()
2050
2051 elif 'rebuild' in req['action']:
2052 if dom != None:
2053 dom.destroy()
2054 r = self.launch_server(conn, req, True, None)
2055 if r[0] <0:
2056 new_status = 'ERROR'
2057 last_error = r[1]
2058 else:
2059 new_status = 'ACTIVE'
2060 elif 'start' in req['action']:
2061 # The instance is only create in DB but not yet at libvirt domain, needs to be create
2062 rebuild = True if req['action']['start'] == 'rebuild' else False
2063 r = self.launch_server(conn, req, rebuild, dom)
2064 if r[0] <0:
2065 new_status = 'ERROR'
2066 last_error = r[1]
2067 else:
2068 new_status = 'ACTIVE'
2069
2070 elif 'resume' in req['action']:
2071 try:
2072 if dom == None:
2073 pass
2074 else:
2075 dom.resume()
2076 # new_status = 'ACTIVE'
2077 except Exception as e:
2078 self.logger.error("action_on_server id='%s' Exception while resume: %s",
2079 server_id, e.get_error_message())
2080
2081 elif 'pause' in req['action']:
2082 try:
2083 if dom == None:
2084 pass
2085 else:
2086 dom.suspend()
2087 # new_status = 'PAUSED'
2088 except Exception as e:
2089 self.logger.error("action_on_server id='%s' Exception while pause: %s",
2090 server_id, e.get_error_message())
2091
2092 elif 'reboot' in req['action']:
2093 try:
2094 if dom == None:
2095 pass
2096 else:
2097 dom.reboot()
2098 self.logger.debug("action_on_server id='%s' reboot:", server_id)
2099 #new_status = 'ACTIVE'
2100 except Exception as e:
2101 self.logger.error("action_on_server id='%s' Exception while reboot: %s",
2102 server_id, e.get_error_message())
2103 elif 'createImage' in req['action']:
2104 self.create_image(dom, req)
2105
2106
2107 conn.close()
2108 except host_thread.lvirt_module.libvirtError as e:
2109 if conn is not None: conn.close()
2110 text = e.get_error_message()
2111 new_status = "ERROR"
2112 last_error = text
2113 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
2114 self.logger.debug("action_on_server id='%s' Exception removed from host", server_id)
2115 else:
2116 self.logger.error("action_on_server id='%s' Exception %s", server_id, text)
2117 #end of if self.test
2118 if new_status == None:
2119 return 1
2120
2121 self.logger.debug("action_on_server id='%s' new status=%s %s",server_id, new_status, last_error)
2122 UPDATE = {'progress':100, 'status':new_status}
2123
2124 if new_status=='ERROR':
2125 if not last_retry: #if there will be another retry do not update database
2126 return -1
2127 elif 'terminate' in req['action']:
2128 #PUT a log in the database
2129 self.logger.error("PANIC deleting server id='%s' %s", server_id, last_error)
2130 self.db_lock.acquire()
2131 self.db.new_row('logs',
2132 {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
2133 'description':'PANIC deleting server from host '+self.name+': '+last_error}
2134 )
2135 self.db_lock.release()
2136 if server_id in self.server_status:
2137 del self.server_status[server_id]
2138 return -1
2139 else:
2140 UPDATE['last_error'] = last_error
2141 if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
2142 self.db_lock.acquire()
2143 self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
2144 self.server_status[server_id] = new_status
2145 self.db_lock.release()
2146 if new_status == 'ERROR':
2147 return -1
2148 return 1
2149
2150
2151 def restore_iface(self, name, mac, lib_conn=None):
2152 ''' make an ifdown, ifup to restore default parameter of na interface
2153 Params:
2154 mac: mac address of the interface
2155 lib_conn: connection to the libvirt, if None a new connection is created
2156 Return 0,None if ok, -1,text if fails
2157 '''
2158 conn=None
2159 ret = 0
2160 error_text=None
2161 if self.test:
2162 self.logger.debug("restore_iface '%s' %s", name, mac)
2163 return 0, None
2164 try:
2165 if not lib_conn:
2166 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2167 else:
2168 conn = lib_conn
2169
2170 #wait to the pending VM deletion
2171 #TODO.Revise self.server_forceoff(True)
2172
2173 iface = conn.interfaceLookupByMACString(mac)
2174 if iface.isActive():
2175 iface.destroy()
2176 iface.create()
2177 self.logger.debug("restore_iface '%s' %s", name, mac)
2178 except host_thread.lvirt_module.libvirtError as e:
2179 error_text = e.get_error_message()
2180 self.logger.error("restore_iface '%s' '%s' libvirt exception: %s", name, mac, error_text)
2181 ret=-1
2182 finally:
2183 if lib_conn is None and conn is not None:
2184 conn.close()
2185 return ret, error_text
2186
2187
    def create_image(self,dom, req):
        """Clone the disk of an instance into a new image file and record its
        path/status at the 'images' DB table.

        :param dom: libvirt domain of the source instance (not used in this method)
        :param req: action request; req['action']['createImage'] holds 'source'
            (with 'image_id' and 'path'), 'name' and optionally a fixed destination
            'path'; req['new_image']['uuid'] is the DB row to update
        """
        if self.test:
            if 'path' in req['action']['createImage']:
                file_dst = req['action']['createImage']['path']
            else:
                createImage=req['action']['createImage']
                img_name= createImage['source']['path']
                index=img_name.rfind('/')
                # NOTE(review): this test branch calls get_notused_path while the real
                # branch below uses get_notused_filename — confirm both helpers exist
                file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
            image_status='ACTIVE'
        else:
            # one retry is allowed to reconnect ssh if the session dropped mid-copy
            for retry in (0,1):
                try:
                    server_id = req['uuid']
                    createImage=req['action']['createImage']
                    # local path of the source disk of this instance
                    file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
                    if 'path' in req['action']['createImage']:
                        file_dst = req['action']['createImage']['path']
                    else:
                        img_name= createImage['source']['path']
                        index=img_name.rfind('/')
                        file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')

                    self.copy_file(file_orig, file_dst)
                    qemu_info = self.qemu_get_info(file_orig)
                    if 'backing file' in qemu_info:
                        # reverse-lookup the local backing path in the remote->local file
                        # map so the copy points at the original remote backing file
                        for k,v in self.localinfo['files'].items():
                            if v==qemu_info['backing file']:
                                self.qemu_change_backing(file_dst, k)
                                break
                    image_status='ACTIVE'
                    break
                except paramiko.ssh_exception.SSHException as e:
                    image_status='ERROR'
                    error_text = e.args[0]
                    self.logger.error("create_image id='%s' ssh Exception: %s", server_id, error_text)
                    if "SSH session not active" in error_text and retry==0:
                        self.ssh_connect()
                except Exception as e:
                    image_status='ERROR'
                    error_text = str(e)
                    self.logger.error("create_image id='%s' Exception: %s", server_id, error_text)

        #TODO insert a last_error at database
        # NOTE(review): if both retries fail before file_dst is assigned, the update
        # below raises UnboundLocalError — confirm whether that path can happen
        self.db_lock.acquire()
        self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
                            {'uuid':req['new_image']['uuid']}, log=True)
        self.db_lock.release()
2236
    def edit_iface(self, port_id, old_net, new_net):
        """Re-plug a SR-IOV (VF) dataplane interface so it takes the new network
        parameters: the interface is hot-detached from the running domain and,
        when a new network is given, hot-attached again with the new vlan.

        :param port_id: uuid of the port at the 'ports' DB table
        :param old_net: previous network id; when truthy the iface is detached
        :param new_net: new network id; when truthy the iface is (re)attached
        :return: None; errors are only logged
        """
        #This action imply remove and insert interface to put proper parameters
        if self.test:
            time.sleep(1)
        else:
            #get iface details
            self.db_lock.acquire()
            r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
                                    WHERE={'port_id': port_id})
            self.db_lock.release()
            if r<0:
                self.logger.error("edit_iface %s DDBB error: %s", port_id, c)
                return
            elif r==0:
                self.logger.error("edit_iface %s port not found", port_id)
                return
            port=c[0]
            # only VF model interfaces can be re-plugged this way
            if port["model"]!="VF":
                self.logger.error("edit_iface %s ERROR model must be VF", port_id)
                return
            #create xml detach file
            # self.xml_level controls the indentation used by pci2xml below
            xml=[]
            self.xml_level = 2
            xml.append("<interface type='hostdev' managed='yes'>")
            xml.append(" <mac address='" +port['mac']+ "'/>")
            xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
            xml.append('</interface>')


            try:
                conn=None
                conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
                dom = conn.lookupByUUIDString(port["instance_id"])
                if old_net:
                    text="\n".join(xml)
                    self.logger.debug("edit_iface detaching SRIOV interface " + text)
                    dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
                if new_net:
                    # rebuild the last part of the xml with the new vlan tag before attaching
                    xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
                    self.xml_level = 1
                    xml.append(self.pci2xml(port.get('vpci',None)) )
                    xml.append('</interface>')
                    text="\n".join(xml)
                    self.logger.debug("edit_iface attaching SRIOV interface " + text)
                    dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)

            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                self.logger.error("edit_iface %s libvirt exception: %s", port["instance_id"], text)

            finally:
                if conn is not None: conn.close()
2289
2290
def create_server(server, db, db_lock, only_of_ports):
    """Select a host/numa for a new server and build the resources dictionary to reserve.

    Translates the flavor/extended requirements (memory, cores/threads, dataplane
    interfaces) into concrete host resources: a numa, pinned cpu threads, SR-IOV and
    passthrough ports, plus the bridged control interfaces.

    :param server: server description ('flavor', 'image_id', 'networks', 'extended', ...)
    :param db: database object offering get_numas()/get_table()
    :param db_lock: lock that must be held around every db access
    :param only_of_ports: when True only openflow-connected dataplane ports are eligible
    :return: (0, resources) on success; (-1 or -2, error_text) on failure
    """
    extended = server.get('extended', None)
    requirements={}
    requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
    requirements['ram'] = server['flavor'].get('ram', 0)
    if requirements['ram']== None:
        requirements['ram'] = 0
    requirements['vcpus'] = server['flavor'].get('vcpus', 0)
    if requirements['vcpus']== None:
        requirements['vcpus'] = 0
    #If extended is not defined get requirements from flavor
    if extended is None:
        #If extended is defined in flavor convert to dictionary and use it
        if 'extended' in server['flavor'] and server['flavor']['extended'] != None:
            json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
            extended = json.loads(json_acceptable_string)
        else:
            extended = None
    #print json.dumps(extended, indent=4)

    #For simplicity only one numa VM are supported in the initial implementation
    if extended != None:
        numas = extended.get('numas', [])
        if len(numas)>1:
            return (-2, "Multi-NUMA VMs are not supported yet")
        #elif len(numas)<1:
        #    return (-1, "At least one numa must be specified")

        #a for loop is used in order to be ready to multi-NUMA VMs
        request = []
        for numa in numas:
            numa_req = {}
            numa_req['memory'] = numa.get('memory', 0)
            if 'cores' in numa:
                numa_req['proc_req_nb'] = numa['cores']                     #number of cores or threads to be reserved
                numa_req['proc_req_type'] = 'cores'                         #indicates whether cores or threads must be reserved
                numa_req['proc_req_list'] = numa.get('cores-id', None)      #list of ids to be assigned to the cores or threads
            elif 'paired-threads' in numa:
                numa_req['proc_req_nb'] = numa['paired-threads']
                numa_req['proc_req_type'] = 'paired-threads'
                numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
            elif 'threads' in numa:
                numa_req['proc_req_nb'] = numa['threads']
                numa_req['proc_req_type'] = 'threads'
                numa_req['proc_req_list'] = numa.get('threads-id', None)
            else:
                numa_req['proc_req_nb'] = 0 # by default
                numa_req['proc_req_type'] = 'threads'

            #Generate a list of sriov and another for physical interfaces
            interfaces = numa.get('interfaces', [])
            sriov_list = []
            port_list = []
            for iface in interfaces:
                iface['bandwidth'] = int(iface['bandwidth'])
                if iface['dedicated'][:3]=='yes':
                    port_list.append(iface)
                else:
                    sriov_list.append(iface)

            #Save lists ordered from more restrictive to less bw requirements
            numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
            numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)

            request.append(numa_req)

        # print "----------\n"+json.dumps(request[0], indent=4)
        # print '----------\n\n'

        #Search in db for an appropriate numa for each requested numa
        #at the moment multi-NUMA VMs are not supported
        if len(request)>0:
            requirements['numa'].update(request[0])
            if requirements['numa']['memory']>0:
                requirements['ram']=0  #By the moment I make incompatible ask for both Huge and non huge pages memory
            elif requirements['ram']==0:
                return (-1, "Memory information not set neither at extended field not at ram")
            if requirements['numa']['proc_req_nb']>0:
                requirements['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
            elif requirements['vcpus']==0:
                return (-1, "Processor information not set neither at extended field not at vcpus")


    db_lock.acquire()
    result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
    db_lock.release()

    if result == -1:
        return (-1, content)

    numa_id = content['numa_id']
    host_id = content['host_id']

    #obtain threads_id and calculate pinning
    cpu_pinning = []
    reserved_threads=[]
    if requirements['numa']['proc_req_nb']>0:
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_core',
                                       SELECT=('id','core_id','thread_id'),
                                       WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} )
        db_lock.release()
        if result <= 0:
            #print content
            return -1, content

        #convert rows to a dictionary indexed by core_id
        cores_dict = {}
        for row in content:
            if not row['core_id'] in cores_dict:
                cores_dict[row['core_id']] = []
            cores_dict[row['core_id']].append([row['thread_id'],row['id']])

        #In case full cores are requested
        paired = 'N'
        if requirements['numa']['proc_req_type'] == 'cores':
            #Get/create the list of the vcpu_ids
            vcpu_id_list = requirements['numa']['proc_req_list']
            if vcpu_id_list == None:
                vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))

            for threads in cores_dict.itervalues():
                #we need full cores
                if len(threads) != 2:
                    continue

                #set pinning for the first thread
                cpu_pinning.append( [ vcpu_id_list.pop(0), threads[0][0], threads[0][1] ] )

                #reserve so it is not used the second thread
                reserved_threads.append(threads[1][1])

                if len(vcpu_id_list) == 0:
                    break

        #In case paired threads are requested
        elif requirements['numa']['proc_req_type'] == 'paired-threads':
            paired = 'Y'
            #Get/create the list of the vcpu_ids
            if requirements['numa']['proc_req_list'] != None:
                vcpu_id_list = []
                for pair in requirements['numa']['proc_req_list']:
                    if len(pair)!=2:
                        # (an unreachable bare 'return' that followed this line was removed)
                        return -1, "Field paired-threads-id not properly specified"
                    vcpu_id_list.append(pair[0])
                    vcpu_id_list.append(pair[1])
            else:
                vcpu_id_list = range(0,2*int(requirements['numa']['proc_req_nb']))

            for threads in cores_dict.itervalues():
                #we need full cores
                if len(threads) != 2:
                    continue
                #set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                #set pinning for the second thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])

                if len(vcpu_id_list) == 0:
                    break

        #In case normal threads are requested
        elif requirements['numa']['proc_req_type'] == 'threads':
            #Get/create the list of the vcpu_ids
            vcpu_id_list = requirements['numa']['proc_req_list']
            if vcpu_id_list == None:
                vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))

            # visit cores with fewer free threads first to keep full cores available
            for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
                threads = cores_dict[threads_index]
                #set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                #if exists, set pinning for the second thread
                if len(threads) == 2 and len(vcpu_id_list) != 0:
                    cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])

                if len(vcpu_id_list) == 0:
                    break

    #Get the source pci addresses for the selected numa
    used_sriov_ports = []
    for port in requirements['numa']['sriov_list']:
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
        db_lock.release()
        if result <= 0:
            #print content
            return -1, content
        for row in content:
            if row['id'] in used_sriov_ports or row['id']==port['port_id']:
                continue
            port['pci'] = row['pci']
            if 'mac_address' not in port:
                port['mac_address'] = row['mac']
            del port['mac']
            port['port_id']=row['id']
            port['Mbps_used'] = port['bandwidth']
            used_sriov_ports.append(row['id'])
            break

    for port in requirements['numa']['port_list']:
        port['Mbps_used'] = None
        if port['dedicated'] != "yes:sriov":
            port['mac_address'] = port['mac']
            del port['mac']
            continue
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
        db_lock.release()
        if result <= 0:
            #print content
            return -1, content
        # NOTE(review): Mbps is taken from content[0] even though a different row may be
        # selected below — confirm all rows of the same root share the same Mbps
        port['Mbps_used'] = content[0]['Mbps']
        for row in content:
            if row['id'] in used_sriov_ports or row['id']==port['port_id']:
                continue
            port['pci'] = row['pci']
            if 'mac_address' not in port:
                port['mac_address'] = row['mac'] # mac cannot be set to passthrough ports
            del port['mac']
            port['port_id']=row['id']
            used_sriov_ports.append(row['id'])
            break

    # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
    # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)

    server['host_id'] = host_id

    #Generate dictionary for saving in db the instance resources
    resources = {}
    resources['bridged-ifaces'] = []

    numa_dict = {}
    numa_dict['interfaces'] = []

    numa_dict['interfaces'] += requirements['numa']['port_list']
    numa_dict['interfaces'] += requirements['numa']['sriov_list']

    #Check bridge information
    unified_dataplane_iface=[]
    unified_dataplane_iface += requirements['numa']['port_list']
    unified_dataplane_iface += requirements['numa']['sriov_list']

    for control_iface in server.get('networks', []):
        control_iface['net_id']=control_iface.pop('uuid')
        #Get the brifge name
        db_lock.acquire()
        result, content = db.get_table(FROM='nets',
                                       SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp','dhcp_first_ip',
                                               'dhcp_last_ip', 'cidr', 'gateway_ip', 'dns', 'links', 'routes'),
                                       WHERE={'uuid': control_iface['net_id']})
        db_lock.release()
        if result < 0:
            # NOTE(review): DB errors are silently ignored here and the iface is left
            # unclassified — confirm this is intended
            pass
        elif result==0:
            return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface['net_id']
        else:
            network=content[0]
            if control_iface.get("type", 'virtual') == 'virtual':
                if network['type']!='bridge_data' and network['type']!='bridge_man':
                    return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
                resources['bridged-ifaces'].append(control_iface)
                if network.get("provider") and network["provider"][0:3] == "OVS":
                    control_iface["type"] = "instance:ovs"
                else:
                    control_iface["type"] = "instance:bridge"
                if network.get("vlan"):
                    control_iface["vlan"] = network["vlan"]

                if network.get("enable_dhcp") == 'true':
                    control_iface["enable_dhcp"] = network.get("enable_dhcp")
                    control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
                    control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
                    control_iface["cidr"] = network["cidr"]

                if network.get("dns"):
                    control_iface["dns"] = yaml.safe_load(network.get("dns"))
                if network.get("links"):
                    control_iface["links"] = yaml.safe_load(network.get("links"))
                if network.get("routes"):
                    control_iface["routes"] = yaml.safe_load(network.get("routes"))
            else:
                if network['type']!='data' and network['type']!='ptp':
                    return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
                #dataplane interface, look for it in the numa tree and asign this network
                iface_found=False
                for dataplane_iface in numa_dict['interfaces']:
                    if dataplane_iface['name'] == control_iface.get("name"):
                        if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
                            (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
                            (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared") :
                            return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
                                (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
                        dataplane_iface['uuid'] = control_iface['net_id']
                        if dataplane_iface['dedicated'] == "no":
                            dataplane_iface['vlan'] = network['vlan']
                        if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
                            dataplane_iface['mac_address'] = control_iface.get("mac_address")
                        if control_iface.get("vpci"):
                            dataplane_iface['vpci'] = control_iface.get("vpci")
                        iface_found=True
                        break
                if not iface_found:
                    return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface.get("name")

    resources['host_id'] = host_id
    resources['image_id'] = server['image_id']
    resources['flavor_id'] = server['flavor_id']
    resources['tenant_id'] = server['tenant_id']
    resources['ram'] = requirements['ram']
    resources['vcpus'] = requirements['vcpus']
    resources['status'] = 'CREATING'

    if 'description' in server: resources['description'] = server['description']
    if 'name' in server: resources['name'] = server['name']

    resources['extended'] = {}  #optional
    resources['extended']['numas'] = []
    numa_dict['numa_id'] = numa_id
    numa_dict['memory'] = requirements['numa']['memory']
    numa_dict['cores'] = []

    for core in cpu_pinning:
        numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
    for core in reserved_threads:
        numa_dict['cores'].append({'id': core})
    resources['extended']['numas'].append(numa_dict)
    if extended!=None and 'devices' in extended:   #TODO allow extra devices without numa
        resources['extended']['devices'] = extended['devices']


    # '===================================={'
    #print json.dumps(resources, indent=4)
    #print '====================================}'

    return 0, resources
2634