34e8f07a608772d9c03e7265a6fd845f64825d0d
[osm/openvim.git] / osm_openvim / host_thread.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
'''
This is a thread that interacts with the host and with libvirt to manage VMs.
One thread is launched per host.
'''
28 __author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__ = "$10-jul-2014 12:07:15$"
30
31 import json
32 import yaml
33 import threading
34 import time
35 import Queue
36 import paramiko
37 import subprocess
38 # import libvirt
39 import imp
40 import random
41 import os
42 import logging
43 from jsonschema import validate as js_v, exceptions as js_e
44 from vim_schema import localinfo_schema, hostinfo_schema
45
class RunCommandException(Exception):
    """Raised by host_thread.run_command when executing a command locally or over SSH fails."""
    pass
48
49 class host_thread(threading.Thread):
50 lvirt_module = None
51
    def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
                 develop_bridge_iface, password=None, keyfile = None, logger_name=None, debug=None, hypervisors=None):
        """Init a thread to communicate with compute node or ovs_controller.
        :param host_id: host identity
        :param name: name of the thread
        :param host: host ip or name to manage and user
        :param user, password, keyfile: user and credentials to connect to host
        :param db, db_lock': database class and lock to use it in exclusion
        :param test: when True the thread runs in fake/test mode (no SSH, no libvirt)
        :param image_path: directory on the host where images and .openvim.yaml live
        :param hypervisors: hypervisor type(s); membership test for "xen" decides the libvirt URI
        """
        threading.Thread.__init__(self)
        self.name = name
        self.host = host
        self.user = user
        self.db = db
        self.db_lock = db_lock
        self.test = test
        self.password = password
        self.keyfile = keyfile
        self.localinfo_dirty = False   # True when self.localinfo must be written back to the host
        self.connectivity = True       # set to False by check_connectivity() on SSH/command failure

        # lazy-load the libvirt python binding once per process (class attribute),
        # only when not in test mode so test deployments need not install it
        if not test and not host_thread.lvirt_module:
            try:
                module_info = imp.find_module("libvirt")
                host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
            except (IOError, ImportError) as e:
                raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))
        if logger_name:
            self.logger_name = logger_name
        else:
            self.logger_name = "openvim.host."+name
        self.logger = logging.getLogger(self.logger_name)
        if debug:
            self.logger.setLevel(getattr(logging, debug))


        self.develop_mode = develop_mode
        self.develop_bridge_iface = develop_bridge_iface
        self.image_path = image_path
        self.empty_image_path = image_path
        self.host_id = host_id
        self.version = version

        self.xml_level = 0             # indentation depth used by tab()/inc_tab()/dec_tab()
        # self.pending ={}

        self.server_status = {} #dictionary with pairs server_uuid:server_status
        self.pending_terminate_server =[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
        self.next_update_server_status = 0 #time when must be check servers status

        ####### self.hypervisor = "kvm" #hypervisor flag (default: kvm)
        if hypervisors:
            self.hypervisors = hypervisors
        else:
            self.hypervisors = "kvm"

        # works whether hypervisors is a list or a comma-separated string
        self.xen_hyp = True if "xen" in self.hypervisors else False

        self.hostinfo = None

        self.queueLock = threading.Lock()
        self.taskQueue = Queue.Queue(2000)
        self.ssh_conn = None
        self.run_command_session = None   # open keep_session handle, see run_command()
        self.error = None
        self.localhost = True if host == 'localhost' else False

        # libvirt connection URI differs between xen and qemu/kvm hosts
        if self.xen_hyp:
            self.lvirt_conn_uri = "xen+ssh://{user}@{host}/?no_tty=1&no_verify=1".format(
                user=self.user, host=self.host)
        else:
            self.lvirt_conn_uri = "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
                user=self.user, host=self.host)
        if keyfile:
            self.lvirt_conn_uri += "&keyfile=" + keyfile

        # endpoints of the SSH TCP connection, filled in by ssh_connect()
        self.remote_ip = None
        self.local_ip = None
130
    def run_command(self, command, keep_session=False, ignore_exit_status=False):
        """Run a command passed as a str on a localhost or at remote machine.
        :param command: text with the command to execute.
        :param keep_session: if True it returns a <stdin> for sending input with '<stdin>.write("text\n")'.
            A command with keep_session=True MUST be followed by a command with keep_session=False in order to
            close the session and get the output
        :param ignore_exit_status: Return stdout and not raise an exepction in case of error.
        :return: the output of the command if 'keep_session=False' or the <stdin> object if 'keep_session=True'
        :raises: RunCommandException if command fails
        """
        # a keep_session command cannot be issued while another session is still open
        if self.run_command_session and keep_session:
            raise RunCommandException("Internal error. A command with keep_session=True must be followed by another "
                                      "command with keep_session=False to close session")
        try:
            if self.localhost:
                # local execution through subprocess
                if self.run_command_session:
                    # close the previously opened keep_session and collect its output
                    p = self.run_command_session
                    self.run_command_session = None
                    (output, outerror) = p.communicate()
                    returncode = p.returncode
                    p.stdin.close()
                elif keep_session:
                    # open a session: return stdin so the caller can stream data in
                    p = subprocess.Popen(('bash', "-c", command), stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                    self.run_command_session = p
                    return p.stdin
                else:
                    if not ignore_exit_status:
                        output = subprocess.check_output(('bash', "-c", command))
                        returncode = 0
                    else:
                        out = None
                        p = subprocess.Popen(('bash', "-c", command), stdout=subprocess.PIPE)
                        out, err = p.communicate()
                        return out
            else:
                # remote execution over paramiko SSH
                if self.run_command_session:
                    # close the previously opened keep_session and collect its output
                    (i, o, e) = self.run_command_session
                    self.run_command_session = None
                    i.channel.shutdown_write()
                else:
                    if not self.ssh_conn:
                        self.ssh_connect()
                    (i, o, e) = self.ssh_conn.exec_command(command, timeout=10)
                    if keep_session:
                        self.run_command_session = (i, o, e)
                        return i
                returncode = o.channel.recv_exit_status()
                output = o.read()
                outerror = e.read()
            if returncode != 0 and not ignore_exit_status:
                text = "run_command='{}' Error='{}'".format(command, outerror)
                self.logger.error(text)
                raise RunCommandException(text)

            self.logger.debug("run_command='{}' result='{}'".format(command, output))
            return output

        except RunCommandException:
            raise
        except subprocess.CalledProcessError as e:
            text = "run_command Exception '{}' '{}'".format(str(e), e.output)
        except (paramiko.ssh_exception.SSHException, Exception) as e:
            text = "run_command='{}' Exception='{}'".format(command, str(e))
        # any non-RunCommandException failure invalidates the SSH connection and session,
        # then is re-raised wrapped as RunCommandException
        self.ssh_conn = None
        self.run_command_session = None
        raise RunCommandException(text)
198
    def ssh_connect(self):
        """Open a paramiko SSH connection to self.host and cache both TCP endpoints.
        On failure stores the error text in self.error, resets self.ssh_conn and re-raises.
        """
        try:
            # Connect SSH
            self.ssh_conn = paramiko.SSHClient()
            self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())   # accept unknown host keys
            self.ssh_conn.load_system_host_keys()
            self.ssh_conn.connect(self.host, username=self.user, password=self.password, key_filename=self.keyfile,
                                  timeout=10) # auth_timeout=10)
            # remember both ends of the TCP connection (e.g. used for vxlan tunnel endpoints)
            self.remote_ip = self.ssh_conn.get_transport().sock.getpeername()[0]
            self.local_ip = self.ssh_conn.get_transport().sock.getsockname()[0]
        except (paramiko.ssh_exception.SSHException, Exception) as e:
            text = 'ssh connect Exception: {}'.format(e)
            self.ssh_conn = None
            self.error = text
            raise
214
215 def check_connectivity(self):
216 if not self.test:
217 try:
218 command = 'sudo brctl show'
219 self.run_command(command)
220 except RunCommandException as e:
221 self.connectivity = False
222 self.logger.error("check_connectivity Exception: " + str(e))
223
    def load_localinfo(self):
        """Load and validate {image_path}/.openvim.yaml from the host into self.localinfo.
        On any failure falls back to an empty default structure.
        """
        if not self.test:
            try:
                self.run_command('sudo mkdir -p ' + self.image_path)
                result = self.run_command('cat {}/.openvim.yaml'.format(self.image_path))
                # NOTE(review): yaml.load without an explicit SafeLoader is unsafe if this
                # file can be tampered with on the host; consider yaml.safe_load
                self.localinfo = yaml.load(result)
                js_v(self.localinfo, localinfo_schema)
                self.localinfo_dirty = False
                if 'server_files' not in self.localinfo:
                    self.localinfo['server_files'] = {}
                self.logger.debug("localinfo loaded from host")
                return
            except RunCommandException as e:
                self.logger.error("load_localinfo Exception: " + str(e))
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                self.logger.error("load_localinfo libvirt Exception: " + text)
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                self.logger.error("load_localinfo yaml format Exception " + text)
            except js_e.ValidationError as e:
                text = ""
                if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
                self.logger.error("load_localinfo format Exception: %s %s", text, str(e))
            except Exception as e:
                text = str(e)
                self.logger.error("load_localinfo Exception: " + text)

        # not loaded, insert a default data and force saving by activating dirty flag
        self.localinfo = {'files':{}, 'server_files':{} }
        # self.localinfo_dirty=True
        self.localinfo_dirty=False
259
    def load_hostinfo(self):
        """Load and validate {image_path}/hostinfo.yaml into self.hostinfo (None on failure)."""
        if self.test:
            return
        try:
            result = self.run_command('cat {}/hostinfo.yaml'.format(self.image_path))
            # NOTE(review): yaml.load without an explicit SafeLoader is unsafe if this
            # file can be tampered with on the host; consider yaml.safe_load
            self.hostinfo = yaml.load(result)
            js_v(self.hostinfo, hostinfo_schema)
            self.logger.debug("hostinfo load from host " + str(self.hostinfo))
            return
        except RunCommandException as e:
            self.logger.error("load_hostinfo ssh Exception: " + str(e))
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            self.logger.error("load_hostinfo libvirt Exception: " + text)
        except yaml.YAMLError as exc:
            text = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
            self.logger.error("load_hostinfo yaml format Exception " + text)
        except js_e.ValidationError as e:
            text = ""
            if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
            self.logger.error("load_hostinfo format Exception: %s %s", text, str(e))
        except Exception as e:
            text = str(e)
            self.logger.error("load_hostinfo Exception: " + text)

        #not loaded, insert a default data
        self.hostinfo = None
290
    def save_localinfo(self, tries=3):
        """Write self.localinfo back to {image_path}/.openvim.yaml on the host.
        :param tries: number of attempts before giving up (dirty flag stays set on failure)
        """
        if self.test:
            self.localinfo_dirty = False
            return

        while tries>=0:
            tries-=1

            try:
                # open a keep_session 'cat > file', stream the yaml through stdin,
                # then issue the same command with keep_session=False to close the session
                command = 'cat > {}/.openvim.yaml'.format(self.image_path)
                in_stream = self.run_command(command, keep_session=True)
                yaml.safe_dump(self.localinfo, in_stream, explicit_start=True, indent=4, default_flow_style=False,
                               tags=False, encoding='utf-8', allow_unicode=True)
                result = self.run_command(command, keep_session=False)   # to end session

                self.localinfo_dirty = False
                break #while tries

            except RunCommandException as e:
                self.logger.error("save_localinfo ssh Exception: " + str(e))
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                self.logger.error("save_localinfo libvirt Exception: " + text)
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                self.logger.error("save_localinfo yaml format Exception " + text)
            except Exception as e:
                text = str(e)
                self.logger.error("save_localinfo Exception: " + text)
323
    def load_servers_from_db(self):
        """Load the instances hosted here from the database into self.server_status.
        Also migrates the legacy localinfo 'inc_files' layout to 'server_files'.
        """
        self.db_lock.acquire()
        r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
        self.db_lock.release()

        self.server_status = {}
        if r<0:
            self.logger.error("Error getting data from database: " + c)
            return
        for server in c:
            self.server_status[ server['uuid'] ] = server['status']

            #convert from old version to new one
            if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
                server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
                if server_files_dict['source file'][-5:] == 'qcow2':
                    server_files_dict['file format'] = 'qcow2'

                self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
        # drop the legacy key once every server has been migrated
        if 'inc_files' in self.localinfo:
            del self.localinfo['inc_files']
            self.localinfo_dirty = True
346
    def delete_unused_files(self):
        '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
        Deletes unused entries at self.loacalinfo and the corresponding local files.
        The only reason for this mismatch is the manual deletion of instances (VM) at database
        '''
        if self.test:
            return
        # python2: .items() returns a list copy, so deleting entries while iterating is safe
        # (under python3 this would need list(...) around items())
        for uuid,images in self.localinfo['server_files'].items():
            if uuid not in self.server_status:
                for localfile in images.values():
                    try:
                        self.logger.debug("deleting file '%s' of unused server '%s'", localfile['source file'], uuid)
                        self.delete_file(localfile['source file'])
                    except RunCommandException as e:
                        self.logger.error("Exception deleting file '%s': %s", localfile['source file'], str(e))
                del self.localinfo['server_files'][uuid]
                self.localinfo_dirty = True
364
365 def insert_task(self, task, *aditional):
366 try:
367 self.queueLock.acquire()
368 task = self.taskQueue.put( (task,) + aditional, timeout=5)
369 self.queueLock.release()
370 return 1, None
371 except Queue.Full:
372 return -1, "timeout inserting a task over host " + self.name
373
    def run(self):
        """Thread main loop: load host/local state, then consume tasks from taskQueue.
        When idle it flushes dirty localinfo, refreshes server status every 5s and
        processes pending forced power-offs. A 'reload' task restarts the outer loop;
        an 'exit' task terminates the thread.
        """
        while True:
            self.load_localinfo()
            self.load_hostinfo()
            self.load_servers_from_db()
            self.delete_unused_files()
            while True:
                try:
                    # non-blocking fetch of the next task (lock protects against insert_task)
                    self.queueLock.acquire()
                    if not self.taskQueue.empty():
                        task = self.taskQueue.get()
                    else:
                        task = None
                    self.queueLock.release()

                    if task is None:
                        # idle housekeeping: save state, poll servers, forced power-off, or sleep
                        now=time.time()
                        if self.localinfo_dirty:
                            self.save_localinfo()
                        elif self.next_update_server_status < now:
                            self.update_servers_status()
                            self.next_update_server_status = now + 5
                        elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
                            self.server_forceoff()
                        else:
                            time.sleep(1)
                        continue

                    if task[0] == 'instance':
                        self.logger.debug("processing task instance " + str(task[1]['action']))
                        retry = 0
                        while retry < 2:
                            retry += 1
                            # second (last) attempt is signalled to action_on_server
                            r = self.action_on_server(task[1], retry==2)
                            if r >= 0:
                                break
                    elif task[0] == 'image':
                        pass
                    elif task[0] == 'exit':
                        self.logger.debug("processing task exit")
                        self.terminate()
                        return 0
                    elif task[0] == 'reload':
                        self.logger.debug("processing task reload terminating and relaunching")
                        self.terminate()
                        break
                    elif task[0] == 'edit-iface':
                        self.logger.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
                            task[1], task[2], task[3]))
                        self.edit_iface(task[1], task[2], task[3])
                    elif task[0] == 'restore-iface':
                        self.logger.debug("processing task restore-iface={} mac={}".format(task[1], task[2]))
                        self.restore_iface(task[1], task[2])
                    elif task[0] == 'new-ovsbridge':
                        self.logger.debug("Creating compute OVS bridge")
                        self.create_ovs_bridge()
                    elif task[0] == 'new-vxlan':
                        self.logger.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task[1], task[2]))
                        self.create_ovs_vxlan_tunnel(task[1], task[2])
                    elif task[0] == 'del-ovsbridge':
                        self.logger.debug("Deleting OVS bridge")
                        self.delete_ovs_bridge()
                    elif task[0] == 'del-vxlan':
                        self.logger.debug("Deleting vxlan {} tunnel".format(task[1]))
                        self.delete_ovs_vxlan_tunnel(task[1])
                    elif task[0] == 'create-ovs-bridge-port':
                        self.logger.debug("Adding port ovim-{} to OVS bridge".format(task[1]))
                        self.create_ovs_bridge_port(task[1])
                    elif task[0] == 'del-ovs-port':
                        self.logger.debug("Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2]))
                        self.delete_bridge_port_attached_to_ovs(task[1], task[2])
                    else:
                        self.logger.debug("unknown task " + str(task))

                except Exception as e:
                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
450
    def server_forceoff(self, wait_until_finished=False):
        """Send a forced terminate to every server in pending_terminate_server whose
        scheduled time has arrived.
        :param wait_until_finished: if True, sleep until every pending entry is due and
            processed; if False, stop at the first entry not yet due.
        """
        while len(self.pending_terminate_server)>0:
            now = time.time()
            if self.pending_terminate_server[0][0]>now:
                # first entry not due yet
                if wait_until_finished:
                    time.sleep(1)
                    continue
                else:
                    return
            req={'uuid':self.pending_terminate_server[0][1],
                 'action':{'terminate':'force'},
                 'status': None
            }
            self.action_on_server(req)
            self.pending_terminate_server.pop(0)
466
467 def terminate(self):
468 try:
469 self.server_forceoff(True)
470 if self.localinfo_dirty:
471 self.save_localinfo()
472 if not self.test:
473 self.ssh_conn.close()
474 except Exception as e:
475 text = str(e)
476 self.logger.error("terminate Exception: " + text)
477 self.logger.debug("exit from host_thread")
478
479 def get_local_iface_name(self, generic_name):
480 if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
481 return self.hostinfo["iface_names"][generic_name]
482 return generic_name
483
484 def create_xml_server(self, server, dev_list, server_metadata={}):
485 """Function that implements the generation of the VM XML definition.
486 Additional devices are in dev_list list
487 The main disk is upon dev_list[0]"""
488
489 #get if operating system is Windows
490 windows_os = False
491 os_type = server_metadata.get('os_type', None)
492 if os_type == None and 'metadata' in dev_list[0]:
493 os_type = dev_list[0]['metadata'].get('os_type', None)
494 if os_type != None and os_type.lower() == "windows":
495 windows_os = True
496 #get type of hard disk bus
497 bus_ide = True if windows_os else False
498 bus = server_metadata.get('bus', None)
499 if bus == None and 'metadata' in dev_list[0]:
500 bus = dev_list[0]['metadata'].get('bus', None)
501 if bus != None:
502 bus_ide = True if bus=='ide' else False
503
504 self.xml_level = 0
505 hypervisor = server.get('hypervisor', 'kvm')
506 os_type_img = server.get('os_image_type', 'other')
507
508 if hypervisor[:3] == 'xen':
509 text = "<domain type='xen'>"
510 else:
511 text = "<domain type='kvm'>"
512 #get topology
513 topo = server_metadata.get('topology', None)
514 if topo == None and 'metadata' in dev_list[0]:
515 topo = dev_list[0]['metadata'].get('topology', None)
516 #name
517 name = server.get('name', '')[:28] + "_" + server['uuid'][:28] #qemu impose a length limit of 59 chars or not start. Using 58
518 text += self.inc_tab() + "<name>" + name+ "</name>"
519 #uuid
520 text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>"
521
522 numa={}
523 if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
524 numa = server['extended']['numas'][0]
525 #memory
526 use_huge = False
527 memory = int(numa.get('memory',0))*1024*1024 #in KiB
528 if memory==0:
529 memory = int(server['ram'])*1024;
530 else:
531 if not self.develop_mode:
532 use_huge = True
533 if memory==0:
534 return -1, 'No memory assigned to instance'
535 memory = str(memory)
536 text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>"
537 text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
538 if use_huge:
539 text += self.tab()+'<memoryBacking>'+ \
540 self.inc_tab() + '<hugepages/>'+ \
541 self.dec_tab()+ '</memoryBacking>'
542
543 #cpu
544 use_cpu_pinning=False
545 vcpus = int(server.get("vcpus",0))
546 cpu_pinning = []
547 if 'cores-source' in numa:
548 use_cpu_pinning=True
549 for index in range(0, len(numa['cores-source'])):
550 cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
551 vcpus += 1
552 if 'threads-source' in numa:
553 use_cpu_pinning=True
554 for index in range(0, len(numa['threads-source'])):
555 cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
556 vcpus += 1
557 if 'paired-threads-source' in numa:
558 use_cpu_pinning=True
559 for index in range(0, len(numa['paired-threads-source'])):
560 cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
561 cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
562 vcpus += 2
563
564 if use_cpu_pinning and not self.develop_mode:
565 text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
566 self.tab()+'<cputune>'
567 self.xml_level += 1
568 for i in range(0, len(cpu_pinning)):
569 text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
570 text += self.dec_tab()+'</cputune>'+ \
571 self.tab() + '<numatune>' +\
572 self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
573 self.dec_tab() + '</numatune>'
574 else:
575 if vcpus==0:
576 return -1, "Instance without number of cpus"
577 text += self.tab()+"<vcpu>" + str(vcpus) + "</vcpu>"
578
579 #boot
580 boot_cdrom = False
581 for dev in dev_list:
582 if dev['type']=='cdrom' :
583 boot_cdrom = True
584 break
585 if hypervisor == 'xenhvm':
586 text += self.tab()+ '<os>' + \
587 self.inc_tab() + "<type arch='x86_64' machine='xenfv'>hvm</type>"
588 text += self.tab() + "<loader type='rom'>/usr/lib/xen/boot/hvmloader</loader>"
589 if boot_cdrom:
590 text += self.tab() + "<boot dev='cdrom'/>"
591 text += self.tab() + "<boot dev='hd'/>" + \
592 self.dec_tab()+'</os>'
593 elif hypervisor == 'xen-unik':
594 text += self.tab()+ '<os>' + \
595 self.inc_tab() + "<type arch='x86_64' machine='xenpv'>xen</type>"
596 text += self.tab() + "<kernel>" + str(dev_list[0]['source file']) + "</kernel>" + \
597 self.dec_tab()+'</os>'
598 else:
599 text += self.tab()+ '<os>' + \
600 self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
601 if boot_cdrom:
602 text += self.tab() + "<boot dev='cdrom'/>"
603 text += self.tab() + "<boot dev='hd'/>" + \
604 self.dec_tab()+'</os>'
605 #features
606 text += self.tab()+'<features>'+\
607 self.inc_tab()+'<acpi/>' +\
608 self.tab()+'<apic/>' +\
609 self.tab()+'<pae/>'+ \
610 self.dec_tab() +'</features>'
611 if topo == "oneSocket:hyperthreading":
612 if vcpus % 2 != 0:
613 return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
614 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % (vcpus/2)
615 elif windows_os or topo == "oneSocket":
616 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
617 else:
618 text += self.tab() + "<cpu mode='host-model'></cpu>"
619 text += self.tab() + "<clock offset='utc'/>" +\
620 self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
621 self.tab() + "<on_reboot>restart</on_reboot>" + \
622 self.tab() + "<on_crash>restart</on_crash>"
623 if hypervisor == 'xenhvm':
624 text += self.tab() + "<devices>" + \
625 self.inc_tab() + "<emulator>/usr/bin/qemu-system-i386</emulator>" + \
626 self.tab() + "<serial type='pty'>" +\
627 self.inc_tab() + "<target port='0'/>" + \
628 self.dec_tab() + "</serial>" +\
629 self.tab() + "<console type='pty'>" + \
630 self.inc_tab()+ "<target type='serial' port='0'/>" + \
631 self.dec_tab()+'</console>' #In some libvirt version may be: <emulator>/usr/lib64/xen/bin/qemu-dm</emulator> (depends on distro)
632 elif hypervisor == 'xen-unik':
633 text += self.tab() + "<devices>" + \
634 self.tab() + "<console type='pty'>" + \
635 self.inc_tab()+ "<target type='xen' port='0'/>" + \
636 self.dec_tab()+'</console>'
637 else:
638 text += self.tab() + "<devices>" + \
639 self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
640 self.tab() + "<serial type='pty'>" +\
641 self.inc_tab() + "<target port='0'/>" + \
642 self.dec_tab() + "</serial>" +\
643 self.tab() + "<console type='pty'>" + \
644 self.inc_tab()+ "<target type='serial' port='0'/>" + \
645 self.dec_tab()+'</console>'
646 if windows_os:
647 text += self.tab() + "<controller type='usb' index='0'/>" + \
648 self.tab() + "<controller type='ide' index='0'/>" + \
649 self.tab() + "<input type='mouse' bus='ps2'/>" + \
650 self.tab() + "<sound model='ich6'/>" + \
651 self.tab() + "<video>" + \
652 self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
653 self.dec_tab() + "</video>" + \
654 self.tab() + "<memballoon model='virtio'/>" + \
655 self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
656 elif hypervisor == 'xen-unik':
657 pass
658 else:
659 text += self.tab() + "<controller type='ide' index='0'/>" + \
660 self.tab() + "<input type='mouse' bus='ps2'/>" + \
661 self.tab() + "<input type='keyboard' bus='ps2'/>" + \
662 self.tab() + "<video>" + \
663 self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
664 self.dec_tab() + "</video>"
665
666 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
667 #> self.dec_tab()+'</hostdev>\n' +\
668 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
669 if windows_os:
670 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
671 else:
672 #If image contains 'GRAPH' include graphics
673 #if 'GRAPH' in image:
674 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
675 self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
676 self.dec_tab() + "</graphics>"
677
678 vd_index = 'a'
679 for dev in dev_list:
680 bus_ide_dev = bus_ide
681 if (dev['type']=='cdrom' or dev['type']=='disk') and hypervisor != 'xen-unik':
682 if dev['type']=='cdrom':
683 bus_ide_dev = True
684 text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
685 if 'file format' in dev:
686 text += self.inc_tab() + "<driver name='qemu' type='" +dev['file format']+ "' cache='writethrough'/>"
687 if 'source file' in dev:
688 text += self.tab() + "<source file='" +dev['source file']+ "'/>"
689 #elif v['type'] == 'block':
690 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
691 #else:
692 # return -1, 'Unknown disk type ' + v['type']
693 vpci = dev.get('vpci',None)
694 if vpci == None and 'metadata' in dev:
695 vpci = dev['metadata'].get('vpci',None)
696 text += self.pci2xml(vpci)
697
698 if bus_ide_dev:
699 text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>" #TODO allows several type of disks
700 else:
701 text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>"
702 text += self.dec_tab() + '</disk>'
703 vd_index = chr(ord(vd_index)+1)
704 elif dev['type']=='xml':
705 dev_text = dev['xml']
706 if 'vpci' in dev:
707 dev_text = dev_text.replace('__vpci__', dev['vpci'])
708 if 'source file' in dev:
709 dev_text = dev_text.replace('__file__', dev['source file'])
710 if 'file format' in dev:
711 dev_text = dev_text.replace('__format__', dev['source file'])
712 if '__dev__' in dev_text:
713 dev_text = dev_text.replace('__dev__', vd_index)
714 vd_index = chr(ord(vd_index)+1)
715 text += dev_text
716 elif hypervisor == 'xen-unik':
717 pass
718 else:
719 return -1, 'Unknown device type ' + dev['type']
720
721 net_nb=0
722 bridge_interfaces = server.get('networks', [])
723 for v in bridge_interfaces:
724 #Get the brifge name
725 self.db_lock.acquire()
726 result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
727 self.db_lock.release()
728 if result <= 0:
729 self.logger.error("create_xml_server ERROR %d getting nets %s", result, content)
730 return -1, content
731 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
732 #I know it is not secure
733 #for v in sorted(desc['network interfaces'].itervalues()):
734 model = v.get("model", None)
735 if content[0]['provider']=='default':
736 text += self.tab() + "<interface type='network'>" + \
737 self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
738 elif content[0]['provider'][0:7]=='macvtap':
739 text += self.tab()+"<interface type='direct'>" + \
740 self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
741 self.tab() + "<target dev='macvtap0'/>"
742 if windows_os:
743 text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
744 elif model==None:
745 model = "virtio"
746 elif content[0]['provider'][0:6]=='bridge':
747 text += self.tab() + "<interface type='bridge'>" + \
748 self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
749 if windows_os:
750 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
751 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
752 elif model==None:
753 model = "virtio"
754 elif content[0]['provider'][0:3] == "OVS":
755 vlan = content[0]['provider'].replace('OVS:', '')
756 text += self.tab() + "<interface type='bridge'>" + \
757 self.inc_tab() + "<source bridge='ovim-" + str(vlan) + "'/>"
758 if hypervisor == 'xenhvm' or hypervisor == 'xen-unik':
759 text += self.tab() + "<script path='vif-openvswitch'/>"
760 else:
761 return -1, 'Unknown Bridge net provider ' + content[0]['provider']
762 if model!=None:
763 text += self.tab() + "<model type='" +model+ "'/>"
764 if v.get('mac_address', None) != None:
765 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
766 text += self.pci2xml(v.get('vpci',None))
767 text += self.dec_tab()+'</interface>'
768
769 net_nb += 1
770
771 interfaces = numa.get('interfaces', [])
772
773 net_nb=0
774 for v in interfaces:
775 if self.develop_mode: #map these interfaces to bridges
776 text += self.tab() + "<interface type='bridge'>" + \
777 self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
778 if windows_os:
779 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
780 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
781 else:
782 text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
783 if v.get('mac_address', None) != None:
784 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
785 text += self.pci2xml(v.get('vpci',None))
786 text += self.dec_tab()+'</interface>'
787 continue
788
789 if v['dedicated'] == 'yes': #passthrought
790 text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
791 self.inc_tab() + "<source>"
792 self.inc_tab()
793 text += self.pci2xml(v['source'])
794 text += self.dec_tab()+'</source>'
795 text += self.pci2xml(v.get('vpci',None))
796 if windows_os:
797 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
798 text += self.dec_tab()+'</hostdev>'
799 net_nb += 1
800 else: #sriov_interfaces
801 #skip not connected interfaces
802 if v.get("net_id") == None:
803 continue
804 text += self.tab() + "<interface type='hostdev' managed='yes'>"
805 self.inc_tab()
806 if v.get('mac_address', None) != None:
807 text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
808 text+= self.tab()+'<source>'
809 self.inc_tab()
810 text += self.pci2xml(v['source'])
811 text += self.dec_tab()+'</source>'
812 if v.get('vlan',None) != None:
813 text += self.tab() + "<vlan> <tag id='" + str(v['vlan']) + "'/> </vlan>"
814 text += self.pci2xml(v.get('vpci',None))
815 if windows_os:
816 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
817 text += self.dec_tab()+'</interface>'
818
819
820 text += self.dec_tab()+'</devices>'+\
821 self.dec_tab()+'</domain>'
822 return 0, text
823
824 def pci2xml(self, pci):
825 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
826 alows an empty pci text'''
827 if pci is None:
828 return ""
829 first_part = pci.split(':')
830 second_part = first_part[2].split('.')
831 return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
832 "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
833 "' function='0x" + second_part[1] + "'/>"
834
835 def tab(self):
836 """Return indentation according to xml_level"""
837 return "\n" + (' '*self.xml_level)
838
839 def inc_tab(self):
840 """Increment and return indentation according to xml_level"""
841 self.xml_level += 1
842 return self.tab()
843
844 def dec_tab(self):
845 """Decrement and return indentation according to xml_level"""
846 self.xml_level -= 1
847 return self.tab()
848
849 def create_ovs_bridge(self):
850 """
851 Create a bridge in compute OVS to allocate VMs
852 :return: True if success
853 """
854 if self.test or not self.connectivity:
855 return True
856
857 try:
858 self.run_command('sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true')
859
860 return True
861 except RunCommandException as e:
862 self.logger.error("create_ovs_bridge ssh Exception: " + str(e))
863 if "SSH session not active" in str(e):
864 self.ssh_connect()
865 return False
866
867 def delete_port_to_ovs_bridge(self, vlan, net_uuid):
868 """
869 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
870 :param vlan: vlan port id
871 :param net_uuid: network id
872 :return:
873 """
874
875 if self.test or not self.connectivity:
876 return True
877 try:
878 port_name = 'ovim-{}'.format(str(vlan))
879 command = 'sudo ovs-vsctl del-port br-int {}'.format(port_name)
880 self.run_command(command)
881 return True
882 except RunCommandException as e:
883 self.logger.error("delete_port_to_ovs_bridge ssh Exception: {}".format(str(e)))
884 return False
885
886 def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
887 """
888 Delete dhcp server process lining in namespace
889 :param vlan: segmentation id
890 :param net_uuid: network uuid
891 :param dhcp_path: conf fiel path that live in namespace side
892 :return:
893 """
894 if self.test or not self.connectivity:
895 return True
896 if not self.is_dhcp_port_free(vlan, net_uuid):
897 return True
898 try:
899 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
900 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
901 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
902
903 command = 'sudo ip netns exec {} cat {}'.format(dhcp_namespace, pid_file)
904 content = self.run_command(command, ignore_exit_status=True)
905 dns_pid = content.replace('\n', '')
906 command = 'sudo ip netns exec {} kill -9 {}'.format(dhcp_namespace, dns_pid)
907 self.run_command(command, ignore_exit_status=True)
908
909 except RunCommandException as e:
910 self.logger.error("delete_dhcp_server ssh Exception: " + str(e))
911 return False
912
913 def is_dhcp_port_free(self, host_id, net_uuid):
914 """
915 Check if any port attached to the a net in a vxlan mesh across computes nodes
916 :param host_id: host id
917 :param net_uuid: network id
918 :return: True if is not free
919 """
920 self.db_lock.acquire()
921 result, content = self.db.get_table(
922 FROM='ports',
923 WHERE={'type': 'instance:ovs', 'net_id': net_uuid}
924 )
925 self.db_lock.release()
926
927 if len(content) > 0:
928 return False
929 else:
930 return True
931
932 def is_port_free(self, host_id, net_uuid):
933 """
934 Check if there not ovs ports of a network in a compute host.
935 :param host_id: host id
936 :param net_uuid: network id
937 :return: True if is not free
938 """
939
940 self.db_lock.acquire()
941 result, content = self.db.get_table(
942 FROM='ports as p join instances as i on p.instance_id=i.uuid',
943 WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
944 )
945 self.db_lock.release()
946
947 if len(content) > 0:
948 return False
949 else:
950 return True
951
952 def add_port_to_ovs_bridge(self, vlan):
953 """
954 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
955 :param vlan: vlan port id
956 :return: True if success
957 """
958
959 if self.test:
960 return True
961 try:
962 port_name = 'ovim-{}'.format(str(vlan))
963 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(port_name, str(vlan))
964 self.run_command(command)
965 return True
966 except RunCommandException as e:
967 self.logger.error("add_port_to_ovs_bridge Exception: " + str(e))
968 return False
969
970 def delete_dhcp_port(self, vlan, net_uuid, dhcp_path):
971 """
972 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
973 :param vlan: segmentation id
974 :param net_uuid: network id
975 :return: True if success
976 """
977
978 if self.test:
979 return True
980
981 if not self.is_dhcp_port_free(vlan, net_uuid):
982 return True
983 self.delete_dhcp_interfaces(vlan, dhcp_path)
984 return True
985
986 def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
987 """
988 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
989 :param vlan:
990 :param net_uuid:
991 :return: True if success
992 """
993 if self.test:
994 return
995
996 if not self.is_port_free(vlan, net_uuid):
997 return True
998 self.delete_port_to_ovs_bridge(vlan, net_uuid)
999 self.delete_linux_bridge(vlan)
1000 return True
1001
1002 def delete_linux_bridge(self, vlan):
1003 """
1004 Delete a linux bridge in a scpecific compute.
1005 :param vlan: vlan port id
1006 :return: True if success
1007 """
1008
1009 if self.test:
1010 return True
1011 try:
1012 port_name = 'ovim-{}'.format(str(vlan))
1013 command = 'sudo ip link set dev ovim-{} down'.format(str(vlan))
1014 self.run_command(command)
1015
1016 command = 'sudo ip link delete {} && sudo brctl delbr {}'.format(port_name, port_name)
1017 self.run_command(command)
1018 return True
1019 except RunCommandException as e:
1020 self.logger.error("delete_linux_bridge Exception: {}".format(str(e)))
1021 return False
1022
1023 def remove_link_bridge_to_ovs(self, vlan, link):
1024 """
1025 Delete a linux provider net connection to tenatn net
1026 :param vlan: vlan port id
1027 :param link: link name
1028 :return: True if success
1029 """
1030
1031 if self.test:
1032 return True
1033 try:
1034 br_tap_name = '{}-vethBO'.format(str(vlan))
1035 br_ovs_name = '{}-vethOB'.format(str(vlan))
1036
1037 # Delete ovs veth pair
1038 command = 'sudo ip link set dev {} down'.format(br_ovs_name)
1039 self.run_command(command, ignore_exit_status=True)
1040
1041 command = 'sudo ovs-vsctl del-port br-int {}'.format(br_ovs_name)
1042 self.run_command(command)
1043
1044 # Delete br veth pair
1045 command = 'sudo ip link set dev {} down'.format(br_tap_name)
1046 self.run_command(command, ignore_exit_status=True)
1047
1048 # Delete br veth interface form bridge
1049 command = 'sudo brctl delif {} {}'.format(link, br_tap_name)
1050 self.run_command(command)
1051
1052 # Delete br veth pair
1053 command = 'sudo ip link set dev {} down'.format(link)
1054 self.run_command(command, ignore_exit_status=True)
1055
1056 return True
1057 except RunCommandException as e:
1058 self.logger.error("remove_link_bridge_to_ovs Exception: {}".format(str(e)))
1059 return False
1060
1061 def create_ovs_bridge_port(self, vlan):
1062 """
1063 Generate a linux bridge and attache the port to a OVS bridge
1064 :param vlan: vlan port id
1065 :return:
1066 """
1067 if self.test:
1068 return
1069 self.create_linux_bridge(vlan)
1070 self.add_port_to_ovs_bridge(vlan)
1071
1072 def create_linux_bridge(self, vlan):
1073 """
1074 Create a linux bridge with STP active
1075 :param vlan: netowrk vlan id
1076 :return:
1077 """
1078
1079 if self.test:
1080 return True
1081 try:
1082 port_name = 'ovim-{}'.format(str(vlan))
1083 command = 'sudo brctl show | grep {}'.format(port_name)
1084 result = self.run_command(command, ignore_exit_status=True)
1085 if not result:
1086 command = 'sudo brctl addbr {}'.format(port_name)
1087 self.run_command(command)
1088
1089 command = 'sudo brctl stp {} on'.format(port_name)
1090 self.run_command(command)
1091
1092 command = 'sudo ip link set dev {} up'.format(port_name)
1093 self.run_command(command)
1094 return True
1095 except RunCommandException as e:
1096 self.logger.error("create_linux_bridge ssh Exception: {}".format(str(e)))
1097 return False
1098
    def set_mac_dhcp_server(self, ip, mac, vlan, netmask, first_ip, dhcp_path):
        """
        Write into dhcp conf file a rule to assign a fixed ip to an specific MAC address
        :param ip: IP address asigned to a VM
        :param mac: VM vnic mac to be macthed with the IP received
        :param vlan: Segmentation id
        :param netmask: netmask value (not used in the body)
        :param first_ip: first IP of the range, bound to the namespace-side interface MAC
        :param dhcp_path: dhcp conf file path that live in namespace side
        :return: True if success; False on bad input or command failure
        """

        if self.test:
            return True

        dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
        dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
        # NOTE(review): the namespace name is joined twice (dhcp_path already ends in it);
        # confirm against launch_dhcp_server, which passes dhcp_path itself as --dhcp-hostsdir
        dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
        ns_interface = '{}-vethDO'.format(str(vlan))

        if not ip:
            return False
        try:
            # MAC of the namespace-side veth dnsmasq listens on
            command = 'sudo ip netns exec {} cat /sys/class/net/{}/address'.format(dhcp_namespace, ns_interface)
            iface_listen_mac = self.run_command(command, ignore_exit_status=True)

            # NOTE(review): python2 str-vs-int comparison makes this True for any string,
            # even an empty one -- presumably the intended check was len(iface_listen_mac) > 0
            if iface_listen_mac > 0:
                # look for an existing entry for the listen interface MAC
                command = 'sudo ip netns exec {} cat {} | grep -i {}'.format(dhcp_namespace,
                                                                             dhcp_hostsdir,
                                                                             iface_listen_mac)
                content = self.run_command(command, ignore_exit_status=True)
                if content == '':
                    # first write: pin the listen interface MAC to the first ip of the range
                    ip_data = iface_listen_mac.upper().replace('\n', '') + ',' + first_ip
                    dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)

                    command = 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace,
                                                                                           ip_data,
                                                                                           dhcp_hostsdir)
                    self.run_command(command)

                    # append the VM's MAC,IP rule
                    ip_data = mac.upper() + ',' + ip
                    command = 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace,
                                                                                           ip_data,
                                                                                           dhcp_hostsdir)
                    self.run_command(command, ignore_exit_status=False)

                    return True

                # NOTE(review): when the grep finds an entry, execution falls through the
                # 'if' and the method implicitly returns None, not True/False -- confirm
                return False
        except RunCommandException as e:
            self.logger.error("set_mac_dhcp_server ssh Exception: " + str(e))
            return False
1150
1151 def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
1152 """
1153 Delete into dhcp conf file the ip assigned to a specific MAC address
1154
1155 :param ip: IP address asigned to a VM
1156 :param mac: VM vnic mac to be macthed with the IP received
1157 :param vlan: Segmentation id
1158 :param dhcp_path: dhcp conf file path that live in namespace side
1159 :return:
1160 """
1161
1162 if self.test:
1163 return False
1164 try:
1165 dhcp_namespace = str(vlan) + '-dnsmasq'
1166 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1167 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1168
1169 if not ip:
1170 return False
1171
1172 ip_data = mac.upper() + ',' + ip
1173
1174 command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
1175 self.run_command(command)
1176
1177 return True
1178 except RunCommandException as e:
1179 self.logger.error("delete_mac_dhcp_server Exception: " + str(e))
1180 return False
1181
    def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway, dns_list=None, routes=None):
        """
        Launch (if not detected as already running) a dnsmasq instance inside the net namespace.
        :param self:
        :param vlan: Segmentation id
        :param ip_range: IP dhcp range: [first_ip, last_ip]
        :param netmask: network netmask
        :param dhcp_path: dhcp conf file path that live in namespace side
        :param gateway: Gateway address for dhcp net
        :param dns_list: dns list for dhcp server
        :param routes: routes list for dhcp server
        :return: True if success
        """

        if self.test:
            return True
        try:
            ns_interface = str(vlan) + '-vethDO'
            dhcp_namespace = str(vlan) + '-dnsmasq'
            dhcp_path = os.path.join(dhcp_path, dhcp_namespace, '')
            leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
            pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

            # dnsmasq range syntax: <first>,<last>,<netmask>
            dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask

            command = 'sudo ip netns exec {} mkdir -p {}'.format(dhcp_namespace, dhcp_path)
            self.run_command(command)

            # check if dnsmasq process is running
            dnsmasq_is_runing = False
            pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
            command = 'sudo ip netns exec ' + dhcp_namespace + ' ls ' + pid_path
            content = self.run_command(command, ignore_exit_status=True)

            # check if pid is runing
            if content:
                pid_path = content.replace('\n', '')
                # NOTE(review): this is plain concatenation, not .format(), so the literal
                # characters "{}" end up inside the grep pattern, and pids printed by awk are
                # grepped against a file path -- the check likely never matches, so dnsmasq
                # is relaunched every call. Confirm the intent was to grep the pid read from
                # the pid file.
                command = "ps aux | awk '{print $2 }' | grep {}" + pid_path
                dnsmasq_is_runing = self.run_command(command, ignore_exit_status=True)

            # dhcp option 3 is the default router
            gateway_option = ' --dhcp-option=3,' + gateway

            # dhcp option 121: classless static routes; a 'default' key overrides option 3
            dhcp_route_option = ''
            if routes:
                dhcp_route_option = ' --dhcp-option=121'
                for key, value in routes.iteritems():
                    if 'default' == key:
                        gateway_option = ' --dhcp-option=3,' + value
                    else:
                        dhcp_route_option += ',' + key + ',' + value
            # dhcp option 6: DNS servers
            dns_data = ''
            if dns_list:
                dns_data = ' --dhcp-option=6'
                for dns in dns_list:
                    dns_data += ',' + dns

            if not dnsmasq_is_runing:
                command = 'sudo ip netns exec ' + dhcp_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
                          '--interface=' + ns_interface + \
                          ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
                          ' --dhcp-range ' + dhcp_range + \
                          ' --pid-file=' + pid_file + \
                          ' --dhcp-leasefile=' + leases_path + \
                          ' --listen-address ' + ip_range[0] + \
                          gateway_option + \
                          dhcp_route_option + \
                          dns_data

                self.run_command(command)
            return True
        except RunCommandException as e:
            self.logger.error("launch_dhcp_server ssh Exception: " + str(e))
            return False
1255
1256 def delete_dhcp_interfaces(self, vlan, dhcp_path):
1257 """
1258 Delete a linux dnsmasq bridge and namespace
1259 :param vlan: netowrk vlan id
1260 :param dhcp_path:
1261 :return:
1262 """
1263 if self.test:
1264 return True
1265 try:
1266 br_veth_name ='{}-vethDO'.format(str(vlan))
1267 ovs_veth_name = '{}-vethOD'.format(str(vlan))
1268 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
1269
1270 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1271 command = 'sudo ovs-vsctl del-port br-int {}'.format(ovs_veth_name)
1272 self.run_command(command, ignore_exit_status=True) # to end session
1273
1274 command = 'sudo ip link set dev {} down'.format(ovs_veth_name)
1275 self.run_command(command, ignore_exit_status=True) # to end session
1276
1277 command = 'sudo ip link delete {} '.format(ovs_veth_name)
1278 self.run_command(command, ignore_exit_status=True)
1279
1280 command = 'sudo ip netns exec {} ip link set dev {} down'.format(dhcp_namespace, br_veth_name)
1281 self.run_command(command, ignore_exit_status=True)
1282
1283 command = 'sudo rm -rf {}'.format(dhcp_path)
1284 self.run_command(command)
1285
1286 command = 'sudo ip netns del {}'.format(dhcp_namespace)
1287 self.run_command(command)
1288
1289 return True
1290 except RunCommandException as e:
1291 self.logger.error("delete_dhcp_interfaces ssh Exception: {}".format(str(e)))
1292 return False
1293
1294 def create_dhcp_interfaces(self, vlan, ip_listen_address, netmask):
1295 """
1296 Create a linux bridge with STP active
1297 :param vlan: segmentation id
1298 :param ip_listen_address: Listen Ip address for the dhcp service, the tap interface living in namesapce side
1299 :param netmask: dhcp net CIDR
1300 :return: True if success
1301 """
1302 if self.test:
1303 return True
1304 try:
1305 ovs_veth_name = '{}-vethOD'.format(str(vlan))
1306 ns_veth = '{}-vethDO'.format(str(vlan))
1307 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
1308
1309 command = 'sudo ip netns add {}'.format(dhcp_namespace)
1310 self.run_command(command)
1311
1312 command = 'sudo ip link add {} type veth peer name {}'.format(ns_veth, ovs_veth_name)
1313 self.run_command(command)
1314
1315 command = 'sudo ip link set {} netns {}'.format(ns_veth, dhcp_namespace)
1316 self.run_command(command)
1317
1318 command = 'sudo ip netns exec {} ip link set dev {} up'.format(dhcp_namespace, ns_veth)
1319 self.run_command(command)
1320
1321 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(ovs_veth_name, str(vlan))
1322 self.run_command(command, ignore_exit_status=True)
1323
1324 command = 'sudo ip link set dev {} up'.format(ovs_veth_name)
1325 self.run_command(command)
1326
1327 command = 'sudo ip netns exec {} ip link set dev lo up'.format(dhcp_namespace)
1328 self.run_command(command)
1329
1330 command = 'sudo ip netns exec {} ifconfig {} {} netmask {}'.format(dhcp_namespace,
1331 ns_veth,
1332 ip_listen_address,
1333 netmask)
1334 self.run_command(command)
1335 return True
1336 except RunCommandException as e:
1337 self.logger.error("create_dhcp_interfaces ssh Exception: {}".format(str(e)))
1338 return False
1339
1340 def delete_qrouter_connection(self, vlan, link):
1341 """
1342 Delete qrouter Namesapce with all veth interfaces need it
1343 :param vlan:
1344 :param link:
1345 :return:
1346 """
1347
1348 if self.test:
1349 return True
1350 try:
1351 ns_qouter = '{}-qrouter'.format(str(vlan))
1352 qrouter_ovs_veth = '{}-vethOQ'.format(str(vlan))
1353 qrouter_ns_veth = '{}-vethQO'.format(str(vlan))
1354 qrouter_br_veth = '{}-vethBQ'.format(str(vlan))
1355 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1356
1357 command = 'sudo ovs-vsctl del-port br-int {}'.format(qrouter_ovs_veth)
1358 self.run_command(command, ignore_exit_status=True)
1359
1360 # down ns veth
1361 command = 'sudo ip netns exec {} ip link set dev {} down'.format(ns_qouter, qrouter_ns_veth)
1362 self.run_command(command, ignore_exit_status=True)
1363
1364 command = 'sudo ip netns exec {} ip link delete {} '.format(ns_qouter, qrouter_ns_veth)
1365 self.run_command(command, ignore_exit_status=True)
1366
1367 command = 'sudo ip netns del ' + ns_qouter
1368 self.run_command(command)
1369
1370 # down ovs veth interface
1371 command = 'sudo ip link set dev {} down'.format(qrouter_br_veth)
1372 self.run_command(command, ignore_exit_status=True)
1373
1374 # down br veth interface
1375 command = 'sudo ip link set dev {} down'.format(qrouter_ovs_veth)
1376 self.run_command(command, ignore_exit_status=True)
1377
1378 # delete veth interface
1379 command = 'sudo ip link delete {} '.format(link, qrouter_ovs_veth)
1380 self.run_command(command, ignore_exit_status=True)
1381
1382 # down br veth interface
1383 command = 'sudo ip link set dev {} down'.format(qrouter_ns_router_veth)
1384 self.run_command(command, ignore_exit_status=True)
1385
1386 # delete veth interface
1387 command = 'sudo ip link delete {} '.format(link, qrouter_ns_router_veth)
1388 self.run_command(command, ignore_exit_status=True)
1389
1390 # down br veth interface
1391 command = 'sudo brctl delif {} {}'.format(link, qrouter_br_veth)
1392 self.run_command(command)
1393
1394 # delete NS
1395 return True
1396 except RunCommandException as e:
1397 self.logger.error("delete_qrouter_connection ssh Exception: {}".format(str(e)))
1398 return False
1399
1400 def create_qrouter_ovs_connection(self, vlan, gateway, dhcp_cidr):
1401 """
1402 Create qrouter Namesapce with all veth interfaces need it between NS and OVS
1403 :param vlan:
1404 :param gateway:
1405 :return:
1406 """
1407
1408 if self.test:
1409 return True
1410
1411 try:
1412 ns_qouter = '{}-qrouter'.format(str(vlan))
1413 qrouter_ovs_veth ='{}-vethOQ'.format(str(vlan))
1414 qrouter_ns_veth = '{}-vethQO'.format(str(vlan))
1415
1416 # Create NS
1417 command = 'sudo ip netns add {}'.format(ns_qouter)
1418 self.run_command(command)
1419
1420 # Create pait veth
1421 command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_ns_veth, qrouter_ovs_veth)
1422 self.run_command(command, ignore_exit_status=True)
1423
1424 # up ovs veth interface
1425 command = 'sudo ip link set dev {} up'.format(qrouter_ovs_veth)
1426 self.run_command(command)
1427
1428 # add ovs veth to ovs br-int
1429 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(qrouter_ovs_veth, vlan)
1430 self.run_command(command)
1431
1432 # add veth to ns
1433 command = 'sudo ip link set {} netns {}'.format(qrouter_ns_veth, ns_qouter)
1434 self.run_command(command)
1435
1436 # up ns loopback
1437 command = 'sudo ip netns exec {} ip link set dev lo up'.format(ns_qouter)
1438 self.run_command(command)
1439
1440 # up ns veth
1441 command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_veth)
1442 self.run_command(command)
1443
1444 from netaddr import IPNetwork
1445 ip_tools = IPNetwork(dhcp_cidr)
1446 cidr_len = ip_tools.prefixlen
1447
1448 # set gw to ns veth
1449 command = 'sudo ip netns exec {} ip address add {}/{} dev {}'.format(ns_qouter, gateway, cidr_len, qrouter_ns_veth)
1450 self.run_command(command)
1451
1452 return True
1453
1454 except RunCommandException as e:
1455 self.logger.error("Create_dhcp_interfaces ssh Exception: {}".format(str(e)))
1456 return False
1457
1458 def add_ns_routes(self, vlan, routes):
1459 """
1460
1461 :param vlan:
1462 :param routes:
1463 :return:
1464 """
1465
1466 if self.test:
1467 return True
1468
1469 try:
1470 ns_qouter = '{}-qrouter'.format(str(vlan))
1471 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1472
1473 for key, value in routes.iteritems():
1474 # up ns veth
1475 if key == 'default':
1476 command = 'sudo ip netns exec {} ip route add {} via {} '.format(ns_qouter, key, value)
1477 else:
1478 command = 'sudo ip netns exec {} ip route add {} via {} dev {}'.format(ns_qouter, key, value,
1479 qrouter_ns_router_veth)
1480
1481 self.run_command(command)
1482
1483 return True
1484
1485 except RunCommandException as e:
1486 self.logger.error("add_ns_routes, error adding routes to namesapce, {}".format(str(e)))
1487 return False
1488
1489 def create_qrouter_br_connection(self, vlan, cidr, link):
1490 """
1491 Create veth interfaces between user bridge (link) and OVS
1492 :param vlan:
1493 :param link:
1494 :return:
1495 """
1496
1497 if self.test:
1498 return True
1499
1500 try:
1501 ns_qouter = '{}-qrouter'.format(str(vlan))
1502 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1503 qrouter_br_veth = '{}-vethBQ'.format(str(vlan))
1504
1505 command = 'sudo brctl show | grep {}'.format(link['iface'])
1506 content = self.run_command(command, ignore_exit_status=True)
1507
1508 if content > '':
1509 # Create pait veth
1510 command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_br_veth, qrouter_ns_router_veth)
1511 self.run_command(command)
1512
1513 # up ovs veth interface
1514 command = 'sudo ip link set dev {} up'.format(qrouter_br_veth)
1515 self.run_command(command)
1516
1517 # add veth to ns
1518 command = 'sudo ip link set {} netns {}'.format(qrouter_ns_router_veth, ns_qouter)
1519 self.run_command(command)
1520
1521 # up ns veth
1522 command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_router_veth)
1523 self.run_command(command)
1524
1525 command = 'sudo ip netns exec {} ip address add {} dev {}'.format(ns_qouter,
1526 link['nat'],
1527 qrouter_ns_router_veth)
1528 self.run_command(command)
1529
1530 # up ns veth
1531 command = 'sudo brctl addif {} {}'.format(link['iface'], qrouter_br_veth)
1532 self.run_command(command)
1533
1534 # up ns veth
1535 command = 'sudo ip netns exec {} iptables -t nat -A POSTROUTING -o {} -s {} -d {} -j MASQUERADE' \
1536 .format(ns_qouter, qrouter_ns_router_veth, link['nat'], cidr)
1537 self.run_command(command)
1538
1539 return True
1540 else:
1541
1542 self.logger.error('create_qrouter_br_connection, Bridge {} given by user not exist'.format(qrouter_br_veth))
1543 return False
1544
1545 except RunCommandException as e:
1546 self.logger.error("Error creating qrouter, {}".format(str(e)))
1547 return False
1548
1549 def create_link_bridge_to_ovs(self, vlan, link):
1550 """
1551 Create interfaces to connect a linux bridge with tenant net
1552 :param vlan: segmentation id
1553 :return: True if success
1554 """
1555 if self.test:
1556 return True
1557 try:
1558
1559 br_tap_name = '{}-vethBO'.format(str(vlan))
1560 br_ovs_name = '{}-vethOB'.format(str(vlan))
1561
1562 # is a bridge or a interface
1563 command = 'sudo brctl show | grep {}'.format(link)
1564 content = self.run_command(command, ignore_exit_status=True)
1565 if content > '':
1566 command = 'sudo ip link add {} type veth peer name {}'.format(br_tap_name, br_ovs_name)
1567 self.run_command(command)
1568
1569 command = 'sudo ip link set dev {} up'.format(br_tap_name)
1570 self.run_command(command)
1571
1572 command = 'sudo ip link set dev {} up'.format(br_ovs_name)
1573 self.run_command(command)
1574
1575 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(br_ovs_name, str(vlan))
1576 self.run_command(command)
1577
1578 command = 'sudo brctl addif ' + link + ' {}'.format(br_tap_name)
1579 self.run_command(command)
1580 return True
1581 else:
1582 self.logger.error('Link is not present, please check {}'.format(link))
1583 return False
1584
1585 except RunCommandException as e:
1586 self.logger.error("create_link_bridge_to_ovs, Error creating link to ovs, {}".format(str(e)))
1587 return False
1588
1589 def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
1590 """
1591 Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
1592 :param vxlan_interface: vlxan inteface name.
1593 :param remote_ip: tunnel endpoint remote compute ip.
1594 :return:
1595 """
1596 if self.test or not self.connectivity:
1597 return True
1598 if remote_ip == 'localhost':
1599 if self.localhost:
1600 return True # TODO: Cannot create a vxlan between localhost and localhost
1601 remote_ip = self.local_ip
1602 try:
1603
1604 command = 'sudo ovs-vsctl add-port br-int {} -- set Interface {} type=vxlan options:remote_ip={} ' \
1605 '-- set Port {} other_config:stp-path-cost=10'.format(vxlan_interface,
1606 vxlan_interface,
1607 remote_ip,
1608 vxlan_interface)
1609 self.run_command(command)
1610 return True
1611 except RunCommandException as e:
1612 self.logger.error("create_ovs_vxlan_tunnel, error creating vxlan tunnel, {}".format(str(e)))
1613 return False
1614
1615 def delete_ovs_vxlan_tunnel(self, vxlan_interface):
1616 """
1617 Delete a vlxan tunnel port from a OVS brdige.
1618 :param vxlan_interface: vlxan name to be delete it.
1619 :return: True if success.
1620 """
1621 if self.test or not self.connectivity:
1622 return True
1623 try:
1624 command = 'sudo ovs-vsctl del-port br-int {}'.format(vxlan_interface)
1625 self.run_command(command)
1626 return True
1627 except RunCommandException as e:
1628 self.logger.error("delete_ovs_vxlan_tunnel, error deleting vxlan tunenl, {}".format(str(e)))
1629 return False
1630
1631 def delete_ovs_bridge(self):
1632 """
1633 Delete a OVS bridge from a compute.
1634 :return: True if success
1635 """
1636 if self.test or not self.connectivity:
1637 return True
1638 try:
1639 command = 'sudo ovs-vsctl del-br br-int'
1640 self.run_command(command)
1641 return True
1642 except RunCommandException as e:
1643 self.logger.error("delete_ovs_bridge ssh Exception: {}".format(str(e)))
1644 return False
1645
1646 def get_file_info(self, path):
1647 command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1648 try:
1649 content = self.run_command(command)
1650 return content.split(" ") # (permission, 1, owner, group, size, date, file)
1651 except RunCommandException as e:
1652 return None # file does not exist
1653
1654 def qemu_get_info(self, path):
1655 command = 'qemu-img info ' + path
1656 content = self.run_command(command)
1657 try:
1658 return yaml.load(content)
1659 except yaml.YAMLError as exc:
1660 text = ""
1661 if hasattr(exc, 'problem_mark'):
1662 mark = exc.problem_mark
1663 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
1664 self.logger.error("get_qemu_info yaml format Exception " + text)
1665 raise RunCommandException("Error getting qemu_info yaml format" + text)
1666
1667 def qemu_change_backing(self, inc_file, new_backing_file):
1668 command = 'qemu-img rebase -u -b {} {}'.format(new_backing_file, inc_file)
1669 try:
1670 self.run_command(command)
1671 return 0
1672 except RunCommandException as e:
1673 self.logger.error("qemu_change_backing error: " + str(e))
1674 return -1
1675
1676 def qemu_create_empty_disk(self, dev):
1677
1678 if not dev and 'source' not in dev and 'file format' not in dev and 'image_size' not in dev:
1679 self.logger.error("qemu_create_empty_disk error: missing image parameter")
1680 return -1
1681
1682 empty_disk_path = dev['source file']
1683
1684 command = 'qemu-img create -f qcow2 {} {}G'.format(empty_disk_path, dev['image_size'])
1685 try:
1686 self.run_command(command)
1687 return 0
1688 except RunCommandException as e:
1689 self.logger.error("qemu_create_empty_disk error: " + str(e))
1690 return -1
1691
1692 def get_notused_filename(self, proposed_name, suffix=''):
1693 '''Look for a non existing file_name in the host
1694 proposed_name: proposed file name, includes path
1695 suffix: suffix to be added to the name, before the extention
1696 '''
1697 extension = proposed_name.rfind(".")
1698 slash = proposed_name.rfind("/")
1699 if extension < 0 or extension < slash: # no extension
1700 extension = len(proposed_name)
1701 target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
1702 info = self.get_file_info(target_name)
1703 if info is None:
1704 return target_name
1705
1706 index=0
1707 while info is not None:
1708 target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
1709 index+=1
1710 info = self.get_file_info(target_name)
1711 return target_name
1712
1713 def get_notused_path(self, proposed_path, suffix=''):
1714 '''Look for a non existing path at database for images
1715 proposed_path: proposed file name, includes path
1716 suffix: suffix to be added to the name, before the extention
1717 '''
1718 extension = proposed_path.rfind(".")
1719 if extension < 0:
1720 extension = len(proposed_path)
1721 if suffix != None:
1722 target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
1723 index=0
1724 while True:
1725 r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
1726 if r<=0:
1727 return target_path
1728 target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
1729 index+=1
1730
1731
1732 def delete_file(self, file_name):
1733 command = 'rm -f ' + file_name
1734 self.run_command(command)
1735
1736 def copy_file(self, source, destination, perserve_time=True):
1737 if source[0:4]=="http":
1738 command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1739 dst=destination, src=source, dst_result=destination + ".result" )
1740 else:
1741 command = 'cp --no-preserve=mode'
1742 if perserve_time:
1743 command += ' --preserve=timestamps'
1744 command += " '{}' '{}'".format(source, destination)
1745 self.run_command(command)
1746
    def copy_remote_file(self, remote_file, use_incremental):
        '''Copy a file from the repository to the local image folder, recursively
        copying the backing files when the remote image is incremental.
        Reads and/or modifies self.localinfo['files'], which maps repository
        paths to the unmodified local copies of images.
        params:
            remote_file: path (or http/https URL) of the remote file
            use_incremental: None (leave the decision to this function), True, False
        return:
            local_file: name of local file
            qemu_info: dict with qemu information of local file
            use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
        '''

        use_incremental_out = use_incremental
        new_backing_file = None
        local_file = None
        file_from_local = True

        #in case incremental use is not decided, take the decision depending on the image
        #avoid the use of incremental if this image is already incremental
        # http(s) sources cannot be inspected with qemu-img, so treat them as flat files
        if remote_file[0:4] == "http":
            file_from_local = False
        if file_from_local:
            qemu_remote_info = self.qemu_get_info(remote_file)
        if use_incremental_out==None:
            # default: use an incremental copy unless the source is already incremental
            use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)
        #copy recursivelly the backing files
        if file_from_local and 'backing file' in qemu_remote_info:
            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

        #check if remote file is present locally
        if use_incremental_out and remote_file in self.localinfo['files']:
            local_file = self.localinfo['files'][remote_file]
            local_file_info = self.get_file_info(local_file)
            if file_from_local:
                remote_file_info = self.get_file_info(remote_file)
            if local_file_info == None:
                # cached entry points to a file that no longer exists; force re-copy
                local_file = None
            elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
                #local copy of file not valid because date or size are different.
                # (fields 4 and 5 of get_file_info() — presumably size/date; confirm against get_file_info)
                #TODO DELETE local file if this file is not used by any active virtual machine
                try:
                    self.delete_file(local_file)
                    del self.localinfo['files'][remote_file]
                except Exception:
                    pass  # best-effort cleanup: a fresh copy is made below anyway
                local_file = None
            else: #check that the local file has the same backing file, or there are not backing at all
                qemu_info = self.qemu_get_info(local_file)
                if new_backing_file != qemu_info.get('backing file'):
                    local_file = None


        if local_file == None: #copy the file
            img_name= remote_file.split('/') [-1]
            img_local = self.image_path + '/' + img_name
            local_file = self.get_notused_filename(img_local)
            # NOTE(review): use_incremental_out is passed as copy_file's perserve_time
            # argument — presumably so the timestamp checks above stay valid for
            # cached (incremental) copies; confirm this is intentional
            self.copy_file(remote_file, local_file, use_incremental_out)

            if use_incremental_out:
                # register the unmodified copy so future calls can reuse it
                self.localinfo['files'][remote_file] = local_file
            if new_backing_file:
                # re-point the copy at the local copy of its backing file
                self.qemu_change_backing(local_file, new_backing_file)
            qemu_info = self.qemu_get_info(local_file)

        return local_file, qemu_info, use_incremental_out
1814
1815 def launch_server(self, conn, server, rebuild=False, domain=None):
1816 if self.test:
1817 time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real
1818 return 0, 'Success'
1819
1820 server_id = server['uuid']
1821 paused = server.get('paused','no')
1822 try:
1823 if domain!=None and rebuild==False:
1824 domain.resume()
1825 #self.server_status[server_id] = 'ACTIVE'
1826 return 0, 'Success'
1827
1828 self.db_lock.acquire()
1829 result, server_data = self.db.get_instance(server_id)
1830 self.db_lock.release()
1831 if result <= 0:
1832 self.logger.error("launch_server ERROR getting server from DB %d %s", result, server_data)
1833 return result, server_data
1834
1835 self.hypervisor = str(server_data['hypervisor'])
1836
1837 #0: get image metadata
1838 server_metadata = server.get('metadata', {})
1839 use_incremental = None
1840
1841 if "use_incremental" in server_metadata:
1842 use_incremental = False if server_metadata["use_incremental"] == "no" else True
1843 if self.xen_hyp == True:
1844 use_incremental = False
1845
1846 server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
1847 if rebuild:
1848 #delete previous incremental files
1849 for file_ in server_host_files.values():
1850 self.delete_file(file_['source file'] )
1851 server_host_files={}
1852
1853 #1: obtain aditional devices (disks)
1854 #Put as first device the main disk
1855 devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
1856 if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
1857 devices += server_data['extended']['devices']
1858 empty_path = None
1859 for dev in devices:
1860 image_id = dev.get('image_id')
1861 if not image_id:
1862 import uuid
1863 uuid_empty = str(uuid.uuid4())
1864 empty_path = self.empty_image_path + uuid_empty + '.qcow2' # local path for empty disk
1865
1866 dev['source file'] = empty_path
1867 dev['file format'] = 'qcow2'
1868 self.qemu_create_empty_disk(dev)
1869 server_host_files[uuid_empty] = {'source file': empty_path,
1870 'file format': dev['file format']}
1871
1872 continue
1873 else:
1874 self.db_lock.acquire()
1875 result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
1876 WHERE={'uuid': image_id})
1877 self.db_lock.release()
1878 if result <= 0:
1879 error_text = "ERROR", result, content, "when getting image", dev['image_id']
1880 self.logger.error("launch_server " + error_text)
1881 return -1, error_text
1882 if content[0]['metadata'] is not None:
1883 dev['metadata'] = json.loads(content[0]['metadata'])
1884 else:
1885 dev['metadata'] = {}
1886
1887 if image_id in server_host_files:
1888 dev['source file'] = server_host_files[image_id]['source file'] #local path
1889 dev['file format'] = server_host_files[image_id]['file format'] # raw or qcow2
1890 continue
1891
1892 #2: copy image to host
1893 if image_id:
1894 remote_file = content[0]['path']
1895 else:
1896 remote_file = empty_path
1897 use_incremental_image = use_incremental
1898 if dev['metadata'].get("use_incremental") == "no":
1899 use_incremental_image = False
1900 local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)
1901
1902 #create incremental image
1903 if use_incremental_image:
1904 local_file_inc = self.get_notused_filename(local_file, '.inc')
1905 command = 'qemu-img create -f qcow2 {} -o backing_file={}'.format(local_file_inc, local_file)
1906 self.run_command(command)
1907 local_file = local_file_inc
1908 qemu_info = {'file format': 'qcow2'}
1909
1910 server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}
1911
1912 dev['source file'] = local_file
1913 dev['file format'] = qemu_info['file format']
1914
1915 self.localinfo['server_files'][ server['uuid'] ] = server_host_files
1916 self.localinfo_dirty = True
1917
1918 #3 Create XML
1919 result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
1920 if result <0:
1921 self.logger.error("create xml server error: " + xml)
1922 return -2, xml
1923 self.logger.debug("create xml: " + xml)
1924 atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
1925 #4 Start the domain
1926 if not rebuild: #ensures that any pending destroying server is done
1927 self.server_forceoff(True)
1928 #self.logger.debug("launching instance " + xml)
1929 conn.createXML(xml, atribute)
1930 #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
1931
1932 return 0, 'Success'
1933
1934 except paramiko.ssh_exception.SSHException as e:
1935 text = e.args[0]
1936 self.logger.error("launch_server id='%s' ssh Exception: %s", server_id, text)
1937 if "SSH session not active" in text:
1938 self.ssh_connect()
1939 except host_thread.lvirt_module.libvirtError as e:
1940 text = e.get_error_message()
1941 self.logger.error("launch_server id='%s' libvirt Exception: %s", server_id, text)
1942 except Exception as e:
1943 text = str(e)
1944 self.logger.error("launch_server id='%s' Exception: %s", server_id, text)
1945 return -1, text
1946
1947 def update_servers_status(self):
1948 # # virDomainState
1949 # VIR_DOMAIN_NOSTATE = 0
1950 # VIR_DOMAIN_RUNNING = 1
1951 # VIR_DOMAIN_BLOCKED = 2
1952 # VIR_DOMAIN_PAUSED = 3
1953 # VIR_DOMAIN_SHUTDOWN = 4
1954 # VIR_DOMAIN_SHUTOFF = 5
1955 # VIR_DOMAIN_CRASHED = 6
1956 # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended
1957
1958 if self.test or len(self.server_status)==0:
1959 return
1960
1961 try:
1962 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
1963 domains= conn.listAllDomains()
1964 domain_dict={}
1965 for domain in domains:
1966 uuid = domain.UUIDString() ;
1967 libvirt_status = domain.state()
1968 #print libvirt_status
1969 if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
1970 new_status = "ACTIVE"
1971 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
1972 new_status = "PAUSED"
1973 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
1974 new_status = "INACTIVE"
1975 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
1976 new_status = "ERROR"
1977 else:
1978 new_status = None
1979 domain_dict[uuid] = new_status
1980 conn.close()
1981 except host_thread.lvirt_module.libvirtError as e:
1982 self.logger.error("get_state() Exception " + e.get_error_message())
1983 return
1984
1985 for server_id, current_status in self.server_status.iteritems():
1986 new_status = None
1987 if server_id in domain_dict:
1988 new_status = domain_dict[server_id]
1989 else:
1990 new_status = "INACTIVE"
1991
1992 if new_status == None or new_status == current_status:
1993 continue
1994 if new_status == 'INACTIVE' and current_status == 'ERROR':
1995 continue #keep ERROR status, because obviously this machine is not running
1996 #change status
1997 self.logger.debug("server id='%s' status change from '%s' to '%s'", server_id, current_status, new_status)
1998 STATUS={'progress':100, 'status':new_status}
1999 if new_status == 'ERROR':
2000 STATUS['last_error'] = 'machine has crashed'
2001 self.db_lock.acquire()
2002 r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
2003 self.db_lock.release()
2004 if r>=0:
2005 self.server_status[server_id] = new_status
2006
    def action_on_server(self, req, last_retry=True):
        '''Perform an action (start/stop/pause/terminate/...) on a server request.
        Attributes:
            req: dictionary that contain:
                server properties: 'uuid','name','tenant_id','status'
                action: 'action' (dict whose key selects the operation)
                host properties: 'user', 'ip_name'
            last_retry: when False and the action fails, the database is not
                updated so the caller can retry
        return (error, text)
             0: No error. VM is updated to new state,
            -1: Invalid action, as trying to pause a PAUSED VM
            -2: Error accessing host
            -3: VM not present
            -4: Error at DB access
            -5: Error while trying to perform action. VM is updated to ERROR
        '''
        server_id = req['uuid']
        conn = None
        new_status = None
        old_status = req['status']
        last_error = None

        if self.test:
            # test mode: simulate the state transitions (with artificial delays)
            # without touching libvirt
            if 'terminate' in req['action']:
                new_status = 'deleted'
            elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
                if req['status']!='ERROR':
                    time.sleep(5)
                    new_status = 'INACTIVE'
            elif 'start' in req['action'] and req['status']!='ERROR':
                new_status = 'ACTIVE'
            elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE':
                new_status = 'ACTIVE'
            elif 'pause' in req['action'] and req['status']!='ERROR':
                new_status = 'PAUSED'
            elif 'reboot' in req['action'] and req['status']!='ERROR':
                new_status = 'ACTIVE'
            elif 'rebuild' in req['action']:
                time.sleep(random.randint(20,150))
                new_status = 'ACTIVE'
            elif 'createImage' in req['action']:
                time.sleep(5)
                self.create_image(None, req)
        else:
            try:
                conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
                try:
                    dom = conn.lookupByUUIDString(server_id)
                except host_thread.lvirt_module.libvirtError as e:
                    text = e.get_error_message()
                    # "domain not found" is not an error here: dom=None simply means
                    # the server has no libvirt domain on this host
                    if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                        dom = None
                    else:
                        self.logger.error("action_on_server id='%s' libvirt exception: %s", server_id, text)
                        raise e

                if 'forceOff' in req['action']:
                    if dom == None:
                        self.logger.debug("action_on_server id='%s' domain not running", server_id)
                    else:
                        try:
                            self.logger.debug("sending DESTROY to server id='%s'", server_id)
                            dom.destroy()
                        except Exception as e:
                            # NOTE(review): get_error_message() assumes a libvirtError;
                            # any other exception type raises AttributeError here — confirm
                            if "domain is not running" not in e.get_error_message():
                                self.logger.error("action_on_server id='%s' Exception while sending force off: %s",
                                                  server_id, e.get_error_message())
                                last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                                new_status = 'ERROR'

                elif 'terminate' in req['action']:
                    if dom == None:
                        self.logger.debug("action_on_server id='%s' domain not running", server_id)
                        new_status = 'deleted'
                    else:
                        try:
                            if req['action']['terminate'] == 'force':
                                self.logger.debug("sending DESTROY to server id='%s'", server_id)
                                dom.destroy()
                                new_status = 'deleted'
                            else:
                                self.logger.debug("sending SHUTDOWN to server id='%s'", server_id)
                                dom.shutdown()
                                # schedule a forced destroy if still alive in 10 seconds
                                self.pending_terminate_server.append( (time.time()+10,server_id) )
                        except Exception as e:
                            self.logger.error("action_on_server id='%s' Exception while destroy: %s",
                                              server_id, e.get_error_message())
                            last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
                            new_status = 'ERROR'
                            if "domain is not running" in e.get_error_message():
                                try:
                                    dom.undefine()
                                    new_status = 'deleted'
                                except Exception:
                                    self.logger.error("action_on_server id='%s' Exception while undefine: %s",
                                                      server_id, e.get_error_message())
                                    # NOTE(review): the comma makes this a tuple, not a
                                    # concatenated string (missing '+') — kept as-is
                                    last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
                                    #Exception: 'virDomainDetachDevice() failed'
                    if new_status=='deleted':
                        # forget the server locally: status cache and its disk files
                        if server_id in self.server_status:
                            del self.server_status[server_id]
                        if req['uuid'] in self.localinfo['server_files']:
                            for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
                                try:
                                    self.delete_file(file_['source file'])
                                except Exception:
                                    pass  # best effort: files may already be gone
                            del self.localinfo['server_files'][ req['uuid'] ]
                            self.localinfo_dirty = True

                elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
                    try:
                        if dom == None:
                            self.logger.debug("action_on_server id='%s' domain not running", server_id)
                        else:
                            dom.shutdown()
#                        new_status = 'INACTIVE'
                        #TODO: check status for changing at database
                    except Exception as e:
                        new_status = 'ERROR'
                        self.logger.error("action_on_server id='%s' Exception while shutdown: %s",
                                          server_id, e.get_error_message())
                        last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()

                elif 'rebuild' in req['action']:
                    if dom != None:
                        dom.destroy()
                    r = self.launch_server(conn, req, True, None)
                    if r[0] <0:
                        new_status = 'ERROR'
                        last_error = r[1]
                    else:
                        new_status = 'ACTIVE'
                elif 'start' in req['action']:
                    # The instance is only create in DB but not yet at libvirt domain, needs to be create
                    rebuild = True if req['action']['start'] == 'rebuild' else False
                    r = self.launch_server(conn, req, rebuild, dom)
                    if r[0] <0:
                        new_status = 'ERROR'
                        last_error = r[1]
                    else:
                        new_status = 'ACTIVE'

                elif 'resume' in req['action']:
                    try:
                        if dom == None:
                            pass  # nothing to resume; keep new_status None (no DB change)
                        else:
                            dom.resume()
#                            new_status = 'ACTIVE'
                    except Exception as e:
                        self.logger.error("action_on_server id='%s' Exception while resume: %s",
                                          server_id, e.get_error_message())

                elif 'pause' in req['action']:
                    try:
                        if dom == None:
                            pass  # nothing to pause
                        else:
                            dom.suspend()
#                            new_status = 'PAUSED'
                    except Exception as e:
                        self.logger.error("action_on_server id='%s' Exception while pause: %s",
                                          server_id, e.get_error_message())

                elif 'reboot' in req['action']:
                    try:
                        if dom == None:
                            pass  # nothing to reboot
                        else:
                            dom.reboot()
                            self.logger.debug("action_on_server id='%s' reboot:", server_id)
                            #new_status = 'ACTIVE'
                    except Exception as e:
                        self.logger.error("action_on_server id='%s' Exception while reboot: %s",
                                          server_id, e.get_error_message())
                elif 'createImage' in req['action']:
                    self.create_image(dom, req)


                conn.close()
            except host_thread.lvirt_module.libvirtError as e:
                if conn is not None: conn.close()
                text = e.get_error_message()
                new_status = "ERROR"
                last_error = text
                if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
                    self.logger.debug("action_on_server id='%s' Exception removed from host", server_id)
                else:
                    self.logger.error("action_on_server id='%s' Exception %s", server_id, text)
        #end of if self.test
        if new_status == None:
            return 1

        self.logger.debug("action_on_server id='%s' new status=%s %s",server_id, new_status, last_error)
        UPDATE = {'progress':100, 'status':new_status}

        if new_status=='ERROR':
            if not last_retry: #if there will be another retry do not update database
                return -1
            elif 'terminate' in req['action']:
                #PUT a log in the database
                self.logger.error("PANIC deleting server id='%s' %s", server_id, last_error)
                self.db_lock.acquire()
                self.db.new_row('logs',
                            {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
                             'description':'PANIC deleting server from host '+self.name+': '+last_error}
                        )
                self.db_lock.release()
                if server_id in self.server_status:
                    del self.server_status[server_id]
                return -1
            else:
                UPDATE['last_error'] = last_error
        if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
            self.db_lock.acquire()
            self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
            self.server_status[server_id] = new_status
            self.db_lock.release()
        if new_status == 'ERROR':
            return -1
        return 1
2228
2229
2230 def restore_iface(self, name, mac, lib_conn=None):
2231 ''' make an ifdown, ifup to restore default parameter of na interface
2232 Params:
2233 mac: mac address of the interface
2234 lib_conn: connection to the libvirt, if None a new connection is created
2235 Return 0,None if ok, -1,text if fails
2236 '''
2237 conn=None
2238 ret = 0
2239 error_text=None
2240 if self.test:
2241 self.logger.debug("restore_iface '%s' %s", name, mac)
2242 return 0, None
2243 try:
2244 if not lib_conn:
2245 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2246 else:
2247 conn = lib_conn
2248
2249 #wait to the pending VM deletion
2250 #TODO.Revise self.server_forceoff(True)
2251
2252 iface = conn.interfaceLookupByMACString(mac)
2253 if iface.isActive():
2254 iface.destroy()
2255 iface.create()
2256 self.logger.debug("restore_iface '%s' %s", name, mac)
2257 except host_thread.lvirt_module.libvirtError as e:
2258 error_text = e.get_error_message()
2259 self.logger.error("restore_iface '%s' '%s' libvirt exception: %s", name, mac, error_text)
2260 ret=-1
2261 finally:
2262 if lib_conn is None and conn is not None:
2263 conn.close()
2264 return ret, error_text
2265
2266
2267 def create_image(self,dom, req):
2268 if self.test:
2269 if 'path' in req['action']['createImage']:
2270 file_dst = req['action']['createImage']['path']
2271 else:
2272 createImage=req['action']['createImage']
2273 img_name= createImage['source']['path']
2274 index=img_name.rfind('/')
2275 file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
2276 image_status='ACTIVE'
2277 else:
2278 for retry in (0,1):
2279 try:
2280 server_id = req['uuid']
2281 createImage=req['action']['createImage']
2282 file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
2283 if 'path' in req['action']['createImage']:
2284 file_dst = req['action']['createImage']['path']
2285 else:
2286 img_name= createImage['source']['path']
2287 index=img_name.rfind('/')
2288 file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')
2289
2290 self.copy_file(file_orig, file_dst)
2291 qemu_info = self.qemu_get_info(file_orig)
2292 if 'backing file' in qemu_info:
2293 for k,v in self.localinfo['files'].items():
2294 if v==qemu_info['backing file']:
2295 self.qemu_change_backing(file_dst, k)
2296 break
2297 image_status='ACTIVE'
2298 break
2299 except paramiko.ssh_exception.SSHException as e:
2300 image_status='ERROR'
2301 error_text = e.args[0]
2302 self.logger.error("create_image id='%s' ssh Exception: %s", server_id, error_text)
2303 if "SSH session not active" in error_text and retry==0:
2304 self.ssh_connect()
2305 except Exception as e:
2306 image_status='ERROR'
2307 error_text = str(e)
2308 self.logger.error("create_image id='%s' Exception: %s", server_id, error_text)
2309
2310 #TODO insert a last_error at database
2311 self.db_lock.acquire()
2312 self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
2313 {'uuid':req['new_image']['uuid']}, log=True)
2314 self.db_lock.release()
2315
    def edit_iface(self, port_id, old_net, new_net):
        """Move a SR-IOV (VF) interface between networks.

        Implemented as a live detach (when old_net is set) and re-attach
        (when new_net is set) of the hostdev interface, so the new network
        parameters (vlan, vpci) take effect.
        :param port_id: uuid of the port at the 'ports' table
        :param old_net: previous network (truthy -> detach the interface)
        :param new_net: new network (truthy -> attach the interface)
        """
        #This action imply remove and insert interface to put proper parameters
        if self.test:
            time.sleep(1)
        else:
        #get iface details
            self.db_lock.acquire()
            r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
                                    WHERE={'port_id': port_id})
            self.db_lock.release()
            if r<0:
                self.logger.error("edit_iface %s DDBB error: %s", port_id, c)
                return
            elif r==0:
                self.logger.error("edit_iface %s port not found", port_id)
                return
            port=c[0]
            if port["model"]!="VF":
                # only SR-IOV virtual functions can be moved this way
                self.logger.error("edit_iface %s ERROR model must be VF", port_id)
                return
            #create xml detach file
            xml=[]
            self.xml_level = 2
            xml.append("<interface type='hostdev' managed='yes'>")
            xml.append(" <mac address='" +port['mac']+ "'/>")
            xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
            xml.append('</interface>')


            try:
                conn=None
                conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
                dom = conn.lookupByUUIDString(port["instance_id"])
                if old_net:
                    text="\n".join(xml)
                    self.logger.debug("edit_iface detaching SRIOV interface " + text)
                    dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
                if new_net:
                    # reuse the xml: replace the closing tag with the vlan element,
                    # then add the guest pci address and close the interface again
                    xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
                    self.xml_level = 1
                    xml.append(self.pci2xml(port.get('vpci',None)) )
                    xml.append('</interface>')
                    text="\n".join(xml)
                    self.logger.debug("edit_iface attaching SRIOV interface " + text)
                    dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)

            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                self.logger.error("edit_iface %s libvirt exception: %s", port["instance_id"], text)

            finally:
                if conn is not None: conn.close()
2368
2369
2370 def create_server(server, db, db_lock, only_of_ports):
2371 extended = server.get('extended', None)
2372 requirements={}
2373 requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
2374 requirements['ram'] = server['flavor'].get('ram', 0)
2375 if requirements['ram']== None:
2376 requirements['ram'] = 0
2377 requirements['vcpus'] = server['flavor'].get('vcpus', 0)
2378 if requirements['vcpus']== None:
2379 requirements['vcpus'] = 0
2380 #If extended is not defined get requirements from flavor
2381 if extended is None:
2382 #If extended is defined in flavor convert to dictionary and use it
2383 if 'extended' in server['flavor'] and server['flavor']['extended'] != None:
2384 json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
2385 extended = json.loads(json_acceptable_string)
2386 else:
2387 extended = None
2388 #print json.dumps(extended, indent=4)
2389
2390 #For simplicity only one numa VM are supported in the initial implementation
2391 if extended != None:
2392 numas = extended.get('numas', [])
2393 if len(numas)>1:
2394 return (-2, "Multi-NUMA VMs are not supported yet")
2395 #elif len(numas)<1:
2396 # return (-1, "At least one numa must be specified")
2397
2398 #a for loop is used in order to be ready to multi-NUMA VMs
2399 request = []
2400 for numa in numas:
2401 numa_req = {}
2402 numa_req['memory'] = numa.get('memory', 0)
2403 if 'cores' in numa:
2404 numa_req['proc_req_nb'] = numa['cores'] #number of cores or threads to be reserved
2405 numa_req['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
2406 numa_req['proc_req_list'] = numa.get('cores-id', None) #list of ids to be assigned to the cores or threads
2407 elif 'paired-threads' in numa:
2408 numa_req['proc_req_nb'] = numa['paired-threads']
2409 numa_req['proc_req_type'] = 'paired-threads'
2410 numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
2411 elif 'threads' in numa:
2412 numa_req['proc_req_nb'] = numa['threads']
2413 numa_req['proc_req_type'] = 'threads'
2414 numa_req['proc_req_list'] = numa.get('threads-id', None)
2415 else:
2416 numa_req['proc_req_nb'] = 0 # by default
2417 numa_req['proc_req_type'] = 'threads'
2418
2419
2420
2421 #Generate a list of sriov and another for physical interfaces
2422 interfaces = numa.get('interfaces', [])
2423 sriov_list = []
2424 port_list = []
2425 for iface in interfaces:
2426 iface['bandwidth'] = int(iface['bandwidth'])
2427 if iface['dedicated'][:3]=='yes':
2428 port_list.append(iface)
2429 else:
2430 sriov_list.append(iface)
2431
2432 #Save lists ordered from more restrictive to less bw requirements
2433 numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
2434 numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)
2435
2436
2437 request.append(numa_req)
2438
2439 # print "----------\n"+json.dumps(request[0], indent=4)
2440 # print '----------\n\n'
2441
2442 #Search in db for an appropriate numa for each requested numa
2443 #at the moment multi-NUMA VMs are not supported
2444 if len(request)>0:
2445 requirements['numa'].update(request[0])
2446 if requirements['numa']['memory']>0:
2447 requirements['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2448 elif requirements['ram']==0:
2449 return (-1, "Memory information not set neither at extended field not at ram")
2450 if requirements['numa']['proc_req_nb']>0:
2451 requirements['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2452 elif requirements['vcpus']==0:
2453 return (-1, "Processor information not set neither at extended field not at vcpus")
2454
2455 if 'hypervisor' in server: requirements['hypervisor'] = server['hypervisor'] #Unikernels extension
2456
2457 db_lock.acquire()
2458 result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
2459 db_lock.release()
2460
2461 if result == -1:
2462 return (-1, content)
2463
2464 numa_id = content['numa_id']
2465 host_id = content['host_id']
2466
2467 #obtain threads_id and calculate pinning
2468 cpu_pinning = []
2469 reserved_threads=[]
2470 if requirements['numa']['proc_req_nb']>0:
2471 db_lock.acquire()
2472 result, content = db.get_table(FROM='resources_core',
2473 SELECT=('id','core_id','thread_id'),
2474 WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} )
2475 db_lock.release()
2476 if result <= 0:
2477 #print content
2478 return -1, content
2479
2480 #convert rows to a dictionary indexed by core_id
2481 cores_dict = {}
2482 for row in content:
2483 if not row['core_id'] in cores_dict:
2484 cores_dict[row['core_id']] = []
2485 cores_dict[row['core_id']].append([row['thread_id'],row['id']])
2486
2487 #In case full cores are requested
2488 paired = 'N'
2489 if requirements['numa']['proc_req_type'] == 'cores':
2490 #Get/create the list of the vcpu_ids
2491 vcpu_id_list = requirements['numa']['proc_req_list']
2492 if vcpu_id_list == None:
2493 vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
2494
2495 for threads in cores_dict.itervalues():
2496 #we need full cores
2497 if len(threads) != 2:
2498 continue
2499
2500 #set pinning for the first thread
2501 cpu_pinning.append( [ vcpu_id_list.pop(0), threads[0][0], threads[0][1] ] )
2502
2503 #reserve so it is not used the second thread
2504 reserved_threads.append(threads[1][1])
2505
2506 if len(vcpu_id_list) == 0:
2507 break
2508
2509 #In case paired threads are requested
2510 elif requirements['numa']['proc_req_type'] == 'paired-threads':
2511 paired = 'Y'
2512 #Get/create the list of the vcpu_ids
2513 if requirements['numa']['proc_req_list'] != None:
2514 vcpu_id_list = []
2515 for pair in requirements['numa']['proc_req_list']:
2516 if len(pair)!=2:
2517 return -1, "Field paired-threads-id not properly specified"
2518 return
2519 vcpu_id_list.append(pair[0])
2520 vcpu_id_list.append(pair[1])
2521 else:
2522 vcpu_id_list = range(0,2*int(requirements['numa']['proc_req_nb']))
2523
2524 for threads in cores_dict.itervalues():
2525 #we need full cores
2526 if len(threads) != 2:
2527 continue
2528 #set pinning for the first thread
2529 cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
2530
2531 #set pinning for the second thread
2532 cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
2533
2534 if len(vcpu_id_list) == 0:
2535 break
2536
2537 #In case normal threads are requested
2538 elif requirements['numa']['proc_req_type'] == 'threads':
2539 #Get/create the list of the vcpu_ids
2540 vcpu_id_list = requirements['numa']['proc_req_list']
2541 if vcpu_id_list == None:
2542 vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
2543
2544 for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
2545 threads = cores_dict[threads_index]
2546 #set pinning for the first thread
2547 cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
2548
2549 #if exists, set pinning for the second thread
2550 if len(threads) == 2 and len(vcpu_id_list) != 0:
# NOTE(review): this span is the tail of a larger allocation routine whose
# 'def' line is above this chunk; names such as requirements, numa_id, server,
# db, db_lock, host_id, cpu_pinning, reserved_threads, paired, vcpu_id_list,
# threads and extended are bound earlier in that routine. Every line below
# also carries a line-number prefix inherited from the gitweb scrape of the
# original file; executable tokens are left byte-identical.
2551 cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
2552
# Stop pinning once every requested vCPU has been placed.
2553 if len(vcpu_id_list) == 0:
2554 break
2555
2556 #Get the source pci addresses for the selected numa
# For each requested SR-IOV interface, pick one free VF on this NUMA node
# (rows with port_id NULL and Mbps_used 0), record its pci and DB id, and
# reserve the interface's requested bandwidth on it.
2557 used_sriov_ports = []
2558 for port in requirements['numa']['sriov_list']:
2559 db_lock.acquire()
2560 result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
2561 db_lock.release()
# result <= 0 means DB error or no free VF left; bail out with the DB message.
2562 if result <= 0:
2563 #print content
2564 return -1, content
2565 for row in content:
# Skip VFs already taken by a previous interface in this same request.
2566 if row['id'] in used_sriov_ports or row['id']==port['port_id']:
2567 continue
2568 port['pci'] = row['pci']
# Keep a caller-supplied mac_address if present; otherwise adopt the VF's.
2569 if 'mac_address' not in port:
2570 port['mac_address'] = row['mac']
2571 del port['mac']
2572 port['port_id']=row['id']
2573 port['Mbps_used'] = port['bandwidth']
2574 used_sriov_ports.append(row['id'])
2575 break
2576
# Same assignment pass for the dedicated/passthrough port list. Interfaces not
# marked "yes:sriov" keep their own mac and need no VF lookup.
2577 for port in requirements['numa']['port_list']:
2578 port['Mbps_used'] = None
2579 if port['dedicated'] != "yes:sriov":
2580 port['mac_address'] = port['mac']
2581 del port['mac']
2582 continue
2583 db_lock.acquire()
2584 result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
2585 db_lock.release()
2586 if result <= 0:
2587 #print content
2588 return -1, content
# NOTE(review): reserves the full capacity (Mbps) of the FIRST returned row,
# even though the row actually chosen below may differ — presumably all VFs of
# one root share the same Mbps; confirm against the resources_port schema.
2589 port['Mbps_used'] = content[0]['Mbps']
2590 for row in content:
2591 if row['id'] in used_sriov_ports or row['id']==port['port_id']:
2592 continue
2593 port['pci'] = row['pci']
2594 if 'mac_address' not in port:
2595 port['mac_address'] = row['mac'] # mac cannot be set to passthrough ports
2596 del port['mac']
2597 port['port_id']=row['id']
2598 used_sriov_ports.append(row['id'])
2599 break
2600
2601 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2602 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2603
2604 server['host_id'] = host_id
2605
2606 #Generate dictionary for saving in db the instance resources
# 'resources' is the dict returned to the caller (and later persisted) that
# describes everything allocated for this instance on the chosen host/NUMA.
2607 resources = {}
2608 resources['bridged-ifaces'] = []
2609
2610 numa_dict = {}
2611 numa_dict['interfaces'] = []
2612
# The numa interface list aggregates both dedicated ports and SR-IOV VFs.
2613 numa_dict['interfaces'] += requirements['numa']['port_list']
2614 numa_dict['interfaces'] += requirements['numa']['sriov_list']
2615
2616 #Check bridge information
# NOTE(review): unified_dataplane_iface is built here but never read in this
# span — possibly dead code or used only for its shared references; verify.
2617 unified_dataplane_iface=[]
2618 unified_dataplane_iface += requirements['numa']['port_list']
2619 unified_dataplane_iface += requirements['numa']['sriov_list']
2620
# Resolve every network requested by the server: look the net up in the DB,
# then classify the interface as bridged (virtual) or dataplane (data/ptp).
2621 for control_iface in server.get('networks', []):
2622 control_iface['net_id']=control_iface.pop('uuid')
2623 #Get the brifge name
2624 db_lock.acquire()
2625 result, content = db.get_table(FROM='nets',
2626 SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp','dhcp_first_ip',
2627 'dhcp_last_ip', 'cidr', 'gateway_ip', 'dns', 'links', 'routes'),
2628 WHERE={'uuid': control_iface['net_id']})
2629 db_lock.release()
# NOTE(review): a negative result (DB error) is silently ignored here, so the
# interface is simply skipped instead of reported — confirm this is intended.
2630 if result < 0:
2631 pass
2632 elif result==0:
# NOTE(review): "netwoks"/"wit" typos are in user-facing runtime strings and
# are deliberately left untouched in this documentation-only pass.
2633 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface['net_id']
2634 else:
2635 network=content[0]
# Virtual interfaces must target a bridge network; choose OVS vs Linux bridge
# from the net's provider prefix and copy DHCP/DNS/routing metadata over.
2636 if control_iface.get("type", 'virtual') == 'virtual':
2637 if network['type']!='bridge_data' and network['type']!='bridge_man':
2638 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
2639 resources['bridged-ifaces'].append(control_iface)
2640 if network.get("provider") and network["provider"][0:3] == "OVS":
2641 control_iface["type"] = "instance:ovs"
2642 else:
2643 control_iface["type"] = "instance:bridge"
2644 if network.get("vlan"):
2645 control_iface["vlan"] = network["vlan"]
2646
2647 if network.get("enable_dhcp") == 'true':
2648 control_iface["enable_dhcp"] = network.get("enable_dhcp")
2649 control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
2650 control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
2651 control_iface["cidr"] = network["cidr"]
2652
# dns/links/routes are stored in the DB as YAML text; parse them back here.
2653 if network.get("dns"):
2654 control_iface["dns"] = yaml.safe_load(network.get("dns"))
2655 if network.get("links"):
2656 control_iface["links"] = yaml.safe_load(network.get("links"))
2657 if network.get("routes"):
2658 control_iface["routes"] = yaml.safe_load(network.get("routes"))
2659 else:
# Dataplane interfaces must target a data/ptp net and must match, by name,
# an interface declared in the flavor's numa tree (filled in above).
2660 if network['type']!='data' and network['type']!='ptp':
2661 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
2662 #dataplane interface, look for it in the numa tree and asign this network
2663 iface_found=False
2664 for dataplane_iface in numa_dict['interfaces']:
2665 if dataplane_iface['name'] == control_iface.get("name"):
# Cross-check flavor 'dedicated' mode against the requested interface type:
# yes->PF, no->VF, yes:sriov->VFnotShared.
2666 if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
2667 (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
2668 (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared") :
2669 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2670 (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
2671 dataplane_iface['uuid'] = control_iface['net_id']
2672 if dataplane_iface['dedicated'] == "no":
2673 dataplane_iface['vlan'] = network['vlan']
# A caller mac_address overrides the VF mac, but never on full passthrough.
2674 if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
2675 dataplane_iface['mac_address'] = control_iface.get("mac_address")
2676 if control_iface.get("vpci"):
2677 dataplane_iface['vpci'] = control_iface.get("vpci")
2678 iface_found=True
2679 break
2680 if not iface_found:
2681 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface.get("name")
2682
# Instance-level fields of the result; the instance starts in CREATING state.
2683 resources['host_id'] = host_id
2684 resources['image_id'] = server['image_id']
2685 resources['flavor_id'] = server['flavor_id']
2686 resources['tenant_id'] = server['tenant_id']
2687 resources['ram'] = requirements['ram']
2688 resources['vcpus'] = requirements['vcpus']
2689 resources['status'] = 'CREATING'
2690
# Optional fields copied through only when the request provides them.
2691 if 'description' in server: resources['description'] = server['description']
2692 if 'name' in server: resources['name'] = server['name']
2693 if 'hypervisor' in server: resources['hypervisor'] = server['hypervisor']
2694 if 'os_image_type' in server: resources['os_image_type'] = server['os_image_type']
2695
2696 resources['extended'] = {} #optional
2697 resources['extended']['numas'] = []
2698 numa_dict['numa_id'] = numa_id
2699 numa_dict['memory'] = requirements['numa']['memory']
2700 numa_dict['cores'] = []
2701
# cpu_pinning entries are [vthread_id, physical_thread, core_id] triples;
# reserved_threads holds extra core ids kept for the host with no vthread.
2702 for core in cpu_pinning:
2703 numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
2704 for core in reserved_threads:
2705 numa_dict['cores'].append({'id': core})
2706 resources['extended']['numas'].append(numa_dict)
2707 if extended!=None and 'devices' in extended: #TODO allow extra devices without numa
2708 resources['extended']['devices'] = extended['devices']
2709
2710
2711 # '===================================={'
2712 #print json.dumps(resources, indent=4)
2713 #print '====================================}'
2714
# Success: (0, resources) — the caller persists this allocation in the DB.
2715 return 0, resources
2716