# Added LICENSE file to root folder
# [osm/openvim.git] / osm_openvim / host_thread.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openvim
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
'''
This is a thread that interacts with the host and with libvirt to manage VMs.
One thread will be launched per host.
'''
28 __author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__ = "$10-jul-2014 12:07:15$"
30
31 import json
32 import yaml
33 import threading
34 import time
35 import Queue
36 import paramiko
37 import subprocess
38 # import libvirt
39 import imp
40 import random
41 import os
42 import logging
43 from jsonschema import validate as js_v, exceptions as js_e
44 from vim_schema import localinfo_schema, hostinfo_schema
45
class RunCommandException(Exception):
    """Raised when a local or remote (ssh) shell command fails in run_command()."""
    pass
48
49 class host_thread(threading.Thread):
50 lvirt_module = None
51
    def __init__(self, name, host, user, db, test, image_path, host_id, version, develop_mode,
                 develop_bridge_iface, password=None, keyfile = None, logger_name=None, debug=None, hypervisors=None):
        """Init a thread to communicate with compute node or ovs_controller.
        :param host_id: host identity
        :param name: name of the thread
        :param host: host ip or name to manage and user
        :param user, password, keyfile: user and credentials to connect to host
        :param db: database class, threading safe
        :param test: when True no real connection to the host is made
        :param image_path: remote folder where images and the local info files live
        :param develop_mode, develop_bridge_iface: development aids; dataplane
            interfaces are mapped to this linux bridge and hugepages/pinning skipped
        :param logger_name: logger name; defaults to "openvim.host.<name>"
        :param debug: log level name (e.g. "DEBUG") applied to the logger
        :param hypervisors: hypervisor(s) to use, defaults to "kvm"; presumably a
            list or a string - the 'in' test below works for both (TODO confirm)
        """
        threading.Thread.__init__(self)
        self.name = name
        self.host = host
        self.user = user
        self.db = db
        self.test = test
        self.password = password
        self.keyfile = keyfile
        self.localinfo_dirty = False   # True when localinfo must be re-written to the host
        self.connectivity = True       # cleared by check_connectivity() on command failure

        # libvirt is imported dynamically and cached on the class, so that test
        # mode works on machines without python-libvirt installed
        if not test and not host_thread.lvirt_module:
            try:
                module_info = imp.find_module("libvirt")
                host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
            except (IOError, ImportError) as e:
                raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))
        if logger_name:
            self.logger_name = logger_name
        else:
            self.logger_name = "openvim.host."+name
        self.logger = logging.getLogger(self.logger_name)
        if debug:
            self.logger.setLevel(getattr(logging, debug))


        self.develop_mode = develop_mode
        self.develop_bridge_iface = develop_bridge_iface
        self.image_path = image_path
        self.empty_image_path = image_path
        self.host_id = host_id
        self.version = version

        self.xml_level = 0   # current indentation depth used by tab()/inc_tab()/dec_tab()
        # self.pending ={}

        self.server_status = {} #dictionary with pairs server_uuid:server_status
        self.pending_terminate_server =[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
        self.next_update_server_status = 0 #time when servers status must be checked next

        ####### self.hypervisor = "kvm" #hypervisor flag (default: kvm)
        if hypervisors:
            self.hypervisors = hypervisors
        else:
            self.hypervisors = "kvm"

        self.xen_hyp = True if "xen" in self.hypervisors else False

        self.hostinfo = None

        self.queueLock = threading.Lock()       # protects taskQueue accesses
        self.taskQueue = Queue.Queue(2000)
        self.ssh_conn = None                    # paramiko client, created lazily by ssh_connect()
        self.run_command_session = None         # open keep_session state of run_command()
        self.error = None
        self.localhost = True if host == 'localhost' else False

        # libvirt connection URI; ssh transport reuses the same credentials
        if self.xen_hyp:
            self.lvirt_conn_uri = "xen+ssh://{user}@{host}/?no_tty=1&no_verify=1".format(
                user=self.user, host=self.host)
        else:
            self.lvirt_conn_uri = "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
                user=self.user, host=self.host)
        if keyfile:
            self.lvirt_conn_uri += "&keyfile=" + keyfile

        self.remote_ip = None
        self.local_ip = None
129
    def run_command(self, command, keep_session=False, ignore_exit_status=False):
        """Run a command passed as a str on a localhost or at remote machine.
        :param command: text with the command to execute.
        :param keep_session: if True it returns a <stdin> for sending input with '<stdin>.write("text\n")'.
        A command with keep_session=True MUST be followed by a command with keep_session=False in order to
        close the session and get the output
        :param ignore_exit_status: Return stdout and not raise an exepction in case of error.
        :return: the output of the command if 'keep_session=False' or the <stdin> object if 'keep_session=True'
        :raises: RunCommandException if command fails
        """
        if self.run_command_session and keep_session:
            raise RunCommandException("Internal error. A command with keep_session=True must be followed by another "
                                      "command with keep_session=False to close session")
        try:
            if self.localhost:
                # local execution through subprocess
                if self.run_command_session:
                    # close the previously opened keep_session and collect its output
                    p = self.run_command_session
                    self.run_command_session = None
                    (output, outerror) = p.communicate()
                    returncode = p.returncode
                    p.stdin.close()
                elif keep_session:
                    # open a session: the caller writes to the returned stdin pipe
                    p = subprocess.Popen(('bash', "-c", command), stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                    self.run_command_session = p
                    return p.stdin
                else:
                    if not ignore_exit_status:
                        output = subprocess.check_output(('bash', "-c", command))
                        returncode = 0
                    else:
                        out = None
                        p = subprocess.Popen(('bash', "-c", command), stdout=subprocess.PIPE)
                        out, err = p.communicate()
                        return out
            else:
                # remote execution through paramiko ssh
                if self.run_command_session:
                    # close the previously opened keep_session and collect its output
                    (i, o, e) = self.run_command_session
                    self.run_command_session = None
                    i.channel.shutdown_write()
                else:
                    if not self.ssh_conn:
                        self.ssh_connect()
                    (i, o, e) = self.ssh_conn.exec_command(command, timeout=10)
                    if keep_session:
                        self.run_command_session = (i, o, e)
                        return i
                returncode = o.channel.recv_exit_status()
                output = o.read()
                outerror = e.read()
            if returncode != 0 and not ignore_exit_status:
                text = "run_command='{}' Error='{}'".format(command, outerror)
                self.logger.error(text)
                raise RunCommandException(text)

            self.logger.debug("run_command='{}' result='{}'".format(command, output))
            return output

        except RunCommandException:
            raise
        except subprocess.CalledProcessError as e:
            # raised by check_output on non-zero exit; re-raised below as RunCommandException
            text = "run_command Exception '{}' '{}'".format(str(e), e.output)
        except (paramiko.ssh_exception.SSHException, Exception) as e:
            text = "run_command='{}' Exception='{}'".format(command, str(e))
            # drop the connection and any half-open session so the next call reconnects
            self.ssh_conn = None
            self.run_command_session = None
        raise RunCommandException(text)
197
198 def ssh_connect(self):
199 try:
200 # Connect SSH
201 self.ssh_conn = paramiko.SSHClient()
202 self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
203 self.ssh_conn.load_system_host_keys()
204 self.ssh_conn.connect(self.host, username=self.user, password=self.password, key_filename=self.keyfile,
205 timeout=10) # auth_timeout=10)
206 self.remote_ip = self.ssh_conn.get_transport().sock.getpeername()[0]
207 self.local_ip = self.ssh_conn.get_transport().sock.getsockname()[0]
208 except (paramiko.ssh_exception.SSHException, Exception) as e:
209 text = 'ssh connect Exception: {}'.format(e)
210 self.ssh_conn = None
211 self.error = text
212 raise
213
214 def check_connectivity(self):
215 if not self.test:
216 try:
217 command = 'sudo brctl show'
218 self.run_command(command)
219 except RunCommandException as e:
220 self.connectivity = False
221 self.logger.error("check_connectivity Exception: " + str(e))
222
223 def load_localinfo(self):
224 if not self.test:
225 try:
226 self.run_command('sudo mkdir -p ' + self.image_path)
227 result = self.run_command('cat {}/.openvim.yaml'.format(self.image_path))
228 self.localinfo = yaml.load(result)
229 js_v(self.localinfo, localinfo_schema)
230 self.localinfo_dirty = False
231 if 'server_files' not in self.localinfo:
232 self.localinfo['server_files'] = {}
233 self.logger.debug("localinfo loaded from host")
234 return
235 except RunCommandException as e:
236 self.logger.error("load_localinfo Exception: " + str(e))
237 except host_thread.lvirt_module.libvirtError as e:
238 text = e.get_error_message()
239 self.logger.error("load_localinfo libvirt Exception: " + text)
240 except yaml.YAMLError as exc:
241 text = ""
242 if hasattr(exc, 'problem_mark'):
243 mark = exc.problem_mark
244 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
245 self.logger.error("load_localinfo yaml format Exception " + text)
246 except js_e.ValidationError as e:
247 text = ""
248 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
249 self.logger.error("load_localinfo format Exception: %s %s", text, str(e))
250 except Exception as e:
251 text = str(e)
252 self.logger.error("load_localinfo Exception: " + text)
253
254 # not loaded, insert a default data and force saving by activating dirty flag
255 self.localinfo = {'files':{}, 'server_files':{} }
256 # self.localinfo_dirty=True
257 self.localinfo_dirty=False
258
259 def load_hostinfo(self):
260 if self.test:
261 return
262 try:
263 result = self.run_command('cat {}/hostinfo.yaml'.format(self.image_path))
264 self.hostinfo = yaml.load(result)
265 js_v(self.hostinfo, hostinfo_schema)
266 self.logger.debug("hostinfo load from host " + str(self.hostinfo))
267 return
268 except RunCommandException as e:
269 self.logger.error("load_hostinfo ssh Exception: " + str(e))
270 except host_thread.lvirt_module.libvirtError as e:
271 text = e.get_error_message()
272 self.logger.error("load_hostinfo libvirt Exception: " + text)
273 except yaml.YAMLError as exc:
274 text = ""
275 if hasattr(exc, 'problem_mark'):
276 mark = exc.problem_mark
277 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
278 self.logger.error("load_hostinfo yaml format Exception " + text)
279 except js_e.ValidationError as e:
280 text = ""
281 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
282 self.logger.error("load_hostinfo format Exception: %s %s", text, str(e))
283 except Exception as e:
284 text = str(e)
285 self.logger.error("load_hostinfo Exception: " + text)
286
287 #not loaded, insert a default data
288 self.hostinfo = None
289
    def save_localinfo(self, tries=3):
        """Write self.localinfo back to .openvim.yaml on the host.

        Uses a run_command keep_session pair: 'cat > file' is opened, the yaml is
        streamed into its stdin, and a second call closes the session.
        :param tries: number of extra attempts on failure (loop runs tries+1 times)
        """
        if self.test:
            self.localinfo_dirty = False
            return

        while tries>=0:
            tries-=1

            try:
                command = 'cat > {}/.openvim.yaml'.format(self.image_path)
                in_stream = self.run_command(command, keep_session=True)
                yaml.safe_dump(self.localinfo, in_stream, explicit_start=True, indent=4, default_flow_style=False,
                               tags=False, encoding='utf-8', allow_unicode=True)
                result = self.run_command(command, keep_session=False)  # to end session

                self.localinfo_dirty = False
                break #while tries

            except RunCommandException as e:
                self.logger.error("save_localinfo ssh Exception: " + str(e))
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                self.logger.error("save_localinfo libvirt Exception: " + text)
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                self.logger.error("save_localinfo yaml format Exception " + text)
            except Exception as e:
                text = str(e)
                self.logger.error("save_localinfo Exception: " + text)
322
    def load_servers_from_db(self):
        """Fill self.server_status (uuid -> status) from the instances table.

        Also migrates the legacy 'inc_files' localinfo layout into the current
        'server_files' layout, marking localinfo dirty when a migration happens.
        """
        r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})

        self.server_status = {}
        if r<0:
            self.logger.error("Error getting data from database: " + c)
            return
        for server in c:
            self.server_status[ server['uuid'] ] = server['status']

            #convert from old version to new one
            if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
                server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
                if server_files_dict['source file'][-5:] == 'qcow2':
                    server_files_dict['file format'] = 'qcow2'

                self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
        # drop the legacy key once all entries have been converted
        if 'inc_files' in self.localinfo:
            del self.localinfo['inc_files']
            self.localinfo_dirty = True
343
344 def delete_unused_files(self):
345 '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
346 Deletes unused entries at self.loacalinfo and the corresponding local files.
347 The only reason for this mismatch is the manual deletion of instances (VM) at database
348 '''
349 if self.test:
350 return
351 for uuid,images in self.localinfo['server_files'].items():
352 if uuid not in self.server_status:
353 for localfile in images.values():
354 try:
355 self.logger.debug("deleting file '%s' of unused server '%s'", localfile['source file'], uuid)
356 self.delete_file(localfile['source file'])
357 except RunCommandException as e:
358 self.logger.error("Exception deleting file '%s': %s", localfile['source file'], str(e))
359 del self.localinfo['server_files'][uuid]
360 self.localinfo_dirty = True
361
362 def insert_task(self, task, *aditional):
363 try:
364 self.queueLock.acquire()
365 task = self.taskQueue.put( (task,) + aditional, timeout=5)
366 self.queueLock.release()
367 return 1, None
368 except Queue.Full:
369 return -1, "timeout inserting a task over host " + self.name
370
    def run(self):
        """Main thread loop.

        The outer loop (re)loads state from the host and the database; the inner
        loop consumes tasks from taskQueue and dispatches them. With no pending
        task it performs housekeeping: save dirty localinfo, refresh server
        status every 5 seconds, or force-off overdue servers. 'exit' terminates
        the thread, 'reload' breaks to the outer loop to reload everything.
        """
        while True:
            self.load_localinfo()
            self.load_hostinfo()
            self.load_servers_from_db()
            self.delete_unused_files()
            while True:
                try:
                    # pop one task (non blocking) under the queue lock
                    self.queueLock.acquire()
                    if not self.taskQueue.empty():
                        task = self.taskQueue.get()
                    else:
                        task = None
                    self.queueLock.release()

                    if task is None:
                        now=time.time()
                        if self.localinfo_dirty:
                            self.save_localinfo()
                        elif self.next_update_server_status < now:
                            self.update_servers_status()
                            self.next_update_server_status = now + 5
                        elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
                            self.server_forceoff()
                        else:
                            time.sleep(1)
                        continue

                    if task[0] == 'instance':
                        self.logger.debug("processing task instance " + str(task[1]['action']))
                        retry = 0
                        # retry once; the second attempt passes retry==2 as 'last try'
                        while retry < 2:
                            retry += 1
                            r = self.action_on_server(task[1], retry==2)
                            if r >= 0:
                                break
                    elif task[0] == 'image':
                        pass
                    elif task[0] == 'exit':
                        self.logger.debug("processing task exit")
                        self.terminate()
                        return 0
                    elif task[0] == 'reload':
                        self.logger.debug("processing task reload terminating and relaunching")
                        self.terminate()
                        break
                    elif task[0] == 'edit-iface':
                        self.logger.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
                            task[1], task[2], task[3]))
                        self.edit_iface(task[1], task[2], task[3])
                    elif task[0] == 'restore-iface':
                        self.logger.debug("processing task restore-iface={} mac={}".format(task[1], task[2]))
                        self.restore_iface(task[1], task[2])
                    elif task[0] == 'new-ovsbridge':
                        self.logger.debug("Creating compute OVS bridge")
                        self.create_ovs_bridge()
                    elif task[0] == 'new-vxlan':
                        self.logger.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task[1], task[2]))
                        self.create_ovs_vxlan_tunnel(task[1], task[2])
                    elif task[0] == 'del-ovsbridge':
                        self.logger.debug("Deleting OVS bridge")
                        self.delete_ovs_bridge()
                    elif task[0] == 'del-vxlan':
                        self.logger.debug("Deleting vxlan {} tunnel".format(task[1]))
                        self.delete_ovs_vxlan_tunnel(task[1])
                    elif task[0] == 'create-ovs-bridge-port':
                        self.logger.debug("Adding port ovim-{} to OVS bridge".format(task[1]))
                        self.create_ovs_bridge_port(task[1])
                    elif task[0] == 'del-ovs-port':
                        self.logger.debug("Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2]))
                        self.delete_bridge_port_attached_to_ovs(task[1], task[2])
                    else:
                        self.logger.debug("unknown task " + str(task))

                except Exception as e:
                    # never let an unexpected error kill the host thread
                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
447
448 def server_forceoff(self, wait_until_finished=False):
449 while len(self.pending_terminate_server)>0:
450 now = time.time()
451 if self.pending_terminate_server[0][0]>now:
452 if wait_until_finished:
453 time.sleep(1)
454 continue
455 else:
456 return
457 req={'uuid':self.pending_terminate_server[0][1],
458 'action':{'terminate':'force'},
459 'status': None
460 }
461 self.action_on_server(req)
462 self.pending_terminate_server.pop(0)
463
464 def terminate(self):
465 try:
466 self.server_forceoff(True)
467 if self.localinfo_dirty:
468 self.save_localinfo()
469 if not self.test:
470 self.ssh_conn.close()
471 except Exception as e:
472 text = str(e)
473 self.logger.error("terminate Exception: " + text)
474 self.logger.debug("exit from host_thread")
475
476 def get_local_iface_name(self, generic_name):
477 if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
478 return self.hostinfo["iface_names"][generic_name]
479 return generic_name
480
    def create_xml_server(self, server, dev_list, server_metadata={}):
        """Generate the libvirt <domain> XML definition for a VM.

        :param server: instance dict (uuid, name, ram, vcpus, optional
            'extended' numa info, 'networks', 'hypervisor', ...)
        :param dev_list: list of devices; the main disk is dev_list[0]
        :param server_metadata: flavor/image metadata (os_type, bus, topology...).
            NOTE: mutable default argument - callers must not modify it.
        :return: (0, xml_text) on success, or (-1, error_text) on error
        """

        #get if operating system is Windows
        windows_os = False
        os_type = server_metadata.get('os_type', None)
        if os_type == None and 'metadata' in dev_list[0]:
            os_type = dev_list[0]['metadata'].get('os_type', None)
        if os_type != None and os_type.lower() == "windows":
            windows_os = True
        #get type of hard disk bus
        bus_ide = True if windows_os else False
        bus = server_metadata.get('bus', None)
        if bus == None and 'metadata' in dev_list[0]:
            bus = dev_list[0]['metadata'].get('bus', None)
        if bus != None:
            bus_ide = True if bus=='ide' else False

        self.xml_level = 0
        hypervisor = server.get('hypervisor', 'kvm')
        os_type_img = server.get('os_image_type', 'other')

        if hypervisor[:3] == 'xen':
            text = "<domain type='xen'>"
        else:
            text = "<domain type='kvm'>"
        #get topology
        topo = server_metadata.get('topology', None)
        if topo == None and 'metadata' in dev_list[0]:
            topo = dev_list[0]['metadata'].get('topology', None)
        #name
        name = server.get('name', '')[:28] + "_" + server['uuid'][:28] #qemu impose a length limit of 59 chars or not start. Using 58
        text += self.inc_tab() + "<name>" + name+ "</name>"
        #uuid
        text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>"

        numa={}
        if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
            numa = server['extended']['numas'][0]
        #memory: numa memory (GiB) wins over the flavor 'ram' (MiB)
        use_huge = False
        memory = int(numa.get('memory',0))*1024*1024 #in KiB
        if memory==0:
            memory = int(server['ram'])*1024;
        else:
            if not self.develop_mode:
                use_huge = True
        if memory==0:
            return -1, 'No memory assigned to instance'
        memory = str(memory)
        text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>"
        text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
        if use_huge:
            text += self.tab()+'<memoryBacking>'+ \
                self.inc_tab() + '<hugepages/>'+ \
                self.dec_tab()+ '</memoryBacking>'

        #cpu: collect (vcpu-id, host-cpu) pairs from the numa description
        use_cpu_pinning=False
        vcpus = int(server.get("vcpus",0))
        cpu_pinning = []
        if 'cores-source' in numa:
            use_cpu_pinning=True
            for index in range(0, len(numa['cores-source'])):
                cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
                vcpus += 1
        if 'threads-source' in numa:
            use_cpu_pinning=True
            for index in range(0, len(numa['threads-source'])):
                cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
                vcpus += 1
        if 'paired-threads-source' in numa:
            use_cpu_pinning=True
            for index in range(0, len(numa['paired-threads-source'])):
                cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
                cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
                vcpus += 2

        if use_cpu_pinning and not self.develop_mode:
            text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
                self.tab()+'<cputune>'
            self.xml_level += 1
            for i in range(0, len(cpu_pinning)):
                text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
            text += self.dec_tab()+'</cputune>'+ \
                self.tab() + '<numatune>' +\
                self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
                self.dec_tab() + '</numatune>'
        else:
            if vcpus==0:
                return -1, "Instance without number of cpus"
            text += self.tab()+"<vcpu>" + str(vcpus) + "</vcpu>"

        #boot
        boot_cdrom = False
        for dev in dev_list:
            if dev['type']=='cdrom' :
                boot_cdrom = True
                break
        if hypervisor == 'xenhvm':
            text += self.tab()+ '<os>' + \
                self.inc_tab() + "<type arch='x86_64' machine='xenfv'>hvm</type>"
            text += self.tab() + "<loader type='rom'>/usr/lib/xen/boot/hvmloader</loader>"
            if boot_cdrom:
                text += self.tab() + "<boot dev='cdrom'/>"
            text += self.tab() + "<boot dev='hd'/>" + \
                self.dec_tab()+'</os>'
        elif hypervisor == 'xen-unik':
            # unikernel: the first device's file is booted directly as the kernel
            text += self.tab()+ '<os>' + \
                self.inc_tab() + "<type arch='x86_64' machine='xenpv'>xen</type>"
            text += self.tab() + "<kernel>" + str(dev_list[0]['source file']) + "</kernel>" + \
                self.dec_tab()+'</os>'
        else:
            text += self.tab()+ '<os>' + \
                self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
            if boot_cdrom:
                text += self.tab() + "<boot dev='cdrom'/>"
            text += self.tab() + "<boot dev='hd'/>" + \
                self.dec_tab()+'</os>'
        #features
        text += self.tab()+'<features>'+\
            self.inc_tab()+'<acpi/>' +\
            self.tab()+'<apic/>' +\
            self.tab()+'<pae/>'+ \
            self.dec_tab() +'</features>'
        if topo == "oneSocket:hyperthreading":
            if vcpus % 2 != 0:
                return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
            # NOTE: vcpus/2 relies on python2 integer division
            text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % (vcpus/2)
        elif windows_os or topo == "oneSocket":
            text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
        else:
            text += self.tab() + "<cpu mode='host-model'></cpu>"
        text += self.tab() + "<clock offset='utc'/>" +\
            self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
            self.tab() + "<on_reboot>restart</on_reboot>" + \
            self.tab() + "<on_crash>restart</on_crash>"
        #devices section: emulator/serial/console depend on the hypervisor
        if hypervisor == 'xenhvm':
            text += self.tab() + "<devices>" + \
                self.inc_tab() + "<emulator>/usr/bin/qemu-system-i386</emulator>" + \
                self.tab() + "<serial type='pty'>" +\
                self.inc_tab() + "<target port='0'/>" + \
                self.dec_tab() + "</serial>" +\
                self.tab() + "<console type='pty'>" + \
                self.inc_tab()+ "<target type='serial' port='0'/>" + \
                self.dec_tab()+'</console>' #In some libvirt version may be: <emulator>/usr/lib64/xen/bin/qemu-dm</emulator> (depends on distro)
        elif hypervisor == 'xen-unik':
            text += self.tab() + "<devices>" + \
                self.tab() + "<console type='pty'>" + \
                self.inc_tab()+ "<target type='xen' port='0'/>" + \
                self.dec_tab()+'</console>'
        else:
            text += self.tab() + "<devices>" + \
                self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
                self.tab() + "<serial type='pty'>" +\
                self.inc_tab() + "<target port='0'/>" + \
                self.dec_tab() + "</serial>" +\
                self.tab() + "<console type='pty'>" + \
                self.inc_tab()+ "<target type='serial' port='0'/>" + \
                self.dec_tab()+'</console>'
        if windows_os:
            text += self.tab() + "<controller type='usb' index='0'/>" + \
                self.tab() + "<controller type='ide' index='0'/>" + \
                self.tab() + "<input type='mouse' bus='ps2'/>" + \
                self.tab() + "<sound model='ich6'/>" + \
                self.tab() + "<video>" + \
                self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
                self.dec_tab() + "</video>" + \
                self.tab() + "<memballoon model='virtio'/>" + \
                self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
        elif hypervisor == 'xen-unik':
            pass
        else:
            text += self.tab() + "<controller type='ide' index='0'/>" + \
                self.tab() + "<input type='mouse' bus='ps2'/>" + \
                self.tab() + "<input type='keyboard' bus='ps2'/>" + \
                self.tab() + "<video>" + \
                self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
                self.dec_tab() + "</video>"

#>             self.tab()+'<alias name=\'hostdev0\'/>\n' +\
#>             self.dec_tab()+'</hostdev>\n' +\
#>             self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
        if windows_os:
            text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
        else:
            #If image contains 'GRAPH' include graphics
            #if 'GRAPH' in image:
            text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
                self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
                self.dec_tab() + "</graphics>"

        # disks: vd_index walks 'a','b',... to build vda/hda, vdb/hdb, ...
        vd_index = 'a'
        for dev in dev_list:
            bus_ide_dev = bus_ide
            if (dev['type']=='cdrom' or dev['type']=='disk') and hypervisor != 'xen-unik':
                if dev['type']=='cdrom':
                    bus_ide_dev = True
                text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
                if 'file format' in dev:
                    text += self.inc_tab() + "<driver name='qemu' type='" +dev['file format']+ "' cache='writethrough'/>"
                if 'source file' in dev:
                    text += self.tab() + "<source file='" +dev['source file']+ "'/>"
                #elif v['type'] == 'block':
                #    text += self.tab() + "<source dev='" + v['source'] + "'/>"
                #else:
                #    return -1, 'Unknown disk type ' + v['type']
                vpci = dev.get('vpci',None)
                if vpci == None and 'metadata' in dev:
                    vpci = dev['metadata'].get('vpci',None)
                text += self.pci2xml(vpci)

                if bus_ide_dev:
                    text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>" #TODO allows several type of disks
                else:
                    text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>"
                text += self.dec_tab() + '</disk>'
                vd_index = chr(ord(vd_index)+1)
            elif dev['type']=='xml':
                # raw xml device: substitute the placeholder tokens
                dev_text = dev['xml']
                if 'vpci' in dev:
                    dev_text = dev_text.replace('__vpci__', dev['vpci'])
                if 'source file' in dev:
                    dev_text = dev_text.replace('__file__', dev['source file'])
                if 'file format' in dev:
                    dev_text = dev_text.replace('__format__', dev['source file'])
                if '__dev__' in dev_text:
                    dev_text = dev_text.replace('__dev__', vd_index)
                    vd_index = chr(ord(vd_index)+1)
                text += dev_text
            elif hypervisor == 'xen-unik':
                pass
            else:
                return -1, 'Unknown device type ' + dev['type']

        # bridge/OVS networks taken from the 'nets' table provider field
        net_nb=0
        bridge_interfaces = server.get('networks', [])
        for v in bridge_interfaces:
            #Get the brifge name
            result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
            if result <= 0:
                self.logger.error("create_xml_server ERROR %d getting nets %s", result, content)
                return -1, content
            #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
            #I know it is not secure
            #for v in sorted(desc['network interfaces'].itervalues()):
            model = v.get("model", None)
            if content[0]['provider']=='default':
                text += self.tab() + "<interface type='network'>" + \
                    self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
            elif content[0]['provider'][0:7]=='macvtap':
                text += self.tab()+"<interface type='direct'>" + \
                    self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
                    self.tab() + "<target dev='macvtap0'/>"
                if windows_os:
                    text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
                elif model==None:
                    model = "virtio"
            elif content[0]['provider'][0:6]=='bridge':
                text += self.tab() + "<interface type='bridge'>" + \
                    self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
                if windows_os:
                    text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
                        self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
                elif model==None:
                    model = "virtio"
            elif content[0]['provider'][0:3] == "OVS":
                vlan = content[0]['provider'].replace('OVS:', '')
                text += self.tab() + "<interface type='bridge'>" + \
                    self.inc_tab() + "<source bridge='ovim-" + str(vlan) + "'/>"
                if hypervisor == 'xenhvm' or hypervisor == 'xen-unik':
                    text += self.tab() + "<script path='vif-openvswitch'/>"
            else:
                return -1, 'Unknown Bridge net provider ' + content[0]['provider']
            if model!=None:
                text += self.tab() + "<model type='" +model+ "'/>"
            if v.get('mac_address', None) != None:
                text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
            text += self.pci2xml(v.get('vpci',None))
            text += self.dec_tab()+'</interface>'

            net_nb += 1

        # dataplane (numa) interfaces: PCI passthrough or SR-IOV
        interfaces = numa.get('interfaces', [])

        net_nb=0
        for v in interfaces:
            if self.develop_mode: #map these interfaces to bridges
                text += self.tab() + "<interface type='bridge'>" + \
                    self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
                if windows_os:
                    text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
                        self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
                else:
                    text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
                if v.get('mac_address', None) != None:
                    text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
                text += self.pci2xml(v.get('vpci',None))
                text += self.dec_tab()+'</interface>'
                continue

            if v['dedicated'] == 'yes': #passthrought
                text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
                    self.inc_tab() + "<source>"
                self.inc_tab()
                text += self.pci2xml(v['source'])
                text += self.dec_tab()+'</source>'
                text += self.pci2xml(v.get('vpci',None))
                if windows_os:
                    text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
                text += self.dec_tab()+'</hostdev>'
                net_nb += 1
            else: #sriov_interfaces
                #skip not connected interfaces
                if v.get("net_id") == None:
                    continue
                text += self.tab() + "<interface type='hostdev' managed='yes'>"
                self.inc_tab()
                if v.get('mac_address', None) != None:
                    text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
                text+= self.tab()+'<source>'
                self.inc_tab()
                text += self.pci2xml(v['source'])
                text += self.dec_tab()+'</source>'
                if v.get('vlan',None) != None:
                    text += self.tab() + "<vlan> <tag id='" + str(v['vlan']) + "'/> </vlan>"
                text += self.pci2xml(v.get('vpci',None))
                if windows_os:
                    text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
                text += self.dec_tab()+'</interface>'


        text += self.dec_tab()+'</devices>'+\
            self.dec_tab()+'</domain>'
        return 0, text
818
819 def pci2xml(self, pci):
820 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
821 alows an empty pci text'''
822 if pci is None:
823 return ""
824 first_part = pci.split(':')
825 second_part = first_part[2].split('.')
826 return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
827 "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
828 "' function='0x" + second_part[1] + "'/>"
829
830 def tab(self):
831 """Return indentation according to xml_level"""
832 return "\n" + (' '*self.xml_level)
833
834 def inc_tab(self):
835 """Increment and return indentation according to xml_level"""
836 self.xml_level += 1
837 return self.tab()
838
839 def dec_tab(self):
840 """Decrement and return indentation according to xml_level"""
841 self.xml_level -= 1
842 return self.tab()
843
844 def create_ovs_bridge(self):
845 """
846 Create a bridge in compute OVS to allocate VMs
847 :return: True if success
848 """
849 if self.test or not self.connectivity:
850 return True
851
852 try:
853 self.run_command('sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true')
854
855 return True
856 except RunCommandException as e:
857 self.logger.error("create_ovs_bridge ssh Exception: " + str(e))
858 if "SSH session not active" in str(e):
859 self.ssh_connect()
860 return False
861
862 def delete_port_to_ovs_bridge(self, vlan, net_uuid):
863 """
864 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
865 :param vlan: vlan port id
866 :param net_uuid: network id
867 :return:
868 """
869
870 if self.test or not self.connectivity:
871 return True
872 try:
873 port_name = 'ovim-{}'.format(str(vlan))
874 command = 'sudo ovs-vsctl del-port br-int {}'.format(port_name)
875 self.run_command(command)
876 return True
877 except RunCommandException as e:
878 self.logger.error("delete_port_to_ovs_bridge ssh Exception: {}".format(str(e)))
879 return False
880
881 def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
882 """
883 Delete dhcp server process lining in namespace
884 :param vlan: segmentation id
885 :param net_uuid: network uuid
886 :param dhcp_path: conf fiel path that live in namespace side
887 :return:
888 """
889 if self.test or not self.connectivity:
890 return True
891 if not self.is_dhcp_port_free(vlan, net_uuid):
892 return True
893 try:
894 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
895 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
896 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
897
898 command = 'sudo ip netns exec {} cat {}'.format(dhcp_namespace, pid_file)
899 content = self.run_command(command, ignore_exit_status=True)
900 dns_pid = content.replace('\n', '')
901 command = 'sudo ip netns exec {} kill -9 {}'.format(dhcp_namespace, dns_pid)
902 self.run_command(command, ignore_exit_status=True)
903
904 except RunCommandException as e:
905 self.logger.error("delete_dhcp_server ssh Exception: " + str(e))
906 return False
907
908 def is_dhcp_port_free(self, host_id, net_uuid):
909 """
910 Check if any port attached to the a net in a vxlan mesh across computes nodes
911 :param host_id: host id
912 :param net_uuid: network id
913 :return: True if is not free
914 """
915 result, content = self.db.get_table(
916 FROM='ports',
917 WHERE={'type': 'instance:ovs', 'net_id': net_uuid}
918 )
919
920 if len(content) > 0:
921 return False
922 else:
923 return True
924
925 def is_port_free(self, host_id, net_uuid):
926 """
927 Check if there not ovs ports of a network in a compute host.
928 :param host_id: host id
929 :param net_uuid: network id
930 :return: True if is not free
931 """
932
933 result, content = self.db.get_table(
934 FROM='ports as p join instances as i on p.instance_id=i.uuid',
935 WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
936 )
937
938 if len(content) > 0:
939 return False
940 else:
941 return True
942
943 def add_port_to_ovs_bridge(self, vlan):
944 """
945 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
946 :param vlan: vlan port id
947 :return: True if success
948 """
949
950 if self.test:
951 return True
952 try:
953 port_name = 'ovim-{}'.format(str(vlan))
954 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(port_name, str(vlan))
955 self.run_command(command)
956 return True
957 except RunCommandException as e:
958 self.logger.error("add_port_to_ovs_bridge Exception: " + str(e))
959 return False
960
961 def delete_dhcp_port(self, vlan, net_uuid, dhcp_path):
962 """
963 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
964 :param vlan: segmentation id
965 :param net_uuid: network id
966 :return: True if success
967 """
968
969 if self.test:
970 return True
971
972 if not self.is_dhcp_port_free(vlan, net_uuid):
973 return True
974 self.delete_dhcp_interfaces(vlan, dhcp_path)
975 return True
976
977 def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
978 """
979 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
980 :param vlan:
981 :param net_uuid:
982 :return: True if success
983 """
984 if self.test:
985 return
986
987 if not self.is_port_free(vlan, net_uuid):
988 return True
989 self.delete_port_to_ovs_bridge(vlan, net_uuid)
990 self.delete_linux_bridge(vlan)
991 return True
992
993 def delete_linux_bridge(self, vlan):
994 """
995 Delete a linux bridge in a scpecific compute.
996 :param vlan: vlan port id
997 :return: True if success
998 """
999
1000 if self.test:
1001 return True
1002 try:
1003 port_name = 'ovim-{}'.format(str(vlan))
1004 command = 'sudo ip link set dev ovim-{} down'.format(str(vlan))
1005 self.run_command(command)
1006
1007 command = 'sudo ip link delete {} && sudo brctl delbr {}'.format(port_name, port_name)
1008 self.run_command(command)
1009 return True
1010 except RunCommandException as e:
1011 self.logger.error("delete_linux_bridge Exception: {}".format(str(e)))
1012 return False
1013
1014 def remove_link_bridge_to_ovs(self, vlan, link):
1015 """
1016 Delete a linux provider net connection to tenatn net
1017 :param vlan: vlan port id
1018 :param link: link name
1019 :return: True if success
1020 """
1021
1022 if self.test:
1023 return True
1024 try:
1025 br_tap_name = '{}-vethBO'.format(str(vlan))
1026 br_ovs_name = '{}-vethOB'.format(str(vlan))
1027
1028 # Delete ovs veth pair
1029 command = 'sudo ip link set dev {} down'.format(br_ovs_name)
1030 self.run_command(command, ignore_exit_status=True)
1031
1032 command = 'sudo ovs-vsctl del-port br-int {}'.format(br_ovs_name)
1033 self.run_command(command)
1034
1035 # Delete br veth pair
1036 command = 'sudo ip link set dev {} down'.format(br_tap_name)
1037 self.run_command(command, ignore_exit_status=True)
1038
1039 # Delete br veth interface form bridge
1040 command = 'sudo brctl delif {} {}'.format(link, br_tap_name)
1041 self.run_command(command)
1042
1043 # Delete br veth pair
1044 command = 'sudo ip link set dev {} down'.format(link)
1045 self.run_command(command, ignore_exit_status=True)
1046
1047 return True
1048 except RunCommandException as e:
1049 self.logger.error("remove_link_bridge_to_ovs Exception: {}".format(str(e)))
1050 return False
1051
1052 def create_ovs_bridge_port(self, vlan):
1053 """
1054 Generate a linux bridge and attache the port to a OVS bridge
1055 :param vlan: vlan port id
1056 :return:
1057 """
1058 if self.test:
1059 return
1060 self.create_linux_bridge(vlan)
1061 self.add_port_to_ovs_bridge(vlan)
1062
1063 def create_linux_bridge(self, vlan):
1064 """
1065 Create a linux bridge with STP active
1066 :param vlan: netowrk vlan id
1067 :return:
1068 """
1069
1070 if self.test:
1071 return True
1072 try:
1073 port_name = 'ovim-{}'.format(str(vlan))
1074 command = 'sudo brctl show | grep {}'.format(port_name)
1075 result = self.run_command(command, ignore_exit_status=True)
1076 if not result:
1077 command = 'sudo brctl addbr {}'.format(port_name)
1078 self.run_command(command)
1079
1080 command = 'sudo brctl stp {} on'.format(port_name)
1081 self.run_command(command)
1082
1083 command = 'sudo ip link set dev {} up'.format(port_name)
1084 self.run_command(command)
1085 return True
1086 except RunCommandException as e:
1087 self.logger.error("create_linux_bridge ssh Exception: {}".format(str(e)))
1088 return False
1089
1090 def set_mac_dhcp_server(self, ip, mac, vlan, netmask, first_ip, dhcp_path):
1091 """
1092 Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
1093 :param ip: IP address asigned to a VM
1094 :param mac: VM vnic mac to be macthed with the IP received
1095 :param vlan: Segmentation id
1096 :param netmask: netmask value
1097 :param path: dhcp conf file path that live in namespace side
1098 :return: True if success
1099 """
1100
1101 if self.test:
1102 return True
1103
1104 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
1105 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1106 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1107 ns_interface = '{}-vethDO'.format(str(vlan))
1108
1109 if not ip:
1110 return False
1111 try:
1112 command = 'sudo ip netns exec {} cat /sys/class/net/{}/address'.format(dhcp_namespace, ns_interface)
1113 iface_listen_mac = self.run_command(command, ignore_exit_status=True)
1114
1115 if iface_listen_mac > 0:
1116 command = 'sudo ip netns exec {} cat {} | grep -i {}'.format(dhcp_namespace,
1117 dhcp_hostsdir,
1118 iface_listen_mac)
1119 content = self.run_command(command, ignore_exit_status=True)
1120 if content == '':
1121 ip_data = iface_listen_mac.upper().replace('\n', '') + ',' + first_ip
1122 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1123
1124 command = 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace,
1125 ip_data,
1126 dhcp_hostsdir)
1127 self.run_command(command)
1128
1129 ip_data = mac.upper() + ',' + ip
1130 command = 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace,
1131 ip_data,
1132 dhcp_hostsdir)
1133 self.run_command(command, ignore_exit_status=False)
1134
1135 return True
1136
1137 return False
1138 except RunCommandException as e:
1139 self.logger.error("set_mac_dhcp_server ssh Exception: " + str(e))
1140 return False
1141
1142 def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
1143 """
1144 Delete into dhcp conf file the ip assigned to a specific MAC address
1145
1146 :param ip: IP address asigned to a VM
1147 :param mac: VM vnic mac to be macthed with the IP received
1148 :param vlan: Segmentation id
1149 :param dhcp_path: dhcp conf file path that live in namespace side
1150 :return:
1151 """
1152
1153 if self.test:
1154 return False
1155 try:
1156 dhcp_namespace = str(vlan) + '-dnsmasq'
1157 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1158 dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
1159
1160 if not ip:
1161 return False
1162
1163 ip_data = mac.upper() + ',' + ip
1164
1165 command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
1166 self.run_command(command)
1167
1168 return True
1169 except RunCommandException as e:
1170 self.logger.error("delete_mac_dhcp_server Exception: " + str(e))
1171 return False
1172
1173 def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway, dns_list=None, routes=None):
1174 """
1175 Generate a linux bridge and attache the port to a OVS bridge
1176 :param self:
1177 :param vlan: Segmentation id
1178 :param ip_range: IP dhcp range
1179 :param netmask: network netmask
1180 :param dhcp_path: dhcp conf file path that live in namespace side
1181 :param gateway: Gateway address for dhcp net
1182 :param dns_list: dns list for dhcp server
1183 :param routes: routes list for dhcp server
1184 :return: True if success
1185 """
1186
1187 if self.test:
1188 return True
1189 try:
1190 ns_interface = str(vlan) + '-vethDO'
1191 dhcp_namespace = str(vlan) + '-dnsmasq'
1192 dhcp_path = os.path.join(dhcp_path, dhcp_namespace, '')
1193 leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
1194 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
1195
1196 dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask
1197
1198 command = 'sudo ip netns exec {} mkdir -p {}'.format(dhcp_namespace, dhcp_path)
1199 self.run_command(command)
1200
1201 # check if dnsmasq process is running
1202 dnsmasq_is_runing = False
1203 pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
1204 command = 'sudo ip netns exec ' + dhcp_namespace + ' ls ' + pid_path
1205 content = self.run_command(command, ignore_exit_status=True)
1206
1207 # check if pid is runing
1208 if content:
1209 pid_path = content.replace('\n', '')
1210 command = "ps aux | awk '{print $2 }' | grep {}" + pid_path
1211 dnsmasq_is_runing = self.run_command(command, ignore_exit_status=True)
1212
1213 gateway_option = ' --dhcp-option=3,' + gateway
1214
1215 dhcp_route_option = ''
1216 if routes:
1217 dhcp_route_option = ' --dhcp-option=121'
1218 for key, value in routes.iteritems():
1219 if 'default' == key:
1220 gateway_option = ' --dhcp-option=3,' + value
1221 else:
1222 dhcp_route_option += ',' + key + ',' + value
1223 dns_data = ''
1224 if dns_list:
1225 dns_data = ' --dhcp-option=6'
1226 for dns in dns_list:
1227 dns_data += ',' + dns
1228
1229 if not dnsmasq_is_runing:
1230 command = 'sudo ip netns exec ' + dhcp_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
1231 '--interface=' + ns_interface + \
1232 ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
1233 ' --dhcp-range ' + dhcp_range + \
1234 ' --pid-file=' + pid_file + \
1235 ' --dhcp-leasefile=' + leases_path + \
1236 ' --listen-address ' + ip_range[0] + \
1237 gateway_option + \
1238 dhcp_route_option + \
1239 dns_data
1240
1241 self.run_command(command)
1242 return True
1243 except RunCommandException as e:
1244 self.logger.error("launch_dhcp_server ssh Exception: " + str(e))
1245 return False
1246
1247 def delete_dhcp_interfaces(self, vlan, dhcp_path):
1248 """
1249 Delete a linux dnsmasq bridge and namespace
1250 :param vlan: netowrk vlan id
1251 :param dhcp_path:
1252 :return:
1253 """
1254 if self.test:
1255 return True
1256 try:
1257 br_veth_name ='{}-vethDO'.format(str(vlan))
1258 ovs_veth_name = '{}-vethOD'.format(str(vlan))
1259 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
1260
1261 dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
1262 command = 'sudo ovs-vsctl del-port br-int {}'.format(ovs_veth_name)
1263 self.run_command(command, ignore_exit_status=True) # to end session
1264
1265 command = 'sudo ip link set dev {} down'.format(ovs_veth_name)
1266 self.run_command(command, ignore_exit_status=True) # to end session
1267
1268 command = 'sudo ip link delete {} '.format(ovs_veth_name)
1269 self.run_command(command, ignore_exit_status=True)
1270
1271 command = 'sudo ip netns exec {} ip link set dev {} down'.format(dhcp_namespace, br_veth_name)
1272 self.run_command(command, ignore_exit_status=True)
1273
1274 command = 'sudo rm -rf {}'.format(dhcp_path)
1275 self.run_command(command)
1276
1277 command = 'sudo ip netns del {}'.format(dhcp_namespace)
1278 self.run_command(command)
1279
1280 return True
1281 except RunCommandException as e:
1282 self.logger.error("delete_dhcp_interfaces ssh Exception: {}".format(str(e)))
1283 return False
1284
1285 def create_dhcp_interfaces(self, vlan, ip_listen_address, netmask):
1286 """
1287 Create a linux bridge with STP active
1288 :param vlan: segmentation id
1289 :param ip_listen_address: Listen Ip address for the dhcp service, the tap interface living in namesapce side
1290 :param netmask: dhcp net CIDR
1291 :return: True if success
1292 """
1293 if self.test:
1294 return True
1295 try:
1296 ovs_veth_name = '{}-vethOD'.format(str(vlan))
1297 ns_veth = '{}-vethDO'.format(str(vlan))
1298 dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
1299
1300 command = 'sudo ip netns add {}'.format(dhcp_namespace)
1301 self.run_command(command)
1302
1303 command = 'sudo ip link add {} type veth peer name {}'.format(ns_veth, ovs_veth_name)
1304 self.run_command(command)
1305
1306 command = 'sudo ip link set {} netns {}'.format(ns_veth, dhcp_namespace)
1307 self.run_command(command)
1308
1309 command = 'sudo ip netns exec {} ip link set dev {} up'.format(dhcp_namespace, ns_veth)
1310 self.run_command(command)
1311
1312 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(ovs_veth_name, str(vlan))
1313 self.run_command(command, ignore_exit_status=True)
1314
1315 command = 'sudo ip link set dev {} up'.format(ovs_veth_name)
1316 self.run_command(command)
1317
1318 command = 'sudo ip netns exec {} ip link set dev lo up'.format(dhcp_namespace)
1319 self.run_command(command)
1320
1321 command = 'sudo ip netns exec {} ifconfig {} {} netmask {}'.format(dhcp_namespace,
1322 ns_veth,
1323 ip_listen_address,
1324 netmask)
1325 self.run_command(command)
1326 return True
1327 except RunCommandException as e:
1328 self.logger.error("create_dhcp_interfaces ssh Exception: {}".format(str(e)))
1329 return False
1330
1331 def delete_qrouter_connection(self, vlan, link):
1332 """
1333 Delete qrouter Namesapce with all veth interfaces need it
1334 :param vlan:
1335 :param link:
1336 :return:
1337 """
1338
1339 if self.test:
1340 return True
1341 try:
1342 ns_qouter = '{}-qrouter'.format(str(vlan))
1343 qrouter_ovs_veth = '{}-vethOQ'.format(str(vlan))
1344 qrouter_ns_veth = '{}-vethQO'.format(str(vlan))
1345 qrouter_br_veth = '{}-vethBQ'.format(str(vlan))
1346 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1347
1348 command = 'sudo ovs-vsctl del-port br-int {}'.format(qrouter_ovs_veth)
1349 self.run_command(command, ignore_exit_status=True)
1350
1351 # down ns veth
1352 command = 'sudo ip netns exec {} ip link set dev {} down'.format(ns_qouter, qrouter_ns_veth)
1353 self.run_command(command, ignore_exit_status=True)
1354
1355 command = 'sudo ip netns exec {} ip link delete {} '.format(ns_qouter, qrouter_ns_veth)
1356 self.run_command(command, ignore_exit_status=True)
1357
1358 command = 'sudo ip netns del ' + ns_qouter
1359 self.run_command(command)
1360
1361 # down ovs veth interface
1362 command = 'sudo ip link set dev {} down'.format(qrouter_br_veth)
1363 self.run_command(command, ignore_exit_status=True)
1364
1365 # down br veth interface
1366 command = 'sudo ip link set dev {} down'.format(qrouter_ovs_veth)
1367 self.run_command(command, ignore_exit_status=True)
1368
1369 # delete veth interface
1370 command = 'sudo ip link delete {} '.format(link, qrouter_ovs_veth)
1371 self.run_command(command, ignore_exit_status=True)
1372
1373 # down br veth interface
1374 command = 'sudo ip link set dev {} down'.format(qrouter_ns_router_veth)
1375 self.run_command(command, ignore_exit_status=True)
1376
1377 # delete veth interface
1378 command = 'sudo ip link delete {} '.format(link, qrouter_ns_router_veth)
1379 self.run_command(command, ignore_exit_status=True)
1380
1381 # down br veth interface
1382 command = 'sudo brctl delif {} {}'.format(link, qrouter_br_veth)
1383 self.run_command(command)
1384
1385 # delete NS
1386 return True
1387 except RunCommandException as e:
1388 self.logger.error("delete_qrouter_connection ssh Exception: {}".format(str(e)))
1389 return False
1390
1391 def create_qrouter_ovs_connection(self, vlan, gateway, dhcp_cidr):
1392 """
1393 Create qrouter Namesapce with all veth interfaces need it between NS and OVS
1394 :param vlan:
1395 :param gateway:
1396 :return:
1397 """
1398
1399 if self.test:
1400 return True
1401
1402 try:
1403 ns_qouter = '{}-qrouter'.format(str(vlan))
1404 qrouter_ovs_veth ='{}-vethOQ'.format(str(vlan))
1405 qrouter_ns_veth = '{}-vethQO'.format(str(vlan))
1406
1407 # Create NS
1408 command = 'sudo ip netns add {}'.format(ns_qouter)
1409 self.run_command(command)
1410
1411 # Create pait veth
1412 command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_ns_veth, qrouter_ovs_veth)
1413 self.run_command(command, ignore_exit_status=True)
1414
1415 # up ovs veth interface
1416 command = 'sudo ip link set dev {} up'.format(qrouter_ovs_veth)
1417 self.run_command(command)
1418
1419 # add ovs veth to ovs br-int
1420 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(qrouter_ovs_veth, vlan)
1421 self.run_command(command)
1422
1423 # add veth to ns
1424 command = 'sudo ip link set {} netns {}'.format(qrouter_ns_veth, ns_qouter)
1425 self.run_command(command)
1426
1427 # up ns loopback
1428 command = 'sudo ip netns exec {} ip link set dev lo up'.format(ns_qouter)
1429 self.run_command(command)
1430
1431 # up ns veth
1432 command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_veth)
1433 self.run_command(command)
1434
1435 from netaddr import IPNetwork
1436 ip_tools = IPNetwork(dhcp_cidr)
1437 cidr_len = ip_tools.prefixlen
1438
1439 # set gw to ns veth
1440 command = 'sudo ip netns exec {} ip address add {}/{} dev {}'.format(ns_qouter, gateway, cidr_len, qrouter_ns_veth)
1441 self.run_command(command)
1442
1443 return True
1444
1445 except RunCommandException as e:
1446 self.logger.error("Create_dhcp_interfaces ssh Exception: {}".format(str(e)))
1447 return False
1448
1449 def add_ns_routes(self, vlan, routes):
1450 """
1451
1452 :param vlan:
1453 :param routes:
1454 :return:
1455 """
1456
1457 if self.test:
1458 return True
1459
1460 try:
1461 ns_qouter = '{}-qrouter'.format(str(vlan))
1462 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1463
1464 for key, value in routes.iteritems():
1465 # up ns veth
1466 if key == 'default':
1467 command = 'sudo ip netns exec {} ip route add {} via {} '.format(ns_qouter, key, value)
1468 else:
1469 command = 'sudo ip netns exec {} ip route add {} via {} dev {}'.format(ns_qouter, key, value,
1470 qrouter_ns_router_veth)
1471
1472 self.run_command(command)
1473
1474 return True
1475
1476 except RunCommandException as e:
1477 self.logger.error("add_ns_routes, error adding routes to namesapce, {}".format(str(e)))
1478 return False
1479
1480 def create_qrouter_br_connection(self, vlan, cidr, link):
1481 """
1482 Create veth interfaces between user bridge (link) and OVS
1483 :param vlan:
1484 :param link:
1485 :return:
1486 """
1487
1488 if self.test:
1489 return True
1490
1491 try:
1492 ns_qouter = '{}-qrouter'.format(str(vlan))
1493 qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
1494 qrouter_br_veth = '{}-vethBQ'.format(str(vlan))
1495
1496 command = 'sudo brctl show | grep {}'.format(link['iface'])
1497 content = self.run_command(command, ignore_exit_status=True)
1498
1499 if content > '':
1500 # Create pait veth
1501 command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_br_veth, qrouter_ns_router_veth)
1502 self.run_command(command)
1503
1504 # up ovs veth interface
1505 command = 'sudo ip link set dev {} up'.format(qrouter_br_veth)
1506 self.run_command(command)
1507
1508 # add veth to ns
1509 command = 'sudo ip link set {} netns {}'.format(qrouter_ns_router_veth, ns_qouter)
1510 self.run_command(command)
1511
1512 # up ns veth
1513 command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_router_veth)
1514 self.run_command(command)
1515
1516 command = 'sudo ip netns exec {} ip address add {} dev {}'.format(ns_qouter,
1517 link['nat'],
1518 qrouter_ns_router_veth)
1519 self.run_command(command)
1520
1521 # up ns veth
1522 command = 'sudo brctl addif {} {}'.format(link['iface'], qrouter_br_veth)
1523 self.run_command(command)
1524
1525 # up ns veth
1526 command = 'sudo ip netns exec {} iptables -t nat -A POSTROUTING -o {} -s {} -d {} -j MASQUERADE' \
1527 .format(ns_qouter, qrouter_ns_router_veth, link['nat'], cidr)
1528 self.run_command(command)
1529
1530 return True
1531 else:
1532
1533 self.logger.error('create_qrouter_br_connection, Bridge {} given by user not exist'.format(qrouter_br_veth))
1534 return False
1535
1536 except RunCommandException as e:
1537 self.logger.error("Error creating qrouter, {}".format(str(e)))
1538 return False
1539
1540 def create_link_bridge_to_ovs(self, vlan, link):
1541 """
1542 Create interfaces to connect a linux bridge with tenant net
1543 :param vlan: segmentation id
1544 :return: True if success
1545 """
1546 if self.test:
1547 return True
1548 try:
1549
1550 br_tap_name = '{}-vethBO'.format(str(vlan))
1551 br_ovs_name = '{}-vethOB'.format(str(vlan))
1552
1553 # is a bridge or a interface
1554 command = 'sudo brctl show | grep {}'.format(link)
1555 content = self.run_command(command, ignore_exit_status=True)
1556 if content > '':
1557 command = 'sudo ip link add {} type veth peer name {}'.format(br_tap_name, br_ovs_name)
1558 self.run_command(command)
1559
1560 command = 'sudo ip link set dev {} up'.format(br_tap_name)
1561 self.run_command(command)
1562
1563 command = 'sudo ip link set dev {} up'.format(br_ovs_name)
1564 self.run_command(command)
1565
1566 command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(br_ovs_name, str(vlan))
1567 self.run_command(command)
1568
1569 command = 'sudo brctl addif ' + link + ' {}'.format(br_tap_name)
1570 self.run_command(command)
1571 return True
1572 else:
1573 self.logger.error('Link is not present, please check {}'.format(link))
1574 return False
1575
1576 except RunCommandException as e:
1577 self.logger.error("create_link_bridge_to_ovs, Error creating link to ovs, {}".format(str(e)))
1578 return False
1579
1580 def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
1581 """
1582 Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
1583 :param vxlan_interface: vlxan inteface name.
1584 :param remote_ip: tunnel endpoint remote compute ip.
1585 :return:
1586 """
1587 if self.test or not self.connectivity:
1588 return True
1589 if remote_ip == 'localhost':
1590 if self.localhost:
1591 return True # TODO: Cannot create a vxlan between localhost and localhost
1592 remote_ip = self.local_ip
1593 try:
1594
1595 command = 'sudo ovs-vsctl add-port br-int {} -- set Interface {} type=vxlan options:remote_ip={} ' \
1596 '-- set Port {} other_config:stp-path-cost=10'.format(vxlan_interface,
1597 vxlan_interface,
1598 remote_ip,
1599 vxlan_interface)
1600 self.run_command(command)
1601 return True
1602 except RunCommandException as e:
1603 self.logger.error("create_ovs_vxlan_tunnel, error creating vxlan tunnel, {}".format(str(e)))
1604 return False
1605
1606 def delete_ovs_vxlan_tunnel(self, vxlan_interface):
1607 """
1608 Delete a vlxan tunnel port from a OVS brdige.
1609 :param vxlan_interface: vlxan name to be delete it.
1610 :return: True if success.
1611 """
1612 if self.test or not self.connectivity:
1613 return True
1614 try:
1615 command = 'sudo ovs-vsctl del-port br-int {}'.format(vxlan_interface)
1616 self.run_command(command)
1617 return True
1618 except RunCommandException as e:
1619 self.logger.error("delete_ovs_vxlan_tunnel, error deleting vxlan tunenl, {}".format(str(e)))
1620 return False
1621
1622 def delete_ovs_bridge(self):
1623 """
1624 Delete a OVS bridge from a compute.
1625 :return: True if success
1626 """
1627 if self.test or not self.connectivity:
1628 return True
1629 try:
1630 command = 'sudo ovs-vsctl del-br br-int'
1631 self.run_command(command)
1632 return True
1633 except RunCommandException as e:
1634 self.logger.error("delete_ovs_bridge ssh Exception: {}".format(str(e)))
1635 return False
1636
1637 def get_file_info(self, path):
1638 command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1639 try:
1640 content = self.run_command(command)
1641 return content.split(" ") # (permission, 1, owner, group, size, date, file)
1642 except RunCommandException as e:
1643 return None # file does not exist
1644
1645 def qemu_get_info(self, path):
1646 command = 'qemu-img info ' + path
1647 content = self.run_command(command)
1648 try:
1649 return yaml.load(content)
1650 except yaml.YAMLError as exc:
1651 text = ""
1652 if hasattr(exc, 'problem_mark'):
1653 mark = exc.problem_mark
1654 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
1655 self.logger.error("get_qemu_info yaml format Exception " + text)
1656 raise RunCommandException("Error getting qemu_info yaml format" + text)
1657
1658 def qemu_change_backing(self, inc_file, new_backing_file):
1659 command = 'qemu-img rebase -u -b {} {}'.format(new_backing_file, inc_file)
1660 try:
1661 self.run_command(command)
1662 return 0
1663 except RunCommandException as e:
1664 self.logger.error("qemu_change_backing error: " + str(e))
1665 return -1
1666
1667 def qemu_create_empty_disk(self, dev):
1668
1669 if not dev and 'source' not in dev and 'file format' not in dev and 'image_size' not in dev:
1670 self.logger.error("qemu_create_empty_disk error: missing image parameter")
1671 return -1
1672
1673 empty_disk_path = dev['source file']
1674
1675 command = 'qemu-img create -f qcow2 {} {}G'.format(empty_disk_path, dev['image_size'])
1676 try:
1677 self.run_command(command)
1678 return 0
1679 except RunCommandException as e:
1680 self.logger.error("qemu_create_empty_disk error: " + str(e))
1681 return -1
1682
1683 def get_notused_filename(self, proposed_name, suffix=''):
1684 '''Look for a non existing file_name in the host
1685 proposed_name: proposed file name, includes path
1686 suffix: suffix to be added to the name, before the extention
1687 '''
1688 extension = proposed_name.rfind(".")
1689 slash = proposed_name.rfind("/")
1690 if extension < 0 or extension < slash: # no extension
1691 extension = len(proposed_name)
1692 target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
1693 info = self.get_file_info(target_name)
1694 if info is None:
1695 return target_name
1696
1697 index=0
1698 while info is not None:
1699 target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
1700 index+=1
1701 info = self.get_file_info(target_name)
1702 return target_name
1703
1704 def get_notused_path(self, proposed_path, suffix=''):
1705 '''Look for a non existing path at database for images
1706 proposed_path: proposed file name, includes path
1707 suffix: suffix to be added to the name, before the extention
1708 '''
1709 extension = proposed_path.rfind(".")
1710 if extension < 0:
1711 extension = len(proposed_path)
1712 if suffix != None:
1713 target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
1714 index=0
1715 while True:
1716 r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
1717 if r<=0:
1718 return target_path
1719 target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
1720 index+=1
1721
1722
1723 def delete_file(self, file_name):
1724 command = 'rm -f ' + file_name
1725 self.run_command(command)
1726
1727 def copy_file(self, source, destination, perserve_time=True):
1728 if source[0:4]=="http":
1729 command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1730 dst=destination, src=source, dst_result=destination + ".result" )
1731 else:
1732 command = 'cp --no-preserve=mode'
1733 if perserve_time:
1734 command += ' --preserve=timestamps'
1735 command += " '{}' '{}'".format(source, destination)
1736 self.run_command(command)
1737
    def copy_remote_file(self, remote_file, use_incremental):
        ''' Copy a file from the repository to local folder and recursively
            copy the backing files in case the remote file is incremental
            Read and/or modified self.localinfo['files'] that contain the
            unmodified copies of images in the local path
            params:
                remote_file: path of remote file
                use_incremental: None (leave the decision to this function), True, False
            return:
                local_file: name of local file
                qemu_info: dict with quemu information of local file
                use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
        '''

        use_incremental_out = use_incremental
        new_backing_file = None
        local_file = None
        file_from_local = True

        #in case incremental use is not decided, take the decision depending on the image
        #avoid the use of incremental if this image is already incremental
        # http(s) sources cannot be inspected with qemu-img, treat them as remote
        if remote_file[0:4] == "http":
            file_from_local = False
        if file_from_local:
            qemu_remote_info = self.qemu_get_info(remote_file)
        # NOTE: short-circuit keeps qemu_remote_info from being read when it
        # was never assigned (non-local file)
        if use_incremental_out==None:
            use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)
        #copy recursivelly the backing files
        if file_from_local and 'backing file' in qemu_remote_info:
            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

        #check if remote file is present locally
        if use_incremental_out and remote_file in self.localinfo['files']:
            local_file = self.localinfo['files'][remote_file]
            local_file_info = self.get_file_info(local_file)
            if file_from_local:
                remote_file_info = self.get_file_info(remote_file)
            if local_file_info == None:
                local_file = None
            # fields 4 and 5 of get_file_info() are size and date
            elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
                #local copy of file not valid because date or size are different.
                #TODO DELETE local file if this file is not used by any active virtual machine
                try:
                    self.delete_file(local_file)
                    del self.localinfo['files'][remote_file]
                except Exception:
                    pass
                local_file = None
            else: #check that the local file has the same backing file, or there are not backing at all
                qemu_info = self.qemu_get_info(local_file)
                if new_backing_file != qemu_info.get('backing file'):
                    local_file = None


        if local_file == None: #copy the file
            img_name= remote_file.split('/') [-1]
            img_local = self.image_path + '/' + img_name
            local_file = self.get_notused_filename(img_local)
            self.copy_file(remote_file, local_file, use_incremental_out)

        # remember the unmodified copy so future calls can reuse it
        if use_incremental_out:
            self.localinfo['files'][remote_file] = local_file
        if new_backing_file:
            self.qemu_change_backing(local_file, new_backing_file)
        qemu_info = self.qemu_get_info(local_file)

        return local_file, qemu_info, use_incremental_out
1805
1806 def launch_server(self, conn, server, rebuild=False, domain=None):
1807 if self.test:
1808 time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real
1809 return 0, 'Success'
1810
1811 server_id = server['uuid']
1812 paused = server.get('paused','no')
1813 try:
1814 if domain!=None and rebuild==False:
1815 domain.resume()
1816 #self.server_status[server_id] = 'ACTIVE'
1817 return 0, 'Success'
1818
1819 result, server_data = self.db.get_instance(server_id)
1820 if result <= 0:
1821 self.logger.error("launch_server ERROR getting server from DB %d %s", result, server_data)
1822 return result, server_data
1823
1824 self.hypervisor = str(server_data['hypervisor'])
1825
1826 #0: get image metadata
1827 server_metadata = server.get('metadata', {})
1828 use_incremental = None
1829
1830 if "use_incremental" in server_metadata:
1831 use_incremental = False if server_metadata["use_incremental"] == "no" else True
1832 if self.xen_hyp == True:
1833 use_incremental = False
1834
1835 server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
1836 if rebuild:
1837 #delete previous incremental files
1838 for file_ in server_host_files.values():
1839 self.delete_file(file_['source file'] )
1840 server_host_files={}
1841
1842 #1: obtain aditional devices (disks)
1843 #Put as first device the main disk
1844 devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
1845 if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
1846 devices += server_data['extended']['devices']
1847 empty_path = None
1848 for dev in devices:
1849 image_id = dev.get('image_id')
1850 if not image_id:
1851 import uuid
1852 uuid_empty = str(uuid.uuid4())
1853 empty_path = self.empty_image_path + uuid_empty + '.qcow2' # local path for empty disk
1854
1855 dev['source file'] = empty_path
1856 dev['file format'] = 'qcow2'
1857 self.qemu_create_empty_disk(dev)
1858 server_host_files[uuid_empty] = {'source file': empty_path,
1859 'file format': dev['file format']}
1860
1861 continue
1862 else:
1863 result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
1864 WHERE={'uuid': image_id})
1865 if result <= 0:
1866 error_text = "ERROR", result, content, "when getting image", dev['image_id']
1867 self.logger.error("launch_server " + error_text)
1868 return -1, error_text
1869 if content[0]['metadata'] is not None:
1870 dev['metadata'] = json.loads(content[0]['metadata'])
1871 else:
1872 dev['metadata'] = {}
1873
1874 if image_id in server_host_files:
1875 dev['source file'] = server_host_files[image_id]['source file'] #local path
1876 dev['file format'] = server_host_files[image_id]['file format'] # raw or qcow2
1877 continue
1878
1879 #2: copy image to host
1880 if image_id:
1881 remote_file = content[0]['path']
1882 else:
1883 remote_file = empty_path
1884 use_incremental_image = use_incremental
1885 if dev['metadata'].get("use_incremental") == "no":
1886 use_incremental_image = False
1887 local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)
1888
1889 #create incremental image
1890 if use_incremental_image:
1891 local_file_inc = self.get_notused_filename(local_file, '.inc')
1892 command = 'qemu-img create -f qcow2 {} -o backing_file={}'.format(local_file_inc, local_file)
1893 self.run_command(command)
1894 local_file = local_file_inc
1895 qemu_info = {'file format': 'qcow2'}
1896
1897 server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}
1898
1899 dev['source file'] = local_file
1900 dev['file format'] = qemu_info['file format']
1901
1902 self.localinfo['server_files'][ server['uuid'] ] = server_host_files
1903 self.localinfo_dirty = True
1904
1905 #3 Create XML
1906 result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
1907 if result <0:
1908 self.logger.error("create xml server error: " + xml)
1909 return -2, xml
1910 self.logger.debug("create xml: " + xml)
1911 atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
1912 #4 Start the domain
1913 if not rebuild: #ensures that any pending destroying server is done
1914 self.server_forceoff(True)
1915 #self.logger.debug("launching instance " + xml)
1916 conn.createXML(xml, atribute)
1917 #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
1918
1919 return 0, 'Success'
1920
1921 except paramiko.ssh_exception.SSHException as e:
1922 text = e.args[0]
1923 self.logger.error("launch_server id='%s' ssh Exception: %s", server_id, text)
1924 if "SSH session not active" in text:
1925 self.ssh_connect()
1926 except host_thread.lvirt_module.libvirtError as e:
1927 text = e.get_error_message()
1928 self.logger.error("launch_server id='%s' libvirt Exception: %s", server_id, text)
1929 except Exception as e:
1930 text = str(e)
1931 self.logger.error("launch_server id='%s' Exception: %s", server_id, text)
1932 return -1, text
1933
1934 def update_servers_status(self):
1935 # # virDomainState
1936 # VIR_DOMAIN_NOSTATE = 0
1937 # VIR_DOMAIN_RUNNING = 1
1938 # VIR_DOMAIN_BLOCKED = 2
1939 # VIR_DOMAIN_PAUSED = 3
1940 # VIR_DOMAIN_SHUTDOWN = 4
1941 # VIR_DOMAIN_SHUTOFF = 5
1942 # VIR_DOMAIN_CRASHED = 6
1943 # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended
1944
1945 if self.test or len(self.server_status)==0:
1946 return
1947
1948 try:
1949 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
1950 domains= conn.listAllDomains()
1951 domain_dict={}
1952 for domain in domains:
1953 uuid = domain.UUIDString() ;
1954 libvirt_status = domain.state()
1955 #print libvirt_status
1956 if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
1957 new_status = "ACTIVE"
1958 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
1959 new_status = "PAUSED"
1960 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
1961 new_status = "INACTIVE"
1962 elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
1963 new_status = "ERROR"
1964 else:
1965 new_status = None
1966 domain_dict[uuid] = new_status
1967 conn.close()
1968 except host_thread.lvirt_module.libvirtError as e:
1969 self.logger.error("get_state() Exception " + e.get_error_message())
1970 return
1971
1972 for server_id, current_status in self.server_status.iteritems():
1973 new_status = None
1974 if server_id in domain_dict:
1975 new_status = domain_dict[server_id]
1976 else:
1977 new_status = "INACTIVE"
1978
1979 if new_status == None or new_status == current_status:
1980 continue
1981 if new_status == 'INACTIVE' and current_status == 'ERROR':
1982 continue #keep ERROR status, because obviously this machine is not running
1983 #change status
1984 self.logger.debug("server id='%s' status change from '%s' to '%s'", server_id, current_status, new_status)
1985 STATUS={'progress':100, 'status':new_status}
1986 if new_status == 'ERROR':
1987 STATUS['last_error'] = 'machine has crashed'
1988 r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
1989 if r>=0:
1990 self.server_status[server_id] = new_status
1991
1992 def action_on_server(self, req, last_retry=True):
1993 '''Perform an action on a req
1994 Attributes:
1995 req: dictionary that contain:
1996 server properties: 'uuid','name','tenant_id','status'
1997 action: 'action'
1998 host properties: 'user', 'ip_name'
1999 return (error, text)
2000 0: No error. VM is updated to new state,
2001 -1: Invalid action, as trying to pause a PAUSED VM
2002 -2: Error accessing host
2003 -3: VM nor present
2004 -4: Error at DB access
2005 -5: Error while trying to perform action. VM is updated to ERROR
2006 '''
2007 server_id = req['uuid']
2008 conn = None
2009 new_status = None
2010 old_status = req['status']
2011 last_error = None
2012
2013 if self.test:
2014 if 'terminate' in req['action']:
2015 new_status = 'deleted'
2016 elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
2017 if req['status']!='ERROR':
2018 time.sleep(5)
2019 new_status = 'INACTIVE'
2020 elif 'start' in req['action'] and req['status']!='ERROR':
2021 new_status = 'ACTIVE'
2022 elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE':
2023 new_status = 'ACTIVE'
2024 elif 'pause' in req['action'] and req['status']!='ERROR':
2025 new_status = 'PAUSED'
2026 elif 'reboot' in req['action'] and req['status']!='ERROR':
2027 new_status = 'ACTIVE'
2028 elif 'rebuild' in req['action']:
2029 time.sleep(random.randint(20,150))
2030 new_status = 'ACTIVE'
2031 elif 'createImage' in req['action']:
2032 time.sleep(5)
2033 self.create_image(None, req)
2034 else:
2035 try:
2036 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2037 try:
2038 dom = conn.lookupByUUIDString(server_id)
2039 except host_thread.lvirt_module.libvirtError as e:
2040 text = e.get_error_message()
2041 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
2042 dom = None
2043 else:
2044 self.logger.error("action_on_server id='%s' libvirt exception: %s", server_id, text)
2045 raise e
2046
2047 if 'forceOff' in req['action']:
2048 if dom == None:
2049 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2050 else:
2051 try:
2052 self.logger.debug("sending DESTROY to server id='%s'", server_id)
2053 dom.destroy()
2054 except Exception as e:
2055 if "domain is not running" not in e.get_error_message():
2056 self.logger.error("action_on_server id='%s' Exception while sending force off: %s",
2057 server_id, e.get_error_message())
2058 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
2059 new_status = 'ERROR'
2060
2061 elif 'terminate' in req['action']:
2062 if dom == None:
2063 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2064 new_status = 'deleted'
2065 else:
2066 try:
2067 if req['action']['terminate'] == 'force':
2068 self.logger.debug("sending DESTROY to server id='%s'", server_id)
2069 dom.destroy()
2070 new_status = 'deleted'
2071 else:
2072 self.logger.debug("sending SHUTDOWN to server id='%s'", server_id)
2073 dom.shutdown()
2074 self.pending_terminate_server.append( (time.time()+10,server_id) )
2075 except Exception as e:
2076 self.logger.error("action_on_server id='%s' Exception while destroy: %s",
2077 server_id, e.get_error_message())
2078 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
2079 new_status = 'ERROR'
2080 if "domain is not running" in e.get_error_message():
2081 try:
2082 dom.undefine()
2083 new_status = 'deleted'
2084 except Exception:
2085 self.logger.error("action_on_server id='%s' Exception while undefine: %s",
2086 server_id, e.get_error_message())
2087 last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
2088 #Exception: 'virDomainDetachDevice() failed'
2089 if new_status=='deleted':
2090 if server_id in self.server_status:
2091 del self.server_status[server_id]
2092 if req['uuid'] in self.localinfo['server_files']:
2093 for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
2094 try:
2095 self.delete_file(file_['source file'])
2096 except Exception:
2097 pass
2098 del self.localinfo['server_files'][ req['uuid'] ]
2099 self.localinfo_dirty = True
2100
2101 elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
2102 try:
2103 if dom == None:
2104 self.logger.debug("action_on_server id='%s' domain not running", server_id)
2105 else:
2106 dom.shutdown()
2107 # new_status = 'INACTIVE'
2108 #TODO: check status for changing at database
2109 except Exception as e:
2110 new_status = 'ERROR'
2111 self.logger.error("action_on_server id='%s' Exception while shutdown: %s",
2112 server_id, e.get_error_message())
2113 last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()
2114
2115 elif 'rebuild' in req['action']:
2116 if dom != None:
2117 dom.destroy()
2118 r = self.launch_server(conn, req, True, None)
2119 if r[0] <0:
2120 new_status = 'ERROR'
2121 last_error = r[1]
2122 else:
2123 new_status = 'ACTIVE'
2124 elif 'start' in req['action']:
2125 # The instance is only create in DB but not yet at libvirt domain, needs to be create
2126 rebuild = True if req['action']['start'] == 'rebuild' else False
2127 r = self.launch_server(conn, req, rebuild, dom)
2128 if r[0] <0:
2129 new_status = 'ERROR'
2130 last_error = r[1]
2131 else:
2132 new_status = 'ACTIVE'
2133
2134 elif 'resume' in req['action']:
2135 try:
2136 if dom == None:
2137 pass
2138 else:
2139 dom.resume()
2140 # new_status = 'ACTIVE'
2141 except Exception as e:
2142 self.logger.error("action_on_server id='%s' Exception while resume: %s",
2143 server_id, e.get_error_message())
2144
2145 elif 'pause' in req['action']:
2146 try:
2147 if dom == None:
2148 pass
2149 else:
2150 dom.suspend()
2151 # new_status = 'PAUSED'
2152 except Exception as e:
2153 self.logger.error("action_on_server id='%s' Exception while pause: %s",
2154 server_id, e.get_error_message())
2155
2156 elif 'reboot' in req['action']:
2157 try:
2158 if dom == None:
2159 pass
2160 else:
2161 dom.reboot()
2162 self.logger.debug("action_on_server id='%s' reboot:", server_id)
2163 #new_status = 'ACTIVE'
2164 except Exception as e:
2165 self.logger.error("action_on_server id='%s' Exception while reboot: %s",
2166 server_id, e.get_error_message())
2167 elif 'createImage' in req['action']:
2168 self.create_image(dom, req)
2169
2170
2171 conn.close()
2172 except host_thread.lvirt_module.libvirtError as e:
2173 if conn is not None: conn.close()
2174 text = e.get_error_message()
2175 new_status = "ERROR"
2176 last_error = text
2177 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
2178 self.logger.debug("action_on_server id='%s' Exception removed from host", server_id)
2179 else:
2180 self.logger.error("action_on_server id='%s' Exception %s", server_id, text)
2181 #end of if self.test
2182 if new_status == None:
2183 return 1
2184
2185 self.logger.debug("action_on_server id='%s' new status=%s %s",server_id, new_status, last_error)
2186 UPDATE = {'progress':100, 'status':new_status}
2187
2188 if new_status=='ERROR':
2189 if not last_retry: #if there will be another retry do not update database
2190 return -1
2191 elif 'terminate' in req['action']:
2192 #PUT a log in the database
2193 self.logger.error("PANIC deleting server id='%s' %s", server_id, last_error)
2194 self.db.new_row('logs',
2195 {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
2196 'description':'PANIC deleting server from host '+self.name+': '+last_error}
2197 )
2198 if server_id in self.server_status:
2199 del self.server_status[server_id]
2200 return -1
2201 else:
2202 UPDATE['last_error'] = last_error
2203 if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
2204 self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
2205 self.server_status[server_id] = new_status
2206 if new_status == 'ERROR':
2207 return -1
2208 return 1
2209
2210
2211 def restore_iface(self, name, mac, lib_conn=None):
2212 ''' make an ifdown, ifup to restore default parameter of na interface
2213 Params:
2214 mac: mac address of the interface
2215 lib_conn: connection to the libvirt, if None a new connection is created
2216 Return 0,None if ok, -1,text if fails
2217 '''
2218 conn=None
2219 ret = 0
2220 error_text=None
2221 if self.test:
2222 self.logger.debug("restore_iface '%s' %s", name, mac)
2223 return 0, None
2224 try:
2225 if not lib_conn:
2226 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2227 else:
2228 conn = lib_conn
2229
2230 #wait to the pending VM deletion
2231 #TODO.Revise self.server_forceoff(True)
2232
2233 iface = conn.interfaceLookupByMACString(mac)
2234 if iface.isActive():
2235 iface.destroy()
2236 iface.create()
2237 self.logger.debug("restore_iface '%s' %s", name, mac)
2238 except host_thread.lvirt_module.libvirtError as e:
2239 error_text = e.get_error_message()
2240 self.logger.error("restore_iface '%s' '%s' libvirt exception: %s", name, mac, error_text)
2241 ret=-1
2242 finally:
2243 if lib_conn is None and conn is not None:
2244 conn.close()
2245 return ret, error_text
2246
2247
    def create_image(self,dom, req):
        """Create a new image file from an instance disk and register the result.
        Params:
            dom: libvirt domain of the instance (unused; kept for interface symmetry)
            req: request dict with 'uuid', 'action'['createImage'] (containing
                'source' and optionally 'path'/'name') and 'new_image'['uuid']
        Updates the 'images' DB row with the resulting status and path.
        """
        if self.test:
            # test mode: only compute the destination path, nothing is copied
            if 'path' in req['action']['createImage']:
                file_dst = req['action']['createImage']['path']
            else:
                createImage=req['action']['createImage']
                img_name= createImage['source']['path']
                index=img_name.rfind('/')
                file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
            image_status='ACTIVE'
        else:
            # retry once, in case the first attempt fails because the ssh session died
            for retry in (0,1):
                try:
                    server_id = req['uuid']
                    createImage=req['action']['createImage']
                    file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
                    if 'path' in req['action']['createImage']:
                        file_dst = req['action']['createImage']['path']
                    else:
                        img_name= createImage['source']['path']
                        index=img_name.rfind('/')
                        file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')

                    self.copy_file(file_orig, file_dst)
                    qemu_info = self.qemu_get_info(file_orig)
                    if 'backing file' in qemu_info:
                        # repoint the copy to the unmodified repository backing file
                        for k,v in self.localinfo['files'].items():
                            if v==qemu_info['backing file']:
                                self.qemu_change_backing(file_dst, k)
                                break
                    image_status='ACTIVE'
                    break
                except paramiko.ssh_exception.SSHException as e:
                    image_status='ERROR'
                    error_text = e.args[0]
                    self.logger.error("create_image id='%s' ssh Exception: %s", server_id, error_text)
                    if "SSH session not active" in error_text and retry==0:
                        self.ssh_connect()
                except Exception as e:
                    image_status='ERROR'
                    error_text = str(e)
                    self.logger.error("create_image id='%s' Exception: %s", server_id, error_text)

        # NOTE(review): if an exception fires before file_dst is assigned, the update
        # below raises NameError on file_dst — confirm callers always provide 'source'
        #TODO insert a last_error at database
        self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
                            {'uuid':req['new_image']['uuid']}, log=True)
2294
2295 def edit_iface(self, port_id, old_net, new_net):
2296 #This action imply remove and insert interface to put proper parameters
2297 if self.test:
2298 time.sleep(1)
2299 else:
2300 #get iface details
2301 r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
2302 WHERE={'port_id': port_id})
2303 if r<0:
2304 self.logger.error("edit_iface %s DDBB error: %s", port_id, c)
2305 return
2306 elif r==0:
2307 self.logger.error("edit_iface %s port not found", port_id)
2308 return
2309 port=c[0]
2310 if port["model"]!="VF":
2311 self.logger.error("edit_iface %s ERROR model must be VF", port_id)
2312 return
2313 #create xml detach file
2314 xml=[]
2315 self.xml_level = 2
2316 xml.append("<interface type='hostdev' managed='yes'>")
2317 xml.append(" <mac address='" +port['mac']+ "'/>")
2318 xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
2319 xml.append('</interface>')
2320
2321
2322 try:
2323 conn=None
2324 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
2325 dom = conn.lookupByUUIDString(port["instance_id"])
2326 if old_net:
2327 text="\n".join(xml)
2328 self.logger.debug("edit_iface detaching SRIOV interface " + text)
2329 dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
2330 if new_net:
2331 xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
2332 self.xml_level = 1
2333 xml.append(self.pci2xml(port.get('vpci',None)) )
2334 xml.append('</interface>')
2335 text="\n".join(xml)
2336 self.logger.debug("edit_iface attaching SRIOV interface " + text)
2337 dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
2338
2339 except host_thread.lvirt_module.libvirtError as e:
2340 text = e.get_error_message()
2341 self.logger.error("edit_iface %s libvirt exception: %s", port["instance_id"], text)
2342
2343 finally:
2344 if conn is not None: conn.close()
2345
2346
2347 def create_server(server, db, only_of_ports):
2348 extended = server.get('extended', None)
2349 requirements={}
2350 requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
2351 requirements['ram'] = server['flavor'].get('ram', 0)
2352 if requirements['ram']== None:
2353 requirements['ram'] = 0
2354 requirements['vcpus'] = server['flavor'].get('vcpus', 0)
2355 if requirements['vcpus']== None:
2356 requirements['vcpus'] = 0
2357 #If extended is not defined get requirements from flavor
2358 if extended is None:
2359 #If extended is defined in flavor convert to dictionary and use it
2360 if 'extended' in server['flavor'] and server['flavor']['extended'] != None:
2361 json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
2362 extended = json.loads(json_acceptable_string)
2363 else:
2364 extended = None
2365 #print json.dumps(extended, indent=4)
2366
2367 #For simplicity only one numa VM are supported in the initial implementation
2368 if extended != None:
2369 numas = extended.get('numas', [])
2370 if len(numas)>1:
2371 return (-2, "Multi-NUMA VMs are not supported yet")
2372 #elif len(numas)<1:
2373 # return (-1, "At least one numa must be specified")
2374
2375 #a for loop is used in order to be ready to multi-NUMA VMs
2376 request = []
2377 for numa in numas:
2378 numa_req = {}
2379 numa_req['memory'] = numa.get('memory', 0)
2380 if 'cores' in numa:
2381 numa_req['proc_req_nb'] = numa['cores'] #number of cores or threads to be reserved
2382 numa_req['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
2383 numa_req['proc_req_list'] = numa.get('cores-id', None) #list of ids to be assigned to the cores or threads
2384 elif 'paired-threads' in numa:
2385 numa_req['proc_req_nb'] = numa['paired-threads']
2386 numa_req['proc_req_type'] = 'paired-threads'
2387 numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
2388 elif 'threads' in numa:
2389 numa_req['proc_req_nb'] = numa['threads']
2390 numa_req['proc_req_type'] = 'threads'
2391 numa_req['proc_req_list'] = numa.get('threads-id', None)
2392 else:
2393 numa_req['proc_req_nb'] = 0 # by default
2394 numa_req['proc_req_type'] = 'threads'
2395
2396
2397
2398 #Generate a list of sriov and another for physical interfaces
2399 interfaces = numa.get('interfaces', [])
2400 sriov_list = []
2401 port_list = []
2402 for iface in interfaces:
2403 iface['bandwidth'] = int(iface['bandwidth'])
2404 if iface['dedicated'][:3]=='yes':
2405 port_list.append(iface)
2406 else:
2407 sriov_list.append(iface)
2408
2409 #Save lists ordered from more restrictive to less bw requirements
2410 numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
2411 numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)
2412
2413
2414 request.append(numa_req)
2415
2416 # print "----------\n"+json.dumps(request[0], indent=4)
2417 # print '----------\n\n'
2418
2419 #Search in db for an appropriate numa for each requested numa
2420 #at the moment multi-NUMA VMs are not supported
2421 if len(request)>0:
2422 requirements['numa'].update(request[0])
2423 if requirements['numa']['memory']>0:
2424 requirements['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2425 elif requirements['ram']==0:
2426 return (-1, "Memory information not set neither at extended field not at ram")
2427 if requirements['numa']['proc_req_nb']>0:
2428 requirements['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2429 elif requirements['vcpus']==0:
2430 return (-1, "Processor information not set neither at extended field not at vcpus")
2431
2432 if 'hypervisor' in server: requirements['hypervisor'] = server['hypervisor'] #Unikernels extension
2433
2434 result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
2435
2436 if result == -1:
2437 return (-1, content)
2438
2439 numa_id = content['numa_id']
2440 host_id = content['host_id']
2441
2442 #obtain threads_id and calculate pinning
2443 cpu_pinning = []
2444 reserved_threads=[]
2445 if requirements['numa']['proc_req_nb']>0:
2446 result, content = db.get_table(FROM='resources_core',
2447 SELECT=('id','core_id','thread_id'),
2448 WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} )
2449 if result <= 0:
2450 #print content
2451 return -1, content
2452
2453 #convert rows to a dictionary indexed by core_id
2454 cores_dict = {}
2455 for row in content:
2456 if not row['core_id'] in cores_dict:
2457 cores_dict[row['core_id']] = []
2458 cores_dict[row['core_id']].append([row['thread_id'],row['id']])
2459
2460 #In case full cores are requested
2461 paired = 'N'
2462 if requirements['numa']['proc_req_type'] == 'cores':
2463 #Get/create the list of the vcpu_ids
2464 vcpu_id_list = requirements['numa']['proc_req_list']
2465 if vcpu_id_list == None:
2466 vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
2467
2468 for threads in cores_dict.itervalues():
2469 #we need full cores
2470 if len(threads) != 2:
2471 continue
2472
2473 #set pinning for the first thread
2474 cpu_pinning.append( [ vcpu_id_list.pop(0), threads[0][0], threads[0][1] ] )
2475
2476 #reserve so it is not used the second thread
2477 reserved_threads.append(threads[1][1])
2478
2479 if len(vcpu_id_list) == 0:
2480 break
2481
2482 #In case paired threads are requested
2483 elif requirements['numa']['proc_req_type'] == 'paired-threads':
2484 paired = 'Y'
2485 #Get/create the list of the vcpu_ids
2486 if requirements['numa']['proc_req_list'] != None:
2487 vcpu_id_list = []
2488 for pair in requirements['numa']['proc_req_list']:
2489 if len(pair)!=2:
2490 return -1, "Field paired-threads-id not properly specified"
2491 return
2492 vcpu_id_list.append(pair[0])
2493 vcpu_id_list.append(pair[1])
2494 else:
2495 vcpu_id_list = range(0,2*int(requirements['numa']['proc_req_nb']))
2496
2497 for threads in cores_dict.itervalues():
2498 #we need full cores
2499 if len(threads) != 2:
2500 continue
2501 #set pinning for the first thread
2502 cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
2503
2504 #set pinning for the second thread
2505 cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
2506
2507 if len(vcpu_id_list) == 0:
2508 break
2509
2510 #In case normal threads are requested
2511 elif requirements['numa']['proc_req_type'] == 'threads':
2512 #Get/create the list of the vcpu_ids
2513 vcpu_id_list = requirements['numa']['proc_req_list']
2514 if vcpu_id_list == None:
2515 vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))
2516
2517 for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
2518 threads = cores_dict[threads_index]
2519 #set pinning for the first thread
2520 cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])
2521
2522 #if exists, set pinning for the second thread
2523 if len(threads) == 2 and len(vcpu_id_list) != 0:
2524 cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])
2525
2526 if len(vcpu_id_list) == 0:
2527 break
2528
2529 #Get the source pci addresses for the selected numa
2530 used_sriov_ports = []
2531 for port in requirements['numa']['sriov_list']:
2532 result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
2533 if result <= 0:
2534 #print content
2535 return -1, content
2536 for row in content:
2537 if row['id'] in used_sriov_ports or row['id']==port['port_id']:
2538 continue
2539 port['pci'] = row['pci']
2540 if 'mac_address' not in port:
2541 port['mac_address'] = row['mac']
2542 del port['mac']
2543 port['port_id']=row['id']
2544 port['Mbps_used'] = port['bandwidth']
2545 used_sriov_ports.append(row['id'])
2546 break
2547
2548 for port in requirements['numa']['port_list']:
2549 port['Mbps_used'] = None
2550 if port['dedicated'] != "yes:sriov":
2551 port['mac_address'] = port['mac']
2552 del port['mac']
2553 continue
2554 result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
2555 if result <= 0:
2556 #print content
2557 return -1, content
2558 port['Mbps_used'] = content[0]['Mbps']
2559 for row in content:
2560 if row['id'] in used_sriov_ports or row['id']==port['port_id']:
2561 continue
2562 port['pci'] = row['pci']
2563 if 'mac_address' not in port:
2564 port['mac_address'] = row['mac'] # mac cannot be set to passthrough ports
2565 del port['mac']
2566 port['port_id']=row['id']
2567 used_sriov_ports.append(row['id'])
2568 break
2569
2570 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2571 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2572
2573 server['host_id'] = host_id
2574
2575 #Generate dictionary for saving in db the instance resources
2576 resources = {}
2577 resources['bridged-ifaces'] = []
2578
2579 numa_dict = {}
2580 numa_dict['interfaces'] = []
2581
2582 numa_dict['interfaces'] += requirements['numa']['port_list']
2583 numa_dict['interfaces'] += requirements['numa']['sriov_list']
2584
2585 #Check bridge information
2586 unified_dataplane_iface=[]
2587 unified_dataplane_iface += requirements['numa']['port_list']
2588 unified_dataplane_iface += requirements['numa']['sriov_list']
2589
2590 for control_iface in server.get('networks', []):
2591 control_iface['net_id']=control_iface.pop('uuid')
2592 #Get the brifge name
2593 result, content = db.get_table(FROM='nets',
2594 SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp','dhcp_first_ip',
2595 'dhcp_last_ip', 'cidr', 'gateway_ip', 'dns', 'links', 'routes'),
2596 WHERE={'uuid': control_iface['net_id']})
2597 if result < 0:
2598 pass
2599 elif result==0:
2600 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface['net_id']
2601 else:
2602 network=content[0]
2603 if control_iface.get("type", 'virtual') == 'virtual':
2604 if network['type']!='bridge_data' and network['type']!='bridge_man':
2605 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
2606 resources['bridged-ifaces'].append(control_iface)
2607 if network.get("provider") and network["provider"][0:3] == "OVS":
2608 control_iface["type"] = "instance:ovs"
2609 else:
2610 control_iface["type"] = "instance:bridge"
2611 if network.get("vlan"):
2612 control_iface["vlan"] = network["vlan"]
2613
2614 if network.get("enable_dhcp") == 'true':
2615 control_iface["enable_dhcp"] = network.get("enable_dhcp")
2616 control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
2617 control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
2618 control_iface["cidr"] = network["cidr"]
2619
2620 if network.get("dns"):
2621 control_iface["dns"] = yaml.safe_load(network.get("dns"))
2622 if network.get("links"):
2623 control_iface["links"] = yaml.safe_load(network.get("links"))
2624 if network.get("routes"):
2625 control_iface["routes"] = yaml.safe_load(network.get("routes"))
2626 else:
2627 if network['type']!='data' and network['type']!='ptp':
2628 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
2629                 #dataplane interface, look for it in the numa tree and assign this network
2630 iface_found=False
2631 for dataplane_iface in numa_dict['interfaces']:
2632 if dataplane_iface['name'] == control_iface.get("name"):
2633 if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
2634 (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
2635 (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared") :
2636 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2637 (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
2638 dataplane_iface['uuid'] = control_iface['net_id']
2639 if dataplane_iface['dedicated'] == "no":
2640 dataplane_iface['vlan'] = network['vlan']
2641 if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
2642 dataplane_iface['mac_address'] = control_iface.get("mac_address")
2643 if control_iface.get("vpci"):
2644 dataplane_iface['vpci'] = control_iface.get("vpci")
2645 iface_found=True
2646 break
2647 if not iface_found:
2648 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface.get("name")
2649
2650 resources['host_id'] = host_id
2651 resources['image_id'] = server['image_id']
2652 resources['flavor_id'] = server['flavor_id']
2653 resources['tenant_id'] = server['tenant_id']
2654 resources['ram'] = requirements['ram']
2655 resources['vcpus'] = requirements['vcpus']
2656 resources['status'] = 'CREATING'
2657
2658 if 'description' in server: resources['description'] = server['description']
2659 if 'name' in server: resources['name'] = server['name']
2660 if 'hypervisor' in server: resources['hypervisor'] = server['hypervisor']
2661 if 'os_image_type' in server: resources['os_image_type'] = server['os_image_type']
2662
2663 resources['extended'] = {} #optional
2664 resources['extended']['numas'] = []
2665 numa_dict['numa_id'] = numa_id
2666 numa_dict['memory'] = requirements['numa']['memory']
2667 numa_dict['cores'] = []
2668
2669 for core in cpu_pinning:
2670 numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
2671 for core in reserved_threads:
2672 numa_dict['cores'].append({'id': core})
2673 resources['extended']['numas'].append(numa_dict)
2674 if extended!=None and 'devices' in extended: #TODO allow extra devices without numa
2675 resources['extended']['devices'] = extended['devices']
2676
2677
2678 # '===================================={'
2679 #print json.dumps(resources, indent=4)
2680 #print '====================================}'
2681
2682 return 0, resources
2683