b8f051c2f2671dadfdc35f2de2c58686433af7d7
[osm/openvim.git] / osm_openvim / host_thread.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
22 ##
23
'''
This is a thread that interacts with the host and libvirt to manage VMs.
One thread is launched per host.
'''
28 __author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__ = "$10-jul-2014 12:07:15$"
30
31 import json
32 import yaml
33 import threading
34 import time
35 import Queue
36 import paramiko
37 # import subprocess
38 # import libvirt
39 import imp
40 import random
41 import os
42 import logging
43 from jsonschema import validate as js_v, exceptions as js_e
44 from vim_schema import localinfo_schema, hostinfo_schema
45
46
47 class host_thread(threading.Thread):
48 lvirt_module = None
49
    def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
                 develop_bridge_iface, password=None, keyfile = None, logger_name=None, debug=None):
        '''Init a thread.
        Arguments:
            'id' number of thead
            'name' name of thread
            'host','user': host ip or name to manage and user
            'db', 'db_lock': database class and lock to use it in exclusion
            'test': when True no real host/libvirt is contacted (dry-run mode)
            'image_path': remote directory where images and localinfo are stored
            'host_id': uuid of the host at the database
            'version': openvim version
            'develop_mode': when True hugepages/cpu-pinning are not enforced
            'develop_bridge_iface': bridge interface used in develop mode
            'password'/'keyfile': ssh credentials (either may be None)
            'logger_name'/'debug': logging configuration
        '''
        threading.Thread.__init__(self)
        self.name = name
        self.host = host
        self.user = user
        self.db = db
        self.db_lock = db_lock
        self.test = test
        self.password = password
        self.keyfile = keyfile
        self.localinfo_dirty = False   # True when localinfo must be flushed back to the host

        # libvirt is imported lazily and only once (class attribute), so that
        # test mode works on machines without python-libvirt installed
        if not test and not host_thread.lvirt_module:
            try:
                module_info = imp.find_module("libvirt")
                host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
            except (IOError, ImportError) as e:
                raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))
        if logger_name:
            self.logger_name = logger_name
        else:
            self.logger_name = "openvim.host."+name
        self.logger = logging.getLogger(self.logger_name)
        if debug:
            self.logger.setLevel(getattr(logging, debug))


        self.develop_mode = develop_mode
        self.develop_bridge_iface = develop_bridge_iface
        self.image_path = image_path
        self.empty_image_path = image_path
        self.host_id = host_id
        self.version = version

        self.xml_level = 0   # current indentation depth while building domain XML

        self.server_status = {}                 #dictionary with pairs server_uuid:server_status
        self.pending_terminate_server =[]       #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
        self.next_update_server_status = 0      #time when servers status must be checked again

        self.hostinfo = None

        self.queueLock = threading.Lock()       # protects taskQueue accesses
        self.taskQueue = Queue.Queue(2000)      # pending tasks for this host
        self.ssh_conn = None
        self.connectivity = True
        # libvirt connection URI tunnelled over ssh
        self.lvirt_conn_uri = "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
            user=self.user, host=self.host)
        if keyfile:
            self.lvirt_conn_uri += "&keyfile=" + keyfile
109
110 def ssh_connect(self):
111 try:
112 # Connect SSH
113 self.ssh_conn = paramiko.SSHClient()
114 self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
115 self.ssh_conn.load_system_host_keys()
116 self.ssh_conn.connect(self.host, username=self.user, password=self.password, key_filename=self.keyfile,
117 timeout=10) #, None)
118 except paramiko.ssh_exception.SSHException as e:
119 text = e.args[0]
120 self.logger.error("ssh_connect ssh Exception: " + text)
121
122 def check_connectivity(self):
123 if not self.test:
124
125 try:
126 if not self.ssh_conn:
127 self.ssh_connect()
128
129 command = 'sudo brctl show'
130 (_, stdout, stderr) = self.ssh_conn.exec_command(command, timeout=10)
131 content = stderr.read()
132 if len(content) > 0:
133 self.connectivity = False
134 self.logger.error("ssh conection error")
135 except paramiko.ssh_exception.SSHException as e:
136 text = e.args[0]
137 self.connectivity = False
138 self.logger.error("ssh_connect ssh Exception: " + text)
139 raise paramiko.ssh_exception.SSHException("ssh error conection")
140 except Exception as e:
141 self.connectivity = False
142 raise paramiko.ssh_exception.SSHException("ssh error conection")
143
144 def load_localinfo(self):
145 if not self.test:
146 try:
147 # Connect SSH
148 self.ssh_connect()
149
150 command = 'mkdir -p ' + self.image_path
151 # print self.name, ': command:', command
152 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
153 content = stderr.read()
154 if len(content) > 0:
155 self.logger.error("command: '%s' stderr: '%s'", command, content)
156
157 command = 'cat ' + self.image_path + '/.openvim.yaml'
158 # print self.name, ': command:', command
159 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
160 content = stdout.read()
161 if len(content) == 0:
162 self.logger.error("command: '%s' stderr='%s'", command, stderr.read())
163 raise paramiko.ssh_exception.SSHException("Error empty file, command: '{}'".format(command))
164 self.localinfo = yaml.load(content)
165 js_v(self.localinfo, localinfo_schema)
166 self.localinfo_dirty = False
167 if 'server_files' not in self.localinfo:
168 self.localinfo['server_files'] = {}
169 self.logger.debug("localinfo load from host")
170 return
171
172 except paramiko.ssh_exception.SSHException as e:
173 text = e.args[0]
174 self.logger.error("load_localinfo ssh Exception: " + text)
175 except host_thread.lvirt_module.libvirtError as e:
176 text = e.get_error_message()
177 self.logger.error("load_localinfo libvirt Exception: " + text)
178 except yaml.YAMLError as exc:
179 text = ""
180 if hasattr(exc, 'problem_mark'):
181 mark = exc.problem_mark
182 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
183 self.logger.error("load_localinfo yaml format Exception " + text)
184 except js_e.ValidationError as e:
185 text = ""
186 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
187 self.logger.error("load_localinfo format Exception: %s %s", text, str(e))
188 except Exception as e:
189 text = str(e)
190 self.logger.error("load_localinfo Exception: " + text)
191
192 #not loaded, insert a default data and force saving by activating dirty flag
193 self.localinfo = {'files':{}, 'server_files':{} }
194 #self.localinfo_dirty=True
195 self.localinfo_dirty=False
196
197 def load_hostinfo(self):
198 if self.test:
199 return;
200 try:
201 #Connect SSH
202 self.ssh_connect()
203
204
205 command = 'cat ' + self.image_path + '/hostinfo.yaml'
206 #print self.name, ': command:', command
207 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
208 content = stdout.read()
209 if len(content) == 0:
210 self.logger.error("command: '%s' stderr: '%s'", command, stderr.read())
211 raise paramiko.ssh_exception.SSHException("Error empty file ")
212 self.hostinfo = yaml.load(content)
213 js_v(self.hostinfo, hostinfo_schema)
214 self.logger.debug("hostlinfo load from host " + str(self.hostinfo))
215 return
216
217 except paramiko.ssh_exception.SSHException as e:
218 text = e.args[0]
219 self.logger.error("load_hostinfo ssh Exception: " + text)
220 except host_thread.lvirt_module.libvirtError as e:
221 text = e.get_error_message()
222 self.logger.error("load_hostinfo libvirt Exception: " + text)
223 except yaml.YAMLError as exc:
224 text = ""
225 if hasattr(exc, 'problem_mark'):
226 mark = exc.problem_mark
227 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
228 self.logger.error("load_hostinfo yaml format Exception " + text)
229 except js_e.ValidationError as e:
230 text = ""
231 if len(e.path)>0: text=" at '" + ":".join(map(str, e.path))+"'"
232 self.logger.error("load_hostinfo format Exception: %s %s", text, e.message)
233 except Exception as e:
234 text = str(e)
235 self.logger.error("load_hostinfo Exception: " + text)
236
237 #not loaded, insert a default data
238 self.hostinfo = None
239
    def save_localinfo(self, tries=3):
        """Save self.localinfo to the host (.openvim.yaml) via SSH, retrying on failure.

        Clears self.localinfo_dirty on success. In test mode only the dirty
        flag is cleared. Errors are logged; nothing is raised.
        """
        if self.test:
            self.localinfo_dirty = False
            return

        while tries>=0:
            tries-=1

            try:
                # stream the yaml straight into a remote 'cat >' through stdin
                command = 'cat > ' + self.image_path + '/.openvim.yaml'
                self.logger.debug("command:" + command)
                (stdin, _, _) = self.ssh_conn.exec_command(command)
                yaml.safe_dump(self.localinfo, stdin, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)

                self.localinfo_dirty = False
                break #while tries

            except paramiko.ssh_exception.SSHException as e:
                text = e.args[0]
                self.logger.error("save_localinfo ssh Exception: " + text)
                if "SSH session not active" in text:
                    # reconnect and let the retry loop try again
                    self.ssh_connect()
            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                self.logger.error("save_localinfo libvirt Exception: " + text)
            except yaml.YAMLError as exc:
                text = ""
                if hasattr(exc, 'problem_mark'):
                    mark = exc.problem_mark
                    text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
                self.logger.error("save_localinfo yaml format Exception " + text)
            except Exception as e:
                text = str(e)
                self.logger.error("save_localinfo Exception: " + text)
274
    def load_servers_from_db(self):
        """Load from the database the servers (instances) allocated to this host.

        Fills self.server_status as {server_uuid: status} and migrates the
        deprecated 'inc_files' localinfo layout to the 'server_files' one.
        """
        self.db_lock.acquire()
        r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
        self.db_lock.release()

        self.server_status = {}
        if r<0:
            self.logger.error("Error getting data from database: " + c)
            return
        for server in c:
            self.server_status[ server['uuid'] ] = server['status']

            #convert from old version to new one
            if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
                server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
                # guess qcow2 format from the file extension
                if server_files_dict['source file'][-5:] == 'qcow2':
                    server_files_dict['file format'] = 'qcow2'

                self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
        # drop the old-layout key once every server has been converted
        if 'inc_files' in self.localinfo:
            del self.localinfo['inc_files']
            self.localinfo_dirty = True
297
298 def delete_unused_files(self):
299 '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
300 Deletes unused entries at self.loacalinfo and the corresponding local files.
301 The only reason for this mismatch is the manual deletion of instances (VM) at database
302 '''
303 if self.test:
304 return
305 for uuid,images in self.localinfo['server_files'].items():
306 if uuid not in self.server_status:
307 for localfile in images.values():
308 try:
309 self.logger.debug("deleting file '%s' of unused server '%s'", localfile['source file'], uuid)
310 self.delete_file(localfile['source file'])
311 except paramiko.ssh_exception.SSHException as e:
312 self.logger.error("Exception deleting file '%s': %s", localfile['source file'], str(e))
313 del self.localinfo['server_files'][uuid]
314 self.localinfo_dirty = True
315
316 def insert_task(self, task, *aditional):
317 try:
318 self.queueLock.acquire()
319 task = self.taskQueue.put( (task,) + aditional, timeout=5)
320 self.queueLock.release()
321 return 1, None
322 except Queue.Full:
323 return -1, "timeout inserting a task over host " + self.name
324
    def run(self):
        """Thread main loop: (re)load host information, then process queued tasks.

        The outer loop reloads localinfo/hostinfo/servers; it is re-entered
        after a 'reload' task. The inner loop pops tasks from taskQueue and,
        when idle, flushes dirty localinfo, refreshes server status every 5
        seconds, or processes pending forced terminations.
        """
        while True:
            self.load_localinfo()
            self.load_hostinfo()
            self.load_servers_from_db()
            self.delete_unused_files()
            while True:
                try:
                    self.queueLock.acquire()
                    if not self.taskQueue.empty():
                        task = self.taskQueue.get()
                    else:
                        task = None
                    self.queueLock.release()

                    if task is None:
                        # idle housekeeping, in priority order
                        now=time.time()
                        if self.localinfo_dirty:
                            self.save_localinfo()
                        elif self.next_update_server_status < now:
                            self.update_servers_status()
                            self.next_update_server_status = now + 5
                        elif len(self.pending_terminate_server)>0 and self.pending_terminate_server[0][0]<now:
                            self.server_forceoff()
                        else:
                            time.sleep(1)
                        continue

                    if task[0] == 'instance':
                        self.logger.debug("processing task instance " + str(task[1]['action']))
                        retry = 0
                        while retry < 2:
                            retry += 1
                            # on the last retry action_on_server must not fail again
                            r = self.action_on_server(task[1], retry==2)
                            if r >= 0:
                                break
                    elif task[0] == 'image':
                        # image tasks are not handled by this thread
                        pass
                    elif task[0] == 'exit':
                        self.logger.debug("processing task exit")
                        self.terminate()
                        return 0
                    elif task[0] == 'reload':
                        self.logger.debug("processing task reload terminating and relaunching")
                        self.terminate()
                        break  # back to the outer loop to reload everything
                    elif task[0] == 'edit-iface':
                        self.logger.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
                            task[1], task[2], task[3]))
                        self.edit_iface(task[1], task[2], task[3])
                    elif task[0] == 'restore-iface':
                        self.logger.debug("processing task restore-iface={} mac={}".format(task[1], task[2]))
                        self.restore_iface(task[1], task[2])
                    elif task[0] == 'new-ovsbridge':
                        self.logger.debug("Creating compute OVS bridge")
                        self.create_ovs_bridge()
                    elif task[0] == 'new-vxlan':
                        self.logger.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task[1], task[2]))
                        self.create_ovs_vxlan_tunnel(task[1], task[2])
                    elif task[0] == 'del-ovsbridge':
                        self.logger.debug("Deleting OVS bridge")
                        self.delete_ovs_bridge()
                    elif task[0] == 'del-vxlan':
                        self.logger.debug("Deleting vxlan {} tunnel".format(task[1]))
                        self.delete_ovs_vxlan_tunnel(task[1])
                    elif task[0] == 'create-ovs-bridge-port':
                        self.logger.debug("Adding port ovim-{} to OVS bridge".format(task[1]))
                        self.create_ovs_bridge_port(task[1])
                    elif task[0] == 'del-ovs-port':
                        self.logger.debug("Delete bridge attached to ovs port vlan {} net {}".format(task[1], task[2]))
                        self.delete_bridge_port_attached_to_ovs(task[1], task[2])
                    else:
                        self.logger.debug("unknown task " + str(task))

                except Exception as e:
                    # keep the thread alive whatever a task handler raises
                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
401
402 def server_forceoff(self, wait_until_finished=False):
403 while len(self.pending_terminate_server)>0:
404 now = time.time()
405 if self.pending_terminate_server[0][0]>now:
406 if wait_until_finished:
407 time.sleep(1)
408 continue
409 else:
410 return
411 req={'uuid':self.pending_terminate_server[0][1],
412 'action':{'terminate':'force'},
413 'status': None
414 }
415 self.action_on_server(req)
416 self.pending_terminate_server.pop(0)
417
418 def terminate(self):
419 try:
420 self.server_forceoff(True)
421 if self.localinfo_dirty:
422 self.save_localinfo()
423 if not self.test:
424 self.ssh_conn.close()
425 except Exception as e:
426 text = str(e)
427 self.logger.error("terminate Exception: " + text)
428 self.logger.debug("exit from host_thread")
429
430 def get_local_iface_name(self, generic_name):
431 if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
432 return self.hostinfo["iface_names"][generic_name]
433 return generic_name
434
435 def create_xml_server(self, server, dev_list, server_metadata={}):
436 """Function that implements the generation of the VM XML definition.
437 Additional devices are in dev_list list
438 The main disk is upon dev_list[0]"""
439
440 #get if operating system is Windows
441 windows_os = False
442 os_type = server_metadata.get('os_type', None)
443 if os_type == None and 'metadata' in dev_list[0]:
444 os_type = dev_list[0]['metadata'].get('os_type', None)
445 if os_type != None and os_type.lower() == "windows":
446 windows_os = True
447 #get type of hard disk bus
448 bus_ide = True if windows_os else False
449 bus = server_metadata.get('bus', None)
450 if bus == None and 'metadata' in dev_list[0]:
451 bus = dev_list[0]['metadata'].get('bus', None)
452 if bus != None:
453 bus_ide = True if bus=='ide' else False
454
455 self.xml_level = 0
456
457 text = "<domain type='kvm'>"
458 #get topology
459 topo = server_metadata.get('topology', None)
460 if topo == None and 'metadata' in dev_list[0]:
461 topo = dev_list[0]['metadata'].get('topology', None)
462 #name
463 name = server.get('name', '')[:28] + "_" + server['uuid'][:28] #qemu impose a length limit of 59 chars or not start. Using 58
464 text += self.inc_tab() + "<name>" + name+ "</name>"
465 #uuid
466 text += self.tab() + "<uuid>" + server['uuid'] + "</uuid>"
467
468 numa={}
469 if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:
470 numa = server['extended']['numas'][0]
471 #memory
472 use_huge = False
473 memory = int(numa.get('memory',0))*1024*1024 #in KiB
474 if memory==0:
475 memory = int(server['ram'])*1024;
476 else:
477 if not self.develop_mode:
478 use_huge = True
479 if memory==0:
480 return -1, 'No memory assigned to instance'
481 memory = str(memory)
482 text += self.tab() + "<memory unit='KiB'>" +memory+"</memory>"
483 text += self.tab() + "<currentMemory unit='KiB'>" +memory+ "</currentMemory>"
484 if use_huge:
485 text += self.tab()+'<memoryBacking>'+ \
486 self.inc_tab() + '<hugepages/>'+ \
487 self.dec_tab()+ '</memoryBacking>'
488
489 #cpu
490 use_cpu_pinning=False
491 vcpus = int(server.get("vcpus",0))
492 cpu_pinning = []
493 if 'cores-source' in numa:
494 use_cpu_pinning=True
495 for index in range(0, len(numa['cores-source'])):
496 cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )
497 vcpus += 1
498 if 'threads-source' in numa:
499 use_cpu_pinning=True
500 for index in range(0, len(numa['threads-source'])):
501 cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )
502 vcpus += 1
503 if 'paired-threads-source' in numa:
504 use_cpu_pinning=True
505 for index in range(0, len(numa['paired-threads-source'])):
506 cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )
507 cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )
508 vcpus += 2
509
510 if use_cpu_pinning and not self.develop_mode:
511 text += self.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning)) +"</vcpu>" + \
512 self.tab()+'<cputune>'
513 self.xml_level += 1
514 for i in range(0, len(cpu_pinning)):
515 text += self.tab() + "<vcpupin vcpu='" +str(cpu_pinning[i][0])+ "' cpuset='" +str(cpu_pinning[i][1]) +"'/>"
516 text += self.dec_tab()+'</cputune>'+ \
517 self.tab() + '<numatune>' +\
518 self.inc_tab() + "<memory mode='strict' nodeset='" +str(numa['source'])+ "'/>" +\
519 self.dec_tab() + '</numatune>'
520 else:
521 if vcpus==0:
522 return -1, "Instance without number of cpus"
523 text += self.tab()+"<vcpu>" + str(vcpus) + "</vcpu>"
524
525 #boot
526 boot_cdrom = False
527 for dev in dev_list:
528 if dev['type']=='cdrom' :
529 boot_cdrom = True
530 break
531 text += self.tab()+ '<os>' + \
532 self.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
533 if boot_cdrom:
534 text += self.tab() + "<boot dev='cdrom'/>"
535 text += self.tab() + "<boot dev='hd'/>" + \
536 self.dec_tab()+'</os>'
537 #features
538 text += self.tab()+'<features>'+\
539 self.inc_tab()+'<acpi/>' +\
540 self.tab()+'<apic/>' +\
541 self.tab()+'<pae/>'+ \
542 self.dec_tab() +'</features>'
543 if topo == "oneSocket:hyperthreading":
544 if vcpus % 2 != 0:
545 return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
546 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % (vcpus/2)
547 elif windows_os or topo == "oneSocket":
548 text += self.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
549 else:
550 text += self.tab() + "<cpu mode='host-model'></cpu>"
551 text += self.tab() + "<clock offset='utc'/>" +\
552 self.tab() + "<on_poweroff>preserve</on_poweroff>" + \
553 self.tab() + "<on_reboot>restart</on_reboot>" + \
554 self.tab() + "<on_crash>restart</on_crash>"
555 text += self.tab() + "<devices>" + \
556 self.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
557 self.tab() + "<serial type='pty'>" +\
558 self.inc_tab() + "<target port='0'/>" + \
559 self.dec_tab() + "</serial>" +\
560 self.tab() + "<console type='pty'>" + \
561 self.inc_tab()+ "<target type='serial' port='0'/>" + \
562 self.dec_tab()+'</console>'
563 if windows_os:
564 text += self.tab() + "<controller type='usb' index='0'/>" + \
565 self.tab() + "<controller type='ide' index='0'/>" + \
566 self.tab() + "<input type='mouse' bus='ps2'/>" + \
567 self.tab() + "<sound model='ich6'/>" + \
568 self.tab() + "<video>" + \
569 self.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
570 self.dec_tab() + "</video>" + \
571 self.tab() + "<memballoon model='virtio'/>" + \
572 self.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
573
574 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
575 #> self.dec_tab()+'</hostdev>\n' +\
576 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
577 if windows_os:
578 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
579 else:
580 #If image contains 'GRAPH' include graphics
581 #if 'GRAPH' in image:
582 text += self.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
583 self.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
584 self.dec_tab() + "</graphics>"
585
586 vd_index = 'a'
587 for dev in dev_list:
588 bus_ide_dev = bus_ide
589 if dev['type']=='cdrom' or dev['type']=='disk':
590 if dev['type']=='cdrom':
591 bus_ide_dev = True
592 text += self.tab() + "<disk type='file' device='"+dev['type']+"'>"
593 if 'file format' in dev:
594 text += self.inc_tab() + "<driver name='qemu' type='" +dev['file format']+ "' cache='writethrough'/>"
595 if 'source file' in dev:
596 text += self.tab() + "<source file='" +dev['source file']+ "'/>"
597 #elif v['type'] == 'block':
598 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
599 #else:
600 # return -1, 'Unknown disk type ' + v['type']
601 vpci = dev.get('vpci',None)
602 if vpci == None and 'metadata' in dev:
603 vpci = dev['metadata'].get('vpci',None)
604 text += self.pci2xml(vpci)
605
606 if bus_ide_dev:
607 text += self.tab() + "<target dev='hd" +vd_index+ "' bus='ide'/>" #TODO allows several type of disks
608 else:
609 text += self.tab() + "<target dev='vd" +vd_index+ "' bus='virtio'/>"
610 text += self.dec_tab() + '</disk>'
611 vd_index = chr(ord(vd_index)+1)
612 elif dev['type']=='xml':
613 dev_text = dev['xml']
614 if 'vpci' in dev:
615 dev_text = dev_text.replace('__vpci__', dev['vpci'])
616 if 'source file' in dev:
617 dev_text = dev_text.replace('__file__', dev['source file'])
618 if 'file format' in dev:
619 dev_text = dev_text.replace('__format__', dev['source file'])
620 if '__dev__' in dev_text:
621 dev_text = dev_text.replace('__dev__', vd_index)
622 vd_index = chr(ord(vd_index)+1)
623 text += dev_text
624 else:
625 return -1, 'Unknown device type ' + dev['type']
626
627 net_nb=0
628 bridge_interfaces = server.get('networks', [])
629 for v in bridge_interfaces:
630 #Get the brifge name
631 self.db_lock.acquire()
632 result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )
633 self.db_lock.release()
634 if result <= 0:
635 self.logger.error("create_xml_server ERROR %d getting nets %s", result, content)
636 return -1, content
637 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
638 #I know it is not secure
639 #for v in sorted(desc['network interfaces'].itervalues()):
640 model = v.get("model", None)
641 if content[0]['provider']=='default':
642 text += self.tab() + "<interface type='network'>" + \
643 self.inc_tab() + "<source network='" +content[0]['provider']+ "'/>"
644 elif content[0]['provider'][0:7]=='macvtap':
645 text += self.tab()+"<interface type='direct'>" + \
646 self.inc_tab() + "<source dev='" + self.get_local_iface_name(content[0]['provider'][8:]) + "' mode='bridge'/>" + \
647 self.tab() + "<target dev='macvtap0'/>"
648 if windows_os:
649 text += self.tab() + "<alias name='net" + str(net_nb) + "'/>"
650 elif model==None:
651 model = "virtio"
652 elif content[0]['provider'][0:6]=='bridge':
653 text += self.tab() + "<interface type='bridge'>" + \
654 self.inc_tab()+"<source bridge='" +self.get_local_iface_name(content[0]['provider'][7:])+ "'/>"
655 if windows_os:
656 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
657 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
658 elif model==None:
659 model = "virtio"
660 elif content[0]['provider'][0:3] == "OVS":
661 vlan = content[0]['provider'].replace('OVS:', '')
662 text += self.tab() + "<interface type='bridge'>" + \
663 self.inc_tab() + "<source bridge='ovim-" + str(vlan) + "'/>"
664 else:
665 return -1, 'Unknown Bridge net provider ' + content[0]['provider']
666 if model!=None:
667 text += self.tab() + "<model type='" +model+ "'/>"
668 if v.get('mac_address', None) != None:
669 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
670 text += self.pci2xml(v.get('vpci',None))
671 text += self.dec_tab()+'</interface>'
672
673 net_nb += 1
674
675 interfaces = numa.get('interfaces', [])
676
677 net_nb=0
678 for v in interfaces:
679 if self.develop_mode: #map these interfaces to bridges
680 text += self.tab() + "<interface type='bridge'>" + \
681 self.inc_tab()+"<source bridge='" +self.develop_bridge_iface+ "'/>"
682 if windows_os:
683 text += self.tab() + "<target dev='vnet" + str(net_nb)+ "'/>" +\
684 self.tab() + "<alias name='net" + str(net_nb)+ "'/>"
685 else:
686 text += self.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
687 if v.get('mac_address', None) != None:
688 text+= self.tab() +"<mac address='" +v['mac_address']+ "'/>"
689 text += self.pci2xml(v.get('vpci',None))
690 text += self.dec_tab()+'</interface>'
691 continue
692
693 if v['dedicated'] == 'yes': #passthrought
694 text += self.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
695 self.inc_tab() + "<source>"
696 self.inc_tab()
697 text += self.pci2xml(v['source'])
698 text += self.dec_tab()+'</source>'
699 text += self.pci2xml(v.get('vpci',None))
700 if windows_os:
701 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
702 text += self.dec_tab()+'</hostdev>'
703 net_nb += 1
704 else: #sriov_interfaces
705 #skip not connected interfaces
706 if v.get("net_id") == None:
707 continue
708 text += self.tab() + "<interface type='hostdev' managed='yes'>"
709 self.inc_tab()
710 if v.get('mac_address', None) != None:
711 text+= self.tab() + "<mac address='" +v['mac_address']+ "'/>"
712 text+= self.tab()+'<source>'
713 self.inc_tab()
714 text += self.pci2xml(v['source'])
715 text += self.dec_tab()+'</source>'
716 if v.get('vlan',None) != None:
717 text += self.tab() + "<vlan> <tag id='" + str(v['vlan']) + "'/> </vlan>"
718 text += self.pci2xml(v.get('vpci',None))
719 if windows_os:
720 text += self.tab() + "<alias name='hostdev" + str(net_nb) + "'/>"
721 text += self.dec_tab()+'</interface>'
722
723
724 text += self.dec_tab()+'</devices>'+\
725 self.dec_tab()+'</domain>'
726 return 0, text
727
728 def pci2xml(self, pci):
729 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
730 alows an empty pci text'''
731 if pci is None:
732 return ""
733 first_part = pci.split(':')
734 second_part = first_part[2].split('.')
735 return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
736 "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
737 "' function='0x" + second_part[1] + "'/>"
738
739 def tab(self):
740 """Return indentation according to xml_level"""
741 return "\n" + (' '*self.xml_level)
742
743 def inc_tab(self):
744 """Increment and return indentation according to xml_level"""
745 self.xml_level += 1
746 return self.tab()
747
748 def dec_tab(self):
749 """Decrement and return indentation according to xml_level"""
750 self.xml_level -= 1
751 return self.tab()
752
753 def create_ovs_bridge(self):
754 """
755 Create a bridge in compute OVS to allocate VMs
756 :return: True if success
757 """
758 if self.test or not self.connectivity:
759 return True
760
761
762 try:
763 command = 'sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true'
764 self.logger.debug("command: " + command)
765 (_, stdout, _) = self.ssh_conn.exec_command(command)
766 content = stdout.read()
767 if len(content) == 0:
768 return True
769 else:
770 return False
771 except paramiko.ssh_exception.SSHException as e:
772 self.logger.error("create_ovs_bridge ssh Exception: " + str(e))
773 if "SSH session not active" in str(e):
774 self.ssh_connect()
775 return False
776
777 def delete_port_to_ovs_bridge(self, vlan, net_uuid):
778 """
779 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
780 :param vlan: vlan port id
781 :param net_uuid: network id
782 :return:
783 """
784
785 if self.test or not self.connectivity:
786 return True
787 try:
788 port_name = 'ovim-' + str(vlan)
789 command = 'sudo ovs-vsctl del-port br-int ' + port_name
790 self.logger.debug("command: " + command)
791 (_, stdout, _) = self.ssh_conn.exec_command(command)
792 content = stdout.read()
793 if len(content) == 0:
794 return True
795 else:
796 return False
797 except paramiko.ssh_exception.SSHException as e:
798 self.logger.error("delete_port_to_ovs_bridge ssh Exception: " + str(e))
799 if "SSH session not active" in str(e):
800 self.ssh_connect()
801 return False
802
803 def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
804 """
805 Delete dhcp server process lining in namespace
806 :param vlan: segmentation id
807 :param net_uuid: network uuid
808 :param dhcp_path: conf fiel path that live in namespace side
809 :return:
810 """
811 if self.test or not self.connectivity:
812 return True
813 if not self.is_dhcp_port_free(vlan, net_uuid):
814 return True
815 try:
816 net_namespace = 'ovim-' + str(vlan)
817 dhcp_path = os.path.join(dhcp_path, net_namespace)
818 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
819
820 command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_file
821 self.logger.debug("command: " + command)
822 (_, stdout, _) = self.ssh_conn.exec_command(command)
823 content = stdout.read()
824
825 command = 'sudo ip netns exec ' + net_namespace + ' kill -9 ' + content
826 self.logger.debug("command: " + command)
827 (_, stdout, _) = self.ssh_conn.exec_command(command)
828 content = stdout.read()
829
830 # if len(content) == 0:
831 # return True
832 # else:
833 # return False
834 except paramiko.ssh_exception.SSHException as e:
835 self.logger.error("delete_dhcp_server ssh Exception: " + str(e))
836 if "SSH session not active" in str(e):
837 self.ssh_connect()
838 return False
839
840 def is_dhcp_port_free(self, host_id, net_uuid):
841 """
842 Check if any port attached to the a net in a vxlan mesh across computes nodes
843 :param host_id: host id
844 :param net_uuid: network id
845 :return: True if is not free
846 """
847 self.db_lock.acquire()
848 result, content = self.db.get_table(
849 FROM='ports',
850 WHERE={'type': 'instance:ovs', 'net_id': net_uuid}
851 )
852 self.db_lock.release()
853
854 if len(content) > 0:
855 return False
856 else:
857 return True
858
859 def is_port_free(self, host_id, net_uuid):
860 """
861 Check if there not ovs ports of a network in a compute host.
862 :param host_id: host id
863 :param net_uuid: network id
864 :return: True if is not free
865 """
866
867 self.db_lock.acquire()
868 result, content = self.db.get_table(
869 FROM='ports as p join instances as i on p.instance_id=i.uuid',
870 WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
871 )
872 self.db_lock.release()
873
874 if len(content) > 0:
875 return False
876 else:
877 return True
878
879 def add_port_to_ovs_bridge(self, vlan):
880 """
881 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
882 :param vlan: vlan port id
883 :return: True if success
884 """
885
886 if self.test:
887 return True
888 try:
889 port_name = 'ovim-' + str(vlan)
890 command = 'sudo ovs-vsctl add-port br-int ' + port_name + ' tag=' + str(vlan)
891 self.logger.debug("command: " + command)
892 (_, stdout, _) = self.ssh_conn.exec_command(command)
893 content = stdout.read()
894 if len(content) == 0:
895 return True
896 else:
897 return False
898 except paramiko.ssh_exception.SSHException as e:
899 self.logger.error("add_port_to_ovs_bridge ssh Exception: " + str(e))
900 if "SSH session not active" in str(e):
901 self.ssh_connect()
902 return False
903
904 def delete_dhcp_port(self, vlan, net_uuid):
905 """
906 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
907 :param vlan: segmentation id
908 :param net_uuid: network id
909 :return: True if success
910 """
911
912 if self.test:
913 return True
914
915 if not self.is_dhcp_port_free(vlan, net_uuid):
916 return True
917 self.delete_dhcp_interfaces(vlan)
918 return True
919
920 def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
921 """
922 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
923 :param vlan:
924 :param net_uuid:
925 :return: True if success
926 """
927 if self.test:
928 return
929
930 if not self.is_port_free(vlan, net_uuid):
931 return True
932 self.delete_port_to_ovs_bridge(vlan, net_uuid)
933 self.delete_linux_bridge(vlan)
934 return True
935
936 def delete_linux_bridge(self, vlan):
937 """
938 Delete a linux bridge in a scpecific compute.
939 :param vlan: vlan port id
940 :return: True if success
941 """
942
943 if self.test:
944 return True
945 try:
946 port_name = 'ovim-' + str(vlan)
947 command = 'sudo ip link set dev veth0-' + str(vlan) + ' down'
948 self.logger.debug("command: " + command)
949 (_, stdout, _) = self.ssh_conn.exec_command(command)
950 # content = stdout.read()
951 #
952 # if len(content) != 0:
953 # return False
954 command = 'sudo ifconfig ' + port_name + ' down && sudo brctl delbr ' + port_name
955 self.logger.debug("command: " + command)
956 (_, stdout, _) = self.ssh_conn.exec_command(command)
957 content = stdout.read()
958 if len(content) == 0:
959 return True
960 else:
961 return False
962 except paramiko.ssh_exception.SSHException as e:
963 self.logger.error("delete_linux_bridge ssh Exception: " + str(e))
964 if "SSH session not active" in str(e):
965 self.ssh_connect()
966 return False
967
968 def create_ovs_bridge_port(self, vlan):
969 """
970 Generate a linux bridge and attache the port to a OVS bridge
971 :param vlan: vlan port id
972 :return:
973 """
974 if self.test:
975 return
976 self.create_linux_bridge(vlan)
977 self.add_port_to_ovs_bridge(vlan)
978
979 def create_linux_bridge(self, vlan):
980 """
981 Create a linux bridge with STP active
982 :param vlan: netowrk vlan id
983 :return:
984 """
985
986 if self.test:
987 return True
988 try:
989 port_name = 'ovim-' + str(vlan)
990 command = 'sudo brctl show | grep ' + port_name
991 self.logger.debug("command: " + command)
992 (_, stdout, _) = self.ssh_conn.exec_command(command)
993 content = stdout.read()
994
995 # if exist nothing to create
996 # if len(content) == 0:
997 # return False
998
999 command = 'sudo brctl addbr ' + port_name
1000 self.logger.debug("command: " + command)
1001 (_, stdout, _) = self.ssh_conn.exec_command(command)
1002 content = stdout.read()
1003
1004 # if len(content) == 0:
1005 # return True
1006 # else:
1007 # return False
1008
1009 command = 'sudo brctl stp ' + port_name + ' on'
1010 self.logger.debug("command: " + command)
1011 (_, stdout, _) = self.ssh_conn.exec_command(command)
1012 content = stdout.read()
1013
1014 # if len(content) == 0:
1015 # return True
1016 # else:
1017 # return False
1018 command = 'sudo ip link set dev ' + port_name + ' up'
1019 self.logger.debug("command: " + command)
1020 (_, stdout, _) = self.ssh_conn.exec_command(command)
1021 content = stdout.read()
1022
1023 if len(content) == 0:
1024 return True
1025 else:
1026 return False
1027 except paramiko.ssh_exception.SSHException as e:
1028 self.logger.error("create_linux_bridge ssh Exception: " + str(e))
1029 if "SSH session not active" in str(e):
1030 self.ssh_connect()
1031 return False
1032
1033 def set_mac_dhcp_server(self, ip, mac, vlan, netmask, dhcp_path):
1034 """
1035 Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
1036 :param ip: IP address asigned to a VM
1037 :param mac: VM vnic mac to be macthed with the IP received
1038 :param vlan: Segmentation id
1039 :param netmask: netmask value
1040 :param path: dhcp conf file path that live in namespace side
1041 :return: True if success
1042 """
1043
1044 if self.test:
1045 return True
1046
1047 net_namespace = 'ovim-' + str(vlan)
1048 dhcp_path = os.path.join(dhcp_path, net_namespace)
1049 dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)
1050
1051 if not ip:
1052 return False
1053 try:
1054 ip_data = mac.upper() + ',' + ip
1055
1056 command = 'sudo ip netns exec ' + net_namespace + ' touch ' + dhcp_hostsdir
1057 self.logger.debug("command: " + command)
1058 (_, stdout, _) = self.ssh_conn.exec_command(command)
1059 content = stdout.read()
1060
1061 command = 'sudo ip netns exec ' + net_namespace + ' sudo bash -ec "echo ' + ip_data + ' >> ' + dhcp_hostsdir + '"'
1062
1063 self.logger.debug("command: " + command)
1064 (_, stdout, _) = self.ssh_conn.exec_command(command)
1065 content = stdout.read()
1066
1067 if len(content) == 0:
1068 return True
1069 else:
1070 return False
1071 except paramiko.ssh_exception.SSHException as e:
1072 self.logger.error("set_mac_dhcp_server ssh Exception: " + str(e))
1073 if "SSH session not active" in str(e):
1074 self.ssh_connect()
1075 return False
1076
1077 def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
1078 """
1079 Delete into dhcp conf file the ip assigned to a specific MAC address
1080
1081 :param ip: IP address asigned to a VM
1082 :param mac: VM vnic mac to be macthed with the IP received
1083 :param vlan: Segmentation id
1084 :param dhcp_path: dhcp conf file path that live in namespace side
1085 :return:
1086 """
1087
1088 if self.test:
1089 return False
1090 try:
1091 net_namespace = 'ovim-' + str(vlan)
1092 dhcp_path = os.path.join(dhcp_path, net_namespace)
1093 dhcp_hostsdir = os.path.join(dhcp_path, net_namespace)
1094
1095 if not ip:
1096 return False
1097
1098 ip_data = mac.upper() + ',' + ip
1099
1100 command = 'sudo ip netns exec ' + net_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
1101 self.logger.debug("command: " + command)
1102 (_, stdout, _) = self.ssh_conn.exec_command(command)
1103 content = stdout.read()
1104
1105 if len(content) == 0:
1106 return True
1107 else:
1108 return False
1109
1110 except paramiko.ssh_exception.SSHException as e:
1111 self.logger.error("set_mac_dhcp_server ssh Exception: " + str(e))
1112 if "SSH session not active" in str(e):
1113 self.ssh_connect()
1114 return False
1115
1116 def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway):
1117 """
1118 Generate a linux bridge and attache the port to a OVS bridge
1119 :param self:
1120 :param vlan: Segmentation id
1121 :param ip_range: IP dhcp range
1122 :param netmask: network netmask
1123 :param dhcp_path: dhcp conf file path that live in namespace side
1124 :param gateway: Gateway address for dhcp net
1125 :return: True if success
1126 """
1127
1128 if self.test:
1129 return True
1130 try:
1131 interface = 'tap-' + str(vlan)
1132 net_namespace = 'ovim-' + str(vlan)
1133 dhcp_path = os.path.join(dhcp_path, net_namespace)
1134 leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
1135 pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')
1136
1137 dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask
1138
1139 command = 'sudo ip netns exec ' + net_namespace + ' mkdir -p ' + dhcp_path
1140 self.logger.debug("command: " + command)
1141 (_, stdout, _) = self.ssh_conn.exec_command(command)
1142 content = stdout.read()
1143
1144 pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
1145 command = 'sudo ip netns exec ' + net_namespace + ' cat ' + pid_path
1146 self.logger.debug("command: " + command)
1147 (_, stdout, _) = self.ssh_conn.exec_command(command)
1148 content = stdout.read()
1149 # check if pid is runing
1150 pid_status_path = content
1151 if content:
1152 command = "ps aux | awk '{print $2 }' | grep " + pid_status_path
1153 self.logger.debug("command: " + command)
1154 (_, stdout, _) = self.ssh_conn.exec_command(command)
1155 content = stdout.read()
1156 if not content:
1157 command = 'sudo ip netns exec ' + net_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
1158 '--interface=' + interface + ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
1159 ' --dhcp-range ' + dhcp_range + ' --pid-file=' + pid_file + ' --dhcp-leasefile=' + leases_path + \
1160 ' --listen-address ' + gateway
1161
1162 self.logger.debug("command: " + command)
1163 (_, stdout, _) = self.ssh_conn.exec_command(command)
1164 content = stdout.readline()
1165
1166 if len(content) == 0:
1167 return True
1168 else:
1169 return False
1170 except paramiko.ssh_exception.SSHException as e:
1171 self.logger.error("launch_dhcp_server ssh Exception: " + str(e))
1172 if "SSH session not active" in str(e):
1173 self.ssh_connect()
1174 return False
1175
1176 def delete_dhcp_interfaces(self, vlan):
1177 """
1178 Create a linux bridge with STP active
1179 :param vlan: netowrk vlan id
1180 :return:
1181 """
1182
1183 if self.test:
1184 return True
1185 try:
1186 net_namespace = 'ovim-' + str(vlan)
1187 command = 'sudo ovs-vsctl del-port br-int ovs-tap-' + str(vlan)
1188 self.logger.debug("command: " + command)
1189 (_, stdout, _) = self.ssh_conn.exec_command(command)
1190 content = stdout.read()
1191
1192 command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + str(vlan) + ' down'
1193 self.logger.debug("command: " + command)
1194 (_, stdout, _) = self.ssh_conn.exec_command(command)
1195 content = stdout.read()
1196
1197 command = 'sudo ip link set dev ovs-tap-' + str(vlan) + ' down'
1198 self.logger.debug("command: " + command)
1199 (_, stdout, _) = self.ssh_conn.exec_command(command)
1200 content = stdout.read()
1201 except paramiko.ssh_exception.SSHException as e:
1202 self.logger.error("delete_dhcp_interfaces ssh Exception: " + str(e))
1203 if "SSH session not active" in str(e):
1204 self.ssh_connect()
1205 return False
1206
1207 def create_dhcp_interfaces(self, vlan, ip_listen_address, netmask):
1208 """
1209 Create a linux bridge with STP active
1210 :param vlan: segmentation id
1211 :param ip_listen_address: Listen Ip address for the dhcp service, the tap interface living in namesapce side
1212 :param netmask: dhcp net CIDR
1213 :return: True if success
1214 """
1215
1216 if self.test:
1217 return True
1218 try:
1219 net_namespace = 'ovim-' + str(vlan)
1220 namespace_interface = 'tap-' + str(vlan)
1221
1222 command = 'sudo ip netns add ' + net_namespace
1223 self.logger.debug("command: " + command)
1224 (_, stdout, _) = self.ssh_conn.exec_command(command)
1225 content = stdout.read()
1226
1227 command = 'sudo ip link add tap-' + str(vlan) + ' type veth peer name ovs-tap-' + str(vlan)
1228 self.logger.debug("command: " + command)
1229 (_, stdout, _) = self.ssh_conn.exec_command(command)
1230 content = stdout.read()
1231
1232 command = 'sudo ovs-vsctl add-port br-int ovs-tap-' + str(vlan) + ' tag=' + str(vlan)
1233 self.logger.debug("command: " + command)
1234 (_, stdout, _) = self.ssh_conn.exec_command(command)
1235 content = stdout.read()
1236
1237 command = 'sudo ip link set tap-' + str(vlan) + ' netns ' + net_namespace
1238 self.logger.debug("command: " + command)
1239 (_, stdout, _) = self.ssh_conn.exec_command(command)
1240 content = stdout.read()
1241
1242 command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev tap-' + str(vlan) + ' up'
1243 self.logger.debug("command: " + command)
1244 (_, stdout, _) = self.ssh_conn.exec_command(command)
1245 content = stdout.read()
1246
1247 command = 'sudo ip link set dev ovs-tap-' + str(vlan) + ' up'
1248 self.logger.debug("command: " + command)
1249 (_, stdout, _) = self.ssh_conn.exec_command(command)
1250 content = stdout.read()
1251
1252 command = 'sudo ip netns exec ' + net_namespace + ' ip link set dev lo up'
1253 self.logger.debug("command: " + command)
1254 (_, stdout, _) = self.ssh_conn.exec_command(command)
1255 content = stdout.read()
1256
1257 command = 'sudo ip netns exec ' + net_namespace + ' ' + ' ifconfig ' + namespace_interface \
1258 + ' ' + ip_listen_address + ' netmask ' + netmask
1259 self.logger.debug("command: " + command)
1260 (_, stdout, _) = self.ssh_conn.exec_command(command)
1261 content = stdout.read()
1262
1263 if len(content) == 0:
1264 return True
1265 else:
1266 return False
1267 except paramiko.ssh_exception.SSHException as e:
1268 self.logger.error("create_dhcp_interfaces ssh Exception: " + str(e))
1269 if "SSH session not active" in str(e):
1270 self.ssh_connect()
1271 return False
1272
1273
1274 def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
1275 """
1276 Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
1277 :param vxlan_interface: vlxan inteface name.
1278 :param remote_ip: tunnel endpoint remote compute ip.
1279 :return:
1280 """
1281 if self.test or not self.connectivity:
1282 return True
1283 try:
1284 command = 'sudo ovs-vsctl add-port br-int ' + vxlan_interface + \
1285 ' -- set Interface ' + vxlan_interface + ' type=vxlan options:remote_ip=' + remote_ip + \
1286 ' -- set Port ' + vxlan_interface + ' other_config:stp-path-cost=10'
1287 self.logger.debug("command: " + command)
1288 (_, stdout, _) = self.ssh_conn.exec_command(command)
1289 content = stdout.read()
1290 # print content
1291 if len(content) == 0:
1292 return True
1293 else:
1294 return False
1295 except paramiko.ssh_exception.SSHException as e:
1296 self.logger.error("create_ovs_vxlan_tunnel ssh Exception: " + str(e))
1297 if "SSH session not active" in str(e):
1298 self.ssh_connect()
1299 return False
1300
1301 def delete_ovs_vxlan_tunnel(self, vxlan_interface):
1302 """
1303 Delete a vlxan tunnel port from a OVS brdige.
1304 :param vxlan_interface: vlxan name to be delete it.
1305 :return: True if success.
1306 """
1307 if self.test or not self.connectivity:
1308 return True
1309 try:
1310 command = 'sudo ovs-vsctl del-port br-int ' + vxlan_interface
1311 self.logger.debug("command: " + command)
1312 (_, stdout, _) = self.ssh_conn.exec_command(command)
1313 content = stdout.read()
1314 # print content
1315 if len(content) == 0:
1316 return True
1317 else:
1318 return False
1319 except paramiko.ssh_exception.SSHException as e:
1320 self.logger.error("delete_ovs_vxlan_tunnel ssh Exception: " + str(e))
1321 if "SSH session not active" in str(e):
1322 self.ssh_connect()
1323 return False
1324
1325 def delete_ovs_bridge(self):
1326 """
1327 Delete a OVS bridge from a compute.
1328 :return: True if success
1329 """
1330 if self.test or not self.connectivity:
1331 return True
1332 try:
1333 command = 'sudo ovs-vsctl del-br br-int'
1334 self.logger.debug("command: " + command)
1335 (_, stdout, _) = self.ssh_conn.exec_command(command)
1336 content = stdout.read()
1337 if len(content) == 0:
1338 return True
1339 else:
1340 return False
1341 except paramiko.ssh_exception.SSHException as e:
1342 self.logger.error("delete_ovs_bridge ssh Exception: " + str(e))
1343 if "SSH session not active" in str(e):
1344 self.ssh_connect()
1345 return False
1346
1347 def get_file_info(self, path):
1348 command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1349 self.logger.debug("command: " + command)
1350 (_, stdout, _) = self.ssh_conn.exec_command(command)
1351 content = stdout.read()
1352 if len(content) == 0:
1353 return None # file does not exist
1354 else:
1355 return content.split(" ") # (permission, 1, owner, group, size, date, file)
1356
1357 def qemu_get_info(self, path):
1358 command = 'qemu-img info ' + path
1359 self.logger.debug("command: " + command)
1360 (_, stdout, stderr) = self.ssh_conn.exec_command(command)
1361 content = stdout.read()
1362 if len(content) == 0:
1363 error = stderr.read()
1364 self.logger.error("get_qemu_info error " + error)
1365 raise paramiko.ssh_exception.SSHException("Error getting qemu_info: " + error)
1366 else:
1367 try:
1368 return yaml.load(content)
1369 except yaml.YAMLError as exc:
1370 text = ""
1371 if hasattr(exc, 'problem_mark'):
1372 mark = exc.problem_mark
1373 text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
1374 self.logger.error("get_qemu_info yaml format Exception " + text)
1375 raise paramiko.ssh_exception.SSHException("Error getting qemu_info yaml format" + text)
1376
1377 def qemu_change_backing(self, inc_file, new_backing_file):
1378 command = 'qemu-img rebase -u -b ' + new_backing_file + ' ' + inc_file
1379 self.logger.debug("command: " + command)
1380 (_, _, stderr) = self.ssh_conn.exec_command(command)
1381 content = stderr.read()
1382 if len(content) == 0:
1383 return 0
1384 else:
1385 self.logger.error("qemu_change_backing error: " + content)
1386 return -1
1387
1388 def qemu_create_empty_disk(self, dev):
1389
1390 if not dev and 'source' not in dev and 'file format' not in dev and 'image_size' not in dev:
1391 self.logger.error("qemu_create_empty_disk error: missing image parameter")
1392 return -1
1393
1394 empty_disk_path = dev['source file']
1395
1396 command = 'qemu-img create -f qcow2 ' + empty_disk_path + ' ' + str(dev['image_size']) + 'G'
1397 self.logger.debug("command: " + command)
1398 (_, _, stderr) = self.ssh_conn.exec_command(command)
1399 content = stderr.read()
1400 if len(content) == 0:
1401 return 0
1402 else:
1403 self.logger.error("qemu_create_empty_disk error: " + content)
1404 return -1
1405
1406 def get_notused_filename(self, proposed_name, suffix=''):
1407 '''Look for a non existing file_name in the host
1408 proposed_name: proposed file name, includes path
1409 suffix: suffix to be added to the name, before the extention
1410 '''
1411 extension = proposed_name.rfind(".")
1412 slash = proposed_name.rfind("/")
1413 if extension < 0 or extension < slash: # no extension
1414 extension = len(proposed_name)
1415 target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
1416 info = self.get_file_info(target_name)
1417 if info is None:
1418 return target_name
1419
1420 index=0
1421 while info is not None:
1422 target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
1423 index+=1
1424 info = self.get_file_info(target_name)
1425 return target_name
1426
1427 def get_notused_path(self, proposed_path, suffix=''):
1428 '''Look for a non existing path at database for images
1429 proposed_path: proposed file name, includes path
1430 suffix: suffix to be added to the name, before the extention
1431 '''
1432 extension = proposed_path.rfind(".")
1433 if extension < 0:
1434 extension = len(proposed_path)
1435 if suffix != None:
1436 target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
1437 index=0
1438 while True:
1439 r,_=self.db.get_table(FROM="images",WHERE={"path":target_path})
1440 if r<=0:
1441 return target_path
1442 target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
1443 index+=1
1444
1445
1446 def delete_file(self, file_name):
1447 command = 'rm -f '+file_name
1448 self.logger.debug("command: " + command)
1449 (_, _, stderr) = self.ssh_conn.exec_command(command)
1450 error_msg = stderr.read()
1451 if len(error_msg) > 0:
1452 raise paramiko.ssh_exception.SSHException("Error deleting file: " + error_msg)
1453
1454 def copy_file(self, source, destination, perserve_time=True):
1455 if source[0:4]=="http":
1456 command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1457 dst=destination, src=source, dst_result=destination + ".result" )
1458 else:
1459 command = 'cp --no-preserve=mode'
1460 if perserve_time:
1461 command += ' --preserve=timestamps'
1462 command += " '{}' '{}'".format(source, destination)
1463 self.logger.debug("command: " + command)
1464 (_, _, stderr) = self.ssh_conn.exec_command(command)
1465 error_msg = stderr.read()
1466 if len(error_msg) > 0:
1467 raise paramiko.ssh_exception.SSHException("Error copying image to local host: " + error_msg)
1468
    def copy_remote_file(self, remote_file, use_incremental):
        ''' Copy a file from the repository to the local image folder, recursively
        copying its backing files first when the remote file is incremental.
        Reads and/or modifies self.localinfo['files'], the cache that maps remote
        paths to the unmodified local copies of images.
        params:
            remote_file: path (or http url) of the remote file
            use_incremental: None (leave the decision to this function), True, False
        return:
            local_file: name of the local file
            qemu_info: dict with qemu-img information of the local file
            use_incremental_out: True/False; same as use_incremental, but if None a decision is taken
        '''

        use_incremental_out = use_incremental
        new_backing_file = None
        local_file = None
        file_from_local = True

        # in case incremental use is not decided, take the decision depending on the image:
        # avoid the use of incremental if this image is already incremental
        # (http sources cannot be inspected with qemu-img, so they are never treated as local)
        if remote_file[0:4] == "http":
            file_from_local = False
        if file_from_local:
            qemu_remote_info = self.qemu_get_info(remote_file)
        if use_incremental_out==None:
            use_incremental_out = not ( file_from_local and 'backing file' in qemu_remote_info)
        # copy recursively the backing files first, so new_backing_file is known below
        if file_from_local and 'backing file' in qemu_remote_info:
            new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

        # check if the remote file is already present locally (cached copy)
        if use_incremental_out and remote_file in self.localinfo['files']:
            local_file = self.localinfo['files'][remote_file]
            local_file_info = self.get_file_info(local_file)
            if file_from_local:
                remote_file_info = self.get_file_info(remote_file)
            if local_file_info == None:
                local_file = None
            # get_file_info fields: [4]=size, [5]=date
            elif file_from_local and (local_file_info[4]!=remote_file_info[4] or local_file_info[5]!=remote_file_info[5]):
                # local copy of file not valid because date or size are different.
                # TODO DELETE local file if this file is not used by any active virtual machine
                try:
                    self.delete_file(local_file)
                    del self.localinfo['files'][remote_file]
                except Exception:
                    pass
                local_file = None
            else: # check that the local file has the same backing file, or there are not backing at all
                qemu_info = self.qemu_get_info(local_file)
                if new_backing_file != qemu_info.get('backing file'):
                    local_file = None


        if local_file == None: # copy the file
            img_name= remote_file.split('/') [-1]
            img_local = self.image_path + '/' + img_name
            local_file = self.get_notused_filename(img_local)
            self.copy_file(remote_file, local_file, use_incremental_out)

            if use_incremental_out:
                self.localinfo['files'][remote_file] = local_file
            if new_backing_file:
                self.qemu_change_backing(local_file, new_backing_file)
            qemu_info = self.qemu_get_info(local_file)

        return local_file, qemu_info, use_incremental_out
1536
    def launch_server(self, conn, server, rebuild=False, domain=None):
        """
        Start (or resume) a server on this host: prepare its disk images locally,
        build the libvirt domain XML and create the domain through 'conn'.

        :param conn: open libvirt connection to this host
        :param server: dict with the server description ('uuid', 'image_id',
                       optional 'metadata', 'paused', ...)
        :param rebuild: True to discard previous incremental files and rebuild disks
        :param domain: existing libvirt domain, if any; when given and rebuild is
                       False the domain is just resumed
        :return: (0, 'Success') on success, (<negative code>, <error text>) on error
        """
        if self.test:
            time.sleep(random.randint(20,150)) #sleep random timeto be make it a bit more real
            return 0, 'Success'

        server_id = server['uuid']
        paused = server.get('paused','no')
        try:
            if domain!=None and rebuild==False:
                domain.resume()
                #self.server_status[server_id] = 'ACTIVE'
                return 0, 'Success'

            self.db_lock.acquire()
            result, server_data = self.db.get_instance(server_id)
            self.db_lock.release()
            if result <= 0:
                self.logger.error("launch_server ERROR getting server from DB %d %s", result, server_data)
                return result, server_data

            #0: get image metadata
            server_metadata = server.get('metadata', {})
            use_incremental = None

            if "use_incremental" in server_metadata:
                use_incremental = False if server_metadata["use_incremental"] == "no" else True

            server_host_files = self.localinfo['server_files'].get( server['uuid'], {})
            if rebuild:
                #delete previous incremental files
                for file_ in server_host_files.values():
                    self.delete_file(file_['source file'] )
                server_host_files={}

            #1: obtain aditional devices (disks)
            #Put as first device the main disk
            devices = [ {"type":"disk", "image_id":server['image_id'], "vpci":server_metadata.get('vpci', None) } ]
            if 'extended' in server_data and server_data['extended']!=None and "devices" in server_data['extended']:
                devices += server_data['extended']['devices']
            empty_path = None
            for dev in devices:
                image_id = dev.get('image_id')
                if not image_id:
                    # a device without image: create a fresh empty qcow2 disk for it
                    import uuid
                    uuid_empty = str(uuid.uuid4())
                    empty_path = self.empty_image_path + uuid_empty + '.qcow2' # local path for empty disk

                    dev['source file'] = empty_path
                    dev['file format'] = 'qcow2'
                    self.qemu_create_empty_disk(dev)
                    server_host_files[uuid_empty] = {'source file': empty_path,
                                                     'file format': dev['file format']}

                    continue
                else:
                    self.db_lock.acquire()
                    result, content = self.db.get_table(FROM='images', SELECT=('path', 'metadata'),
                                                        WHERE={'uuid': image_id})
                    self.db_lock.release()
                    if result <= 0:
                        # NOTE(review): error_text is a tuple here, so the string
                        # concatenation on the next line raises TypeError, which is
                        # swallowed by the generic except below -- confirm/fix
                        error_text = "ERROR", result, content, "when getting image", dev['image_id']
                        self.logger.error("launch_server " + error_text)
                        return -1, error_text
                    if content[0]['metadata'] is not None:
                        dev['metadata'] = json.loads(content[0]['metadata'])
                    else:
                        dev['metadata'] = {}

                    if image_id in server_host_files:
                        dev['source file'] = server_host_files[image_id]['source file'] #local path
                        dev['file format'] = server_host_files[image_id]['file format'] # raw or qcow2
                        continue

                #2: copy image to host
                # (only reached for devices with an image_id not cached yet; the
                # empty_path branch below looks unreachable since the no-image case
                # 'continue'd above)
                if image_id:
                    remote_file = content[0]['path']
                else:
                    remote_file = empty_path
                use_incremental_image = use_incremental
                if dev['metadata'].get("use_incremental") == "no":
                    use_incremental_image = False
                local_file, qemu_info, use_incremental_image = self.copy_remote_file(remote_file, use_incremental_image)

                #create incremental image
                if use_incremental_image:
                    local_file_inc = self.get_notused_filename(local_file, '.inc')
                    command = 'qemu-img create -f qcow2 '+local_file_inc+ ' -o backing_file='+ local_file
                    self.logger.debug("command: " + command)
                    (_, _, stderr) = self.ssh_conn.exec_command(command)
                    error_msg = stderr.read()
                    if len(error_msg) > 0:
                        raise paramiko.ssh_exception.SSHException("Error creating incremental file: " + error_msg)
                    local_file = local_file_inc
                    qemu_info = {'file format':'qcow2'}

                server_host_files[ dev['image_id'] ] = {'source file': local_file, 'file format': qemu_info['file format']}

                dev['source file'] = local_file
                dev['file format'] = qemu_info['file format']

            # remember which local files belong to this server and persist later
            self.localinfo['server_files'][ server['uuid'] ] = server_host_files
            self.localinfo_dirty = True

            #3 Create XML
            result, xml = self.create_xml_server(server_data, devices, server_metadata) #local_file
            if result <0:
                self.logger.error("create xml server error: " + xml)
                return -2, xml
            self.logger.debug("create xml: " + xml)
            atribute = host_thread.lvirt_module.VIR_DOMAIN_START_PAUSED if paused == "yes" else 0
            #4 Start the domain
            if not rebuild: #ensures that any pending destroying server is done
                self.server_forceoff(True)
            #self.logger.debug("launching instance " + xml)
            conn.createXML(xml, atribute)
            #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'

            return 0, 'Success'

        except paramiko.ssh_exception.SSHException as e:
            text = e.args[0]
            self.logger.error("launch_server id='%s' ssh Exception: %s", server_id, text)
            if "SSH session not active" in text:
                self.ssh_connect()
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            self.logger.error("launch_server id='%s' libvirt Exception: %s", server_id, text)
        except Exception as e:
            text = str(e)
            self.logger.error("launch_server id='%s' Exception: %s", server_id, text)
        return -1, text
1668
    def update_servers_status(self):
        """
        Poll libvirt for the state of all domains of this host and propagate any
        change into self.server_status and the 'instances' DB table.
        Does nothing in test mode or when no server status is tracked.
        """
        # # virDomainState
        # VIR_DOMAIN_NOSTATE = 0
        # VIR_DOMAIN_RUNNING = 1
        # VIR_DOMAIN_BLOCKED = 2
        # VIR_DOMAIN_PAUSED = 3
        # VIR_DOMAIN_SHUTDOWN = 4
        # VIR_DOMAIN_SHUTOFF = 5
        # VIR_DOMAIN_CRASHED = 6
        # VIR_DOMAIN_PMSUSPENDED = 7   #TODO suspended

        if self.test or len(self.server_status)==0:
            return

        try:
            # snapshot the current state of every domain into domain_dict
            conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
            domains= conn.listAllDomains()
            domain_dict={}
            for domain in domains:
                uuid = domain.UUIDString() ;
                libvirt_status = domain.state()
                #print libvirt_status
                if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
                    new_status = "ACTIVE"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
                    new_status = "PAUSED"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
                    new_status = "INACTIVE"
                elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
                    new_status = "ERROR"
                else:
                    new_status = None
                domain_dict[uuid] = new_status
            conn.close()
        except host_thread.lvirt_module.libvirtError as e:
            self.logger.error("get_state() Exception " + e.get_error_message())
            return

        # compare against the tracked status; python2-only iteration (iteritems)
        for server_id, current_status in self.server_status.iteritems():
            new_status = None
            if server_id in domain_dict:
                new_status = domain_dict[server_id]
            else:
                # a tracked server with no libvirt domain is considered stopped
                new_status = "INACTIVE"

            if new_status == None or new_status == current_status:
                continue
            if new_status == 'INACTIVE' and current_status == 'ERROR':
                continue #keep ERROR status, because obviously this machine is not running
            #change status
            self.logger.debug("server id='%s' status change from '%s' to '%s'", server_id, current_status, new_status)
            STATUS={'progress':100, 'status':new_status}
            if new_status == 'ERROR':
                STATUS['last_error'] = 'machine has crashed'
            self.db_lock.acquire()
            r,_ = self.db.update_rows('instances', STATUS, {'uuid':server_id}, log=False)
            self.db_lock.release()
            if r>=0:
                # only cache the new status once the DB accepted the update
                self.server_status[server_id] = new_status
1728
1729 def action_on_server(self, req, last_retry=True):
1730 '''Perform an action on a req
1731 Attributes:
1732 req: dictionary that contain:
1733 server properties: 'uuid','name','tenant_id','status'
1734 action: 'action'
1735 host properties: 'user', 'ip_name'
1736 return (error, text)
1737 0: No error. VM is updated to new state,
1738 -1: Invalid action, as trying to pause a PAUSED VM
1739 -2: Error accessing host
1740 -3: VM nor present
1741 -4: Error at DB access
1742 -5: Error while trying to perform action. VM is updated to ERROR
1743 '''
1744 server_id = req['uuid']
1745 conn = None
1746 new_status = None
1747 old_status = req['status']
1748 last_error = None
1749
1750 if self.test:
1751 if 'terminate' in req['action']:
1752 new_status = 'deleted'
1753 elif 'shutoff' in req['action'] or 'shutdown' in req['action'] or 'forceOff' in req['action']:
1754 if req['status']!='ERROR':
1755 time.sleep(5)
1756 new_status = 'INACTIVE'
1757 elif 'start' in req['action'] and req['status']!='ERROR':
1758 new_status = 'ACTIVE'
1759 elif 'resume' in req['action'] and req['status']!='ERROR' and req['status']!='INACTIVE':
1760 new_status = 'ACTIVE'
1761 elif 'pause' in req['action'] and req['status']!='ERROR':
1762 new_status = 'PAUSED'
1763 elif 'reboot' in req['action'] and req['status']!='ERROR':
1764 new_status = 'ACTIVE'
1765 elif 'rebuild' in req['action']:
1766 time.sleep(random.randint(20,150))
1767 new_status = 'ACTIVE'
1768 elif 'createImage' in req['action']:
1769 time.sleep(5)
1770 self.create_image(None, req)
1771 else:
1772 try:
1773 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
1774 try:
1775 dom = conn.lookupByUUIDString(server_id)
1776 except host_thread.lvirt_module.libvirtError as e:
1777 text = e.get_error_message()
1778 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
1779 dom = None
1780 else:
1781 self.logger.error("action_on_server id='%s' libvirt exception: %s", server_id, text)
1782 raise e
1783
1784 if 'forceOff' in req['action']:
1785 if dom == None:
1786 self.logger.debug("action_on_server id='%s' domain not running", server_id)
1787 else:
1788 try:
1789 self.logger.debug("sending DESTROY to server id='%s'", server_id)
1790 dom.destroy()
1791 except Exception as e:
1792 if "domain is not running" not in e.get_error_message():
1793 self.logger.error("action_on_server id='%s' Exception while sending force off: %s",
1794 server_id, e.get_error_message())
1795 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
1796 new_status = 'ERROR'
1797
1798 elif 'terminate' in req['action']:
1799 if dom == None:
1800 self.logger.debug("action_on_server id='%s' domain not running", server_id)
1801 new_status = 'deleted'
1802 else:
1803 try:
1804 if req['action']['terminate'] == 'force':
1805 self.logger.debug("sending DESTROY to server id='%s'", server_id)
1806 dom.destroy()
1807 new_status = 'deleted'
1808 else:
1809 self.logger.debug("sending SHUTDOWN to server id='%s'", server_id)
1810 dom.shutdown()
1811 self.pending_terminate_server.append( (time.time()+10,server_id) )
1812 except Exception as e:
1813 self.logger.error("action_on_server id='%s' Exception while destroy: %s",
1814 server_id, e.get_error_message())
1815 last_error = 'action_on_server Exception while destroy: ' + e.get_error_message()
1816 new_status = 'ERROR'
1817 if "domain is not running" in e.get_error_message():
1818 try:
1819 dom.undefine()
1820 new_status = 'deleted'
1821 except Exception:
1822 self.logger.error("action_on_server id='%s' Exception while undefine: %s",
1823 server_id, e.get_error_message())
1824 last_error = 'action_on_server Exception2 while undefine:', e.get_error_message()
1825 #Exception: 'virDomainDetachDevice() failed'
1826 if new_status=='deleted':
1827 if server_id in self.server_status:
1828 del self.server_status[server_id]
1829 if req['uuid'] in self.localinfo['server_files']:
1830 for file_ in self.localinfo['server_files'][ req['uuid'] ].values():
1831 try:
1832 self.delete_file(file_['source file'])
1833 except Exception:
1834 pass
1835 del self.localinfo['server_files'][ req['uuid'] ]
1836 self.localinfo_dirty = True
1837
1838 elif 'shutoff' in req['action'] or 'shutdown' in req['action']:
1839 try:
1840 if dom == None:
1841 self.logger.debug("action_on_server id='%s' domain not running", server_id)
1842 else:
1843 dom.shutdown()
1844 # new_status = 'INACTIVE'
1845 #TODO: check status for changing at database
1846 except Exception as e:
1847 new_status = 'ERROR'
1848 self.logger.error("action_on_server id='%s' Exception while shutdown: %s",
1849 server_id, e.get_error_message())
1850 last_error = 'action_on_server Exception while shutdown: ' + e.get_error_message()
1851
1852 elif 'rebuild' in req['action']:
1853 if dom != None:
1854 dom.destroy()
1855 r = self.launch_server(conn, req, True, None)
1856 if r[0] <0:
1857 new_status = 'ERROR'
1858 last_error = r[1]
1859 else:
1860 new_status = 'ACTIVE'
1861 elif 'start' in req['action']:
1862 # The instance is only create in DB but not yet at libvirt domain, needs to be create
1863 rebuild = True if req['action']['start'] == 'rebuild' else False
1864 r = self.launch_server(conn, req, rebuild, dom)
1865 if r[0] <0:
1866 new_status = 'ERROR'
1867 last_error = r[1]
1868 else:
1869 new_status = 'ACTIVE'
1870
1871 elif 'resume' in req['action']:
1872 try:
1873 if dom == None:
1874 pass
1875 else:
1876 dom.resume()
1877 # new_status = 'ACTIVE'
1878 except Exception as e:
1879 self.logger.error("action_on_server id='%s' Exception while resume: %s",
1880 server_id, e.get_error_message())
1881
1882 elif 'pause' in req['action']:
1883 try:
1884 if dom == None:
1885 pass
1886 else:
1887 dom.suspend()
1888 # new_status = 'PAUSED'
1889 except Exception as e:
1890 self.logger.error("action_on_server id='%s' Exception while pause: %s",
1891 server_id, e.get_error_message())
1892
1893 elif 'reboot' in req['action']:
1894 try:
1895 if dom == None:
1896 pass
1897 else:
1898 dom.reboot()
1899 self.logger.debug("action_on_server id='%s' reboot:", server_id)
1900 #new_status = 'ACTIVE'
1901 except Exception as e:
1902 self.logger.error("action_on_server id='%s' Exception while reboot: %s",
1903 server_id, e.get_error_message())
1904 elif 'createImage' in req['action']:
1905 self.create_image(dom, req)
1906
1907
1908 conn.close()
1909 except host_thread.lvirt_module.libvirtError as e:
1910 if conn is not None: conn.close()
1911 text = e.get_error_message()
1912 new_status = "ERROR"
1913 last_error = text
1914 if 'LookupByUUIDString' in text or 'Domain not found' in text or 'No existe un dominio coincidente' in text:
1915 self.logger.debug("action_on_server id='%s' Exception removed from host", server_id)
1916 else:
1917 self.logger.error("action_on_server id='%s' Exception %s", server_id, text)
1918 #end of if self.test
1919 if new_status == None:
1920 return 1
1921
1922 self.logger.debug("action_on_server id='%s' new status=%s %s",server_id, new_status, last_error)
1923 UPDATE = {'progress':100, 'status':new_status}
1924
1925 if new_status=='ERROR':
1926 if not last_retry: #if there will be another retry do not update database
1927 return -1
1928 elif 'terminate' in req['action']:
1929 #PUT a log in the database
1930 self.logger.error("PANIC deleting server id='%s' %s", server_id, last_error)
1931 self.db_lock.acquire()
1932 self.db.new_row('logs',
1933 {'uuid':server_id, 'tenant_id':req['tenant_id'], 'related':'instances','level':'panic',
1934 'description':'PANIC deleting server from host '+self.name+': '+last_error}
1935 )
1936 self.db_lock.release()
1937 if server_id in self.server_status:
1938 del self.server_status[server_id]
1939 return -1
1940 else:
1941 UPDATE['last_error'] = last_error
1942 if new_status != 'deleted' and (new_status != old_status or new_status == 'ERROR') :
1943 self.db_lock.acquire()
1944 self.db.update_rows('instances', UPDATE, {'uuid':server_id}, log=True)
1945 self.server_status[server_id] = new_status
1946 self.db_lock.release()
1947 if new_status == 'ERROR':
1948 return -1
1949 return 1
1950
1951
1952 def restore_iface(self, name, mac, lib_conn=None):
1953 ''' make an ifdown, ifup to restore default parameter of na interface
1954 Params:
1955 mac: mac address of the interface
1956 lib_conn: connection to the libvirt, if None a new connection is created
1957 Return 0,None if ok, -1,text if fails
1958 '''
1959 conn=None
1960 ret = 0
1961 error_text=None
1962 if self.test:
1963 self.logger.debug("restore_iface '%s' %s", name, mac)
1964 return 0, None
1965 try:
1966 if not lib_conn:
1967 conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
1968 else:
1969 conn = lib_conn
1970
1971 #wait to the pending VM deletion
1972 #TODO.Revise self.server_forceoff(True)
1973
1974 iface = conn.interfaceLookupByMACString(mac)
1975 if iface.isActive():
1976 iface.destroy()
1977 iface.create()
1978 self.logger.debug("restore_iface '%s' %s", name, mac)
1979 except host_thread.lvirt_module.libvirtError as e:
1980 error_text = e.get_error_message()
1981 self.logger.error("restore_iface '%s' '%s' libvirt exception: %s", name, mac, error_text)
1982 ret=-1
1983 finally:
1984 if lib_conn is None and conn is not None:
1985 conn.close()
1986 return ret, error_text
1987
1988
1989 def create_image(self,dom, req):
1990 if self.test:
1991 if 'path' in req['action']['createImage']:
1992 file_dst = req['action']['createImage']['path']
1993 else:
1994 createImage=req['action']['createImage']
1995 img_name= createImage['source']['path']
1996 index=img_name.rfind('/')
1997 file_dst = self.get_notused_path(img_name[:index+1] + createImage['name'] + '.qcow2')
1998 image_status='ACTIVE'
1999 else:
2000 for retry in (0,1):
2001 try:
2002 server_id = req['uuid']
2003 createImage=req['action']['createImage']
2004 file_orig = self.localinfo['server_files'][server_id] [ createImage['source']['image_id'] ] ['source file']
2005 if 'path' in req['action']['createImage']:
2006 file_dst = req['action']['createImage']['path']
2007 else:
2008 img_name= createImage['source']['path']
2009 index=img_name.rfind('/')
2010 file_dst = self.get_notused_filename(img_name[:index+1] + createImage['name'] + '.qcow2')
2011
2012 self.copy_file(file_orig, file_dst)
2013 qemu_info = self.qemu_get_info(file_orig)
2014 if 'backing file' in qemu_info:
2015 for k,v in self.localinfo['files'].items():
2016 if v==qemu_info['backing file']:
2017 self.qemu_change_backing(file_dst, k)
2018 break
2019 image_status='ACTIVE'
2020 break
2021 except paramiko.ssh_exception.SSHException as e:
2022 image_status='ERROR'
2023 error_text = e.args[0]
2024 self.logger.error("create_image id='%s' ssh Exception: %s", server_id, error_text)
2025 if "SSH session not active" in error_text and retry==0:
2026 self.ssh_connect()
2027 except Exception as e:
2028 image_status='ERROR'
2029 error_text = str(e)
2030 self.logger.error("create_image id='%s' Exception: %s", server_id, error_text)
2031
2032 #TODO insert a last_error at database
2033 self.db_lock.acquire()
2034 self.db.update_rows('images', {'status':image_status, 'progress': 100, 'path':file_dst},
2035 {'uuid':req['new_image']['uuid']}, log=True)
2036 self.db_lock.release()
2037
    def edit_iface(self, port_id, old_net, new_net):
        '''Re-plug the SR-IOV (VF) interface of a running instance when its
        network assignment changes.
        A VF cannot be edited in place: the interface is hot-detached from the
        libvirt domain and then re-attached with the new parameters (vlan tag).
        Params:
            port_id: uuid of the port (joined from ports/resources_port tables)
            old_net: previous network; when truthy the interface is detached
            new_net: new network; when truthy the interface is (re)attached
        Returns nothing; failures are only logged.
        '''
        #This action imply remove and insert interface to put proper parameters
        if self.test:
            time.sleep(1)
        else:
            #get iface details from database (port + its physical resource)
            self.db_lock.acquire()
            r,c = self.db.get_table(FROM='ports as p join resources_port as rp on p.uuid=rp.port_id',
                                    WHERE={'port_id': port_id})
            self.db_lock.release()
            if r<0:
                self.logger.error("edit_iface %s DDBB error: %s", port_id, c)
                return
            elif r==0:
                self.logger.error("edit_iface %s port not found", port_id)
                return
            port=c[0]
            # only SR-IOV virtual functions can be hot re-plugged this way
            if port["model"]!="VF":
                self.logger.error("edit_iface %s ERROR model must be VF", port_id)
                return
            #create xml detach file (hostdev device identified by mac + source pci)
            xml=[]
            self.xml_level = 2
            xml.append("<interface type='hostdev' managed='yes'>")
            xml.append(" <mac address='" +port['mac']+ "'/>")
            xml.append(" <source>"+ self.pci2xml(port['pci'])+"\n </source>")
            xml.append('</interface>')


            try:
                conn=None
                conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
                dom = conn.lookupByUUIDString(port["instance_id"])
                if old_net:
                    text="\n".join(xml)
                    self.logger.debug("edit_iface detaching SRIOV interface " + text)
                    dom.detachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)
                if new_net:
                    # reuse the xml: replace the closing tag with the vlan element,
                    # then append the guest pci address and close the interface again
                    xml[-1] =" <vlan> <tag id='" + str(port['vlan']) + "'/> </vlan>"
                    self.xml_level = 1
                    xml.append(self.pci2xml(port.get('vpci',None)) )
                    xml.append('</interface>')
                    text="\n".join(xml)
                    self.logger.debug("edit_iface attaching SRIOV interface " + text)
                    dom.attachDeviceFlags(text, flags=host_thread.lvirt_module.VIR_DOMAIN_AFFECT_LIVE)

            except host_thread.lvirt_module.libvirtError as e:
                text = e.get_error_message()
                self.logger.error("edit_iface %s libvirt exception: %s", port["instance_id"], text)

            finally:
                if conn is not None: conn.close()
2090
2091
def create_server(server, db, db_lock, only_of_ports):
    '''Process a server creation request: compute its resource requirements
    (ram, vcpus, NUMA memory/cores, dataplane interfaces), select an
    appropriate host/numa at database and build the resource dictionary to be
    stored for the instance.
    Params:
        server: creation request (flavor, image_id, flavor_id, tenant_id,
            optional 'extended' requirements and 'networks' list)
        db, db_lock: database access object and its exclusion lock
        only_of_ports: restrict dataplane ports to openflow-connected ones
    Return:
        (0, resources-dict) on success; server['host_id'] is also set
        (negative, error-text) on failure
    '''
    extended = server.get('extended', None)
    requirements={}
    requirements['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
    requirements['ram'] = server['flavor'].get('ram', 0)
    if requirements['ram']== None:
        requirements['ram'] = 0
    requirements['vcpus'] = server['flavor'].get('vcpus', 0)
    if requirements['vcpus']== None:
        requirements['vcpus'] = 0
    #If extended is not defined get requirements from flavor
    if extended is None:
        #If extended is defined in flavor convert to dictionary and use it
        if 'extended' in server['flavor'] and server['flavor']['extended'] != None:
            json_acceptable_string = server['flavor']['extended'].replace("'", "\"")
            extended = json.loads(json_acceptable_string)
        else:
            extended = None
    #print json.dumps(extended, indent=4)

    #For simplicity only one numa VM are supported in the initial implementation
    if extended != None:
        numas = extended.get('numas', [])
        if len(numas)>1:
            return (-2, "Multi-NUMA VMs are not supported yet")
        #elif len(numas)<1:
        #    return (-1, "At least one numa must be specified")

        #a for loop is used in order to be ready to multi-NUMA VMs
        request = []
        for numa in numas:
            numa_req = {}
            numa_req['memory'] = numa.get('memory', 0)
            if 'cores' in numa:
                numa_req['proc_req_nb'] = numa['cores']                     #number of cores or threads to be reserved
                numa_req['proc_req_type'] = 'cores'                         #indicates whether cores or threads must be reserved
                numa_req['proc_req_list'] = numa.get('cores-id', None)      #list of ids to be assigned to the cores or threads
            elif 'paired-threads' in numa:
                numa_req['proc_req_nb'] = numa['paired-threads']
                numa_req['proc_req_type'] = 'paired-threads'
                numa_req['proc_req_list'] = numa.get('paired-threads-id', None)
            elif 'threads' in numa:
                numa_req['proc_req_nb'] = numa['threads']
                numa_req['proc_req_type'] = 'threads'
                numa_req['proc_req_list'] = numa.get('threads-id', None)
            else:
                numa_req['proc_req_nb'] = 0 # by default
                numa_req['proc_req_type'] = 'threads'



            #Generate a list of sriov and another for physical interfaces
            interfaces = numa.get('interfaces', [])
            sriov_list = []
            port_list = []
            for iface in interfaces:
                iface['bandwidth'] = int(iface['bandwidth'])
                if iface['dedicated'][:3]=='yes':
                    port_list.append(iface)
                else:
                    sriov_list.append(iface)

            #Save lists ordered from more restrictive to less bw requirements
            numa_req['sriov_list'] = sorted(sriov_list, key=lambda k: k['bandwidth'], reverse=True)
            numa_req['port_list'] = sorted(port_list, key=lambda k: k['bandwidth'], reverse=True)


            request.append(numa_req)

#        print "----------\n"+json.dumps(request[0], indent=4)
#        print '----------\n\n'

        #Search in db for an appropriate numa for each requested numa
        #at the moment multi-NUMA VMs are not supported
        if len(request)>0:
            requirements['numa'].update(request[0])
    if requirements['numa']['memory']>0:
        requirements['ram']=0  #By the moment I make incompatible ask for both Huge and non huge pages memory
    elif requirements['ram']==0:
        return (-1, "Memory information not set neither at extended field not at ram")
    if requirements['numa']['proc_req_nb']>0:
        requirements['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
    elif requirements['vcpus']==0:
        return (-1, "Processor information not set neither at extended field not at vcpus")


    db_lock.acquire()
    result, content = db.get_numas(requirements, server.get('host_id', None), only_of_ports)
    db_lock.release()

    if result == -1:
        return (-1, content)

    numa_id = content['numa_id']
    host_id = content['host_id']

    #obtain threads_id and calculate pinning
    cpu_pinning = []
    reserved_threads=[]
    if requirements['numa']['proc_req_nb']>0:
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_core',
                                       SELECT=('id','core_id','thread_id'),
                                       WHERE={'numa_id':numa_id,'instance_id': None, 'status':'ok'} )
        db_lock.release()
        if result <= 0:
            #print content
            return -1, content

        #convert rows to a dictionary indexed by core_id
        cores_dict = {}
        for row in content:
            if not row['core_id'] in cores_dict:
                cores_dict[row['core_id']] = []
            cores_dict[row['core_id']].append([row['thread_id'],row['id']])

        #In case full cores are requested
        paired = 'N'
        if requirements['numa']['proc_req_type'] == 'cores':
            #Get/create the list of the vcpu_ids
            vcpu_id_list = requirements['numa']['proc_req_list']
            if vcpu_id_list == None:
                vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))

            for threads in cores_dict.itervalues():
                #we need full cores
                if len(threads) != 2:
                    continue

                #set pinning for the first thread
                cpu_pinning.append( [ vcpu_id_list.pop(0), threads[0][0], threads[0][1] ] )

                #reserve so it is not used the second thread
                reserved_threads.append(threads[1][1])

                if len(vcpu_id_list) == 0:
                    break

        #In case paired threads are requested
        elif requirements['numa']['proc_req_type'] == 'paired-threads':
            paired = 'Y'
            #Get/create the list of the vcpu_ids
            if requirements['numa']['proc_req_list'] != None:
                vcpu_id_list = []
                for pair in requirements['numa']['proc_req_list']:
                    if len(pair)!=2:
                        # BUGFIX: an unreachable bare 'return' used to follow this line
                        return -1, "Field paired-threads-id not properly specified"
                    vcpu_id_list.append(pair[0])
                    vcpu_id_list.append(pair[1])
            else:
                vcpu_id_list = range(0,2*int(requirements['numa']['proc_req_nb']))

            for threads in cores_dict.itervalues():
                #we need full cores
                if len(threads) != 2:
                    continue
                #set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                #set pinning for the second thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])

                if len(vcpu_id_list) == 0:
                    break

        #In case normal threads are requested
        elif requirements['numa']['proc_req_type'] == 'threads':
            #Get/create the list of the vcpu_ids
            vcpu_id_list = requirements['numa']['proc_req_list']
            if vcpu_id_list == None:
                vcpu_id_list = range(0,int(requirements['numa']['proc_req_nb']))

            # cores with fewer free threads first, to avoid breaking full cores
            for threads_index in sorted(cores_dict, key=lambda k: len(cores_dict[k])):
                threads = cores_dict[threads_index]
                #set pinning for the first thread
                cpu_pinning.append([vcpu_id_list.pop(0), threads[0][0], threads[0][1]])

                #if exists, set pinning for the second thread
                if len(threads) == 2 and len(vcpu_id_list) != 0:
                    cpu_pinning.append([vcpu_id_list.pop(0), threads[1][0], threads[1][1]])

                if len(vcpu_id_list) == 0:
                    break

    #Get the source pci addresses for the selected numa
    used_sriov_ports = []
    for port in requirements['numa']['sriov_list']:
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
        db_lock.release()
        if result <= 0:
            #print content
            return -1, content
        for row in content:
            if row['id'] in used_sriov_ports or row['id']==port['port_id']:
                continue
            port['pci'] = row['pci']
            if 'mac_address' not in port:
                port['mac_address'] = row['mac']
            del port['mac']
            port['port_id']=row['id']
            port['Mbps_used'] = port['bandwidth']
            used_sriov_ports.append(row['id'])
            break

    for port in requirements['numa']['port_list']:
        port['Mbps_used'] = None
        if port['dedicated'] != "yes:sriov":
            port['mac_address'] = port['mac']
            del port['mac']
            continue
        db_lock.acquire()
        result, content = db.get_table(FROM='resources_port', SELECT=('id', 'pci', 'mac', 'Mbps'),WHERE={'numa_id':numa_id,'root_id': port['port_id'], 'port_id': None, 'Mbps_used': 0} )
        db_lock.release()
        if result <= 0:
            #print content
            return -1, content
        port['Mbps_used'] = content[0]['Mbps']
        for row in content:
            if row['id'] in used_sriov_ports or row['id']==port['port_id']:
                continue
            port['pci'] = row['pci']
            if 'mac_address' not in port:
                port['mac_address'] = row['mac'] # mac cannot be set to passthrough ports
            del port['mac']
            port['port_id']=row['id']
            used_sriov_ports.append(row['id'])
            break

#    print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
#    print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)

    server['host_id'] = host_id

    #Generate dictionary for saving in db the instance resources
    resources = {}
    resources['bridged-ifaces'] = []

    numa_dict = {}
    numa_dict['interfaces'] = []

    numa_dict['interfaces'] += requirements['numa']['port_list']
    numa_dict['interfaces'] += requirements['numa']['sriov_list']

    #Check bridge information
    unified_dataplane_iface=[]
    unified_dataplane_iface += requirements['numa']['port_list']
    unified_dataplane_iface += requirements['numa']['sriov_list']

    for control_iface in server.get('networks', []):
        control_iface['net_id']=control_iface.pop('uuid')
        #Get the brifge name
        db_lock.acquire()
        result, content = db.get_table(FROM='nets',
                                       SELECT=('name', 'type', 'vlan', 'provider', 'enable_dhcp',
                                               'dhcp_first_ip', 'dhcp_last_ip', 'cidr'),
                                       WHERE={'uuid': control_iface['net_id']})
        db_lock.release()
        if result < 0:
            pass
        elif result==0:
            return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface['net_id']
        else:
            network=content[0]
            if control_iface.get("type", 'virtual') == 'virtual':
                if network['type']!='bridge_data' and network['type']!='bridge_man':
                    return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface['net_id']
                resources['bridged-ifaces'].append(control_iface)
                if network.get("provider") and network["provider"][0:3] == "OVS":
                    control_iface["type"] = "instance:ovs"
                else:
                    control_iface["type"] = "instance:bridge"
                if network.get("vlan"):
                    control_iface["vlan"] = network["vlan"]

                if network.get("enable_dhcp") == 'true':
                    control_iface["enable_dhcp"] = network.get("enable_dhcp")
                    control_iface["dhcp_first_ip"] = network["dhcp_first_ip"]
                    control_iface["dhcp_last_ip"] = network["dhcp_last_ip"]
                    control_iface["cidr"] = network["cidr"]
            else:
                if network['type']!='data' and network['type']!='ptp':
                    return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface['net_id']
                #dataplane interface, look for it in the numa tree and asign this network
                iface_found=False
                for dataplane_iface in numa_dict['interfaces']:
                    if dataplane_iface['name'] == control_iface.get("name"):
                        if (dataplane_iface['dedicated'] == "yes" and control_iface["type"] != "PF") or \
                            (dataplane_iface['dedicated'] == "no" and control_iface["type"] != "VF") or \
                            (dataplane_iface['dedicated'] == "yes:sriov" and control_iface["type"] != "VFnotShared") :
                            return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
                                (control_iface.get("name"), dataplane_iface['dedicated'], control_iface["type"])
                        dataplane_iface['uuid'] = control_iface['net_id']
                        if dataplane_iface['dedicated'] == "no":
                            dataplane_iface['vlan'] = network['vlan']
                        if dataplane_iface['dedicated'] != "yes" and control_iface.get("mac_address"):
                            dataplane_iface['mac_address'] = control_iface.get("mac_address")
                        if control_iface.get("vpci"):
                            dataplane_iface['vpci'] = control_iface.get("vpci")
                        iface_found=True
                        break
                if not iface_found:
                    return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface.get("name")

    resources['host_id'] = host_id
    resources['image_id'] = server['image_id']
    resources['flavor_id'] = server['flavor_id']
    resources['tenant_id'] = server['tenant_id']
    resources['ram'] = requirements['ram']
    resources['vcpus'] = requirements['vcpus']
    resources['status'] = 'CREATING'

    if 'description' in server: resources['description'] = server['description']
    if 'name' in server: resources['name'] = server['name']

    resources['extended'] = {}                          #optional
    resources['extended']['numas'] = []
    numa_dict['numa_id'] = numa_id
    numa_dict['memory'] = requirements['numa']['memory']
    numa_dict['cores'] = []

    for core in cpu_pinning:
        numa_dict['cores'].append({'id': core[2], 'vthread': core[0], 'paired': paired})
    for core in reserved_threads:
        numa_dict['cores'].append({'id': core})
    resources['extended']['numas'].append(numa_dict)
    if extended!=None and 'devices' in extended:   #TODO allow extra devices without numa
        resources['extended']['devices'] = extended['devices']

#    '===================================={'
#    print json.dumps(resources, indent=4)
#    print '====================================}'

    return 0, resources