1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
25 This is thread that interact with the host and the libvirt to manage VM
26 One thread will be launched per host
# Module metadata: authorship and original creation date of this host thread module.
__author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
__date__ = "$10-jul-2014 12:07:15$"
43 from jsonschema
import validate
as js_v
, exceptions
as js_e
44 from vim_schema
import localinfo_schema
, hostinfo_schema
class RunCommandException(Exception):
    """Raised when executing a command on the compute node fails.

    Used by host_thread.run_command() for both local (subprocess) and
    remote (SSH) command failures, so callers can catch one exception type
    regardless of transport.
    """
49 class host_thread(threading
.Thread
):
def __init__(self, name, host, user, db, db_lock, test, image_path, host_id, version, develop_mode,
             develop_bridge_iface, password=None, keyfile=None, logger_name=None, debug=None):
    """Init a thread to communicate with compute node or ovs_controller.
    :param host_id: host identity
    :param name: name of the thread
    :param host: host ip or name to manage and user
    :param user, password, keyfile: user and credentials to connect to host
    :param db, db_lock': database class and lock to use it in exclusion
    """
    threading.Thread.__init__(self)
    # NOTE(review): several plain attribute assignments (e.g. self.name, self.host,
    # self.user, self.db, self.test) are elided from this excerpt; the lines below
    # are the ones visible.
    self.db_lock = db_lock
    self.password = password
    self.keyfile = keyfile
    self.localinfo_dirty = False   # set True whenever .openvim.yaml must be re-written to the host
    self.connectivity = True       # cleared by check_connectivity() when a probe command fails
    # Lazily import python-libvirt once per process (stored as a class attribute),
    # only when managing a real host (not test mode).
    if not test and not host_thread.lvirt_module:
        # NOTE(review): a 'try:' line is elided from this excerpt (the except below requires it)
        module_info = imp.find_module("libvirt")
        host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
    except (IOError, ImportError) as e:
        raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e))
    self.logger_name = logger_name
    # Fallback logger name when none was supplied (the guarding 'if' line is elided in this excerpt).
    self.logger_name = "openvim.host."+name
    self.logger = logging.getLogger(self.logger_name)
    # 'debug' is presumably a logging level name such as "DEBUG" — TODO confirm with callers.
    self.logger.setLevel(getattr(logging, debug))
    self.develop_mode = develop_mode
    self.develop_bridge_iface = develop_bridge_iface
    self.image_path = image_path
    self.empty_image_path = image_path
    self.host_id = host_id
    self.version = version
    self.server_status = {} #dictionary with pairs server_uuid:server_status
    self.pending_terminate_server =[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
    self.next_update_server_status = 0 #time when must be check servers status
    self.queueLock = threading.Lock()          # protects taskQueue puts/gets
    self.taskQueue = Queue.Queue(2000)         # bounded work queue consumed by run()
    self.run_command_session = None            # kept open session for run_command(keep_session=True)
    self.localhost = True if host == 'localhost' else False
    # libvirt connection URI over ssh; no_tty/no_verify avoid interactive prompts.
    self.lvirt_conn_uri = "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
        user=self.user, host=self.host)
    # Append the key file to the libvirt URI when one was provided
    # (the guarding 'if keyfile:' line is elided in this excerpt).
    self.lvirt_conn_uri += "&keyfile=" + keyfile
    self.remote_ip = None
def run_command(self, command, keep_session=False, ignore_exit_status=False):
    """Run a command passed as a str on a localhost or at remote machine.
    :param command: text with the command to execute.
    :param keep_session: if True it returns a <stdin> for sending input with '<stdin>.write("text\n")'.
    A command with keep_session=True MUST be followed by a command with keep_session=False in order to
    close the session and get the output
    :param ignore_exit_status: Return stdout and not raise an exepction in case of error.
    :return: the output of the command if 'keep_session=False' or the <stdin> object if 'keep_session=True'
    :raises: RunCommandException if command fails
    """
    # Refuse to open a second kept session before the previous one is closed.
    if self.run_command_session and keep_session:
        raise RunCommandException("Internal error. A command with keep_session=True must be followed by another "
                                  "command with keep_session=False to close session")
    # NOTE(review): the 'try:' and the local-vs-remote branch selectors (presumably
    # on self.localhost) are elided from this excerpt; branches below are annotated.
    # --- local execution: close a previously kept subprocess session and collect output ---
    if self.run_command_session:
        p = self.run_command_session
        self.run_command_session = None
        (output, outerror) = p.communicate()
        returncode = p.returncode
    # --- local execution: open a new subprocess and keep it as the session ---
    p = subprocess.Popen(('bash', "-c", command), stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.run_command_session = p
    # --- local one-shot execution (raises CalledProcessError on non-zero exit) ---
    if not ignore_exit_status:
        output = subprocess.check_output(('bash', "-c", command))
    # --- local one-shot execution ignoring the exit status ---
    p = subprocess.Popen(('bash', "-c", command), stdout=subprocess.PIPE)
    out, err = p.communicate()
    # --- remote (SSH) execution: close a previously kept exec session ---
    if self.run_command_session:
        (i, o, e) = self.run_command_session
        self.run_command_session = None
        i.channel.shutdown_write()
    # (Re)connect lazily when no SSH connection exists
    # NOTE(review): the self.ssh_connect() call under this guard is elided in this excerpt.
    if not self.ssh_conn:
    (i, o, e) = self.ssh_conn.exec_command(command, timeout=10)
    self.run_command_session = (i, o, e)
    returncode = o.channel.recv_exit_status()
    # Common result handling: raise on non-zero exit unless told to ignore it.
    if returncode != 0 and not ignore_exit_status:
        text = "run_command='{}' Error='{}'".format(command, outerror)
        self.logger.error(text)
        raise RunCommandException(text)
    self.logger.debug("run_command='{}' result='{}'".format(command, output))
    # NOTE(review): the success 'return output' line is elided in this excerpt.
    except RunCommandException:
        # re-raised below after resetting the session (raise line elided here)
    except subprocess.CalledProcessError as e:
        text = "run_command Exception '{}' '{}'".format(str(e), e.output)
    except (paramiko.ssh_exception.SSHException, Exception) as e:
        text = "run_command='{}' Exception='{}'".format(command, str(e))
    # Any failure invalidates the kept session before surfacing the error.
    self.run_command_session = None
    raise RunCommandException(text)
def ssh_connect(self):
    # Open (or reopen) the paramiko SSH connection to the compute host using the
    # credentials captured in __init__ (password and/or key file).
    # NOTE(review): a 'try:' line is elided from this excerpt (the except below requires it).
    self.ssh_conn = paramiko.SSHClient()
    self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    self.ssh_conn.load_system_host_keys()
    self.ssh_conn.connect(self.host, username=self.user, password=self.password, key_filename=self.keyfile,
                          timeout=10) # auth_timeout=10)
    # Record both endpoints of the established transport socket for later use.
    self.remote_ip = self.ssh_conn.get_transport().sock.getpeername()[0]
    self.local_ip = self.ssh_conn.get_transport().sock.getsockname()[0]
    except (paramiko.ssh_exception.SSHException, Exception) as e:
        text = 'ssh connect Exception: {}'.format(e)
        # NOTE(review): error propagation lines (logging / raise) are elided in this excerpt.
def check_connectivity(self):
    # Probe the host with a harmless bridge-listing command; on failure mark the
    # thread as disconnected so later OVS/bridge operations are skipped.
    # NOTE(review): a 'try:' line is elided from this excerpt (the except below requires it).
    command = 'sudo brctl show'
    self.run_command(command)
    except RunCommandException as e:
        self.connectivity = False
        self.logger.error("check_connectivity Exception: " + str(e))
def load_localinfo(self):
    # Load the per-host state file (.openvim.yaml) from the compute node into
    # self.localinfo and validate it against localinfo_schema. On any error the
    # fallback at the bottom installs an empty default structure.
    # NOTE(review): a 'try:' line is elided from this excerpt (the excepts below require it).
    self.run_command('sudo mkdir -p ' + self.image_path)
    result = self.run_command('cat {}/.openvim.yaml'.format(self.image_path))
    # NOTE(review): yaml.load on remote-file content; yaml.safe_load would be preferable.
    self.localinfo = yaml.load(result)
    js_v(self.localinfo, localinfo_schema)
    self.localinfo_dirty = False
    if 'server_files' not in self.localinfo:
        self.localinfo['server_files'] = {}
    self.logger.debug("localinfo loaded from host")
    # NOTE(review): the success 'return' line is elided in this excerpt.
    except RunCommandException as e:
        self.logger.error("load_localinfo Exception: " + str(e))
    except host_thread.lvirt_module.libvirtError as e:
        text = e.get_error_message()
        self.logger.error("load_localinfo libvirt Exception: " + text)
    except yaml.YAMLError as exc:
        # Include the position of the YAML parse error when available.
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
        self.logger.error("load_localinfo yaml format Exception " + text)
    except js_e.ValidationError as e:
        # Include the offending path within the document when available.
        if len(e.path)>0: text =" at '" + ":".join(map(str, e.path))+"'"
        self.logger.error("load_localinfo format Exception: %s %s", text, str(e))
    except Exception as e:
        self.logger.error("load_localinfo Exception: " + text)
    # not loaded, insert a default data and force saving by activating dirty flag
    self.localinfo = {'files':{}, 'server_files':{} }
    # self.localinfo_dirty=True
    self.localinfo_dirty =False
def load_hostinfo(self):
    # Load hostinfo.yaml from the compute node into self.hostinfo and validate it
    # against hostinfo_schema; errors are logged and a default is used instead.
    # NOTE(review): a 'try:' line is elided from this excerpt (the excepts below require it).
    result = self.run_command('cat {}/hostinfo.yaml'.format(self.image_path))
    # NOTE(review): yaml.load on remote-file content; yaml.safe_load would be preferable.
    self.hostinfo = yaml.load(result)
    js_v(self.hostinfo, hostinfo_schema)
    self.logger.debug("hostinfo load from host " + str(self.hostinfo))
    # NOTE(review): the success 'return' line is elided in this excerpt.
    except RunCommandException as e:
        self.logger.error("load_hostinfo ssh Exception: " + str(e))
    except host_thread.lvirt_module.libvirtError as e:
        text = e.get_error_message()
        self.logger.error("load_hostinfo libvirt Exception: " + text)
    except yaml.YAMLError as exc:
        # Include the position of the YAML parse error when available.
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
        self.logger.error("load_hostinfo yaml format Exception " + text)
    except js_e.ValidationError as e:
        # Include the offending path within the document when available.
        if len(e.path)>0: text =" at '" + ":".join(map(str, e.path))+"'"
        self.logger.error("load_hostinfo format Exception: %s %s", text, str(e))
    except Exception as e:
        self.logger.error("load_hostinfo Exception: " + text)
    #not loaded, insert a default data
    # NOTE(review): the default-assignment line under this comment is elided in this excerpt.
def save_localinfo(self, tries=3):
    # Persist self.localinfo back to .openvim.yaml on the host through a kept
    # run_command session ('cat > file' with YAML streamed to its stdin), then
    # clear the dirty flag. 'tries' presumably bounds retries — TODO confirm
    # (the retry loop lines are elided in this excerpt).
    self.localinfo_dirty = False
    # NOTE(review): test-mode guard and 'try:' lines are elided in this excerpt.
    command = 'cat > {}/.openvim.yaml'.format(self.image_path)
    in_stream = self.run_command(command, keep_session=True)
    yaml.safe_dump(self.localinfo, in_stream, explicit_start=True, indent=4, default_flow_style=False,
                   tags=False, encoding='utf-8', allow_unicode=True)
    result = self.run_command(command, keep_session=False) # to end session
    self.localinfo_dirty = False
    except RunCommandException as e:
        self.logger.error("save_localinfo ssh Exception: " + str(e))
    except host_thread.lvirt_module.libvirtError as e:
        text = e.get_error_message()
        self.logger.error("save_localinfo libvirt Exception: " + text)
    except yaml.YAMLError as exc:
        # Include the position of the YAML serialization error when available.
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            text = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
        self.logger.error("save_localinfo yaml format Exception " + text)
    except Exception as e:
        self.logger.error("save_localinfo Exception: " + text)
def load_servers_from_db(self):
    # Refresh self.server_status from the instances table for this host, and
    # migrate the legacy localinfo 'inc_files' layout into 'server_files'.
    self.db_lock.acquire()
    r,c = self.db.get_table(SELECT=('uuid','status', 'image_id'), FROM='instances', WHERE={'host_id': self.host_id})
    self.db_lock.release()
    self.server_status = {}
    # NOTE(review): the error-branch header (presumably 'if r<0:') is elided in this excerpt.
    self.logger.error("Error getting data from database: " + c)
    # NOTE(review): the loop header iterating rows ('for server in c:') is elided in this excerpt.
    self.server_status[ server['uuid'] ] = server['status']
    #convert from old version to new one
    if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
        server_files_dict = {'source file': self.localinfo['inc_files'][ server['uuid'] ] [0], 'file format':'raw' }
        # Infer qcow2 format from the file extension; everything else stays 'raw'.
        if server_files_dict['source file'][-5:] == 'qcow2':
            server_files_dict['file format'] = 'qcow2'
        self.localinfo['server_files'][ server['uuid'] ] = { server['image_id'] : server_files_dict }
    # Drop the legacy key once migrated and force a save of localinfo.
    if 'inc_files' in self.localinfo:
        del self.localinfo['inc_files']
        self.localinfo_dirty = True
def delete_unused_files(self):
    '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
    Deletes unused entries at self.loacalinfo and the corresponding local files.
    The only reason for this mismatch is the manual deletion of instances (VM) at database
    '''
    # NOTE(review): mutates self.localinfo['server_files'] while iterating .items();
    # safe on Python 2 (items() returns a list), would need list(...) on Python 3.
    for uuid,images in self.localinfo['server_files'].items():
        if uuid not in self.server_status:
            for localfile in images.values():
                # NOTE(review): a 'try:' line is elided in this excerpt (the except below requires it).
                self.logger.debug("deleting file '%s' of unused server '%s'", localfile['source file'], uuid)
                self.delete_file(localfile['source file'])
                except RunCommandException as e:
                    self.logger.error("Exception deleting file '%s': %s", localfile['source file'], str(e))
            del self.localinfo['server_files'][uuid]
            self.localinfo_dirty = True
def insert_task(self, task, *aditional):
    # Queue a task tuple (task, *aditional) for the worker loop; returns an
    # error pair on queue timeout.
    # NOTE(review): a 'try:' line is elided in this excerpt; the final return below
    # is presumably the Queue.Full handler's body — TODO confirm.
    self.queueLock.acquire()
    task = self.taskQueue.put( (task,) + aditional, timeout=5)
    self.queueLock.release()
    # NOTE(review): the success return line is elided in this excerpt.
    return -1, "timeout inserting a task over host " + self.name
362 self
.load_localinfo()
364 self
.load_servers_from_db()
365 self
.delete_unused_files()
368 self
.queueLock
.acquire()
369 if not self
.taskQueue
.empty():
370 task
= self
.taskQueue
.get()
373 self
.queueLock
.release()
377 if self
.localinfo_dirty
:
378 self
.save_localinfo()
379 elif self
.next_update_server_status
< now
:
380 self
.update_servers_status()
381 self
.next_update_server_status
= now
+ 5
382 elif len(self
.pending_terminate_server
)>0 and self
.pending_terminate_server
[0][0]<now
:
383 self
.server_forceoff()
388 if task
[0] == 'instance':
389 self
.logger
.debug("processing task instance " + str(task
[1]['action']))
393 r
= self
.action_on_server(task
[1], retry
==2)
396 elif task
[0] == 'image':
398 elif task
[0] == 'exit':
399 self
.logger
.debug("processing task exit")
402 elif task
[0] == 'reload':
403 self
.logger
.debug("processing task reload terminating and relaunching")
406 elif task
[0] == 'edit-iface':
407 self
.logger
.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
408 task
[1], task
[2], task
[3]))
409 self
.edit_iface(task
[1], task
[2], task
[3])
410 elif task
[0] == 'restore-iface':
411 self
.logger
.debug("processing task restore-iface={} mac={}".format(task
[1], task
[2]))
412 self
.restore_iface(task
[1], task
[2])
413 elif task
[0] == 'new-ovsbridge':
414 self
.logger
.debug("Creating compute OVS bridge")
415 self
.create_ovs_bridge()
416 elif task
[0] == 'new-vxlan':
417 self
.logger
.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task
[1], task
[2]))
418 self
.create_ovs_vxlan_tunnel(task
[1], task
[2])
419 elif task
[0] == 'del-ovsbridge':
420 self
.logger
.debug("Deleting OVS bridge")
421 self
.delete_ovs_bridge()
422 elif task
[0] == 'del-vxlan':
423 self
.logger
.debug("Deleting vxlan {} tunnel".format(task
[1]))
424 self
.delete_ovs_vxlan_tunnel(task
[1])
425 elif task
[0] == 'create-ovs-bridge-port':
426 self
.logger
.debug("Adding port ovim-{} to OVS bridge".format(task
[1]))
427 self
.create_ovs_bridge_port(task
[1])
428 elif task
[0] == 'del-ovs-port':
429 self
.logger
.debug("Delete bridge attached to ovs port vlan {} net {}".format(task
[1], task
[2]))
430 self
.delete_bridge_port_attached_to_ovs(task
[1], task
[2])
432 self
.logger
.debug("unknown task " + str(task
))
434 except Exception as e
:
435 self
.logger
.critical("Unexpected exception at run: " + str(e
), exc_info
=True)
def server_forceoff(self, wait_until_finished=False):
    # Walk the pending_terminate_server list (pairs of (due_time, uuid)) and force
    # off each server whose scheduled time has arrived; entries are popped as handled.
    while len(self.pending_terminate_server)>0:
        # NOTE(review): the 'now' timestamp computation is elided in this excerpt.
        if self.pending_terminate_server[0][0]>now:
            # Not due yet: either wait for it or stop, depending on the flag
            # (the sleep/continue and return lines are elided in this excerpt).
            if wait_until_finished:
        # Build a force-terminate action request for the due server.
        req ={'uuid':self.pending_terminate_server[0][1],
              'action':{'terminate':'force'},
             # NOTE(review): remaining dict entries/closing brace elided in this excerpt.
        self.action_on_server(req)
        self.pending_terminate_server.pop(0)
455 self
.server_forceoff(True)
456 if self
.localinfo_dirty
:
457 self
.save_localinfo()
459 self
.ssh_conn
.close()
460 except Exception as e
:
462 self
.logger
.error("terminate Exception: " + text
)
463 self
.logger
.debug("exit from host_thread")
def get_local_iface_name(self, generic_name):
    # Translate a generic interface name into the host-specific name using the
    # 'iface_names' mapping from hostinfo.yaml when present.
    if self.hostinfo != None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
        return self.hostinfo["iface_names"][generic_name]
    # NOTE(review): the fallback return (presumably the generic_name itself) is
    # elided in this excerpt.
470 def create_xml_server(self
, server
, dev_list
, server_metadata
={}):
471 """Function that implements the generation of the VM XML definition.
472 Additional devices are in dev_list list
473 The main disk is upon dev_list[0]"""
475 #get if operating system is Windows
477 os_type
= server_metadata
.get('os_type', None)
478 if os_type
== None and 'metadata' in dev_list
[0]:
479 os_type
= dev_list
[0]['metadata'].get('os_type', None)
480 if os_type
!= None and os_type
.lower() == "windows":
482 #get type of hard disk bus
483 bus_ide
= True if windows_os
else False
484 bus
= server_metadata
.get('bus', None)
485 if bus
== None and 'metadata' in dev_list
[0]:
486 bus
= dev_list
[0]['metadata'].get('bus', None)
488 bus_ide
= True if bus
=='ide' else False
492 text
= "<domain type='kvm'>"
494 topo
= server_metadata
.get('topology', None)
495 if topo
== None and 'metadata' in dev_list
[0]:
496 topo
= dev_list
[0]['metadata'].get('topology', None)
498 name
= server
.get('name', '')[:28] + "_" + server
['uuid'][:28] #qemu impose a length limit of 59 chars or not start. Using 58
499 text
+= self
.inc_tab() + "<name>" + name
+ "</name>"
501 text
+= self
.tab() + "<uuid>" + server
['uuid'] + "</uuid>"
504 if 'extended' in server
and server
['extended']!=None and 'numas' in server
['extended']:
505 numa
= server
['extended']['numas'][0]
508 memory
= int(numa
.get('memory',0))*1024*1024 #in KiB
510 memory
= int(server
['ram'])*1024;
512 if not self
.develop_mode
:
515 return -1, 'No memory assigned to instance'
517 text
+= self
.tab() + "<memory unit='KiB'>" +memory
+"</memory>"
518 text
+= self
.tab() + "<currentMemory unit='KiB'>" +memory
+ "</currentMemory>"
520 text
+= self
.tab()+'<memoryBacking>'+ \
521 self
.inc_tab() + '<hugepages/>'+ \
522 self
.dec_tab()+ '</memoryBacking>'
525 use_cpu_pinning
=False
526 vcpus
= int(server
.get("vcpus",0))
528 if 'cores-source' in numa
:
530 for index
in range(0, len(numa
['cores-source'])):
531 cpu_pinning
.append( [ numa
['cores-id'][index
], numa
['cores-source'][index
] ] )
533 if 'threads-source' in numa
:
535 for index
in range(0, len(numa
['threads-source'])):
536 cpu_pinning
.append( [ numa
['threads-id'][index
], numa
['threads-source'][index
] ] )
538 if 'paired-threads-source' in numa
:
540 for index
in range(0, len(numa
['paired-threads-source'])):
541 cpu_pinning
.append( [numa
['paired-threads-id'][index
][0], numa
['paired-threads-source'][index
][0] ] )
542 cpu_pinning
.append( [numa
['paired-threads-id'][index
][1], numa
['paired-threads-source'][index
][1] ] )
545 if use_cpu_pinning
and not self
.develop_mode
:
546 text
+= self
.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning
)) +"</vcpu>" + \
547 self
.tab()+'<cputune>'
549 for i
in range(0, len(cpu_pinning
)):
550 text
+= self
.tab() + "<vcpupin vcpu='" +str(cpu_pinning
[i
][0])+ "' cpuset='" +str(cpu_pinning
[i
][1]) +"'/>"
551 text
+= self
.dec_tab()+'</cputune>'+ \
552 self
.tab() + '<numatune>' +\
553 self
.inc_tab() + "<memory mode='strict' nodeset='" +str(numa
['source'])+ "'/>" +\
554 self
.dec_tab() + '</numatune>'
557 return -1, "Instance without number of cpus"
558 text
+= self
.tab()+"<vcpu>" + str(vcpus
) + "</vcpu>"
563 if dev
['type']=='cdrom' :
566 text
+= self
.tab()+ '<os>' + \
567 self
.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
569 text
+= self
.tab() + "<boot dev='cdrom'/>"
570 text
+= self
.tab() + "<boot dev='hd'/>" + \
571 self
.dec_tab()+'</os>'
573 text
+= self
.tab()+'<features>'+\
574 self
.inc_tab()+'<acpi/>' +\
575 self
.tab()+'<apic/>' +\
576 self
.tab()+'<pae/>'+ \
577 self
.dec_tab() +'</features>'
578 if topo
== "oneSocket:hyperthreading":
580 return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
581 text
+= self
.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % (vcpus
/2)
582 elif windows_os
or topo
== "oneSocket":
583 text
+= self
.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
585 text
+= self
.tab() + "<cpu mode='host-model'></cpu>"
586 text
+= self
.tab() + "<clock offset='utc'/>" +\
587 self
.tab() + "<on_poweroff>preserve</on_poweroff>" + \
588 self
.tab() + "<on_reboot>restart</on_reboot>" + \
589 self
.tab() + "<on_crash>restart</on_crash>"
590 text
+= self
.tab() + "<devices>" + \
591 self
.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
592 self
.tab() + "<serial type='pty'>" +\
593 self
.inc_tab() + "<target port='0'/>" + \
594 self
.dec_tab() + "</serial>" +\
595 self
.tab() + "<console type='pty'>" + \
596 self
.inc_tab()+ "<target type='serial' port='0'/>" + \
597 self
.dec_tab()+'</console>'
599 text
+= self
.tab() + "<controller type='usb' index='0'/>" + \
600 self
.tab() + "<controller type='ide' index='0'/>" + \
601 self
.tab() + "<input type='mouse' bus='ps2'/>" + \
602 self
.tab() + "<sound model='ich6'/>" + \
603 self
.tab() + "<video>" + \
604 self
.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
605 self
.dec_tab() + "</video>" + \
606 self
.tab() + "<memballoon model='virtio'/>" + \
607 self
.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
609 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
610 #> self.dec_tab()+'</hostdev>\n' +\
611 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
613 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
615 #If image contains 'GRAPH' include graphics
616 #if 'GRAPH' in image:
617 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
618 self
.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
619 self
.dec_tab() + "</graphics>"
623 bus_ide_dev
= bus_ide
624 if dev
['type']=='cdrom' or dev
['type']=='disk':
625 if dev
['type']=='cdrom':
627 text
+= self
.tab() + "<disk type='file' device='"+dev
['type']+"'>"
628 if 'file format' in dev
:
629 text
+= self
.inc_tab() + "<driver name='qemu' type='" +dev
['file format']+ "' cache='writethrough'/>"
630 if 'source file' in dev
:
631 text
+= self
.tab() + "<source file='" +dev
['source file']+ "'/>"
632 #elif v['type'] == 'block':
633 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
635 # return -1, 'Unknown disk type ' + v['type']
636 vpci
= dev
.get('vpci',None)
637 if vpci
== None and 'metadata' in dev
:
638 vpci
= dev
['metadata'].get('vpci',None)
639 text
+= self
.pci2xml(vpci
)
642 text
+= self
.tab() + "<target dev='hd" +vd_index
+ "' bus='ide'/>" #TODO allows several type of disks
644 text
+= self
.tab() + "<target dev='vd" +vd_index
+ "' bus='virtio'/>"
645 text
+= self
.dec_tab() + '</disk>'
646 vd_index
= chr(ord(vd_index
)+1)
647 elif dev
['type']=='xml':
648 dev_text
= dev
['xml']
650 dev_text
= dev_text
.replace('__vpci__', dev
['vpci'])
651 if 'source file' in dev
:
652 dev_text
= dev_text
.replace('__file__', dev
['source file'])
653 if 'file format' in dev
:
654 dev_text
= dev_text
.replace('__format__', dev
['source file'])
655 if '__dev__' in dev_text
:
656 dev_text
= dev_text
.replace('__dev__', vd_index
)
657 vd_index
= chr(ord(vd_index
)+1)
660 return -1, 'Unknown device type ' + dev
['type']
663 bridge_interfaces
= server
.get('networks', [])
664 for v
in bridge_interfaces
:
666 self
.db_lock
.acquire()
667 result
, content
= self
.db
.get_table(FROM
='nets', SELECT
=('provider',),WHERE
={'uuid':v
['net_id']} )
668 self
.db_lock
.release()
670 self
.logger
.error("create_xml_server ERROR %d getting nets %s", result
, content
)
672 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
673 #I know it is not secure
674 #for v in sorted(desc['network interfaces'].itervalues()):
675 model
= v
.get("model", None)
676 if content
[0]['provider']=='default':
677 text
+= self
.tab() + "<interface type='network'>" + \
678 self
.inc_tab() + "<source network='" +content
[0]['provider']+ "'/>"
679 elif content
[0]['provider'][0:7]=='macvtap':
680 text
+= self
.tab()+"<interface type='direct'>" + \
681 self
.inc_tab() + "<source dev='" + self
.get_local_iface_name(content
[0]['provider'][8:]) + "' mode='bridge'/>" + \
682 self
.tab() + "<target dev='macvtap0'/>"
684 text
+= self
.tab() + "<alias name='net" + str(net_nb
) + "'/>"
687 elif content
[0]['provider'][0:6]=='bridge':
688 text
+= self
.tab() + "<interface type='bridge'>" + \
689 self
.inc_tab()+"<source bridge='" +self
.get_local_iface_name(content
[0]['provider'][7:])+ "'/>"
691 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
692 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
695 elif content
[0]['provider'][0:3] == "OVS":
696 vlan
= content
[0]['provider'].replace('OVS:', '')
697 text
+= self
.tab() + "<interface type='bridge'>" + \
698 self
.inc_tab() + "<source bridge='ovim-" + str(vlan
) + "'/>"
700 return -1, 'Unknown Bridge net provider ' + content
[0]['provider']
702 text
+= self
.tab() + "<model type='" +model
+ "'/>"
703 if v
.get('mac_address', None) != None:
704 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
705 text
+= self
.pci2xml(v
.get('vpci',None))
706 text
+= self
.dec_tab()+'</interface>'
710 interfaces
= numa
.get('interfaces', [])
714 if self
.develop_mode
: #map these interfaces to bridges
715 text
+= self
.tab() + "<interface type='bridge'>" + \
716 self
.inc_tab()+"<source bridge='" +self
.develop_bridge_iface
+ "'/>"
718 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
719 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
721 text
+= self
.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
722 if v
.get('mac_address', None) != None:
723 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
724 text
+= self
.pci2xml(v
.get('vpci',None))
725 text
+= self
.dec_tab()+'</interface>'
728 if v
['dedicated'] == 'yes': #passthrought
729 text
+= self
.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
730 self
.inc_tab() + "<source>"
732 text
+= self
.pci2xml(v
['source'])
733 text
+= self
.dec_tab()+'</source>'
734 text
+= self
.pci2xml(v
.get('vpci',None))
736 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
737 text
+= self
.dec_tab()+'</hostdev>'
739 else: #sriov_interfaces
740 #skip not connected interfaces
741 if v
.get("net_id") == None:
743 text
+= self
.tab() + "<interface type='hostdev' managed='yes'>"
745 if v
.get('mac_address', None) != None:
746 text
+= self
.tab() + "<mac address='" +v
['mac_address']+ "'/>"
747 text
+= self
.tab()+'<source>'
749 text
+= self
.pci2xml(v
['source'])
750 text
+= self
.dec_tab()+'</source>'
751 if v
.get('vlan',None) != None:
752 text
+= self
.tab() + "<vlan> <tag id='" + str(v
['vlan']) + "'/> </vlan>"
753 text
+= self
.pci2xml(v
.get('vpci',None))
755 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
756 text
+= self
.dec_tab()+'</interface>'
759 text
+= self
.dec_tab()+'</devices>'+\
760 self
.dec_tab()+'</domain>'
def pci2xml(self, pci):
    '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
    alows an empty pci text'''
    # NOTE(review): the empty-pci guard implied by the docstring is elided in this excerpt.
    # Split "domain:bus:slot.function" into its four hex components.
    first_part = pci.split(':')
    second_part = first_part[2].split('.')
    return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
        "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
        "' function='0x" + second_part[1] + "'/>"
775 """Return indentation according to xml_level"""
776 return "\n" + (' '*self
.xml_level
)
779 """Increment and return indentation according to xml_level"""
784 """Decrement and return indentation according to xml_level"""
def create_ovs_bridge(self):
    """
    Create a bridge in compute OVS to allocate VMs
    :return: True if success
    """
    # Skip in test mode or when the host is unreachable
    # (the early-return value line is elided in this excerpt).
    if self.test or not self.connectivity:
    # NOTE(review): a 'try:' line is elided in this excerpt (the except below requires it).
    # --may-exist makes the call idempotent; STP avoids loops on br-int.
    self.run_command('sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true')
    # NOTE(review): the success return line is elided in this excerpt.
    except RunCommandException as e:
        self.logger.error("create_ovs_bridge ssh Exception: " + str(e))
        # A dead SSH session gets special handling (body elided in this excerpt).
        if "SSH session not active" in str(e):
def delete_port_to_ovs_bridge(self, vlan, net_uuid):
    """
    Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
    :param vlan: vlan port id
    :param net_uuid: network id
    """
    # Skip in test mode or when the host is unreachable
    # (the early-return line is elided in this excerpt).
    if self.test or not self.connectivity:
    # NOTE(review): a 'try:' line is elided in this excerpt (the except below requires it).
    port_name = 'ovim-{}'.format(str(vlan))
    command = 'sudo ovs-vsctl del-port br-int {}'.format(port_name)
    self.run_command(command)
    # NOTE(review): the success return line is elided in this excerpt.
    except RunCommandException as e:
        self.logger.error("delete_port_to_ovs_bridge ssh Exception: {}".format(str(e)))
def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
    """
    Delete dhcp server process lining in namespace
    :param vlan: segmentation id
    :param net_uuid: network uuid
    :param dhcp_path: conf fiel path that live in namespace side
    :return: True if success; False on command failure
    """
    # Skip in test mode or when the host is unreachable.
    if self.test or not self.connectivity:
        return True
    # Leave the server alone while the net still has attached ports.
    if not self.is_dhcp_port_free(vlan, net_uuid):
        return True
    try:
        dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
        dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
        pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

        # Read the dnsmasq PID recorded inside the namespace, then kill it.
        command = 'sudo ip netns exec {} cat {}'.format(dhcp_namespace, pid_file)
        content = self.run_command(command, ignore_exit_status=True)
        dns_pid = content.replace('\n', '')
        # BUGFIX: the format string previously had a single '{}' placeholder, so
        # dns_pid was silently dropped and 'kill -9' ran without a target PID.
        command = 'sudo ip netns exec {} kill -9 {}'.format(dhcp_namespace, dns_pid)
        self.run_command(command, ignore_exit_status=True)
        return True
    except RunCommandException as e:
        self.logger.error("delete_dhcp_server ssh Exception: " + str(e))
        return False
def is_dhcp_port_free(self, host_id, net_uuid):
    """
    Check if any port attached to the a net in a vxlan mesh across computes nodes
    :param host_id: host id
    :param net_uuid: network id
    :return: True if is not free
    """
    # Query ports of type 'instance:ovs' attached to the net under the DB lock.
    self.db_lock.acquire()
    result, content = self.db.get_table(
        # NOTE(review): the FROM clause line is elided in this excerpt.
        WHERE={'type': 'instance:ovs', 'net_id': net_uuid}
    self.db_lock.release()
    # NOTE(review): the closing parenthesis and the result-count check / return
    # lines are elided in this excerpt.
def is_port_free(self, host_id, net_uuid):
    """
    Check if there not ovs ports of a network in a compute host.
    :param host_id: host id
    :param net_uuid: network id
    :return: True if is not free
    """
    # Count 'instance:ovs' ports of this net whose instance lives on this host.
    self.db_lock.acquire()
    result, content = self.db.get_table(
        FROM='ports as p join instances as i on p.instance_id=i.uuid',
        WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
    self.db_lock.release()
    # NOTE(review): the closing parenthesis and the result-count check / return
    # lines are elided in this excerpt.
def add_port_to_ovs_bridge(self, vlan):
    """
    Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
    :param vlan: vlan port id
    :return: True if success
    """
    # NOTE(review): test-mode guard and 'try:' lines are elided in this excerpt.
    port_name = 'ovim-{}'.format(str(vlan))
    # Tag the port with the vlan so traffic is segregated per tenant net.
    command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(port_name, str(vlan))
    self.run_command(command)
    # NOTE(review): the success return line is elided in this excerpt.
    except RunCommandException as e:
        self.logger.error("add_port_to_ovs_bridge Exception: " + str(e))
def delete_dhcp_port(self, vlan, net_uuid, dhcp_path):
    """
    Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
    :param vlan: segmentation id
    :param net_uuid: network id
    :return: True if success
    """
    # NOTE(review): the test-mode guard lines are elided in this excerpt.
    # Only tear down the dhcp interfaces when no port of the net remains attached.
    if not self.is_dhcp_port_free(vlan, net_uuid):
        # NOTE(review): the early return line is elided in this excerpt.
    self.delete_dhcp_interfaces(vlan, dhcp_path)
    # NOTE(review): the success return line is elided in this excerpt.
def delete_bridge_port_attached_to_ovs(self, vlan, net_uuid):
    """
    Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
    :return: True if success
    """
    # NOTE(review): the test-mode guard lines are elided in this excerpt.
    # Only remove the port and its linux bridge when no instance still uses it.
    if not self.is_port_free(vlan, net_uuid):
        # NOTE(review): the early return line is elided in this excerpt.
    self.delete_port_to_ovs_bridge(vlan, net_uuid)
    self.delete_linux_bridge(vlan)
    # NOTE(review): the success return line is elided in this excerpt.
def delete_linux_bridge(self, vlan):
    """
    Delete a linux bridge in a scpecific compute.
    :param vlan: vlan port id
    :return: True if success
    """
    # NOTE(review): test-mode guard and 'try:' lines are elided in this excerpt.
    port_name = 'ovim-{}'.format(str(vlan))
    # Bring the bridge link down before removing it.
    command = 'sudo ip link set dev ovim-{} down'.format(str(vlan))
    self.run_command(command)
    command = 'sudo ifconfig {} down && sudo brctl delbr {}'.format(port_name, port_name)
    self.run_command(command)
    # NOTE(review): the success return line is elided in this excerpt.
    except RunCommandException as e:
        self.logger.error("delete_linux_bridge Exception: {}".format(str(e)))
def remove_link_bridge_to_ovs(self, vlan, link):
    """
    Delete a linux provider net connection to tenatn net
    :param vlan: vlan port id
    :param link: link name
    :return: True if success
    """
    # NOTE(review): test-mode guard and 'try:' lines are elided in this excerpt.
    # Names of the two ends of the veth pair linking the linux bridge to OVS.
    br_tap_name = '{}-vethBO'.format(str(vlan))
    br_ovs_name = '{}-vethOB'.format(str(vlan))

    # Delete ovs veth pair
    command = 'sudo ip link set dev {} down'.format(br_ovs_name)
    self.run_command(command)
    command = 'sudo ovs-vsctl del-port br-int {}'.format(br_ovs_name)
    self.run_command(command)

    # Delete br veth pair
    command = 'sudo ip link set dev {} down'.format(br_tap_name)
    self.run_command(command)

    # Delete br veth interface form bridge
    command = 'sudo brctl delif {} {}'.format(link, br_tap_name)
    self.run_command(command)

    # Delete br veth pair
    command = 'sudo ip link set dev {} down'.format(link)
    self.run_command(command)
    # NOTE(review): the success return line is elided in this excerpt.
    except RunCommandException as e:
        self.logger.error("remove_link_bridge_to_ovs Exception: {}".format(str(e)))
def create_ovs_bridge_port(self, vlan):
    """Create the per-vlan linux bridge and attach it as a tagged port of br-int.

    :param vlan: vlan port id
    """
    if self.test:
        return
    self.create_linux_bridge(vlan)
    self.add_port_to_ovs_bridge(vlan)
def create_linux_bridge(self, vlan):
    """Create the ovim-<vlan> linux bridge with STP active and bring it up.

    :param vlan: netowrk vlan id
    :return: True if success
    """
    if self.test:
        return True
    try:
        bridge_name = 'ovim-{}'.format(str(vlan))

        # idempotency: create the bridge only when it is not already listed
        cmd = 'sudo brctl show | grep {}'.format(bridge_name)
        result = self.run_command(cmd, ignore_exit_status=True)
        if not result:
            self.run_command('sudo brctl addbr {}'.format(bridge_name))
            self.run_command('sudo brctl stp {} on'.format(bridge_name))

        self.run_command('sudo ip link set dev {} up'.format(bridge_name))
        return True
    except RunCommandException as e:
        self.logger.error("create_linux_bridge ssh Exception: {}".format(str(e)))
        return False
def set_mac_dhcp_server(self, ip, mac, vlan, netmask, first_ip, dhcp_path):
    """
    Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
    :param ip: IP address asigned to a VM
    :param mac: VM vnic mac to be macthed with the IP received
    :param vlan: Segmentation id
    :param netmask: netmask value
    :param first_ip: first ip of the dhcp range (given to the namespace interface)
    :param dhcp_path: dhcp conf file path that live in namespace side
    :return: True if success
    """
    if self.test:
        return True

    dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
    dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
    dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
    ns_interface = '{}-vethDO'.format(str(vlan))
    try:
        command = 'sudo ip netns exec {} cat /sys/class/net/{}/address'.format(dhcp_namespace, ns_interface)
        iface_listen_mac = self.run_command(command, ignore_exit_status=True)

        # BUGFIX: run_command returns the command stdout (a string); the former
        # 'iface_listen_mac > 0' always evaluated True on Python 2 (any string
        # compares greater than an int), even for empty output. Check for
        # non-empty output instead.
        if iface_listen_mac:
            command = 'sudo ip netns exec {} cat {} | grep -i {}'.format(dhcp_namespace,
                                                                         dhcp_hostsdir,
                                                                         iface_listen_mac)
            content = self.run_command(command, ignore_exit_status=True)
            # NOTE(review): register the namespace interface mac only when grep
            # did not find it already — confirm against original behaviour
            if not content:
                ip_data = iface_listen_mac.upper().replace('\n', '') + ',' + first_ip
                dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)

                command = 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace,
                                                                                       ip_data,
                                                                                       dhcp_hostsdir)
                self.run_command(command)

            # register the VM mac with its fixed ip
            ip_data = mac.upper() + ',' + ip
            command = 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace,
                                                                                   ip_data,
                                                                                   dhcp_hostsdir)
            self.run_command(command, ignore_exit_status=False)
        return True
    except RunCommandException as e:
        self.logger.error("set_mac_dhcp_server ssh Exception: " + str(e))
        return False
def delete_mac_dhcp_server(self, ip, mac, vlan, dhcp_path):
    """Drop the mac/ip rule of a VM from the dnsmasq hosts file of its namespace.

    :param ip: IP address asigned to a VM
    :param mac: VM vnic mac to be macthed with the IP received
    :param vlan: Segmentation id
    :param dhcp_path: dhcp conf file path that live in namespace side
    :return: True if success
    """
    if self.test:
        return True

    dhcp_namespace = str(vlan) + '-dnsmasq'
    dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
    dhcp_hostsdir = os.path.join(dhcp_path, dhcp_namespace)
    try:
        # delete any line matching "MAC,ip" from the hosts file
        ip_data = mac.upper() + ',' + ip
        command = 'sudo ip netns exec ' + dhcp_namespace + ' sudo sed -i \'/' + ip_data + '/d\' ' + dhcp_hostsdir
        self.run_command(command)
        return True
    except RunCommandException as e:
        self.logger.error("delete_mac_dhcp_server Exception: " + str(e))
        return False
def launch_dhcp_server(self, vlan, ip_range, netmask, dhcp_path, gateway, dns_list=None, routes=None):
    """
    Launch a dnsmasq instance inside the per-vlan namespace, unless one is already running.
    :param vlan: Segmentation id
    :param ip_range: IP dhcp range
    :param netmask: network netmask
    :param dhcp_path: dhcp conf file path that live in namespace side
    :param gateway: Gateway address for dhcp net
    :param dns_list: dns list for dhcp server
    :param routes: routes list for dhcp server
    :return: True if success
    """
    if self.test:
        return True
    try:
        ns_interface = str(vlan) + '-vethDO'
        dhcp_namespace = str(vlan) + '-dnsmasq'
        dhcp_path = os.path.join(dhcp_path, dhcp_namespace, '')
        leases_path = os.path.join(dhcp_path, "dnsmasq.leases")
        pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

        dhcp_range = ip_range[0] + ',' + ip_range[1] + ',' + netmask

        command = 'sudo ip netns exec {} mkdir -p {}'.format(dhcp_namespace, dhcp_path)
        self.run_command(command)

        # check if dnsmasq process is running
        dnsmasq_is_runing = False
        pid_path = os.path.join(dhcp_path, 'dnsmasq.pid')
        command = 'sudo ip netns exec ' + dhcp_namespace + ' ls ' + pid_path
        content = self.run_command(command, ignore_exit_status=True)

        # check if pid is runing
        if content:
            pid_path = content.replace('\n', '')
            # BUGFIX: the pattern was built as "... grep {}" + pid_path, leaving a
            # literal '{}' inside the grep pattern (.format cannot be applied
            # because of the awk braces), so the check never matched and dnsmasq
            # was relaunched on every call.
            command = "ps aux | awk '{print $2 }' | grep " + pid_path
            dnsmasq_is_runing = self.run_command(command, ignore_exit_status=True)

        gateway_option = ' --dhcp-option=3,' + gateway

        dhcp_route_option = ''
        if routes:
            dhcp_route_option = ' --dhcp-option=121'
            for key, value in routes.iteritems():
                if 'default' == key:
                    # a default route overrides the gateway option
                    gateway_option = ' --dhcp-option=3,' + value
                else:
                    dhcp_route_option += ',' + key + ',' + value

        dns_data = ''
        if dns_list:
            dns_data = ' --dhcp-option=6'
            for dns in dns_list:
                dns_data += ',' + dns

        if not dnsmasq_is_runing:
            command = 'sudo ip netns exec ' + dhcp_namespace + ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
                      '--interface=' + ns_interface + \
                      ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path + \
                      ' --dhcp-range ' + dhcp_range + \
                      ' --pid-file=' + pid_file + \
                      ' --dhcp-leasefile=' + leases_path + \
                      ' --listen-address ' + ip_range[0] + \
                      gateway_option + \
                      dhcp_route_option + \
                      dns_data
            self.run_command(command)
        return True
    except RunCommandException as e:
        self.logger.error("launch_dhcp_server ssh Exception: " + str(e))
        return False
def delete_dhcp_interfaces(self, vlan, dhcp_path):
    """Tear down the dnsmasq namespace of a vlan: veth pair, conf folder and the namespace.

    :param vlan: netowrk vlan id
    :param dhcp_path: dhcp conf folder
    :return: True if success
    """
    if self.test:
        return True
    try:
        ns_side = '{}-vethDO'.format(str(vlan))
        ovs_side = '{}-vethOD'.format(str(vlan))
        ns_name = '{}-dnsmasq'.format(str(vlan))

        dhcp_path = os.path.join(dhcp_path, ns_name)
        self.run_command('sudo ovs-vsctl del-port br-int {}'.format(ovs_side),
                         ignore_exit_status=True)  # to end session
        self.run_command('sudo ip link set dev {} down'.format(ovs_side),
                         ignore_exit_status=True)  # to end session
        self.run_command('sudo ip netns exec {} ip link set dev {} down'.format(ns_name, ns_side),
                         ignore_exit_status=True)
        self.run_command('sudo rm -rf {}'.format(dhcp_path))
        self.run_command('sudo ip netns del {}'.format(ns_name))
        return True
    except RunCommandException as e:
        self.logger.error("delete_dhcp_interfaces ssh Exception: {}".format(str(e)))
        return False
def create_dhcp_interfaces(self, vlan, ip_listen_address, netmask):
    """Create the dnsmasq namespace of a vlan and wire it to the OVS integration bridge.

    :param vlan: segmentation id
    :param ip_listen_address: Listen Ip address for the dhcp service, the tap interface living in namesapce side
    :param netmask: dhcp net CIDR
    :return: True if success
    """
    if self.test:
        return True
    try:
        ovs_side = '{}-vethOD'.format(str(vlan))
        ns_side = '{}-vethDO'.format(str(vlan))
        ns_name = '{}-dnsmasq'.format(str(vlan))

        # namespace plus veth pair, one end moved inside the namespace
        self.run_command('sudo ip netns add {}'.format(ns_name))
        self.run_command('sudo ip link add {} type veth peer name {}'.format(ns_side, ovs_side))
        self.run_command('sudo ip link set {} netns {}'.format(ns_side, ns_name))
        self.run_command('sudo ip netns exec {} ip link set dev {} up'.format(ns_name, ns_side))

        # ovs end becomes a tagged port of br-int
        self.run_command('sudo ovs-vsctl add-port br-int {} tag={}'.format(ovs_side, str(vlan)),
                         ignore_exit_status=True)
        self.run_command('sudo ip link set dev {} up'.format(ovs_side))

        # loopback and listen address inside the namespace
        self.run_command('sudo ip netns exec {} ip link set dev lo up'.format(ns_name))
        self.run_command('sudo ip netns exec {} ifconfig {} {} netmask {}'.format(ns_name,
                                                                                  ns_side,
                                                                                  ip_listen_address,
                                                                                  netmask))
        return True
    except RunCommandException as e:
        self.logger.error("create_dhcp_interfaces ssh Exception: {}".format(str(e)))
        return False
def delete_qrouter_connection(self, vlan, link):
    """Delete the qrouter namespace of a vlan and every veth interface attached to it.

    :param vlan: segmentation id
    :param link: user linux bridge name
    :return: True if success
    """
    if self.test:
        return True
    try:
        ns_qouter = '{}-qrouter'.format(str(vlan))
        qrouter_ovs_veth = '{}-vethOQ'.format(str(vlan))
        qrouter_ns_veth = '{}-vethQO'.format(str(vlan))
        qrouter_br_veth = '{}-vethBQ'.format(str(vlan))
        qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))

        # detach the ovs side veth from the integration bridge
        command = 'sudo ovs-vsctl del-port br-int {}'.format(qrouter_ovs_veth)
        self.run_command(command)

        # bring the namespace side veth down, then drop the namespace
        command = 'sudo ip netns exec {} ip link set dev {} down'.format(ns_qouter, qrouter_ns_veth)
        self.run_command(command)

        command = 'sudo ip netns del ' + ns_qouter
        self.run_command(command)

        # bring down the veth endpoints left in the root namespace
        command = 'sudo ip link set dev {} down'.format(qrouter_br_veth)
        self.run_command(command)

        command = 'sudo ip link set dev {} down'.format(qrouter_ovs_veth)
        self.run_command(command)

        command = 'sudo ip link set dev {} down'.format(qrouter_ns_router_veth)
        self.run_command(command)

        # finally detach the bridge side veth from the user bridge
        command = 'sudo brctl delif {} {}'.format(link, qrouter_br_veth)
        self.run_command(command)
        return True
    except RunCommandException as e:
        self.logger.error("delete_qrouter_connection ssh Exception: {}".format(str(e)))
        return False
def create_qrouter_ovs_connection(self, vlan, gateway, dhcp_cidr):
    """
    Create qrouter Namesapce with all veth interfaces need it between NS and OVS
    :param vlan: segmentation id
    :param gateway: gateway address of the dhcp net
    :param dhcp_cidr: dhcp net cidr, used to derive the prefix length
    :return: True if success
    """
    if self.test:
        return True
    try:
        ns_qouter = '{}-qrouter'.format(str(vlan))
        qrouter_ovs_veth = '{}-vethOQ'.format(str(vlan))
        qrouter_ns_veth = '{}-vethQO'.format(str(vlan))

        # create namespace and veth pair
        command = 'sudo ip netns add {}'.format(ns_qouter)
        self.run_command(command)

        command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_ns_veth, qrouter_ovs_veth)
        self.run_command(command)

        # up ovs veth interface
        command = 'sudo ip link set dev {} up'.format(qrouter_ovs_veth)
        self.run_command(command)

        # add ovs veth to ovs br-int
        command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(qrouter_ovs_veth, vlan)
        self.run_command(command)

        # move the peer into the namespace and bring everything up
        command = 'sudo ip link set {} netns {}'.format(qrouter_ns_veth, ns_qouter)
        self.run_command(command)

        command = 'sudo ip netns exec {} ip link set dev lo up'.format(ns_qouter)
        self.run_command(command)

        command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_veth)
        self.run_command(command)

        from netaddr import IPNetwork
        ip_tools = IPNetwork(dhcp_cidr)
        cidr_len = ip_tools.prefixlen

        # configure the gateway address on the namespace side veth
        command = 'sudo ip netns exec {} ip address add {}/{} dev {}'.format(ns_qouter, gateway, cidr_len,
                                                                             qrouter_ns_veth)
        self.run_command(command)
        return True
    except RunCommandException as e:
        # BUGFIX: this log line previously named the wrong method ("Create_dhcp_interfaces")
        self.logger.error("create_qrouter_ovs_connection ssh Exception: {}".format(str(e)))
        return False
def add_ns_routes(self, vlan, routes):
    """Install static routes inside the qrouter namespace of a vlan.

    :param vlan: segmentation id
    :param routes: dict mapping a destination (cidr or 'default') to its next hop
    :return: True if success
    """
    if self.test:
        return True
    try:
        ns_qouter = '{}-qrouter'.format(str(vlan))
        qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))

        for key, value in routes.iteritems():
            if key == 'default':
                command = 'sudo ip netns exec {} ip route add {} via {} '.format(ns_qouter, key, value)
            else:
                # non-default routes are pinned to the bridge side veth
                command = 'sudo ip netns exec {} ip route add {} via {} dev {}'.format(ns_qouter, key, value,
                                                                                       qrouter_ns_router_veth)
            self.run_command(command)
        return True
    except RunCommandException as e:
        self.logger.error("add_ns_routes, error adding routes to namesapce, {}".format(str(e)))
        return False
def create_qrouter_br_connection(self, vlan, cidr, link):
    """
    Create veth interfaces between user bridge (link) and OVS, and NAT the tenant net.
    :param vlan: segmentation id
    :param cidr: tenant net cidr (NAT destination)
    :param link: dict with the user bridge name ('iface') and its nat address ('nat')
    :return: True if success
    """
    if self.test:
        return True
    try:
        ns_qouter = '{}-qrouter'.format(str(vlan))
        qrouter_ns_router_veth = '{}-vethQB'.format(str(vlan))
        qrouter_br_veth = '{}-vethBQ'.format(str(vlan))

        # proceed only if the bridge given by the user actually exists
        command = 'sudo brctl show | grep {}'.format(link['iface'])
        content = self.run_command(command, ignore_exit_status=True)
        if content:
            # veth pair, bridge side brought up in the root namespace
            command = 'sudo ip link add {} type veth peer name {}'.format(qrouter_br_veth,
                                                                          qrouter_ns_router_veth)
            self.run_command(command)

            command = 'sudo ip link set dev {} up'.format(qrouter_br_veth)
            self.run_command(command)

            # move the peer into the qrouter namespace and bring it up
            command = 'sudo ip link set {} netns {}'.format(qrouter_ns_router_veth, ns_qouter)
            self.run_command(command)

            command = 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter, qrouter_ns_router_veth)
            self.run_command(command)

            # NOTE(review): the address configured here is link['nat'] — confirm
            command = 'sudo ip netns exec {} ip address add {} dev {}'.format(ns_qouter,
                                                                              link['nat'],
                                                                              qrouter_ns_router_veth)
            self.run_command(command)

            # attach the bridge side veth to the user bridge
            command = 'sudo brctl addif {} {}'.format(link['iface'], qrouter_br_veth)
            self.run_command(command)

            # masquerade the tenant net towards the user bridge
            command = 'sudo ip netns exec {} iptables -t nat -A POSTROUTING -o {} -s {} -d {} -j MASQUERADE' \
                .format(ns_qouter, qrouter_ns_router_veth, link['nat'], cidr)
            self.run_command(command)
            return True
        else:
            # BUGFIX: report the bridge name the user supplied, not the internal veth name
            self.logger.error('create_qrouter_br_connection, Bridge {} given by user not exist'.format(
                link['iface']))
            return False
    except RunCommandException as e:
        self.logger.error("Error creating qrouter, {}".format(str(e)))
        return False
def create_link_bridge_to_ovs(self, vlan, link):
    """Connect a linux provider bridge to the tenant net through a veth pair.

    :param vlan: segmentation id
    :param link: linux bridge name
    :return: True if success
    """
    if self.test:
        return True
    try:
        tap_side = '{}-vethBO'.format(str(vlan))
        ovs_side = '{}-vethOB'.format(str(vlan))

        # is a bridge or a interface
        command = 'sudo brctl show | grep {}'.format(link)
        content = self.run_command(command, ignore_exit_status=True)
        if content:
            command = 'sudo ip link add {} type veth peer name {}'.format(tap_side, ovs_side)
            self.run_command(command)

            command = 'sudo ip link set dev {} up'.format(tap_side)
            self.run_command(command)

            command = 'sudo ip link set dev {} up'.format(ovs_side)
            self.run_command(command)

            # ovs end becomes a tagged port of br-int
            command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(ovs_side, str(vlan))
            self.run_command(command)

            # bridge end joins the user bridge
            command = 'sudo brctl addif ' + link + ' {}'.format(tap_side)
            self.run_command(command)
            return True
        else:
            self.logger.error('Link is not present, please check {}'.format(link))
            return False
    except RunCommandException as e:
        self.logger.error("create_link_bridge_to_ovs, Error creating link to ovs, {}".format(str(e)))
        return False
def create_ovs_vxlan_tunnel(self, vxlan_interface, remote_ip):
    """Create a vxlan tunnel port on br-int towards another compute; STP path cost is set per port.

    :param vxlan_interface: vlxan inteface name.
    :param remote_ip: tunnel endpoint remote compute ip.
    :return: True if success
    """
    if self.test or not self.connectivity:
        return True
    if remote_ip == 'localhost':
        # NOTE(review): assumes a self.localhost flag set at init — confirm
        if self.localhost:
            return True  # TODO: Cannot create a vxlan between localhost and localhost
        remote_ip = self.local_ip
    try:
        command = 'sudo ovs-vsctl add-port br-int {} -- set Interface {} type=vxlan options:remote_ip={} ' \
                  '-- set Port {} other_config:stp-path-cost=10'.format(vxlan_interface,
                                                                        vxlan_interface,
                                                                        remote_ip,
                                                                        vxlan_interface)
        self.run_command(command)
        return True
    except RunCommandException as e:
        self.logger.error("create_ovs_vxlan_tunnel, error creating vxlan tunnel, {}".format(str(e)))
        return False
def delete_ovs_vxlan_tunnel(self, vxlan_interface):
    """Remove a vxlan tunnel port from the OVS integration bridge.

    :param vxlan_interface: vlxan name to be delete it.
    :return: True if success.
    """
    if self.test or not self.connectivity:
        return True
    try:
        self.run_command('sudo ovs-vsctl del-port br-int {}'.format(vxlan_interface))
        return True
    except RunCommandException as e:
        self.logger.error("delete_ovs_vxlan_tunnel, error deleting vxlan tunenl, {}".format(str(e)))
        return False
def delete_ovs_bridge(self):
    """Remove the OVS integration bridge (br-int) from this compute.

    :return: True if success
    """
    if self.test or not self.connectivity:
        return True
    try:
        self.run_command('sudo ovs-vsctl del-br br-int')
        return True
    except RunCommandException as e:
        self.logger.error("delete_ovs_bridge ssh Exception: {}".format(str(e)))
        return False
def get_file_info(self, path):
    """Return the 'ls -l' fields of a file on the host, or None if it does not exist.

    :param path: file path on the host
    :return: list of fields (permission, links, owner, group, size, date, file) or None
    """
    command = 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
    try:
        content = self.run_command(command)
        # BUGFIX: split on any whitespace run instead of a single space; if ls
        # pads a column with more than one space, split(" ") yields empty
        # fields and shifts the size/date indexes ([4]/[5]) used by callers.
        # For the normal single-space output the indexes are unchanged.
        return content.split()  # (permission, 1, owner, group, size, date, file)
    except RunCommandException:
        return None  # file does not exist
def qemu_get_info(self, path):
    """Run 'qemu-img info' on a file and return the parsed output as a dict.

    :param path: image path on the host
    :return: dict with the qemu-img fields (e.g. 'file format', 'backing file')
    :raises RunCommandException: when the output cannot be parsed as yaml
    """
    command = 'qemu-img info ' + path
    content = self.run_command(command)
    try:
        # safe_load is sufficient for qemu-img output and avoids the
        # arbitrary-object construction of yaml.load without a Loader
        return yaml.safe_load(content)
    except yaml.YAMLError as exc:
        text = ""
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            text = " at position: (%s:%s)" % (mark.line + 1, mark.column + 1)
        self.logger.error("get_qemu_info yaml format Exception " + text)
        raise RunCommandException("Error getting qemu_info yaml format" + text)
def qemu_change_backing(self, inc_file, new_backing_file):
    """Rebase (unsafe mode, -u) an incremental qcow2 image onto a new backing file.

    :param inc_file: incremental qcow2 image path
    :param new_backing_file: new backing image path
    :return: 0 on success, -1 on error
    """
    command = 'qemu-img rebase -u -b {} {}'.format(new_backing_file, inc_file)
    try:
        self.run_command(command)
        return 0
    except RunCommandException as e:
        self.logger.error("qemu_change_backing error: " + str(e))
        return -1
def qemu_create_empty_disk(self, dev):
    """Create an empty qcow2 disk from a device description.

    :param dev: dict with at least 'source file' (target path) and 'image_size' (GB)
    :return: 0 on success, -1 on error
    """
    # BUGFIX: the original condition chained with 'and' over a falsy-dev test,
    # so it could never fire for a non-empty dict, and it checked keys
    # ('source', 'file format') that the code below does not read. Validate the
    # keys actually used, failing if any is missing.
    if not dev or 'source file' not in dev or 'image_size' not in dev:
        self.logger.error("qemu_create_empty_disk error: missing image parameter")
        return -1

    empty_disk_path = dev['source file']
    try:
        command = 'qemu-img create -f qcow2 {} {}G'.format(empty_disk_path, dev['image_size'])
        self.run_command(command)
        return 0
    except RunCommandException as e:
        self.logger.error("qemu_create_empty_disk error: " + str(e))
        return -1
def get_notused_filename(self, proposed_name, suffix=''):
    '''Look for a non existing file_name in the host
    proposed_name: proposed file name, includes path
    suffix: suffix to be added to the name, before the extention
    '''
    extension = proposed_name.rfind(".")
    slash = proposed_name.rfind("/")
    if extension < 0 or extension < slash:  # no extension
        extension = len(proposed_name)
    target_name = proposed_name[:extension] + suffix + proposed_name[extension:]
    info = self.get_file_info(target_name)
    if info is None:
        return target_name

    # append "-<index>" until an unused name is found
    index = 0
    while info is not None:
        target_name = proposed_name[:extension] + suffix + "-" + str(index) + proposed_name[extension:]
        index += 1
        info = self.get_file_info(target_name)
    return target_name
def get_notused_path(self, proposed_path, suffix=''):
    '''Look for a non existing path at database for images
    proposed_path: proposed file name, includes path
    suffix: suffix to be added to the name, before the extention
    '''
    extension = proposed_path.rfind(".")
    if extension < 0:
        extension = len(proposed_path)
    target_path = proposed_path[:extension] + suffix + proposed_path[extension:]
    # append "-<index>" until the path is not registered in the images table
    index = 0
    while True:
        r, _ = self.db.get_table(FROM="images", WHERE={"path": target_path})
        if r <= 0:
            return target_path
        target_path = proposed_path[:extension] + suffix + "-" + str(index) + proposed_path[extension:]
        index += 1
def delete_file(self, file_name):
    """Remove a file on the host; no error is raised if it does not exist (rm -f)."""
    self.run_command('rm -f ' + file_name)
def copy_file(self, source, destination, perserve_time=True):
    """Copy an image file on the host; http(s) sources are fetched with wget.

    :param source: local path or http url
    :param destination: target path on the host
    :param perserve_time: keep source timestamps when copying local files
    """
    if source[0:4] == "http":
        # wget errors are captured in a .result file and dumped to stderr
        command = "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
            dst=destination, src=source, dst_result=destination + ".result")
    else:
        command = 'cp --no-preserve=mode'
        if perserve_time:
            command += ' --preserve=timestamps'
        command += " '{}' '{}'".format(source, destination)
    self.run_command(command)
def copy_remote_file(self, remote_file, use_incremental):
    ''' Copy a file from the repository to local folder and recursively
        copy the backing files in case the remote file is incremental
        Read and/or modified self.localinfo['files'] that contain the
        unmodified copies of images in the local path
        params:
            remote_file: path of remote file
            use_incremental: None (leave the decision to this function), True, False
        return:
            local_file: name of local file
            qemu_info: dict with quemu information of local file
            use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
    '''
    use_incremental_out = use_incremental
    new_backing_file = None
    local_file = None
    file_from_local = True

    #in case incremental use is not decided, take the decision depending on the image
    #avoid the use of incremental if this image is already incremental
    if remote_file[0:4] == "http":
        file_from_local = False
    if file_from_local:
        qemu_remote_info = self.qemu_get_info(remote_file)
    if use_incremental_out == None:
        use_incremental_out = not (file_from_local and 'backing file' in qemu_remote_info)
    #copy recursivelly the backing files
    if file_from_local and 'backing file' in qemu_remote_info:
        new_backing_file, _, _ = self.copy_remote_file(qemu_remote_info['backing file'], True)

    #check if remote file is present locally
    if use_incremental_out and remote_file in self.localinfo['files']:
        local_file = self.localinfo['files'][remote_file]
        local_file_info = self.get_file_info(local_file)
        if file_from_local:
            remote_file_info = self.get_file_info(remote_file)
        if local_file_info == None:
            local_file = None
        elif file_from_local and (local_file_info[4] != remote_file_info[4] or
                                  local_file_info[5] != remote_file_info[5]):
            #local copy of file not valid because date or size are different.
            #TODO DELETE local file if this file is not used by any active virtual machine
            self.delete_file(local_file)
            del self.localinfo['files'][remote_file]
            local_file = None
        else:  #check that the local file has the same backing file, or there are not backing at all
            qemu_info = self.qemu_get_info(local_file)
            if new_backing_file != qemu_info.get('backing file'):
                local_file = None

    if local_file == None:  #copy the file
        img_name = remote_file.split('/')[-1]
        img_local = self.image_path + '/' + img_name
        local_file = self.get_notused_filename(img_local)
        self.copy_file(remote_file, local_file, use_incremental_out)

        if use_incremental_out:
            self.localinfo['files'][remote_file] = local_file
        if new_backing_file:
            self.qemu_change_backing(local_file, new_backing_file)
        qemu_info = self.qemu_get_info(local_file)

    return local_file, qemu_info, use_incremental_out
1740 def launch_server(self
, conn
, server
, rebuild
=False, domain
=None):
1742 time
.sleep(random
.randint(20,150)) #sleep random timeto be make it a bit more real
1745 server_id
= server
['uuid']
1746 paused
= server
.get('paused','no')
1748 if domain
!=None and rebuild
==False:
1750 #self.server_status[server_id] = 'ACTIVE'
1753 self
.db_lock
.acquire()
1754 result
, server_data
= self
.db
.get_instance(server_id
)
1755 self
.db_lock
.release()
1757 self
.logger
.error("launch_server ERROR getting server from DB %d %s", result
, server_data
)
1758 return result
, server_data
1760 #0: get image metadata
1761 server_metadata
= server
.get('metadata', {})
1762 use_incremental
= None
1764 if "use_incremental" in server_metadata
:
1765 use_incremental
= False if server_metadata
["use_incremental"] == "no" else True
1767 server_host_files
= self
.localinfo
['server_files'].get( server
['uuid'], {})
1769 #delete previous incremental files
1770 for file_
in server_host_files
.values():
1771 self
.delete_file(file_
['source file'] )
1772 server_host_files
={}
1774 #1: obtain aditional devices (disks)
1775 #Put as first device the main disk
1776 devices
= [ {"type":"disk", "image_id":server
['image_id'], "vpci":server_metadata
.get('vpci', None) } ]
1777 if 'extended' in server_data
and server_data
['extended']!=None and "devices" in server_data
['extended']:
1778 devices
+= server_data
['extended']['devices']
1781 image_id
= dev
.get('image_id')
1784 uuid_empty
= str(uuid
.uuid4())
1785 empty_path
= self
.empty_image_path
+ uuid_empty
+ '.qcow2' # local path for empty disk
1787 dev
['source file'] = empty_path
1788 dev
['file format'] = 'qcow2'
1789 self
.qemu_create_empty_disk(dev
)
1790 server_host_files
[uuid_empty
] = {'source file': empty_path
,
1791 'file format': dev
['file format']}
1795 self
.db_lock
.acquire()
1796 result
, content
= self
.db
.get_table(FROM
='images', SELECT
=('path', 'metadata'),
1797 WHERE
={'uuid': image_id
})
1798 self
.db_lock
.release()
1800 error_text
= "ERROR", result
, content
, "when getting image", dev
['image_id']
1801 self
.logger
.error("launch_server " + error_text
)
1802 return -1, error_text
1803 if content
[0]['metadata'] is not None:
1804 dev
['metadata'] = json
.loads(content
[0]['metadata'])
1806 dev
['metadata'] = {}
1808 if image_id
in server_host_files
:
1809 dev
['source file'] = server_host_files
[image_id
]['source file'] #local path
1810 dev
['file format'] = server_host_files
[image_id
]['file format'] # raw or qcow2
1813 #2: copy image to host
1815 remote_file
= content
[0]['path']
1817 remote_file
= empty_path
1818 use_incremental_image
= use_incremental
1819 if dev
['metadata'].get("use_incremental") == "no":
1820 use_incremental_image
= False
1821 local_file
, qemu_info
, use_incremental_image
= self
.copy_remote_file(remote_file
, use_incremental_image
)
1823 #create incremental image
1824 if use_incremental_image
:
1825 local_file_inc
= self
.get_notused_filename(local_file
, '.inc')
1826 command
= 'qemu-img create -f qcow2 {} -o backing_file={}'.format(local_file_inc
, local_file
)
1827 self
.run_command(command
)
1828 local_file
= local_file_inc
1829 qemu_info
= {'file format': 'qcow2'}
1831 server_host_files
[ dev
['image_id'] ] = {'source file': local_file
, 'file format': qemu_info
['file format']}
1833 dev
['source file'] = local_file
1834 dev
['file format'] = qemu_info
['file format']
1836 self
.localinfo
['server_files'][ server
['uuid'] ] = server_host_files
1837 self
.localinfo_dirty
= True
1840 result
, xml
= self
.create_xml_server(server_data
, devices
, server_metadata
) #local_file
1842 self
.logger
.error("create xml server error: " + xml
)
1844 self
.logger
.debug("create xml: " + xml
)
1845 atribute
= host_thread
.lvirt_module
.VIR_DOMAIN_START_PAUSED
if paused
== "yes" else 0
1847 if not rebuild
: #ensures that any pending destroying server is done
1848 self
.server_forceoff(True)
1849 #self.logger.debug("launching instance " + xml)
1850 conn
.createXML(xml
, atribute
)
1851 #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
1855 except paramiko
.ssh_exception
.SSHException
as e
:
1857 self
.logger
.error("launch_server id='%s' ssh Exception: %s", server_id
, text
)
1858 if "SSH session not active" in text
:
1860 except host_thread
.lvirt_module
.libvirtError
as e
:
1861 text
= e
.get_error_message()
1862 self
.logger
.error("launch_server id='%s' libvirt Exception: %s", server_id
, text
)
1863 except Exception as e
:
1865 self
.logger
.error("launch_server id='%s' Exception: %s", server_id
, text
)
def update_servers_status(self):
    """Poll libvirt for the state of every domain and sync self.server_status plus the DB.

    libvirt virDomainState values for reference:
    # VIR_DOMAIN_NOSTATE = 0
    # VIR_DOMAIN_RUNNING = 1
    # VIR_DOMAIN_BLOCKED = 2
    # VIR_DOMAIN_PAUSED = 3
    # VIR_DOMAIN_SHUTDOWN = 4
    # VIR_DOMAIN_SHUTOFF = 5
    # VIR_DOMAIN_CRASHED = 6
    # VIR_DOMAIN_PMSUSPENDED = 7  # TODO suspended
    """
    if self.test or len(self.server_status) == 0:
        return

    try:
        # build a uuid -> openvim status map from the libvirt domain list
        conn = host_thread.lvirt_module.open(self.lvirt_conn_uri)
        domains = conn.listAllDomains()
        domain_dict = {}
        for domain in domains:
            uuid = domain.UUIDString()
            libvirt_status = domain.state()
            if libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_RUNNING or \
                    libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTDOWN:
                new_status = "ACTIVE"
            elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_PAUSED:
                new_status = "PAUSED"
            elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_SHUTOFF:
                new_status = "INACTIVE"
            elif libvirt_status[0] == host_thread.lvirt_module.VIR_DOMAIN_CRASHED:
                new_status = "ERROR"
            else:
                new_status = None
            domain_dict[uuid] = new_status
        conn.close()
    except host_thread.lvirt_module.libvirtError as e:
        self.logger.error("get_state() Exception " + e.get_error_message())
        return

    for server_id, current_status in self.server_status.iteritems():
        new_status = None
        if server_id in domain_dict:
            new_status = domain_dict[server_id]
        else:
            # a tracked server with no libvirt domain is considered stopped
            new_status = "INACTIVE"

        if new_status == None or new_status == current_status:
            continue
        if new_status == 'INACTIVE' and current_status == 'ERROR':
            continue  # keep ERROR status, because obviously this machine is not running

        # persist the state change and update the local cache
        self.logger.debug("server id='%s' status change from '%s' to '%s'",
                          server_id, current_status, new_status)
        STATUS = {'progress': 100, 'status': new_status}
        if new_status == 'ERROR':
            STATUS['last_error'] = 'machine has crashed'
        self.db_lock.acquire()
        r, _ = self.db.update_rows('instances', STATUS, {'uuid': server_id}, log=False)
        self.db_lock.release()
        if r >= 0:
            self.server_status[server_id] = new_status
# NOTE(review): this block was mangled by extraction -- each logical statement is
# split across physical lines and the original file's line numbers (1928, 1943, ...)
# are fused into the text. Jumps in that numbering show several original lines
# (try:/else:/return statements) are MISSING here, so the code is preserved
# byte-for-byte and only comments are added. Restore from upstream before editing.
#
# Purpose (from visible docstring + code): perform the action in req['action']
# ('terminate', 'shutoff'/'shutdown'/'forceOff', 'start', 'resume', 'pause',
# 'reboot', 'rebuild', 'createImage') on the VM req['uuid'] via libvirt, then
# persist the resulting status in the 'instances' DB table.
1928 def action_on_server(self
, req
, last_retry
=True):
1929 '''Perform an action on a req
1931 req: dictionary that contain:
1932 server properties: 'uuid','name','tenant_id','status'
1934 host properties: 'user', 'ip_name'
1935 return (error, text)
1936 0: No error. VM is updated to new state,
1937 -1: Invalid action, as trying to pause a PAUSED VM
1938 -2: Error accessing host
1940 -4: Error at DB access
1941 -5: Error while trying to perform action. VM is updated to ERROR
# Identity of the VM this request refers to, and its status before the action.
1943 server_id
= req
['uuid']
1946 old_status
= req
['status']
# Map the requested action onto the target status
# ('deleted' / 'INACTIVE' / 'ACTIVE' / 'PAUSED'); presumably this whole chain is
# the test-mode simulation branch (comment at 2117 says "end of if self.test")
# -- the enclosing "if self.test:" line is not visible here, confirm upstream.
1950 if 'terminate' in req
['action']:
1951 new_status
= 'deleted'
1952 elif 'shutoff' in req
['action'] or 'shutdown' in req
['action'] or 'forceOff' in req
['action']:
1953 if req
['status']!='ERROR':
1955 new_status
= 'INACTIVE'
1956 elif 'start' in req
['action'] and req
['status']!='ERROR':
1957 new_status
= 'ACTIVE'
1958 elif 'resume' in req
['action'] and req
['status']!='ERROR' and req
['status']!='INACTIVE':
1959 new_status
= 'ACTIVE'
1960 elif 'pause' in req
['action'] and req
['status']!='ERROR':
1961 new_status
= 'PAUSED'
1962 elif 'reboot' in req
['action'] and req
['status']!='ERROR':
1963 new_status
= 'ACTIVE'
1964 elif 'rebuild' in req
['action']:
# Simulated rebuild delay (20-150 s) before reporting ACTIVE.
1965 time
.sleep(random
.randint(20,150))
1966 new_status
= 'ACTIVE'
1967 elif 'createImage' in req
['action']:
# No libvirt domain in this branch: create_image is called with dom=None.
1969 self
.create_image(None, req
)
# Real (non-test) path: open a libvirt connection and look up the domain by UUID.
# The surrounding try:/else: lines are among the missing originals.
1972 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
1974 dom
= conn
.lookupByUUIDString(server_id
)
1975 except host_thread
.lvirt_module
.libvirtError
as e
:
1976 text
= e
.get_error_message()
# "Not found"-style lookup errors (including the Spanish libvirt message) are
# presumably tolerated as "domain already gone"; the branch bodies are not
# visible here -- confirm against upstream.
1977 if 'LookupByUUIDString' in text
or 'Domain not found' in text
or 'No existe un dominio coincidente' in text
:
1980 self
.logger
.error("action_on_server id='%s' libvirt exception: %s", server_id
, text
)
# forceOff: destroy the domain outright (the dom.destroy() call itself is one
# of the missing lines between 1988 and 1990).
1983 if 'forceOff' in req
['action']:
1985 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
1988 self
.logger
.debug("sending DESTROY to server id='%s'", server_id
)
1990 except Exception as e
:
# NOTE(review): get_error_message() only exists on libvirtError; a plain
# Exception caught here would raise AttributeError on this call.
1991 if "domain is not running" not in e
.get_error_message():
1992 self
.logger
.error("action_on_server id='%s' Exception while sending force off: %s",
1993 server_id
, e
.get_error_message())
1994 last_error
= 'action_on_server Exception while destroy: ' + e
.get_error_message()
1995 new_status
= 'ERROR'
# terminate: force-destroy, or graceful SHUTDOWN with a delayed forced-off.
1997 elif 'terminate' in req
['action']:
1999 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
2000 new_status
= 'deleted'
2003 if req
['action']['terminate'] == 'force':
2004 self
.logger
.debug("sending DESTROY to server id='%s'", server_id
)
2006 new_status
= 'deleted'
2008 self
.logger
.debug("sending SHUTDOWN to server id='%s'", server_id
)
# Schedule a forced destroy 10 seconds later in case the guest ignores SHUTDOWN.
2010 self
.pending_terminate_server
.append( (time
.time()+10,server_id
) )
2011 except Exception as e
:
2012 self
.logger
.error("action_on_server id='%s' Exception while destroy: %s",
2013 server_id
, e
.get_error_message())
2014 last_error
= 'action_on_server Exception while destroy: ' + e
.get_error_message()
2015 new_status
= 'ERROR'
# A "domain is not running" failure on terminate still counts as deleted.
2016 if "domain is not running" in e
.get_error_message():
2019 new_status
= 'deleted'
2021 self
.logger
.error("action_on_server id='%s' Exception while undefine: %s",
2022 server_id
, e
.get_error_message())
# NOTE(review): the comma below makes last_error a TUPLE, not a string
# (likely '+' was intended); the '+last_error' concatenation at 2133 would
# then raise TypeError.
2023 last_error
= 'action_on_server Exception2 while undefine:', e
.get_error_message()
2024 #Exception: 'virDomainDetachDevice() failed'
# Once deleted: drop the cached status and remove the server's local disk
# files recorded in self.localinfo['server_files'].
2025 if new_status
=='deleted':
2026 if server_id
in self
.server_status
:
2027 del self
.server_status
[server_id
]
2028 if req
['uuid'] in self
.localinfo
['server_files']:
2029 for file_
in self
.localinfo
['server_files'][ req
['uuid'] ].values():
2031 self
.delete_file(file_
['source file'])
2034 del self
.localinfo
['server_files'][ req
['uuid'] ]
# Mark localinfo for asynchronous persistence.
2035 self
.localinfo_dirty
= True
2037 elif 'shutoff' in req
['action'] or 'shutdown' in req
['action']:
2040 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
2043 # new_status = 'INACTIVE'
2044 #TODO: check status for changing at database
2045 except Exception as e
:
2046 new_status
= 'ERROR'
2047 self
.logger
.error("action_on_server id='%s' Exception while shutdown: %s",
2048 server_id
, e
.get_error_message())
2049 last_error
= 'action_on_server Exception while shutdown: ' + e
.get_error_message()
# rebuild: re-launch the server from scratch (rebuild=True, no existing dom).
2051 elif 'rebuild' in req
['action']:
2054 r
= self
.launch_server(conn
, req
, True, None)
2056 new_status
= 'ERROR'
2059 new_status
= 'ACTIVE'
2060 elif 'start' in req
['action']:
2061 # The instance is only create in DB but not yet at libvirt domain, needs to be create
2062 rebuild
= True if req
['action']['start'] == 'rebuild' else False
# r presumably carries (error_code, ...) from launch_server; the visible
# ERROR/ACTIVE assignments suggest a missing "if r<0 ... else" around them.
2063 r
= self
.launch_server(conn
, req
, rebuild
, dom
)
2065 new_status
= 'ERROR'
2068 new_status
= 'ACTIVE'
# resume/pause/reboot: the dom.resume()/suspend()/reboot() calls are among the
# missing original lines (numbering jumps 2070->2076, 2081->2087, 2092->2098).
2070 elif 'resume' in req
['action']:
2076 # new_status = 'ACTIVE'
2077 except Exception as e
:
2078 self
.logger
.error("action_on_server id='%s' Exception while resume: %s",
2079 server_id
, e
.get_error_message())
2081 elif 'pause' in req
['action']:
2087 # new_status = 'PAUSED'
2088 except Exception as e
:
2089 self
.logger
.error("action_on_server id='%s' Exception while pause: %s",
2090 server_id
, e
.get_error_message())
2092 elif 'reboot' in req
['action']:
2098 self
.logger
.debug("action_on_server id='%s' reboot:", server_id
)
2099 #new_status = 'ACTIVE'
2100 except Exception as e
:
2101 self
.logger
.error("action_on_server id='%s' Exception while reboot: %s",
2102 server_id
, e
.get_error_message())
2103 elif 'createImage' in req
['action']:
2104 self
.create_image(dom
, req
)
# Outer libvirt error handler for the whole real-path block: close the
# connection and flag the instance as ERROR.
2108 except host_thread
.lvirt_module
.libvirtError
as e
:
2109 if conn
is not None: conn
.close()
2110 text
= e
.get_error_message()
2111 new_status
= "ERROR"
2113 if 'LookupByUUIDString' in text
or 'Domain not found' in text
or 'No existe un dominio coincidente' in text
:
2114 self
.logger
.debug("action_on_server id='%s' Exception removed from host", server_id
)
2116 self
.logger
.error("action_on_server id='%s' Exception %s", server_id
, text
)
2117 #end of if self.test
# new_status None means nothing to record -- presumably an early return here;
# the return line (2119/2120) is not visible.
2118 if new_status
== None:
2121 self
.logger
.debug("action_on_server id='%s' new status=%s %s",server_id
, new_status
, last_error
)
# Row update for the 'instances' table: progress is forced to 100.
2122 UPDATE
= {'progress':100, 'status':new_status
}
2124 if new_status
=='ERROR':
2125 if not last_retry
: #if there will be another retry do not update database
# Failed terminate on the final retry: record a 'panic' log row and drop
# the server from the in-memory status cache anyway.
2127 elif 'terminate' in req
['action']:
2128 #PUT a log in the database
2129 self
.logger
.error("PANIC deleting server id='%s' %s", server_id
, last_error
)
2130 self
.db_lock
.acquire()
2131 self
.db
.new_row('logs',
2132 {'uuid':server_id
, 'tenant_id':req
['tenant_id'], 'related':'instances','level':'panic',
2133 'description':'PANIC deleting server from host '+self
.name
+': '+last_error
}
2135 self
.db_lock
.release()
2136 if server_id
in self
.server_status
:
2137 del self
.server_status
[server_id
]
2140 UPDATE
['last_error'] = last_error
# Only touch the DB when the status actually changed (or is ERROR);
# 'deleted' rows are handled elsewhere, not updated here.
2141 if new_status
!= 'deleted' and (new_status
!= old_status
or new_status
== 'ERROR') :
2142 self
.db_lock
.acquire()
2143 self
.db
.update_rows('instances', UPDATE
, {'uuid':server_id
}, log
=True)
2144 self
.server_status
[server_id
] = new_status
2145 self
.db_lock
.release()
2146 if new_status
== 'ERROR':
# NOTE(review): mangled extraction -- statements split across lines, original
# line numbers fused in, and several lines missing (initialization of ret and
# error_text, the try:, the ifdown/ifup calls between 2174 and 2177, and the
# lib_conn reuse / conn.close() around 2183). Preserved byte-for-byte;
# comments only. Restore from upstream before editing.
#
# Purpose (from docstring): bounce (ifdown/ifup) the host interface with the
# given MAC to restore its default parameters, via libvirt's interface API.
2151 def restore_iface(self
, name
, mac
, lib_conn
=None):
2152 ''' make an ifdown, ifup to restore default parameter of na interface
2154 mac: mac address of the interface
2155 lib_conn: connection to the libvirt, if None a new connection is created
2156 Return 0,None if ok, -1,text if fails
2162 self
.logger
.debug("restore_iface '%s' %s", name
, mac
)
# Open a private libvirt connection only when the caller did not pass one.
2166 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
2170 #wait to the pending VM deletion
2171 #TODO.Revise self.server_forceoff(True)
# Locate the host interface object by MAC; destroy/create (ifdown/ifup) calls
# presumably follow isActive() but are among the missing lines -- confirm.
2173 iface
= conn
.interfaceLookupByMACString(mac
)
2174 if iface
.isActive():
2177 self
.logger
.debug("restore_iface '%s' %s", name
, mac
)
2178 except host_thread
.lvirt_module
.libvirtError
as e
:
2179 error_text
= e
.get_error_message()
2180 self
.logger
.error("restore_iface '%s' '%s' libvirt exception: %s", name
, mac
, error_text
)
# Close the connection only if this method opened it (caller-owned lib_conn
# is left open); the conn.close() line itself is not visible here.
2183 if lib_conn
is None and conn
is not None:
2185 return ret
, error_text
# NOTE(review): mangled extraction -- statements split across lines, original
# line numbers fused in, missing lines include the test/real-mode split around
# 2197/2201, the retry loop whose counter 'retry' is tested at 2224, the try:,
# and the continue/return statements. Preserved byte-for-byte; comments only.
#
# Purpose: snapshot a server's disk into a new qcow2 image file (copying the
# source file and re-pointing its backing file), then record status/path in
# the 'images' DB table for req['new_image']['uuid'].
2188 def create_image(self
,dom
, req
):
# Destination path: explicit req['action']['createImage']['path'] if given,
# otherwise "<dir of source image>/<name>.qcow2" made unique.
2190 if 'path' in req
['action']['createImage']:
2191 file_dst
= req
['action']['createImage']['path']
2193 createImage
=req
['action']['createImage']
2194 img_name
= createImage
['source']['path']
2195 index
=img_name
.rfind('/')
# NOTE(review): this branch uses get_notused_path while the parallel branch
# at 2209 uses get_notused_filename -- presumably the test-mode vs real-mode
# variants; confirm both helpers exist upstream.
2196 file_dst
= self
.get_notused_path(img_name
[:index
+1] + createImage
['name'] + '.qcow2')
2197 image_status
='ACTIVE'
# Real path: resolve the server's on-host source file from localinfo.
2201 server_id
= req
['uuid']
2202 createImage
=req
['action']['createImage']
2203 file_orig
= self
.localinfo
['server_files'][server_id
] [ createImage
['source']['image_id'] ] ['source file']
2204 if 'path' in req
['action']['createImage']:
2205 file_dst
= req
['action']['createImage']['path']
2207 img_name
= createImage
['source']['path']
2208 index
=img_name
.rfind('/')
2209 file_dst
= self
.get_notused_filename(img_name
[:index
+1] + createImage
['name'] + '.qcow2')
# Copy the disk, then if it has a backing file, re-point the copy at the
# shared base image known to this host (localinfo['files'] maps local->base).
2211 self
.copy_file(file_orig
, file_dst
)
2212 qemu_info
= self
.qemu_get_info(file_orig
)
2213 if 'backing file' in qemu_info
:
2214 for k
,v
in self
.localinfo
['files'].items():
2215 if v
==qemu_info
['backing file']:
2216 self
.qemu_change_backing(file_dst
, k
)
2218 image_status
='ACTIVE'
# SSH failures: on "SSH session not active" and first attempt (retry==0),
# presumably reconnect and retry -- the loop and its body are not visible.
2220 except paramiko
.ssh_exception
.SSHException
as e
:
2221 image_status
='ERROR'
2222 error_text
= e
.args
[0]
2223 self
.logger
.error("create_image id='%s' ssh Exception: %s", server_id
, error_text
)
2224 if "SSH session not active" in error_text
and retry
==0:
2226 except Exception as e
:
2227 image_status
='ERROR'
# NOTE(review): error_text used at 2229 but its assignment (line 2228) is
# missing from this extraction.
2229 self
.logger
.error("create_image id='%s' Exception: %s", server_id
, error_text
)
# Record the outcome (ACTIVE or ERROR), final progress and path in the DB.
2231 #TODO insert a last_error at database
2232 self
.db_lock
.acquire()
2233 self
.db
.update_rows('images', {'status':image_status
, 'progress': 100, 'path':file_dst
},
2234 {'uuid':req
['new_image']['uuid']}, log
=True)
2235 self
.db_lock
.release()
# NOTE(review): mangled extraction -- statements split across lines, original
# line numbers fused in, missing lines include the return statements after the
# error logs, the xml list initialization (2259), the '\n'.join that builds
# 'text' before 2272/2280, and the try:. Preserved byte-for-byte; comments only.
#
# Purpose: re-apply network parameters (vlan, vpci) to an SR-IOV VF interface
# of a running VM by live-detaching and re-attaching its hostdev device.
2237 def edit_iface(self
, port_id
, old_net
, new_net
):
2238 #This action imply remove and insert interface to put proper parameters
# Fetch the port row joined with its host resource (pci, mac, instance_id).
2243 self
.db_lock
.acquire()
2244 r
,c
= self
.db
.get_table(FROM
='ports as p join resources_port as rp on p.uuid=rp.port_id',
2245 WHERE
={'port_id': port_id
})
2246 self
.db_lock
.release()
# Error guards (DB failure / missing port / wrong model); the early returns
# after each log line are not visible in this extraction.
2248 self
.logger
.error("edit_iface %s DDBB error: %s", port_id
, c
)
2251 self
.logger
.error("edit_iface %s port not found", port_id
)
2254 if port
["model"]!="VF":
2255 self
.logger
.error("edit_iface %s ERROR model must be VF", port_id
)
2257 #create xml detach file
# Build the <interface type='hostdev'> XML identifying the VF by mac + pci.
2260 xml
.append("<interface type='hostdev' managed='yes'>")
2261 xml
.append(" <mac address='" +port
['mac']+ "'/>")
2262 xml
.append(" <source>"+ self
.pci2xml(port
['pci'])+"\n </source>")
2263 xml
.append('</interface>')
# Look up the owning domain and live-detach the device ('text' is presumably
# the joined xml list; the join line is missing here).
2268 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
2269 dom
= conn
.lookupByUUIDString(port
["instance_id"])
2272 self
.logger
.debug("edit_iface detaching SRIOV interface " + text
)
2273 dom
.detachDeviceFlags(text
, flags
=host_thread
.lvirt_module
.VIR_DOMAIN_AFFECT_LIVE
)
# Rewrite the tail of the xml with the new vlan tag and optional vpci, then
# re-attach live.
2275 xml
[-1] =" <vlan> <tag id='" + str(port
['vlan']) + "'/> </vlan>"
2277 xml
.append(self
.pci2xml(port
.get('vpci',None)) )
2278 xml
.append('</interface>')
2280 self
.logger
.debug("edit_iface attaching SRIOV interface " + text
)
2281 dom
.attachDeviceFlags(text
, flags
=host_thread
.lvirt_module
.VIR_DOMAIN_AFFECT_LIVE
)
2283 except host_thread
.lvirt_module
.libvirtError
as e
:
2284 text
= e
.get_error_message()
2285 self
.logger
.error("edit_iface %s libvirt exception: %s", port
["instance_id"], text
)
# Always release the libvirt connection.
2288 if conn
is not None: conn
.close()
2291 def create_server(server
, db
, db_lock
, only_of_ports
):
2292 extended
= server
.get('extended', None)
2294 requirements
['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
2295 requirements
['ram'] = server
['flavor'].get('ram', 0)
2296 if requirements
['ram']== None:
2297 requirements
['ram'] = 0
2298 requirements
['vcpus'] = server
['flavor'].get('vcpus', 0)
2299 if requirements
['vcpus']== None:
2300 requirements
['vcpus'] = 0
2301 #If extended is not defined get requirements from flavor
2302 if extended
is None:
2303 #If extended is defined in flavor convert to dictionary and use it
2304 if 'extended' in server
['flavor'] and server
['flavor']['extended'] != None:
2305 json_acceptable_string
= server
['flavor']['extended'].replace("'", "\"")
2306 extended
= json
.loads(json_acceptable_string
)
2309 #print json.dumps(extended, indent=4)
2311 #For simplicity only one numa VM are supported in the initial implementation
2312 if extended
!= None:
2313 numas
= extended
.get('numas', [])
2315 return (-2, "Multi-NUMA VMs are not supported yet")
2317 # return (-1, "At least one numa must be specified")
2319 #a for loop is used in order to be ready to multi-NUMA VMs
2323 numa_req
['memory'] = numa
.get('memory', 0)
2325 numa_req
['proc_req_nb'] = numa
['cores'] #number of cores or threads to be reserved
2326 numa_req
['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
2327 numa_req
['proc_req_list'] = numa
.get('cores-id', None) #list of ids to be assigned to the cores or threads
2328 elif 'paired-threads' in numa
:
2329 numa_req
['proc_req_nb'] = numa
['paired-threads']
2330 numa_req
['proc_req_type'] = 'paired-threads'
2331 numa_req
['proc_req_list'] = numa
.get('paired-threads-id', None)
2332 elif 'threads' in numa
:
2333 numa_req
['proc_req_nb'] = numa
['threads']
2334 numa_req
['proc_req_type'] = 'threads'
2335 numa_req
['proc_req_list'] = numa
.get('threads-id', None)
2337 numa_req
['proc_req_nb'] = 0 # by default
2338 numa_req
['proc_req_type'] = 'threads'
2342 #Generate a list of sriov and another for physical interfaces
2343 interfaces
= numa
.get('interfaces', [])
2346 for iface
in interfaces
:
2347 iface
['bandwidth'] = int(iface
['bandwidth'])
2348 if iface
['dedicated'][:3]=='yes':
2349 port_list
.append(iface
)
2351 sriov_list
.append(iface
)
2353 #Save lists ordered from more restrictive to less bw requirements
2354 numa_req
['sriov_list'] = sorted(sriov_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2355 numa_req
['port_list'] = sorted(port_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2358 request
.append(numa_req
)
2360 # print "----------\n"+json.dumps(request[0], indent=4)
2361 # print '----------\n\n'
2363 #Search in db for an appropriate numa for each requested numa
2364 #at the moment multi-NUMA VMs are not supported
2366 requirements
['numa'].update(request
[0])
2367 if requirements
['numa']['memory']>0:
2368 requirements
['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2369 elif requirements
['ram']==0:
2370 return (-1, "Memory information not set neither at extended field not at ram")
2371 if requirements
['numa']['proc_req_nb']>0:
2372 requirements
['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2373 elif requirements
['vcpus']==0:
2374 return (-1, "Processor information not set neither at extended field not at vcpus")
2378 result
, content
= db
.get_numas(requirements
, server
.get('host_id', None), only_of_ports
)
2382 return (-1, content
)
2384 numa_id
= content
['numa_id']
2385 host_id
= content
['host_id']
2387 #obtain threads_id and calculate pinning
2390 if requirements
['numa']['proc_req_nb']>0:
2392 result
, content
= db
.get_table(FROM
='resources_core',
2393 SELECT
=('id','core_id','thread_id'),
2394 WHERE
={'numa_id':numa_id
,'instance_id': None, 'status':'ok'} )
2400 #convert rows to a dictionary indexed by core_id
2403 if not row
['core_id'] in cores_dict
:
2404 cores_dict
[row
['core_id']] = []
2405 cores_dict
[row
['core_id']].append([row
['thread_id'],row
['id']])
2407 #In case full cores are requested
2409 if requirements
['numa']['proc_req_type'] == 'cores':
2410 #Get/create the list of the vcpu_ids
2411 vcpu_id_list
= requirements
['numa']['proc_req_list']
2412 if vcpu_id_list
== None:
2413 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2415 for threads
in cores_dict
.itervalues():
2417 if len(threads
) != 2:
2420 #set pinning for the first thread
2421 cpu_pinning
.append( [ vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1] ] )
2423 #reserve so it is not used the second thread
2424 reserved_threads
.append(threads
[1][1])
2426 if len(vcpu_id_list
) == 0:
2429 #In case paired threads are requested
2430 elif requirements
['numa']['proc_req_type'] == 'paired-threads':
2432 #Get/create the list of the vcpu_ids
2433 if requirements
['numa']['proc_req_list'] != None:
2435 for pair
in requirements
['numa']['proc_req_list']:
2437 return -1, "Field paired-threads-id not properly specified"
2439 vcpu_id_list
.append(pair
[0])
2440 vcpu_id_list
.append(pair
[1])
2442 vcpu_id_list
= range(0,2*int(requirements
['numa']['proc_req_nb']))
2444 for threads
in cores_dict
.itervalues():
2446 if len(threads
) != 2:
2448 #set pinning for the first thread
2449 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2451 #set pinning for the second thread
2452 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2454 if len(vcpu_id_list
) == 0:
2457 #In case normal threads are requested
2458 elif requirements
['numa']['proc_req_type'] == 'threads':
2459 #Get/create the list of the vcpu_ids
2460 vcpu_id_list
= requirements
['numa']['proc_req_list']
2461 if vcpu_id_list
== None:
2462 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2464 for threads_index
in sorted(cores_dict
, key
=lambda k
: len(cores_dict
[k
])):
2465 threads
= cores_dict
[threads_index
]
2466 #set pinning for the first thread
2467 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2469 #if exists, set pinning for the second thread
2470 if len(threads
) == 2 and len(vcpu_id_list
) != 0:
2471 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2473 if len(vcpu_id_list
) == 0:
2476 #Get the source pci addresses for the selected numa
2477 used_sriov_ports
= []
2478 for port
in requirements
['numa']['sriov_list']:
2480 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2486 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2488 port
['pci'] = row
['pci']
2489 if 'mac_address' not in port
:
2490 port
['mac_address'] = row
['mac']
2492 port
['port_id']=row
['id']
2493 port
['Mbps_used'] = port
['bandwidth']
2494 used_sriov_ports
.append(row
['id'])
2497 for port
in requirements
['numa']['port_list']:
2498 port
['Mbps_used'] = None
2499 if port
['dedicated'] != "yes:sriov":
2500 port
['mac_address'] = port
['mac']
2504 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac', 'Mbps'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2509 port
['Mbps_used'] = content
[0]['Mbps']
2511 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2513 port
['pci'] = row
['pci']
2514 if 'mac_address' not in port
:
2515 port
['mac_address'] = row
['mac'] # mac cannot be set to passthrough ports
2517 port
['port_id']=row
['id']
2518 used_sriov_ports
.append(row
['id'])
2521 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2522 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2524 server
['host_id'] = host_id
2526 #Generate dictionary for saving in db the instance resources
2528 resources
['bridged-ifaces'] = []
2531 numa_dict
['interfaces'] = []
2533 numa_dict
['interfaces'] += requirements
['numa']['port_list']
2534 numa_dict
['interfaces'] += requirements
['numa']['sriov_list']
2536 #Check bridge information
2537 unified_dataplane_iface
=[]
2538 unified_dataplane_iface
+= requirements
['numa']['port_list']
2539 unified_dataplane_iface
+= requirements
['numa']['sriov_list']
2541 for control_iface
in server
.get('networks', []):
2542 control_iface
['net_id']=control_iface
.pop('uuid')
2543 #Get the brifge name
2545 result
, content
= db
.get_table(FROM
='nets',
2546 SELECT
=('name', 'type', 'vlan', 'provider', 'enable_dhcp','dhcp_first_ip',
2547 'dhcp_last_ip', 'cidr', 'gateway_ip', 'dns', 'links', 'routes'),
2548 WHERE
={'uuid': control_iface
['net_id']})
2553 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface
['net_id']
2556 if control_iface
.get("type", 'virtual') == 'virtual':
2557 if network
['type']!='bridge_data' and network
['type']!='bridge_man':
2558 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface
['net_id']
2559 resources
['bridged-ifaces'].append(control_iface
)
2560 if network
.get("provider") and network
["provider"][0:3] == "OVS":
2561 control_iface
["type"] = "instance:ovs"
2563 control_iface
["type"] = "instance:bridge"
2564 if network
.get("vlan"):
2565 control_iface
["vlan"] = network
["vlan"]
2567 if network
.get("enable_dhcp") == 'true':
2568 control_iface
["enable_dhcp"] = network
.get("enable_dhcp")
2569 control_iface
["dhcp_first_ip"] = network
["dhcp_first_ip"]
2570 control_iface
["dhcp_last_ip"] = network
["dhcp_last_ip"]
2571 control_iface
["cidr"] = network
["cidr"]
2573 if network
.get("dns"):
2574 control_iface
["dns"] = yaml
.safe_load(network
.get("dns"))
2575 if network
.get("links"):
2576 control_iface
["links"] = yaml
.safe_load(network
.get("links"))
2577 if network
.get("routes"):
2578 control_iface
["routes"] = yaml
.safe_load(network
.get("routes"))
2580 if network
['type']!='data' and network
['type']!='ptp':
2581 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface
['net_id']
2582 #dataplane interface, look for it in the numa tree and asign this network
2584 for dataplane_iface
in numa_dict
['interfaces']:
2585 if dataplane_iface
['name'] == control_iface
.get("name"):
2586 if (dataplane_iface
['dedicated'] == "yes" and control_iface
["type"] != "PF") or \
2587 (dataplane_iface
['dedicated'] == "no" and control_iface
["type"] != "VF") or \
2588 (dataplane_iface
['dedicated'] == "yes:sriov" and control_iface
["type"] != "VFnotShared") :
2589 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2590 (control_iface
.get("name"), dataplane_iface
['dedicated'], control_iface
["type"])
2591 dataplane_iface
['uuid'] = control_iface
['net_id']
2592 if dataplane_iface
['dedicated'] == "no":
2593 dataplane_iface
['vlan'] = network
['vlan']
2594 if dataplane_iface
['dedicated'] != "yes" and control_iface
.get("mac_address"):
2595 dataplane_iface
['mac_address'] = control_iface
.get("mac_address")
2596 if control_iface
.get("vpci"):
2597 dataplane_iface
['vpci'] = control_iface
.get("vpci")
2601 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface
.get("name")
2603 resources
['host_id'] = host_id
2604 resources
['image_id'] = server
['image_id']
2605 resources
['flavor_id'] = server
['flavor_id']
2606 resources
['tenant_id'] = server
['tenant_id']
2607 resources
['ram'] = requirements
['ram']
2608 resources
['vcpus'] = requirements
['vcpus']
2609 resources
['status'] = 'CREATING'
2611 if 'description' in server
: resources
['description'] = server
['description']
2612 if 'name' in server
: resources
['name'] = server
['name']
2614 resources
['extended'] = {} #optional
2615 resources
['extended']['numas'] = []
2616 numa_dict
['numa_id'] = numa_id
2617 numa_dict
['memory'] = requirements
['numa']['memory']
2618 numa_dict
['cores'] = []
2620 for core
in cpu_pinning
:
2621 numa_dict
['cores'].append({'id': core
[2], 'vthread': core
[0], 'paired': paired
})
2622 for core
in reserved_threads
:
2623 numa_dict
['cores'].append({'id': core
})
2624 resources
['extended']['numas'].append(numa_dict
)
2625 if extended
!=None and 'devices' in extended
: #TODO allow extra devices without numa
2626 resources
['extended']['devices'] = extended
['devices']
2629 # '===================================={'
2630 #print json.dumps(resources, indent=4)
2631 #print '====================================}'