1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
5 # This file is part of openvim
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
This is a thread that interacts with the host and libvirt to manage VMs.
One thread will be launched per host.
# Module authorship metadata.
__author__ = "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
__date__ = "$10-jul-2014 12:07:15$"
43 from jsonschema
import validate
as js_v
, exceptions
as js_e
44 from vim_schema
import localinfo_schema
, hostinfo_schema
class RunCommandException(Exception):
    """Raised when a local or remote (ssh) command executed by host_thread fails."""
49 class host_thread(threading
.Thread
):
52 def __init__(self
, name
, host
, user
, db
, db_lock
, test
, image_path
, host_id
, version
, develop_mode
,
53 develop_bridge_iface
, password
=None, keyfile
= None, logger_name
=None, debug
=None):
54 """Init a thread to communicate with compute node or ovs_controller.
55 :param host_id: host identity
56 :param name: name of the thread
57 :param host: host ip or name to manage and user
58 :param user, password, keyfile: user and credentials to connect to host
59 :param db, db_lock': database class and lock to use it in exclusion
61 threading
.Thread
.__init
__(self
)
66 self
.db_lock
= db_lock
68 self
.password
= password
69 self
.keyfile
= keyfile
70 self
.localinfo_dirty
= False
71 self
.connectivity
= True
73 if not test
and not host_thread
.lvirt_module
:
75 module_info
= imp
.find_module("libvirt")
76 host_thread
.lvirt_module
= imp
.load_module("libvirt", *module_info
)
77 except (IOError, ImportError) as e
:
78 raise ImportError("Cannot import python-libvirt. Openvim not properly installed" +str(e
))
80 self
.logger_name
= logger_name
82 self
.logger_name
= "openvim.host."+name
83 self
.logger
= logging
.getLogger(self
.logger_name
)
85 self
.logger
.setLevel(getattr(logging
, debug
))
88 self
.develop_mode
= develop_mode
89 self
.develop_bridge_iface
= develop_bridge_iface
90 self
.image_path
= image_path
91 self
.empty_image_path
= image_path
92 self
.host_id
= host_id
93 self
.version
= version
98 self
.server_status
= {} #dictionary with pairs server_uuid:server_status
99 self
.pending_terminate_server
=[] #list with pairs (time,server_uuid) time to send a terminate for a server being destroyed
100 self
.next_update_server_status
= 0 #time when must be check servers status
104 self
.queueLock
= threading
.Lock()
105 self
.taskQueue
= Queue
.Queue(2000)
107 self
.run_command_session
= None
109 self
.localhost
= True if host
== 'localhost' else False
110 self
.lvirt_conn_uri
= "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
111 user
=self
.user
, host
=self
.host
)
113 self
.lvirt_conn_uri
+= "&keyfile=" + keyfile
114 self
.remote_ip
= None
117 def run_command(self
, command
, keep_session
=False, ignore_exit_status
=False):
118 """Run a command passed as a str on a localhost or at remote machine.
119 :param command: text with the command to execute.
120 :param keep_session: if True it returns a <stdin> for sending input with '<stdin>.write("text\n")'.
121 A command with keep_session=True MUST be followed by a command with keep_session=False in order to
122 close the session and get the output
123 :param ignore_exit_status: Return stdout and not raise an exepction in case of error.
124 :return: the output of the command if 'keep_session=False' or the <stdin> object if 'keep_session=True'
125 :raises: RunCommandException if command fails
127 if self
.run_command_session
and keep_session
:
128 raise RunCommandException("Internal error. A command with keep_session=True must be followed by another "
129 "command with keep_session=False to close session")
132 if self
.run_command_session
:
133 p
= self
.run_command_session
134 self
.run_command_session
= None
135 (output
, outerror
) = p
.communicate()
136 returncode
= p
.returncode
139 p
= subprocess
.Popen(('bash', "-c", command
), stdin
=subprocess
.PIPE
, stdout
=subprocess
.PIPE
,
140 stderr
=subprocess
.PIPE
)
141 self
.run_command_session
= p
144 if not ignore_exit_status
:
145 output
= subprocess
.check_output(('bash', "-c", command
))
149 p
= subprocess
.Popen(('bash', "-c", command
), stdout
=subprocess
.PIPE
)
150 out
, err
= p
.communicate()
153 if self
.run_command_session
:
154 (i
, o
, e
) = self
.run_command_session
155 self
.run_command_session
= None
156 i
.channel
.shutdown_write()
158 if not self
.ssh_conn
:
160 (i
, o
, e
) = self
.ssh_conn
.exec_command(command
, timeout
=10)
162 self
.run_command_session
= (i
, o
, e
)
164 returncode
= o
.channel
.recv_exit_status()
167 if returncode
!= 0 and not ignore_exit_status
:
168 text
= "run_command='{}' Error='{}'".format(command
, outerror
)
169 self
.logger
.error(text
)
170 raise RunCommandException(text
)
172 self
.logger
.debug("run_command='{}' result='{}'".format(command
, output
))
175 except RunCommandException
:
177 except subprocess
.CalledProcessError
as e
:
178 text
= "run_command Exception '{}' '{}'".format(str(e
), e
.output
)
179 except (paramiko
.ssh_exception
.SSHException
, Exception) as e
:
180 text
= "run_command='{}' Exception='{}'".format(command
, str(e
))
182 self
.run_command_session
= None
183 raise RunCommandException(text
)
185 def ssh_connect(self
):
188 self
.ssh_conn
= paramiko
.SSHClient()
189 self
.ssh_conn
.set_missing_host_key_policy(paramiko
.AutoAddPolicy())
190 self
.ssh_conn
.load_system_host_keys()
191 self
.ssh_conn
.connect(self
.host
, username
=self
.user
, password
=self
.password
, key_filename
=self
.keyfile
,
192 timeout
=10) # auth_timeout=10)
193 self
.remote_ip
= self
.ssh_conn
.get_transport().sock
.getpeername()[0]
194 self
.local_ip
= self
.ssh_conn
.get_transport().sock
.getsockname()[0]
195 except (paramiko
.ssh_exception
.SSHException
, Exception) as e
:
196 text
= 'ssh connect Exception: {}'.format(e
)
201 def check_connectivity(self
):
204 command
= 'sudo brctl show'
205 self
.run_command(command
)
206 except RunCommandException
as e
:
207 self
.connectivity
= False
208 self
.logger
.error("check_connectivity Exception: " + str(e
))
210 def load_localinfo(self
):
213 self
.run_command('sudo mkdir -p ' + self
.image_path
)
214 result
= self
.run_command('cat {}/.openvim.yaml'.format(self
.image_path
))
215 self
.localinfo
= yaml
.load(result
)
216 js_v(self
.localinfo
, localinfo_schema
)
217 self
.localinfo_dirty
= False
218 if 'server_files' not in self
.localinfo
:
219 self
.localinfo
['server_files'] = {}
220 self
.logger
.debug("localinfo loaded from host")
222 except RunCommandException
as e
:
223 self
.logger
.error("load_localinfo Exception: " + str(e
))
224 except host_thread
.lvirt_module
.libvirtError
as e
:
225 text
= e
.get_error_message()
226 self
.logger
.error("load_localinfo libvirt Exception: " + text
)
227 except yaml
.YAMLError
as exc
:
229 if hasattr(exc
, 'problem_mark'):
230 mark
= exc
.problem_mark
231 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
232 self
.logger
.error("load_localinfo yaml format Exception " + text
)
233 except js_e
.ValidationError
as e
:
235 if len(e
.path
)>0: text
=" at '" + ":".join(map(str, e
.path
))+"'"
236 self
.logger
.error("load_localinfo format Exception: %s %s", text
, str(e
))
237 except Exception as e
:
239 self
.logger
.error("load_localinfo Exception: " + text
)
241 # not loaded, insert a default data and force saving by activating dirty flag
242 self
.localinfo
= {'files':{}, 'server_files':{} }
243 # self.localinfo_dirty=True
244 self
.localinfo_dirty
=False
246 def load_hostinfo(self
):
250 result
= self
.run_command('cat {}/hostinfo.yaml'.format(self
.image_path
))
251 self
.hostinfo
= yaml
.load(result
)
252 js_v(self
.hostinfo
, hostinfo_schema
)
253 self
.logger
.debug("hostinfo load from host " + str(self
.hostinfo
))
255 except RunCommandException
as e
:
256 self
.logger
.error("load_hostinfo ssh Exception: " + str(e
))
257 except host_thread
.lvirt_module
.libvirtError
as e
:
258 text
= e
.get_error_message()
259 self
.logger
.error("load_hostinfo libvirt Exception: " + text
)
260 except yaml
.YAMLError
as exc
:
262 if hasattr(exc
, 'problem_mark'):
263 mark
= exc
.problem_mark
264 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
265 self
.logger
.error("load_hostinfo yaml format Exception " + text
)
266 except js_e
.ValidationError
as e
:
268 if len(e
.path
)>0: text
=" at '" + ":".join(map(str, e
.path
))+"'"
269 self
.logger
.error("load_hostinfo format Exception: %s %s", text
, str(e
))
270 except Exception as e
:
272 self
.logger
.error("load_hostinfo Exception: " + text
)
274 #not loaded, insert a default data
277 def save_localinfo(self
, tries
=3):
279 self
.localinfo_dirty
= False
286 command
= 'cat > {}/.openvim.yaml'.format(self
.image_path
)
287 in_stream
= self
.run_command(command
, keep_session
=True)
288 yaml
.safe_dump(self
.localinfo
, in_stream
, explicit_start
=True, indent
=4, default_flow_style
=False,
289 tags
=False, encoding
='utf-8', allow_unicode
=True)
290 result
= self
.run_command(command
, keep_session
=False) # to end session
292 self
.localinfo_dirty
= False
295 except RunCommandException
as e
:
296 self
.logger
.error("save_localinfo ssh Exception: " + str(e
))
297 except host_thread
.lvirt_module
.libvirtError
as e
:
298 text
= e
.get_error_message()
299 self
.logger
.error("save_localinfo libvirt Exception: " + text
)
300 except yaml
.YAMLError
as exc
:
302 if hasattr(exc
, 'problem_mark'):
303 mark
= exc
.problem_mark
304 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
305 self
.logger
.error("save_localinfo yaml format Exception " + text
)
306 except Exception as e
:
308 self
.logger
.error("save_localinfo Exception: " + text
)
310 def load_servers_from_db(self
):
311 self
.db_lock
.acquire()
312 r
,c
= self
.db
.get_table(SELECT
=('uuid','status', 'image_id'), FROM
='instances', WHERE
={'host_id': self
.host_id
})
313 self
.db_lock
.release()
315 self
.server_status
= {}
317 self
.logger
.error("Error getting data from database: " + c
)
320 self
.server_status
[ server
['uuid'] ] = server
['status']
322 #convert from old version to new one
323 if 'inc_files' in self
.localinfo
and server
['uuid'] in self
.localinfo
['inc_files']:
324 server_files_dict
= {'source file': self
.localinfo
['inc_files'][ server
['uuid'] ] [0], 'file format':'raw' }
325 if server_files_dict
['source file'][-5:] == 'qcow2':
326 server_files_dict
['file format'] = 'qcow2'
328 self
.localinfo
['server_files'][ server
['uuid'] ] = { server
['image_id'] : server_files_dict
}
329 if 'inc_files' in self
.localinfo
:
330 del self
.localinfo
['inc_files']
331 self
.localinfo_dirty
= True
333 def delete_unused_files(self
):
334 '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
335 Deletes unused entries at self.loacalinfo and the corresponding local files.
336 The only reason for this mismatch is the manual deletion of instances (VM) at database
340 for uuid
,images
in self
.localinfo
['server_files'].items():
341 if uuid
not in self
.server_status
:
342 for localfile
in images
.values():
344 self
.logger
.debug("deleting file '%s' of unused server '%s'", localfile
['source file'], uuid
)
345 self
.delete_file(localfile
['source file'])
346 except RunCommandException
as e
:
347 self
.logger
.error("Exception deleting file '%s': %s", localfile
['source file'], str(e
))
348 del self
.localinfo
['server_files'][uuid
]
349 self
.localinfo_dirty
= True
351 def insert_task(self
, task
, *aditional
):
353 self
.queueLock
.acquire()
354 task
= self
.taskQueue
.put( (task
,) + aditional
, timeout
=5)
355 self
.queueLock
.release()
358 return -1, "timeout inserting a task over host " + self
.name
362 self
.load_localinfo()
364 self
.load_servers_from_db()
365 self
.delete_unused_files()
368 self
.queueLock
.acquire()
369 if not self
.taskQueue
.empty():
370 task
= self
.taskQueue
.get()
373 self
.queueLock
.release()
377 if self
.localinfo_dirty
:
378 self
.save_localinfo()
379 elif self
.next_update_server_status
< now
:
380 self
.update_servers_status()
381 self
.next_update_server_status
= now
+ 5
382 elif len(self
.pending_terminate_server
)>0 and self
.pending_terminate_server
[0][0]<now
:
383 self
.server_forceoff()
388 if task
[0] == 'instance':
389 self
.logger
.debug("processing task instance " + str(task
[1]['action']))
393 r
= self
.action_on_server(task
[1], retry
==2)
396 elif task
[0] == 'image':
398 elif task
[0] == 'exit':
399 self
.logger
.debug("processing task exit")
402 elif task
[0] == 'reload':
403 self
.logger
.debug("processing task reload terminating and relaunching")
406 elif task
[0] == 'edit-iface':
407 self
.logger
.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
408 task
[1], task
[2], task
[3]))
409 self
.edit_iface(task
[1], task
[2], task
[3])
410 elif task
[0] == 'restore-iface':
411 self
.logger
.debug("processing task restore-iface={} mac={}".format(task
[1], task
[2]))
412 self
.restore_iface(task
[1], task
[2])
413 elif task
[0] == 'new-ovsbridge':
414 self
.logger
.debug("Creating compute OVS bridge")
415 self
.create_ovs_bridge()
416 elif task
[0] == 'new-vxlan':
417 self
.logger
.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task
[1], task
[2]))
418 self
.create_ovs_vxlan_tunnel(task
[1], task
[2])
419 elif task
[0] == 'del-ovsbridge':
420 self
.logger
.debug("Deleting OVS bridge")
421 self
.delete_ovs_bridge()
422 elif task
[0] == 'del-vxlan':
423 self
.logger
.debug("Deleting vxlan {} tunnel".format(task
[1]))
424 self
.delete_ovs_vxlan_tunnel(task
[1])
425 elif task
[0] == 'create-ovs-bridge-port':
426 self
.logger
.debug("Adding port ovim-{} to OVS bridge".format(task
[1]))
427 self
.create_ovs_bridge_port(task
[1])
428 elif task
[0] == 'del-ovs-port':
429 self
.logger
.debug("Delete bridge attached to ovs port vlan {} net {}".format(task
[1], task
[2]))
430 self
.delete_bridge_port_attached_to_ovs(task
[1], task
[2])
432 self
.logger
.debug("unknown task " + str(task
))
434 except Exception as e
:
435 self
.logger
.critical("Unexpected exception at run: " + str(e
), exc_info
=True)
437 def server_forceoff(self
, wait_until_finished
=False):
438 while len(self
.pending_terminate_server
)>0:
440 if self
.pending_terminate_server
[0][0]>now
:
441 if wait_until_finished
:
446 req
={'uuid':self
.pending_terminate_server
[0][1],
447 'action':{'terminate':'force'},
450 self
.action_on_server(req
)
451 self
.pending_terminate_server
.pop(0)
455 self
.server_forceoff(True)
456 if self
.localinfo_dirty
:
457 self
.save_localinfo()
459 self
.ssh_conn
.close()
460 except Exception as e
:
462 self
.logger
.error("terminate Exception: " + text
)
463 self
.logger
.debug("exit from host_thread")
465 def get_local_iface_name(self
, generic_name
):
466 if self
.hostinfo
!= None and "iface_names" in self
.hostinfo
and generic_name
in self
.hostinfo
["iface_names"]:
467 return self
.hostinfo
["iface_names"][generic_name
]
470 def create_xml_server(self
, server
, dev_list
, server_metadata
={}):
471 """Function that implements the generation of the VM XML definition.
472 Additional devices are in dev_list list
473 The main disk is upon dev_list[0]"""
475 #get if operating system is Windows
477 os_type
= server_metadata
.get('os_type', None)
478 if os_type
== None and 'metadata' in dev_list
[0]:
479 os_type
= dev_list
[0]['metadata'].get('os_type', None)
480 if os_type
!= None and os_type
.lower() == "windows":
482 #get type of hard disk bus
483 bus_ide
= True if windows_os
else False
484 bus
= server_metadata
.get('bus', None)
485 if bus
== None and 'metadata' in dev_list
[0]:
486 bus
= dev_list
[0]['metadata'].get('bus', None)
488 bus_ide
= True if bus
=='ide' else False
492 text
= "<domain type='kvm'>"
494 topo
= server_metadata
.get('topology', None)
495 if topo
== None and 'metadata' in dev_list
[0]:
496 topo
= dev_list
[0]['metadata'].get('topology', None)
498 name
= server
.get('name', '')[:28] + "_" + server
['uuid'][:28] #qemu impose a length limit of 59 chars or not start. Using 58
499 text
+= self
.inc_tab() + "<name>" + name
+ "</name>"
501 text
+= self
.tab() + "<uuid>" + server
['uuid'] + "</uuid>"
504 if 'extended' in server
and server
['extended']!=None and 'numas' in server
['extended']:
505 numa
= server
['extended']['numas'][0]
508 memory
= int(numa
.get('memory',0))*1024*1024 #in KiB
510 memory
= int(server
['ram'])*1024;
512 if not self
.develop_mode
:
515 return -1, 'No memory assigned to instance'
517 text
+= self
.tab() + "<memory unit='KiB'>" +memory
+"</memory>"
518 text
+= self
.tab() + "<currentMemory unit='KiB'>" +memory
+ "</currentMemory>"
520 text
+= self
.tab()+'<memoryBacking>'+ \
521 self
.inc_tab() + '<hugepages/>'+ \
522 self
.dec_tab()+ '</memoryBacking>'
525 use_cpu_pinning
=False
526 vcpus
= int(server
.get("vcpus",0))
528 if 'cores-source' in numa
:
530 for index
in range(0, len(numa
['cores-source'])):
531 cpu_pinning
.append( [ numa
['cores-id'][index
], numa
['cores-source'][index
] ] )
533 if 'threads-source' in numa
:
535 for index
in range(0, len(numa
['threads-source'])):
536 cpu_pinning
.append( [ numa
['threads-id'][index
], numa
['threads-source'][index
] ] )
538 if 'paired-threads-source' in numa
:
540 for index
in range(0, len(numa
['paired-threads-source'])):
541 cpu_pinning
.append( [numa
['paired-threads-id'][index
][0], numa
['paired-threads-source'][index
][0] ] )
542 cpu_pinning
.append( [numa
['paired-threads-id'][index
][1], numa
['paired-threads-source'][index
][1] ] )
545 if use_cpu_pinning
and not self
.develop_mode
:
546 text
+= self
.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning
)) +"</vcpu>" + \
547 self
.tab()+'<cputune>'
549 for i
in range(0, len(cpu_pinning
)):
550 text
+= self
.tab() + "<vcpupin vcpu='" +str(cpu_pinning
[i
][0])+ "' cpuset='" +str(cpu_pinning
[i
][1]) +"'/>"
551 text
+= self
.dec_tab()+'</cputune>'+ \
552 self
.tab() + '<numatune>' +\
553 self
.inc_tab() + "<memory mode='strict' nodeset='" +str(numa
['source'])+ "'/>" +\
554 self
.dec_tab() + '</numatune>'
557 return -1, "Instance without number of cpus"
558 text
+= self
.tab()+"<vcpu>" + str(vcpus
) + "</vcpu>"
563 if dev
['type']=='cdrom' :
566 text
+= self
.tab()+ '<os>' + \
567 self
.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
569 text
+= self
.tab() + "<boot dev='cdrom'/>"
570 text
+= self
.tab() + "<boot dev='hd'/>" + \
571 self
.dec_tab()+'</os>'
573 text
+= self
.tab()+'<features>'+\
574 self
.inc_tab()+'<acpi/>' +\
575 self
.tab()+'<apic/>' +\
576 self
.tab()+'<pae/>'+ \
577 self
.dec_tab() +'</features>'
578 if topo
== "oneSocket:hyperthreading":
580 return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
581 text
+= self
.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % (vcpus
/2)
582 elif windows_os
or topo
== "oneSocket":
583 text
+= self
.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
585 text
+= self
.tab() + "<cpu mode='host-model'></cpu>"
586 text
+= self
.tab() + "<clock offset='utc'/>" +\
587 self
.tab() + "<on_poweroff>preserve</on_poweroff>" + \
588 self
.tab() + "<on_reboot>restart</on_reboot>" + \
589 self
.tab() + "<on_crash>restart</on_crash>"
590 text
+= self
.tab() + "<devices>" + \
591 self
.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
592 self
.tab() + "<serial type='pty'>" +\
593 self
.inc_tab() + "<target port='0'/>" + \
594 self
.dec_tab() + "</serial>" +\
595 self
.tab() + "<console type='pty'>" + \
596 self
.inc_tab()+ "<target type='serial' port='0'/>" + \
597 self
.dec_tab()+'</console>'
599 text
+= self
.tab() + "<controller type='usb' index='0'/>" + \
600 self
.tab() + "<controller type='ide' index='0'/>" + \
601 self
.tab() + "<input type='mouse' bus='ps2'/>" + \
602 self
.tab() + "<sound model='ich6'/>" + \
603 self
.tab() + "<video>" + \
604 self
.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
605 self
.dec_tab() + "</video>" + \
606 self
.tab() + "<memballoon model='virtio'/>" + \
607 self
.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
609 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
610 #> self.dec_tab()+'</hostdev>\n' +\
611 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
613 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
615 #If image contains 'GRAPH' include graphics
616 #if 'GRAPH' in image:
617 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
618 self
.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
619 self
.dec_tab() + "</graphics>"
623 bus_ide_dev
= bus_ide
624 if dev
['type']=='cdrom' or dev
['type']=='disk':
625 if dev
['type']=='cdrom':
627 text
+= self
.tab() + "<disk type='file' device='"+dev
['type']+"'>"
628 if 'file format' in dev
:
629 text
+= self
.inc_tab() + "<driver name='qemu' type='" +dev
['file format']+ "' cache='writethrough'/>"
630 if 'source file' in dev
:
631 text
+= self
.tab() + "<source file='" +dev
['source file']+ "'/>"
632 #elif v['type'] == 'block':
633 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
635 # return -1, 'Unknown disk type ' + v['type']
636 vpci
= dev
.get('vpci',None)
637 if vpci
== None and 'metadata' in dev
:
638 vpci
= dev
['metadata'].get('vpci',None)
639 text
+= self
.pci2xml(vpci
)
642 text
+= self
.tab() + "<target dev='hd" +vd_index
+ "' bus='ide'/>" #TODO allows several type of disks
644 text
+= self
.tab() + "<target dev='vd" +vd_index
+ "' bus='virtio'/>"
645 text
+= self
.dec_tab() + '</disk>'
646 vd_index
= chr(ord(vd_index
)+1)
647 elif dev
['type']=='xml':
648 dev_text
= dev
['xml']
650 dev_text
= dev_text
.replace('__vpci__', dev
['vpci'])
651 if 'source file' in dev
:
652 dev_text
= dev_text
.replace('__file__', dev
['source file'])
653 if 'file format' in dev
:
654 dev_text
= dev_text
.replace('__format__', dev
['source file'])
655 if '__dev__' in dev_text
:
656 dev_text
= dev_text
.replace('__dev__', vd_index
)
657 vd_index
= chr(ord(vd_index
)+1)
660 return -1, 'Unknown device type ' + dev
['type']
663 bridge_interfaces
= server
.get('networks', [])
664 for v
in bridge_interfaces
:
666 self
.db_lock
.acquire()
667 result
, content
= self
.db
.get_table(FROM
='nets', SELECT
=('provider',),WHERE
={'uuid':v
['net_id']} )
668 self
.db_lock
.release()
670 self
.logger
.error("create_xml_server ERROR %d getting nets %s", result
, content
)
672 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
673 #I know it is not secure
674 #for v in sorted(desc['network interfaces'].itervalues()):
675 model
= v
.get("model", None)
676 if content
[0]['provider']=='default':
677 text
+= self
.tab() + "<interface type='network'>" + \
678 self
.inc_tab() + "<source network='" +content
[0]['provider']+ "'/>"
679 elif content
[0]['provider'][0:7]=='macvtap':
680 text
+= self
.tab()+"<interface type='direct'>" + \
681 self
.inc_tab() + "<source dev='" + self
.get_local_iface_name(content
[0]['provider'][8:]) + "' mode='bridge'/>" + \
682 self
.tab() + "<target dev='macvtap0'/>"
684 text
+= self
.tab() + "<alias name='net" + str(net_nb
) + "'/>"
687 elif content
[0]['provider'][0:6]=='bridge':
688 text
+= self
.tab() + "<interface type='bridge'>" + \
689 self
.inc_tab()+"<source bridge='" +self
.get_local_iface_name(content
[0]['provider'][7:])+ "'/>"
691 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
692 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
695 elif content
[0]['provider'][0:3] == "OVS":
696 vlan
= content
[0]['provider'].replace('OVS:', '')
697 text
+= self
.tab() + "<interface type='bridge'>" + \
698 self
.inc_tab() + "<source bridge='ovim-" + str(vlan
) + "'/>"
700 return -1, 'Unknown Bridge net provider ' + content
[0]['provider']
702 text
+= self
.tab() + "<model type='" +model
+ "'/>"
703 if v
.get('mac_address', None) != None:
704 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
705 text
+= self
.pci2xml(v
.get('vpci',None))
706 text
+= self
.dec_tab()+'</interface>'
710 interfaces
= numa
.get('interfaces', [])
714 if self
.develop_mode
: #map these interfaces to bridges
715 text
+= self
.tab() + "<interface type='bridge'>" + \
716 self
.inc_tab()+"<source bridge='" +self
.develop_bridge_iface
+ "'/>"
718 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
719 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
721 text
+= self
.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
722 if v
.get('mac_address', None) != None:
723 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
724 text
+= self
.pci2xml(v
.get('vpci',None))
725 text
+= self
.dec_tab()+'</interface>'
728 if v
['dedicated'] == 'yes': #passthrought
729 text
+= self
.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
730 self
.inc_tab() + "<source>"
732 text
+= self
.pci2xml(v
['source'])
733 text
+= self
.dec_tab()+'</source>'
734 text
+= self
.pci2xml(v
.get('vpci',None))
736 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
737 text
+= self
.dec_tab()+'</hostdev>'
739 else: #sriov_interfaces
740 #skip not connected interfaces
741 if v
.get("net_id") == None:
743 text
+= self
.tab() + "<interface type='hostdev' managed='yes'>"
745 if v
.get('mac_address', None) != None:
746 text
+= self
.tab() + "<mac address='" +v
['mac_address']+ "'/>"
747 text
+= self
.tab()+'<source>'
749 text
+= self
.pci2xml(v
['source'])
750 text
+= self
.dec_tab()+'</source>'
751 if v
.get('vlan',None) != None:
752 text
+= self
.tab() + "<vlan> <tag id='" + str(v
['vlan']) + "'/> </vlan>"
753 text
+= self
.pci2xml(v
.get('vpci',None))
755 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
756 text
+= self
.dec_tab()+'</interface>'
759 text
+= self
.dec_tab()+'</devices>'+\
760 self
.dec_tab()+'</domain>'
763 def pci2xml(self
, pci
):
764 '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
765 alows an empty pci text'''
768 first_part
= pci
.split(':')
769 second_part
= first_part
[2].split('.')
770 return self
.tab() + "<address type='pci' domain='0x" + first_part
[0] + \
771 "' bus='0x" + first_part
[1] + "' slot='0x" + second_part
[0] + \
772 "' function='0x" + second_part
[1] + "'/>"
775 """Return indentation according to xml_level"""
776 return "\n" + (' '*self
.xml_level
)
779 """Increment and return indentation according to xml_level"""
784 """Decrement and return indentation according to xml_level"""
788 def create_ovs_bridge(self
):
790 Create a bridge in compute OVS to allocate VMs
791 :return: True if success
793 if self
.test
or not self
.connectivity
:
797 self
.run_command('sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true')
800 except RunCommandException
as e
:
801 self
.logger
.error("create_ovs_bridge ssh Exception: " + str(e
))
802 if "SSH session not active" in str(e
):
806 def delete_port_to_ovs_bridge(self
, vlan
, net_uuid
):
808 Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
809 :param vlan: vlan port id
810 :param net_uuid: network id
814 if self
.test
or not self
.connectivity
:
817 port_name
= 'ovim-{}'.format(str(vlan
))
818 command
= 'sudo ovs-vsctl del-port br-int {}'.format(port_name
)
819 self
.run_command(command
)
821 except RunCommandException
as e
:
822 self
.logger
.error("delete_port_to_ovs_bridge ssh Exception: {}".format(str(e
)))
825 def delete_dhcp_server(self
, vlan
, net_uuid
, dhcp_path
):
827 Delete dhcp server process lining in namespace
828 :param vlan: segmentation id
829 :param net_uuid: network uuid
830 :param dhcp_path: conf fiel path that live in namespace side
833 if self
.test
or not self
.connectivity
:
835 if not self
.is_dhcp_port_free(vlan
, net_uuid
):
838 dhcp_namespace
= '{}-dnsmasq'.format(str(vlan
))
839 dhcp_path
= os
.path
.join(dhcp_path
, dhcp_namespace
)
840 pid_file
= os
.path
.join(dhcp_path
, 'dnsmasq.pid')
842 command
= 'sudo ip netns exec {} cat {}'.format(dhcp_namespace
, pid_file
)
843 content
= self
.run_command(command
, ignore_exit_status
=True)
844 dns_pid
= content
.replace('\n', '')
845 command
= 'sudo ip netns exec {} kill -9 {}'.format(dhcp_namespace
, dns_pid
)
846 self
.run_command(command
, ignore_exit_status
=True)
848 except RunCommandException
as e
:
849 self
.logger
.error("delete_dhcp_server ssh Exception: " + str(e
))
852 def is_dhcp_port_free(self
, host_id
, net_uuid
):
854 Check if any port attached to the a net in a vxlan mesh across computes nodes
855 :param host_id: host id
856 :param net_uuid: network id
857 :return: True if is not free
859 self
.db_lock
.acquire()
860 result
, content
= self
.db
.get_table(
862 WHERE
={'type': 'instance:ovs', 'net_id': net_uuid
}
864 self
.db_lock
.release()
871 def is_port_free(self
, host_id
, net_uuid
):
873 Check if there not ovs ports of a network in a compute host.
874 :param host_id: host id
875 :param net_uuid: network id
876 :return: True if is not free
879 self
.db_lock
.acquire()
880 result
, content
= self
.db
.get_table(
881 FROM
='ports as p join instances as i on p.instance_id=i.uuid',
882 WHERE
={"i.host_id": self
.host_id
, 'p.type': 'instance:ovs', 'p.net_id': net_uuid
}
884 self
.db_lock
.release()
891 def add_port_to_ovs_bridge(self
, vlan
):
893 Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
894 :param vlan: vlan port id
895 :return: True if success
901 port_name
= 'ovim-{}'.format(str(vlan
))
902 command
= 'sudo ovs-vsctl add-port br-int {} tag={}'.format(port_name
, str(vlan
))
903 self
.run_command(command
)
905 except RunCommandException
as e
:
906 self
.logger
.error("add_port_to_ovs_bridge Exception: " + str(e
))
909 def delete_dhcp_port(self
, vlan
, net_uuid
, dhcp_path
):
911 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
912 :param vlan: segmentation id
913 :param net_uuid: network id
914 :return: True if success
920 if not self
.is_dhcp_port_free(vlan
, net_uuid
):
922 self
.delete_dhcp_interfaces(vlan
, dhcp_path
)
925 def delete_bridge_port_attached_to_ovs(self
, vlan
, net_uuid
):
927 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
930 :return: True if success
935 if not self
.is_port_free(vlan
, net_uuid
):
937 self
.delete_port_to_ovs_bridge(vlan
, net_uuid
)
938 self
.delete_linux_bridge(vlan
)
941 def delete_linux_bridge(self
, vlan
):
943 Delete a linux bridge in a scpecific compute.
944 :param vlan: vlan port id
945 :return: True if success
951 port_name
= 'ovim-{}'.format(str(vlan
))
952 command
= 'sudo ip link set dev ovim-{} down'.format(str(vlan
))
953 self
.run_command(command
)
955 command
= 'sudo ip link delete {} && sudo brctl delbr {}'.format(port_name
, port_name
)
956 self
.run_command(command
)
958 except RunCommandException
as e
:
959 self
.logger
.error("delete_linux_bridge Exception: {}".format(str(e
)))
962 def remove_link_bridge_to_ovs(self
, vlan
, link
):
964 Delete a linux provider net connection to tenatn net
965 :param vlan: vlan port id
966 :param link: link name
967 :return: True if success
973 br_tap_name
= '{}-vethBO'.format(str(vlan
))
974 br_ovs_name
= '{}-vethOB'.format(str(vlan
))
976 # Delete ovs veth pair
977 command
= 'sudo ip link set dev {} down'.format(br_ovs_name
)
978 self
.run_command(command
, ignore_exit_status
=True)
980 command
= 'sudo ovs-vsctl del-port br-int {}'.format(br_ovs_name
)
981 self
.run_command(command
)
983 # Delete br veth pair
984 command
= 'sudo ip link set dev {} down'.format(br_tap_name
)
985 self
.run_command(command
, ignore_exit_status
=True)
987 # Delete br veth interface form bridge
988 command
= 'sudo brctl delif {} {}'.format(link
, br_tap_name
)
989 self
.run_command(command
)
991 # Delete br veth pair
992 command
= 'sudo ip link set dev {} down'.format(link
)
993 self
.run_command(command
, ignore_exit_status
=True)
996 except RunCommandException
as e
:
997 self
.logger
.error("remove_link_bridge_to_ovs Exception: {}".format(str(e
)))
1000 def create_ovs_bridge_port(self
, vlan
):
1002 Generate a linux bridge and attache the port to a OVS bridge
1003 :param vlan: vlan port id
1008 self
.create_linux_bridge(vlan
)
1009 self
.add_port_to_ovs_bridge(vlan
)
1011 def create_linux_bridge(self
, vlan
):
1013 Create a linux bridge with STP active
1014 :param vlan: netowrk vlan id
1021 port_name
= 'ovim-{}'.format(str(vlan
))
1022 command
= 'sudo brctl show | grep {}'.format(port_name
)
1023 result
= self
.run_command(command
, ignore_exit_status
=True)
1025 command
= 'sudo brctl addbr {}'.format(port_name
)
1026 self
.run_command(command
)
1028 command
= 'sudo brctl stp {} on'.format(port_name
)
1029 self
.run_command(command
)
1031 command
= 'sudo ip link set dev {} up'.format(port_name
)
1032 self
.run_command(command
)
1034 except RunCommandException
as e
:
1035 self
.logger
.error("create_linux_bridge ssh Exception: {}".format(str(e
)))
1038 def set_mac_dhcp_server(self
, ip
, mac
, vlan
, netmask
, first_ip
, dhcp_path
):
1040 Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
1041 :param ip: IP address asigned to a VM
1042 :param mac: VM vnic mac to be macthed with the IP received
1043 :param vlan: Segmentation id
1044 :param netmask: netmask value
1045 :param path: dhcp conf file path that live in namespace side
1046 :return: True if success
1052 dhcp_namespace
= '{}-dnsmasq'.format(str(vlan
))
1053 dhcp_path
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1054 dhcp_hostsdir
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1055 ns_interface
= '{}-vethDO'.format(str(vlan
))
1060 command
= 'sudo ip netns exec {} cat /sys/class/net/{}/address'.format(dhcp_namespace
, ns_interface
)
1061 iface_listen_mac
= self
.run_command(command
, ignore_exit_status
=True)
1063 if iface_listen_mac
> 0:
1064 command
= 'sudo ip netns exec {} cat {} | grep -i {}'.format(dhcp_namespace
,
1067 content
= self
.run_command(command
, ignore_exit_status
=True)
1069 ip_data
= iface_listen_mac
.upper().replace('\n', '') + ',' + first_ip
1070 dhcp_hostsdir
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1072 command
= 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace
,
1075 self
.run_command(command
)
1077 ip_data
= mac
.upper() + ',' + ip
1078 command
= 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace
,
1081 self
.run_command(command
, ignore_exit_status
=False)
1086 except RunCommandException
as e
:
1087 self
.logger
.error("set_mac_dhcp_server ssh Exception: " + str(e
))
1090 def delete_mac_dhcp_server(self
, ip
, mac
, vlan
, dhcp_path
):
1092 Delete into dhcp conf file the ip assigned to a specific MAC address
1094 :param ip: IP address asigned to a VM
1095 :param mac: VM vnic mac to be macthed with the IP received
1096 :param vlan: Segmentation id
1097 :param dhcp_path: dhcp conf file path that live in namespace side
1104 dhcp_namespace
= str(vlan
) + '-dnsmasq'
1105 dhcp_path
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1106 dhcp_hostsdir
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1111 ip_data
= mac
.upper() + ',' + ip
1113 command
= 'sudo ip netns exec ' + dhcp_namespace
+ ' sudo sed -i \'/' + ip_data
+ '/d\' ' + dhcp_hostsdir
1114 self
.run_command(command
)
1117 except RunCommandException
as e
:
1118 self
.logger
.error("delete_mac_dhcp_server Exception: " + str(e
))
1121 def launch_dhcp_server(self
, vlan
, ip_range
, netmask
, dhcp_path
, gateway
, dns_list
=None, routes
=None):
1123 Generate a linux bridge and attache the port to a OVS bridge
1125 :param vlan: Segmentation id
1126 :param ip_range: IP dhcp range
1127 :param netmask: network netmask
1128 :param dhcp_path: dhcp conf file path that live in namespace side
1129 :param gateway: Gateway address for dhcp net
1130 :param dns_list: dns list for dhcp server
1131 :param routes: routes list for dhcp server
1132 :return: True if success
1138 ns_interface
= str(vlan
) + '-vethDO'
1139 dhcp_namespace
= str(vlan
) + '-dnsmasq'
1140 dhcp_path
= os
.path
.join(dhcp_path
, dhcp_namespace
, '')
1141 leases_path
= os
.path
.join(dhcp_path
, "dnsmasq.leases")
1142 pid_file
= os
.path
.join(dhcp_path
, 'dnsmasq.pid')
1144 dhcp_range
= ip_range
[0] + ',' + ip_range
[1] + ',' + netmask
1146 command
= 'sudo ip netns exec {} mkdir -p {}'.format(dhcp_namespace
, dhcp_path
)
1147 self
.run_command(command
)
1149 # check if dnsmasq process is running
1150 dnsmasq_is_runing
= False
1151 pid_path
= os
.path
.join(dhcp_path
, 'dnsmasq.pid')
1152 command
= 'sudo ip netns exec ' + dhcp_namespace
+ ' ls ' + pid_path
1153 content
= self
.run_command(command
, ignore_exit_status
=True)
1155 # check if pid is runing
1157 pid_path
= content
.replace('\n', '')
1158 command
= "ps aux | awk '{print $2 }' | grep {}" + pid_path
1159 dnsmasq_is_runing
= self
.run_command(command
, ignore_exit_status
=True)
1161 gateway_option
= ' --dhcp-option=3,' + gateway
1163 dhcp_route_option
= ''
1165 dhcp_route_option
= ' --dhcp-option=121'
1166 for key
, value
in routes
.iteritems():
1167 if 'default' == key
:
1168 gateway_option
= ' --dhcp-option=3,' + value
1170 dhcp_route_option
+= ',' + key
+ ',' + value
1173 dns_data
= ' --dhcp-option=6'
1174 for dns
in dns_list
:
1175 dns_data
+= ',' + dns
1177 if not dnsmasq_is_runing
:
1178 command
= 'sudo ip netns exec ' + dhcp_namespace
+ ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
1179 '--interface=' + ns_interface
+ \
1180 ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path
+ \
1181 ' --dhcp-range ' + dhcp_range
+ \
1182 ' --pid-file=' + pid_file
+ \
1183 ' --dhcp-leasefile=' + leases_path
+ \
1184 ' --listen-address ' + ip_range
[0] + \
1186 dhcp_route_option
+ \
1189 self
.run_command(command
)
1191 except RunCommandException
as e
:
1192 self
.logger
.error("launch_dhcp_server ssh Exception: " + str(e
))
1195 def delete_dhcp_interfaces(self
, vlan
, dhcp_path
):
1197 Delete a linux dnsmasq bridge and namespace
1198 :param vlan: netowrk vlan id
1205 br_veth_name
='{}-vethDO'.format(str(vlan
))
1206 ovs_veth_name
= '{}-vethOD'.format(str(vlan
))
1207 dhcp_namespace
= '{}-dnsmasq'.format(str(vlan
))
1209 dhcp_path
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1210 command
= 'sudo ovs-vsctl del-port br-int {}'.format(ovs_veth_name
)
1211 self
.run_command(command
, ignore_exit_status
=True) # to end session
1213 command
= 'sudo ip link set dev {} down'.format(ovs_veth_name
)
1214 self
.run_command(command
, ignore_exit_status
=True) # to end session
1216 command
= 'sudo ip link delete {} '.format(ovs_veth_name
)
1217 self
.run_command(command
, ignore_exit_status
=True)
1219 command
= 'sudo ip netns exec {} ip link set dev {} down'.format(dhcp_namespace
, br_veth_name
)
1220 self
.run_command(command
, ignore_exit_status
=True)
1222 command
= 'sudo rm -rf {}'.format(dhcp_path
)
1223 self
.run_command(command
)
1225 command
= 'sudo ip netns del {}'.format(dhcp_namespace
)
1226 self
.run_command(command
)
1229 except RunCommandException
as e
:
1230 self
.logger
.error("delete_dhcp_interfaces ssh Exception: {}".format(str(e
)))
1233 def create_dhcp_interfaces(self
, vlan
, ip_listen_address
, netmask
):
1235 Create a linux bridge with STP active
1236 :param vlan: segmentation id
1237 :param ip_listen_address: Listen Ip address for the dhcp service, the tap interface living in namesapce side
1238 :param netmask: dhcp net CIDR
1239 :return: True if success
1244 ovs_veth_name
= '{}-vethOD'.format(str(vlan
))
1245 ns_veth
= '{}-vethDO'.format(str(vlan
))
1246 dhcp_namespace
= '{}-dnsmasq'.format(str(vlan
))
1248 command
= 'sudo ip netns add {}'.format(dhcp_namespace
)
1249 self
.run_command(command
)
1251 command
= 'sudo ip link add {} type veth peer name {}'.format(ns_veth
, ovs_veth_name
)
1252 self
.run_command(command
)
1254 command
= 'sudo ip link set {} netns {}'.format(ns_veth
, dhcp_namespace
)
1255 self
.run_command(command
)
1257 command
= 'sudo ip netns exec {} ip link set dev {} up'.format(dhcp_namespace
, ns_veth
)
1258 self
.run_command(command
)
1260 command
= 'sudo ovs-vsctl add-port br-int {} tag={}'.format(ovs_veth_name
, str(vlan
))
1261 self
.run_command(command
, ignore_exit_status
=True)
1263 command
= 'sudo ip link set dev {} up'.format(ovs_veth_name
)
1264 self
.run_command(command
)
1266 command
= 'sudo ip netns exec {} ip link set dev lo up'.format(dhcp_namespace
)
1267 self
.run_command(command
)
1269 command
= 'sudo ip netns exec {} ifconfig {} {} netmask {}'.format(dhcp_namespace
,
1273 self
.run_command(command
)
1275 except RunCommandException
as e
:
1276 self
.logger
.error("create_dhcp_interfaces ssh Exception: {}".format(str(e
)))
1279 def delete_qrouter_connection(self
, vlan
, link
):
1281 Delete qrouter Namesapce with all veth interfaces need it
1290 ns_qouter
= '{}-qrouter'.format(str(vlan
))
1291 qrouter_ovs_veth
= '{}-vethOQ'.format(str(vlan
))
1292 qrouter_ns_veth
= '{}-vethQO'.format(str(vlan
))
1293 qrouter_br_veth
= '{}-vethBQ'.format(str(vlan
))
1294 qrouter_ns_router_veth
= '{}-vethQB'.format(str(vlan
))
1296 command
= 'sudo ovs-vsctl del-port br-int {}'.format(qrouter_ovs_veth
)
1297 self
.run_command(command
, ignore_exit_status
=True)
1300 command
= 'sudo ip netns exec {} ip link set dev {} down'.format(ns_qouter
, qrouter_ns_veth
)
1301 self
.run_command(command
, ignore_exit_status
=True)
1303 command
= 'sudo ip netns exec {} ip link delete {} '.format(ns_qouter
, qrouter_ns_veth
)
1304 self
.run_command(command
, ignore_exit_status
=True)
1306 command
= 'sudo ip netns del ' + ns_qouter
1307 self
.run_command(command
)
1309 # down ovs veth interface
1310 command
= 'sudo ip link set dev {} down'.format(qrouter_br_veth
)
1311 self
.run_command(command
, ignore_exit_status
=True)
1313 # down br veth interface
1314 command
= 'sudo ip link set dev {} down'.format(qrouter_ovs_veth
)
1315 self
.run_command(command
, ignore_exit_status
=True)
1317 # delete veth interface
1318 command
= 'sudo ip link delete {} '.format(link
, qrouter_ovs_veth
)
1319 self
.run_command(command
, ignore_exit_status
=True)
1321 # down br veth interface
1322 command
= 'sudo ip link set dev {} down'.format(qrouter_ns_router_veth
)
1323 self
.run_command(command
, ignore_exit_status
=True)
1325 # delete veth interface
1326 command
= 'sudo ip link delete {} '.format(link
, qrouter_ns_router_veth
)
1327 self
.run_command(command
, ignore_exit_status
=True)
1329 # down br veth interface
1330 command
= 'sudo brctl delif {} {}'.format(link
, qrouter_br_veth
)
1331 self
.run_command(command
)
1335 except RunCommandException
as e
:
1336 self
.logger
.error("delete_qrouter_connection ssh Exception: {}".format(str(e
)))
1339 def create_qrouter_ovs_connection(self
, vlan
, gateway
, dhcp_cidr
):
1341 Create qrouter Namesapce with all veth interfaces need it between NS and OVS
1351 ns_qouter
= '{}-qrouter'.format(str(vlan
))
1352 qrouter_ovs_veth
='{}-vethOQ'.format(str(vlan
))
1353 qrouter_ns_veth
= '{}-vethQO'.format(str(vlan
))
1356 command
= 'sudo ip netns add {}'.format(ns_qouter
)
1357 self
.run_command(command
)
1360 command
= 'sudo ip link add {} type veth peer name {}'.format(qrouter_ns_veth
, qrouter_ovs_veth
)
1361 self
.run_command(command
, ignore_exit_status
=True)
1363 # up ovs veth interface
1364 command
= 'sudo ip link set dev {} up'.format(qrouter_ovs_veth
)
1365 self
.run_command(command
)
1367 # add ovs veth to ovs br-int
1368 command
= 'sudo ovs-vsctl add-port br-int {} tag={}'.format(qrouter_ovs_veth
, vlan
)
1369 self
.run_command(command
)
1372 command
= 'sudo ip link set {} netns {}'.format(qrouter_ns_veth
, ns_qouter
)
1373 self
.run_command(command
)
1376 command
= 'sudo ip netns exec {} ip link set dev lo up'.format(ns_qouter
)
1377 self
.run_command(command
)
1380 command
= 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter
, qrouter_ns_veth
)
1381 self
.run_command(command
)
1383 from netaddr
import IPNetwork
1384 ip_tools
= IPNetwork(dhcp_cidr
)
1385 cidr_len
= ip_tools
.prefixlen
1388 command
= 'sudo ip netns exec {} ip address add {}/{} dev {}'.format(ns_qouter
, gateway
, cidr_len
, qrouter_ns_veth
)
1389 self
.run_command(command
)
1393 except RunCommandException
as e
:
1394 self
.logger
.error("Create_dhcp_interfaces ssh Exception: {}".format(str(e
)))
1397 def add_ns_routes(self
, vlan
, routes
):
1409 ns_qouter
= '{}-qrouter'.format(str(vlan
))
1410 qrouter_ns_router_veth
= '{}-vethQB'.format(str(vlan
))
1412 for key
, value
in routes
.iteritems():
1414 if key
== 'default':
1415 command
= 'sudo ip netns exec {} ip route add {} via {} '.format(ns_qouter
, key
, value
)
1417 command
= 'sudo ip netns exec {} ip route add {} via {} dev {}'.format(ns_qouter
, key
, value
,
1418 qrouter_ns_router_veth
)
1420 self
.run_command(command
)
1424 except RunCommandException
as e
:
1425 self
.logger
.error("add_ns_routes, error adding routes to namesapce, {}".format(str(e
)))
1428 def create_qrouter_br_connection(self
, vlan
, cidr
, link
):
1430 Create veth interfaces between user bridge (link) and OVS
1440 ns_qouter
= '{}-qrouter'.format(str(vlan
))
1441 qrouter_ns_router_veth
= '{}-vethQB'.format(str(vlan
))
1442 qrouter_br_veth
= '{}-vethBQ'.format(str(vlan
))
1444 command
= 'sudo brctl show | grep {}'.format(link
['iface'])
1445 content
= self
.run_command(command
, ignore_exit_status
=True)
1449 command
= 'sudo ip link add {} type veth peer name {}'.format(qrouter_br_veth
, qrouter_ns_router_veth
)
1450 self
.run_command(command
)
1452 # up ovs veth interface
1453 command
= 'sudo ip link set dev {} up'.format(qrouter_br_veth
)
1454 self
.run_command(command
)
1457 command
= 'sudo ip link set {} netns {}'.format(qrouter_ns_router_veth
, ns_qouter
)
1458 self
.run_command(command
)
1461 command
= 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter
, qrouter_ns_router_veth
)
1462 self
.run_command(command
)
1464 command
= 'sudo ip netns exec {} ip address add {} dev {}'.format(ns_qouter
,
1466 qrouter_ns_router_veth
)
1467 self
.run_command(command
)
1470 command
= 'sudo brctl addif {} {}'.format(link
['iface'], qrouter_br_veth
)
1471 self
.run_command(command
)
1474 command
= 'sudo ip netns exec {} iptables -t nat -A POSTROUTING -o {} -s {} -d {} -j MASQUERADE' \
1475 .format(ns_qouter
, qrouter_ns_router_veth
, link
['nat'], cidr
)
1476 self
.run_command(command
)
1481 self
.logger
.error('create_qrouter_br_connection, Bridge {} given by user not exist'.format(qrouter_br_veth
))
1484 except RunCommandException
as e
:
1485 self
.logger
.error("Error creating qrouter, {}".format(str(e
)))
1488 def create_link_bridge_to_ovs(self
, vlan
, link
):
1490 Create interfaces to connect a linux bridge with tenant net
1491 :param vlan: segmentation id
1492 :return: True if success
1498 br_tap_name
= '{}-vethBO'.format(str(vlan
))
1499 br_ovs_name
= '{}-vethOB'.format(str(vlan
))
1501 # is a bridge or a interface
1502 command
= 'sudo brctl show | grep {}'.format(link
)
1503 content
= self
.run_command(command
, ignore_exit_status
=True)
1505 command
= 'sudo ip link add {} type veth peer name {}'.format(br_tap_name
, br_ovs_name
)
1506 self
.run_command(command
)
1508 command
= 'sudo ip link set dev {} up'.format(br_tap_name
)
1509 self
.run_command(command
)
1511 command
= 'sudo ip link set dev {} up'.format(br_ovs_name
)
1512 self
.run_command(command
)
1514 command
= 'sudo ovs-vsctl add-port br-int {} tag={}'.format(br_ovs_name
, str(vlan
))
1515 self
.run_command(command
)
1517 command
= 'sudo brctl addif ' + link
+ ' {}'.format(br_tap_name
)
1518 self
.run_command(command
)
1521 self
.logger
.error('Link is not present, please check {}'.format(link
))
1524 except RunCommandException
as e
:
1525 self
.logger
.error("create_link_bridge_to_ovs, Error creating link to ovs, {}".format(str(e
)))
1528 def create_ovs_vxlan_tunnel(self
, vxlan_interface
, remote_ip
):
1530 Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
1531 :param vxlan_interface: vlxan inteface name.
1532 :param remote_ip: tunnel endpoint remote compute ip.
1535 if self
.test
or not self
.connectivity
:
1537 if remote_ip
== 'localhost':
1539 return True # TODO: Cannot create a vxlan between localhost and localhost
1540 remote_ip
= self
.local_ip
1543 command
= 'sudo ovs-vsctl add-port br-int {} -- set Interface {} type=vxlan options:remote_ip={} ' \
1544 '-- set Port {} other_config:stp-path-cost=10'.format(vxlan_interface
,
1548 self
.run_command(command
)
1550 except RunCommandException
as e
:
1551 self
.logger
.error("create_ovs_vxlan_tunnel, error creating vxlan tunnel, {}".format(str(e
)))
1554 def delete_ovs_vxlan_tunnel(self
, vxlan_interface
):
1556 Delete a vlxan tunnel port from a OVS brdige.
1557 :param vxlan_interface: vlxan name to be delete it.
1558 :return: True if success.
1560 if self
.test
or not self
.connectivity
:
1563 command
= 'sudo ovs-vsctl del-port br-int {}'.format(vxlan_interface
)
1564 self
.run_command(command
)
1566 except RunCommandException
as e
:
1567 self
.logger
.error("delete_ovs_vxlan_tunnel, error deleting vxlan tunenl, {}".format(str(e
)))
1570 def delete_ovs_bridge(self
):
1572 Delete a OVS bridge from a compute.
1573 :return: True if success
1575 if self
.test
or not self
.connectivity
:
1578 command
= 'sudo ovs-vsctl del-br br-int'
1579 self
.run_command(command
)
1581 except RunCommandException
as e
:
1582 self
.logger
.error("delete_ovs_bridge ssh Exception: {}".format(str(e
)))
1585 def get_file_info(self
, path
):
1586 command
= 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1588 content
= self
.run_command(command
)
1589 return content
.split(" ") # (permission, 1, owner, group, size, date, file)
1590 except RunCommandException
as e
:
1591 return None # file does not exist
1593 def qemu_get_info(self
, path
):
1594 command
= 'qemu-img info ' + path
1595 content
= self
.run_command(command
)
1597 return yaml
.load(content
)
1598 except yaml
.YAMLError
as exc
:
1600 if hasattr(exc
, 'problem_mark'):
1601 mark
= exc
.problem_mark
1602 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
1603 self
.logger
.error("get_qemu_info yaml format Exception " + text
)
1604 raise RunCommandException("Error getting qemu_info yaml format" + text
)
1606 def qemu_change_backing(self
, inc_file
, new_backing_file
):
1607 command
= 'qemu-img rebase -u -b {} {}'.format(new_backing_file
, inc_file
)
1609 self
.run_command(command
)
1611 except RunCommandException
as e
:
1612 self
.logger
.error("qemu_change_backing error: " + str(e
))
1615 def qemu_create_empty_disk(self
, dev
):
1617 if not dev
and 'source' not in dev
and 'file format' not in dev
and 'image_size' not in dev
:
1618 self
.logger
.error("qemu_create_empty_disk error: missing image parameter")
1621 empty_disk_path
= dev
['source file']
1623 command
= 'qemu-img create -f qcow2 {} {}G'.format(empty_disk_path
, dev
['image_size'])
1625 self
.run_command(command
)
1627 except RunCommandException
as e
:
1628 self
.logger
.error("qemu_create_empty_disk error: " + str(e
))
1631 def get_notused_filename(self
, proposed_name
, suffix
=''):
1632 '''Look for a non existing file_name in the host
1633 proposed_name: proposed file name, includes path
1634 suffix: suffix to be added to the name, before the extention
1636 extension
= proposed_name
.rfind(".")
1637 slash
= proposed_name
.rfind("/")
1638 if extension
< 0 or extension
< slash
: # no extension
1639 extension
= len(proposed_name
)
1640 target_name
= proposed_name
[:extension
] + suffix
+ proposed_name
[extension
:]
1641 info
= self
.get_file_info(target_name
)
1646 while info
is not None:
1647 target_name
= proposed_name
[:extension
] + suffix
+ "-" + str(index
) + proposed_name
[extension
:]
1649 info
= self
.get_file_info(target_name
)
1652 def get_notused_path(self
, proposed_path
, suffix
=''):
1653 '''Look for a non existing path at database for images
1654 proposed_path: proposed file name, includes path
1655 suffix: suffix to be added to the name, before the extention
1657 extension
= proposed_path
.rfind(".")
1659 extension
= len(proposed_path
)
1661 target_path
= proposed_path
[:extension
] + suffix
+ proposed_path
[extension
:]
1664 r
,_
=self
.db
.get_table(FROM
="images",WHERE
={"path":target_path
})
1667 target_path
= proposed_path
[:extension
] + suffix
+ "-" + str(index
) + proposed_path
[extension
:]
1671 def delete_file(self
, file_name
):
1672 command
= 'rm -f ' + file_name
1673 self
.run_command(command
)
1675 def copy_file(self
, source
, destination
, perserve_time
=True):
1676 if source
[0:4]=="http":
1677 command
= "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1678 dst
=destination
, src
=source
, dst_result
=destination
+ ".result" )
1680 command
= 'cp --no-preserve=mode'
1682 command
+= ' --preserve=timestamps'
1683 command
+= " '{}' '{}'".format(source
, destination
)
1684 self
.run_command(command
)
1686 def copy_remote_file(self
, remote_file
, use_incremental
):
1687 ''' Copy a file from the repository to local folder and recursively
1688 copy the backing files in case the remote file is incremental
1689 Read and/or modified self.localinfo['files'] that contain the
1690 unmodified copies of images in the local path
1692 remote_file: path of remote file
1693 use_incremental: None (leave the decision to this function), True, False
1695 local_file: name of local file
1696 qemu_info: dict with quemu information of local file
1697 use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
1700 use_incremental_out
= use_incremental
1701 new_backing_file
= None
1703 file_from_local
= True
1705 #in case incremental use is not decided, take the decision depending on the image
1706 #avoid the use of incremental if this image is already incremental
1707 if remote_file
[0:4] == "http":
1708 file_from_local
= False
1710 qemu_remote_info
= self
.qemu_get_info(remote_file
)
1711 if use_incremental_out
==None:
1712 use_incremental_out
= not ( file_from_local
and 'backing file' in qemu_remote_info
)
1713 #copy recursivelly the backing files
1714 if file_from_local
and 'backing file' in qemu_remote_info
:
1715 new_backing_file
, _
, _
= self
.copy_remote_file(qemu_remote_info
['backing file'], True)
1717 #check if remote file is present locally
1718 if use_incremental_out
and remote_file
in self
.localinfo
['files']:
1719 local_file
= self
.localinfo
['files'][remote_file
]
1720 local_file_info
= self
.get_file_info(local_file
)
1722 remote_file_info
= self
.get_file_info(remote_file
)
1723 if local_file_info
== None:
1725 elif file_from_local
and (local_file_info
[4]!=remote_file_info
[4] or local_file_info
[5]!=remote_file_info
[5]):
1726 #local copy of file not valid because date or size are different.
1727 #TODO DELETE local file if this file is not used by any active virtual machine
1729 self
.delete_file(local_file
)
1730 del self
.localinfo
['files'][remote_file
]
1734 else: #check that the local file has the same backing file, or there are not backing at all
1735 qemu_info
= self
.qemu_get_info(local_file
)
1736 if new_backing_file
!= qemu_info
.get('backing file'):
1740 if local_file
== None: #copy the file
1741 img_name
= remote_file
.split('/') [-1]
1742 img_local
= self
.image_path
+ '/' + img_name
1743 local_file
= self
.get_notused_filename(img_local
)
1744 self
.copy_file(remote_file
, local_file
, use_incremental_out
)
1746 if use_incremental_out
:
1747 self
.localinfo
['files'][remote_file
] = local_file
1748 if new_backing_file
:
1749 self
.qemu_change_backing(local_file
, new_backing_file
)
1750 qemu_info
= self
.qemu_get_info(local_file
)
1752 return local_file
, qemu_info
, use_incremental_out
1754 def launch_server(self
, conn
, server
, rebuild
=False, domain
=None):
1756 time
.sleep(random
.randint(20,150)) #sleep random timeto be make it a bit more real
1759 server_id
= server
['uuid']
1760 paused
= server
.get('paused','no')
1762 if domain
!=None and rebuild
==False:
1764 #self.server_status[server_id] = 'ACTIVE'
1767 self
.db_lock
.acquire()
1768 result
, server_data
= self
.db
.get_instance(server_id
)
1769 self
.db_lock
.release()
1771 self
.logger
.error("launch_server ERROR getting server from DB %d %s", result
, server_data
)
1772 return result
, server_data
1774 #0: get image metadata
1775 server_metadata
= server
.get('metadata', {})
1776 use_incremental
= None
1778 if "use_incremental" in server_metadata
:
1779 use_incremental
= False if server_metadata
["use_incremental"] == "no" else True
1781 server_host_files
= self
.localinfo
['server_files'].get( server
['uuid'], {})
1783 #delete previous incremental files
1784 for file_
in server_host_files
.values():
1785 self
.delete_file(file_
['source file'] )
1786 server_host_files
={}
1788 #1: obtain aditional devices (disks)
1789 #Put as first device the main disk
1790 devices
= [ {"type":"disk", "image_id":server
['image_id'], "vpci":server_metadata
.get('vpci', None) } ]
1791 if 'extended' in server_data
and server_data
['extended']!=None and "devices" in server_data
['extended']:
1792 devices
+= server_data
['extended']['devices']
1795 image_id
= dev
.get('image_id')
1798 uuid_empty
= str(uuid
.uuid4())
1799 empty_path
= self
.empty_image_path
+ uuid_empty
+ '.qcow2' # local path for empty disk
1801 dev
['source file'] = empty_path
1802 dev
['file format'] = 'qcow2'
1803 self
.qemu_create_empty_disk(dev
)
1804 server_host_files
[uuid_empty
] = {'source file': empty_path
,
1805 'file format': dev
['file format']}
1809 self
.db_lock
.acquire()
1810 result
, content
= self
.db
.get_table(FROM
='images', SELECT
=('path', 'metadata'),
1811 WHERE
={'uuid': image_id
})
1812 self
.db_lock
.release()
1814 error_text
= "ERROR", result
, content
, "when getting image", dev
['image_id']
1815 self
.logger
.error("launch_server " + error_text
)
1816 return -1, error_text
1817 if content
[0]['metadata'] is not None:
1818 dev
['metadata'] = json
.loads(content
[0]['metadata'])
1820 dev
['metadata'] = {}
1822 if image_id
in server_host_files
:
1823 dev
['source file'] = server_host_files
[image_id
]['source file'] #local path
1824 dev
['file format'] = server_host_files
[image_id
]['file format'] # raw or qcow2
1827 #2: copy image to host
1829 remote_file
= content
[0]['path']
1831 remote_file
= empty_path
1832 use_incremental_image
= use_incremental
1833 if dev
['metadata'].get("use_incremental") == "no":
1834 use_incremental_image
= False
1835 local_file
, qemu_info
, use_incremental_image
= self
.copy_remote_file(remote_file
, use_incremental_image
)
1837 #create incremental image
1838 if use_incremental_image
:
1839 local_file_inc
= self
.get_notused_filename(local_file
, '.inc')
1840 command
= 'qemu-img create -f qcow2 {} -o backing_file={}'.format(local_file_inc
, local_file
)
1841 self
.run_command(command
)
1842 local_file
= local_file_inc
1843 qemu_info
= {'file format': 'qcow2'}
1845 server_host_files
[ dev
['image_id'] ] = {'source file': local_file
, 'file format': qemu_info
['file format']}
1847 dev
['source file'] = local_file
1848 dev
['file format'] = qemu_info
['file format']
1850 self
.localinfo
['server_files'][ server
['uuid'] ] = server_host_files
1851 self
.localinfo_dirty
= True
1854 result
, xml
= self
.create_xml_server(server_data
, devices
, server_metadata
) #local_file
1856 self
.logger
.error("create xml server error: " + xml
)
1858 self
.logger
.debug("create xml: " + xml
)
1859 atribute
= host_thread
.lvirt_module
.VIR_DOMAIN_START_PAUSED
if paused
== "yes" else 0
1861 if not rebuild
: #ensures that any pending destroying server is done
1862 self
.server_forceoff(True)
1863 #self.logger.debug("launching instance " + xml)
1864 conn
.createXML(xml
, atribute
)
1865 #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
1869 except paramiko
.ssh_exception
.SSHException
as e
:
1871 self
.logger
.error("launch_server id='%s' ssh Exception: %s", server_id
, text
)
1872 if "SSH session not active" in text
:
1874 except host_thread
.lvirt_module
.libvirtError
as e
:
1875 text
= e
.get_error_message()
1876 self
.logger
.error("launch_server id='%s' libvirt Exception: %s", server_id
, text
)
1877 except Exception as e
:
1879 self
.logger
.error("launch_server id='%s' Exception: %s", server_id
, text
)
1882 def update_servers_status(self
):
1884 # VIR_DOMAIN_NOSTATE = 0
1885 # VIR_DOMAIN_RUNNING = 1
1886 # VIR_DOMAIN_BLOCKED = 2
1887 # VIR_DOMAIN_PAUSED = 3
1888 # VIR_DOMAIN_SHUTDOWN = 4
1889 # VIR_DOMAIN_SHUTOFF = 5
1890 # VIR_DOMAIN_CRASHED = 6
1891 # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended
1893 if self
.test
or len(self
.server_status
)==0:
1897 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
1898 domains
= conn
.listAllDomains()
1900 for domain
in domains
:
1901 uuid
= domain
.UUIDString() ;
1902 libvirt_status
= domain
.state()
1903 #print libvirt_status
1904 if libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_RUNNING
or libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_SHUTDOWN
:
1905 new_status
= "ACTIVE"
1906 elif libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_PAUSED
:
1907 new_status
= "PAUSED"
1908 elif libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_SHUTOFF
:
1909 new_status
= "INACTIVE"
1910 elif libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_CRASHED
:
1911 new_status
= "ERROR"
1914 domain_dict
[uuid
] = new_status
1916 except host_thread
.lvirt_module
.libvirtError
as e
:
1917 self
.logger
.error("get_state() Exception " + e
.get_error_message())
1920 for server_id
, current_status
in self
.server_status
.iteritems():
1922 if server_id
in domain_dict
:
1923 new_status
= domain_dict
[server_id
]
1925 new_status
= "INACTIVE"
1927 if new_status
== None or new_status
== current_status
:
1929 if new_status
== 'INACTIVE' and current_status
== 'ERROR':
1930 continue #keep ERROR status, because obviously this machine is not running
1932 self
.logger
.debug("server id='%s' status change from '%s' to '%s'", server_id
, current_status
, new_status
)
1933 STATUS
={'progress':100, 'status':new_status
}
1934 if new_status
== 'ERROR':
1935 STATUS
['last_error'] = 'machine has crashed'
1936 self
.db_lock
.acquire()
1937 r
,_
= self
.db
.update_rows('instances', STATUS
, {'uuid':server_id
}, log
=False)
1938 self
.db_lock
.release()
1940 self
.server_status
[server_id
] = new_status
1942 def action_on_server(self
, req
, last_retry
=True):
1943 '''Perform an action on a req
1945 req: dictionary that contain:
1946 server properties: 'uuid','name','tenant_id','status'
1948 host properties: 'user', 'ip_name'
1949 return (error, text)
1950 0: No error. VM is updated to new state,
1951 -1: Invalid action, as trying to pause a PAUSED VM
1952 -2: Error accessing host
1954 -4: Error at DB access
1955 -5: Error while trying to perform action. VM is updated to ERROR
1957 server_id
= req
['uuid']
1960 old_status
= req
['status']
1964 if 'terminate' in req
['action']:
1965 new_status
= 'deleted'
1966 elif 'shutoff' in req
['action'] or 'shutdown' in req
['action'] or 'forceOff' in req
['action']:
1967 if req
['status']!='ERROR':
1969 new_status
= 'INACTIVE'
1970 elif 'start' in req
['action'] and req
['status']!='ERROR':
1971 new_status
= 'ACTIVE'
1972 elif 'resume' in req
['action'] and req
['status']!='ERROR' and req
['status']!='INACTIVE':
1973 new_status
= 'ACTIVE'
1974 elif 'pause' in req
['action'] and req
['status']!='ERROR':
1975 new_status
= 'PAUSED'
1976 elif 'reboot' in req
['action'] and req
['status']!='ERROR':
1977 new_status
= 'ACTIVE'
1978 elif 'rebuild' in req
['action']:
1979 time
.sleep(random
.randint(20,150))
1980 new_status
= 'ACTIVE'
1981 elif 'createImage' in req
['action']:
1983 self
.create_image(None, req
)
1986 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
1988 dom
= conn
.lookupByUUIDString(server_id
)
1989 except host_thread
.lvirt_module
.libvirtError
as e
:
1990 text
= e
.get_error_message()
1991 if 'LookupByUUIDString' in text
or 'Domain not found' in text
or 'No existe un dominio coincidente' in text
:
1994 self
.logger
.error("action_on_server id='%s' libvirt exception: %s", server_id
, text
)
1997 if 'forceOff' in req
['action']:
1999 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
2002 self
.logger
.debug("sending DESTROY to server id='%s'", server_id
)
2004 except Exception as e
:
2005 if "domain is not running" not in e
.get_error_message():
2006 self
.logger
.error("action_on_server id='%s' Exception while sending force off: %s",
2007 server_id
, e
.get_error_message())
2008 last_error
= 'action_on_server Exception while destroy: ' + e
.get_error_message()
2009 new_status
= 'ERROR'
2011 elif 'terminate' in req
['action']:
2013 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
2014 new_status
= 'deleted'
2017 if req
['action']['terminate'] == 'force':
2018 self
.logger
.debug("sending DESTROY to server id='%s'", server_id
)
2020 new_status
= 'deleted'
2022 self
.logger
.debug("sending SHUTDOWN to server id='%s'", server_id
)
2024 self
.pending_terminate_server
.append( (time
.time()+10,server_id
) )
2025 except Exception as e
:
2026 self
.logger
.error("action_on_server id='%s' Exception while destroy: %s",
2027 server_id
, e
.get_error_message())
2028 last_error
= 'action_on_server Exception while destroy: ' + e
.get_error_message()
2029 new_status
= 'ERROR'
2030 if "domain is not running" in e
.get_error_message():
2033 new_status
= 'deleted'
2035 self
.logger
.error("action_on_server id='%s' Exception while undefine: %s",
2036 server_id
, e
.get_error_message())
2037 last_error
= 'action_on_server Exception2 while undefine:', e
.get_error_message()
2038 #Exception: 'virDomainDetachDevice() failed'
2039 if new_status
=='deleted':
2040 if server_id
in self
.server_status
:
2041 del self
.server_status
[server_id
]
2042 if req
['uuid'] in self
.localinfo
['server_files']:
2043 for file_
in self
.localinfo
['server_files'][ req
['uuid'] ].values():
2045 self
.delete_file(file_
['source file'])
2048 del self
.localinfo
['server_files'][ req
['uuid'] ]
2049 self
.localinfo_dirty
= True
2051 elif 'shutoff' in req
['action'] or 'shutdown' in req
['action']:
2054 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
2057 # new_status = 'INACTIVE'
2058 #TODO: check status for changing at database
2059 except Exception as e
:
2060 new_status
= 'ERROR'
2061 self
.logger
.error("action_on_server id='%s' Exception while shutdown: %s",
2062 server_id
, e
.get_error_message())
2063 last_error
= 'action_on_server Exception while shutdown: ' + e
.get_error_message()
2065 elif 'rebuild' in req
['action']:
2068 r
= self
.launch_server(conn
, req
, True, None)
2070 new_status
= 'ERROR'
2073 new_status
= 'ACTIVE'
2074 elif 'start' in req
['action']:
2075 # The instance is only create in DB but not yet at libvirt domain, needs to be create
2076 rebuild
= True if req
['action']['start'] == 'rebuild' else False
2077 r
= self
.launch_server(conn
, req
, rebuild
, dom
)
2079 new_status
= 'ERROR'
2082 new_status
= 'ACTIVE'
2084 elif 'resume' in req
['action']:
2090 # new_status = 'ACTIVE'
2091 except Exception as e
:
2092 self
.logger
.error("action_on_server id='%s' Exception while resume: %s",
2093 server_id
, e
.get_error_message())
2095 elif 'pause' in req
['action']:
2101 # new_status = 'PAUSED'
2102 except Exception as e
:
2103 self
.logger
.error("action_on_server id='%s' Exception while pause: %s",
2104 server_id
, e
.get_error_message())
2106 elif 'reboot' in req
['action']:
2112 self
.logger
.debug("action_on_server id='%s' reboot:", server_id
)
2113 #new_status = 'ACTIVE'
2114 except Exception as e
:
2115 self
.logger
.error("action_on_server id='%s' Exception while reboot: %s",
2116 server_id
, e
.get_error_message())
2117 elif 'createImage' in req
['action']:
2118 self
.create_image(dom
, req
)
2122 except host_thread
.lvirt_module
.libvirtError
as e
:
2123 if conn
is not None: conn
.close()
2124 text
= e
.get_error_message()
2125 new_status
= "ERROR"
2127 if 'LookupByUUIDString' in text
or 'Domain not found' in text
or 'No existe un dominio coincidente' in text
:
2128 self
.logger
.debug("action_on_server id='%s' Exception removed from host", server_id
)
2130 self
.logger
.error("action_on_server id='%s' Exception %s", server_id
, text
)
2131 #end of if self.test
2132 if new_status
== None:
2135 self
.logger
.debug("action_on_server id='%s' new status=%s %s",server_id
, new_status
, last_error
)
2136 UPDATE
= {'progress':100, 'status':new_status
}
2138 if new_status
=='ERROR':
2139 if not last_retry
: #if there will be another retry do not update database
2141 elif 'terminate' in req
['action']:
2142 #PUT a log in the database
2143 self
.logger
.error("PANIC deleting server id='%s' %s", server_id
, last_error
)
2144 self
.db_lock
.acquire()
2145 self
.db
.new_row('logs',
2146 {'uuid':server_id
, 'tenant_id':req
['tenant_id'], 'related':'instances','level':'panic',
2147 'description':'PANIC deleting server from host '+self
.name
+': '+last_error
}
2149 self
.db_lock
.release()
2150 if server_id
in self
.server_status
:
2151 del self
.server_status
[server_id
]
2154 UPDATE
['last_error'] = last_error
2155 if new_status
!= 'deleted' and (new_status
!= old_status
or new_status
== 'ERROR') :
2156 self
.db_lock
.acquire()
2157 self
.db
.update_rows('instances', UPDATE
, {'uuid':server_id
}, log
=True)
2158 self
.server_status
[server_id
] = new_status
2159 self
.db_lock
.release()
2160 if new_status
== 'ERROR':
# NOTE(review): garbled extraction — statements are split across physical
# lines and several original lines are missing (e.g. 2171-2175, 2177-2179,
# 2181-2183, 2189-2190, 2195-2196, 2198, which presumably hold the test-mode
# shortcut, the try: header, the ifdown/ifup calls and the ret assignment —
# TODO confirm against the full file). No code tokens were changed.
#
# Purpose: reset a host interface (identified by MAC) to its default
# parameters by bringing it down and up again through libvirt.
2165 def restore_iface(self
, name
, mac
, lib_conn
=None):
2166 ''' make an ifdown, ifup to restore default parameter of na interface
2168 mac: mac address of the interface
2169 lib_conn: connection to the libvirt, if None a new connection is created
2170 Return 0,None if ok, -1,text if fails
2176 self
.logger
.debug("restore_iface '%s' %s", name
, mac
)
# No caller-supplied connection: open our own libvirt connection (closed at
# the end, see the lib_conn check below).
2180 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
2184 #wait to the pending VM deletion
2185 #TODO.Revise self.server_forceoff(True)
# Locate the interface by MAC; if it is up, it must be cycled (the actual
# destroy/create calls are among the missing extracted lines).
2187 iface
= conn
.interfaceLookupByMACString(mac
)
2188 if iface
.isActive():
2191 self
.logger
.debug("restore_iface '%s' %s", name
, mac
)
2192 except host_thread
.lvirt_module
.libvirtError
as e
:
2193 error_text
= e
.get_error_message()
2194 self
.logger
.error("restore_iface '%s' '%s' libvirt exception: %s", name
, mac
, error_text
)
# Close the connection only if we opened it ourselves (lib_conn is None).
2197 if lib_conn
is None and conn
is not None:
2199 return ret
, error_text
# NOTE(review): garbled extraction — statements are split across physical
# lines and several original lines are missing (e.g. 2203/2213-2214 branch
# headers, 2220, 2224, 2239, 2242). No code tokens were changed.
#
# Purpose: snapshot a server disk into a new image file (copy + backing-file
# rewrite) and record the result in the 'images' DB table.
# dom: libvirt domain (None in test mode); req: action request containing
# req['action']['createImage'] and req['new_image']['uuid'].
2202 def create_image(self
,dom
, req
):
# Destination path: explicit 'path' wins; otherwise derive
# '<source dir>/<name>.qcow2' from the source image path.
2204 if 'path' in req
['action']['createImage']:
2205 file_dst
= req
['action']['createImage']['path']
2207 createImage
=req
['action']['createImage']
2208 img_name
= createImage
['source']['path']
2209 index
=img_name
.rfind('/')
2210 file_dst
= self
.get_notused_path(img_name
[:index
+1] + createImage
['name'] + '.qcow2')
2211 image_status
='ACTIVE'
# Non-test path: resolve the actual source file of the referenced image from
# the locally cached per-server file map.
2215 server_id
= req
['uuid']
2216 createImage
=req
['action']['createImage']
2217 file_orig
= self
.localinfo
['server_files'][server_id
] [ createImage
['source']['image_id'] ] ['source file']
2218 if 'path' in req
['action']['createImage']:
2219 file_dst
= req
['action']['createImage']['path']
2221 img_name
= createImage
['source']['path']
2222 index
=img_name
.rfind('/')
2223 file_dst
= self
.get_notused_filename(img_name
[:index
+1] + createImage
['name'] + '.qcow2')
# Copy the disk, then rewrite its backing file reference: if the qcow2 points
# at a locally cached backing file, retarget it to the canonical path (the
# dict key) so the new image is portable.
2225 self
.copy_file(file_orig
, file_dst
)
2226 qemu_info
= self
.qemu_get_info(file_orig
)
2227 if 'backing file' in qemu_info
:
2228 for k
,v
in self
.localinfo
['files'].items():
2229 if v
==qemu_info
['backing file']:
2230 self
.qemu_change_backing(file_dst
, k
)
2232 image_status
='ACTIVE'
# SSH failures while copying: mark the image ERROR; a dead SSH session is
# retried once (retry loop header is among the missing extracted lines —
# NOTE(review): `retry` is not assigned in any visible line, confirm).
2234 except paramiko
.ssh_exception
.SSHException
as e
:
2235 image_status
='ERROR'
2236 error_text
= e
.args
[0]
2237 self
.logger
.error("create_image id='%s' ssh Exception: %s", server_id
, error_text
)
2238 if "SSH session not active" in error_text
and retry
==0:
2240 except Exception as e
:
2241 image_status
='ERROR'
# NOTE(review): the line assigning error_text from `e` (orig. 2242) is
# missing from this extraction — confirm it exists in the full file.
2243 self
.logger
.error("create_image id='%s' Exception: %s", server_id
, error_text
)
# Record the final status and path of the new image row, under the DB lock.
2245 #TODO insert a last_error at database
2246 self
.db_lock
.acquire()
2247 self
.db
.update_rows('images', {'status':image_status
, 'progress': 100, 'path':file_dst
},
2248 {'uuid':req
['new_image']['uuid']}, log
=True)
2249 self
.db_lock
.release()
# NOTE(review): garbled extraction — statements are split across physical
# lines and several original lines are missing (e.g. 2261/2263-2264 error
# returns, 2272-2273 xml init, 2278-2281 try/test header, 2284-2285 and
# 2288/2290/2293 around the detach/attach text building, 2300-2301 return).
# No code tokens were changed.
#
# Purpose: move a VF (SR-IOV) interface from old_net to new_net by detaching
# and re-attaching the hostdev device on the live domain with the proper
# vlan tag; parameters other than port_id are the network ids involved.
2251 def edit_iface(self
, port_id
, old_net
, new_net
):
2252 #This action imply remove and insert interface to put proper parameters
# Fetch the port row (joined with its host resource) under the DB lock.
2257 self
.db_lock
.acquire()
2258 r
,c
= self
.db
.get_table(FROM
='ports as p join resources_port as rp on p.uuid=rp.port_id',
2259 WHERE
={'port_id': port_id
})
2260 self
.db_lock
.release()
# Error paths: DB failure, port missing, or wrong model (only VF supported).
2262 self
.logger
.error("edit_iface %s DDBB error: %s", port_id
, c
)
2265 self
.logger
.error("edit_iface %s port not found", port_id
)
2268 if port
["model"]!="VF":
2269 self
.logger
.error("edit_iface %s ERROR model must be VF", port_id
)
# Build the hostdev <interface> XML used for the detach request, addressing
# the device by MAC and PCI source.
2271 #create xml detach file
2274 xml
.append("<interface type='hostdev' managed='yes'>")
2275 xml
.append(" <mac address='" +port
['mac']+ "'/>")
2276 xml
.append(" <source>"+ self
.pci2xml(port
['pci'])+"\n </source>")
2277 xml
.append('</interface>')
# Live path: connect to libvirt, locate the domain owning the port, detach
# the current device, then re-attach it with the new vlan tag and vpci.
2282 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
2283 dom
= conn
.lookupByUUIDString(port
["instance_id"])
2286 self
.logger
.debug("edit_iface detaching SRIOV interface " + text
)
2287 dom
.detachDeviceFlags(text
, flags
=host_thread
.lvirt_module
.VIR_DOMAIN_AFFECT_LIVE
)
# Rebuild the tail of the XML for attach: replace the closing tag with the
# vlan element, then append the vpci address and close the interface again.
2289 xml
[-1] =" <vlan> <tag id='" + str(port
['vlan']) + "'/> </vlan>"
2291 xml
.append(self
.pci2xml(port
.get('vpci',None)) )
2292 xml
.append('</interface>')
2294 self
.logger
.debug("edit_iface attaching SRIOV interface " + text
)
2295 dom
.attachDeviceFlags(text
, flags
=host_thread
.lvirt_module
.VIR_DOMAIN_AFFECT_LIVE
)
2297 except host_thread
.lvirt_module
.libvirtError
as e
:
2298 text
= e
.get_error_message()
2299 self
.logger
.error("edit_iface %s libvirt exception: %s", port
["instance_id"], text
)
# Always close the libvirt connection if one was opened.
2302 if conn
is not None: conn
.close()
2305 def create_server(server
, db
, db_lock
, only_of_ports
):
2306 extended
= server
.get('extended', None)
2308 requirements
['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
2309 requirements
['ram'] = server
['flavor'].get('ram', 0)
2310 if requirements
['ram']== None:
2311 requirements
['ram'] = 0
2312 requirements
['vcpus'] = server
['flavor'].get('vcpus', 0)
2313 if requirements
['vcpus']== None:
2314 requirements
['vcpus'] = 0
2315 #If extended is not defined get requirements from flavor
2316 if extended
is None:
2317 #If extended is defined in flavor convert to dictionary and use it
2318 if 'extended' in server
['flavor'] and server
['flavor']['extended'] != None:
2319 json_acceptable_string
= server
['flavor']['extended'].replace("'", "\"")
2320 extended
= json
.loads(json_acceptable_string
)
2323 #print json.dumps(extended, indent=4)
2325 #For simplicity only one numa VM are supported in the initial implementation
2326 if extended
!= None:
2327 numas
= extended
.get('numas', [])
2329 return (-2, "Multi-NUMA VMs are not supported yet")
2331 # return (-1, "At least one numa must be specified")
2333 #a for loop is used in order to be ready to multi-NUMA VMs
2337 numa_req
['memory'] = numa
.get('memory', 0)
2339 numa_req
['proc_req_nb'] = numa
['cores'] #number of cores or threads to be reserved
2340 numa_req
['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
2341 numa_req
['proc_req_list'] = numa
.get('cores-id', None) #list of ids to be assigned to the cores or threads
2342 elif 'paired-threads' in numa
:
2343 numa_req
['proc_req_nb'] = numa
['paired-threads']
2344 numa_req
['proc_req_type'] = 'paired-threads'
2345 numa_req
['proc_req_list'] = numa
.get('paired-threads-id', None)
2346 elif 'threads' in numa
:
2347 numa_req
['proc_req_nb'] = numa
['threads']
2348 numa_req
['proc_req_type'] = 'threads'
2349 numa_req
['proc_req_list'] = numa
.get('threads-id', None)
2351 numa_req
['proc_req_nb'] = 0 # by default
2352 numa_req
['proc_req_type'] = 'threads'
2356 #Generate a list of sriov and another for physical interfaces
2357 interfaces
= numa
.get('interfaces', [])
2360 for iface
in interfaces
:
2361 iface
['bandwidth'] = int(iface
['bandwidth'])
2362 if iface
['dedicated'][:3]=='yes':
2363 port_list
.append(iface
)
2365 sriov_list
.append(iface
)
2367 #Save lists ordered from more restrictive to less bw requirements
2368 numa_req
['sriov_list'] = sorted(sriov_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2369 numa_req
['port_list'] = sorted(port_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2372 request
.append(numa_req
)
2374 # print "----------\n"+json.dumps(request[0], indent=4)
2375 # print '----------\n\n'
2377 #Search in db for an appropriate numa for each requested numa
2378 #at the moment multi-NUMA VMs are not supported
2380 requirements
['numa'].update(request
[0])
2381 if requirements
['numa']['memory']>0:
2382 requirements
['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2383 elif requirements
['ram']==0:
2384 return (-1, "Memory information not set neither at extended field not at ram")
2385 if requirements
['numa']['proc_req_nb']>0:
2386 requirements
['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2387 elif requirements
['vcpus']==0:
2388 return (-1, "Processor information not set neither at extended field not at vcpus")
2392 result
, content
= db
.get_numas(requirements
, server
.get('host_id', None), only_of_ports
)
2396 return (-1, content
)
2398 numa_id
= content
['numa_id']
2399 host_id
= content
['host_id']
2401 #obtain threads_id and calculate pinning
2404 if requirements
['numa']['proc_req_nb']>0:
2406 result
, content
= db
.get_table(FROM
='resources_core',
2407 SELECT
=('id','core_id','thread_id'),
2408 WHERE
={'numa_id':numa_id
,'instance_id': None, 'status':'ok'} )
2414 #convert rows to a dictionary indexed by core_id
2417 if not row
['core_id'] in cores_dict
:
2418 cores_dict
[row
['core_id']] = []
2419 cores_dict
[row
['core_id']].append([row
['thread_id'],row
['id']])
2421 #In case full cores are requested
2423 if requirements
['numa']['proc_req_type'] == 'cores':
2424 #Get/create the list of the vcpu_ids
2425 vcpu_id_list
= requirements
['numa']['proc_req_list']
2426 if vcpu_id_list
== None:
2427 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2429 for threads
in cores_dict
.itervalues():
2431 if len(threads
) != 2:
2434 #set pinning for the first thread
2435 cpu_pinning
.append( [ vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1] ] )
2437 #reserve so it is not used the second thread
2438 reserved_threads
.append(threads
[1][1])
2440 if len(vcpu_id_list
) == 0:
2443 #In case paired threads are requested
2444 elif requirements
['numa']['proc_req_type'] == 'paired-threads':
2446 #Get/create the list of the vcpu_ids
2447 if requirements
['numa']['proc_req_list'] != None:
2449 for pair
in requirements
['numa']['proc_req_list']:
2451 return -1, "Field paired-threads-id not properly specified"
2453 vcpu_id_list
.append(pair
[0])
2454 vcpu_id_list
.append(pair
[1])
2456 vcpu_id_list
= range(0,2*int(requirements
['numa']['proc_req_nb']))
2458 for threads
in cores_dict
.itervalues():
2460 if len(threads
) != 2:
2462 #set pinning for the first thread
2463 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2465 #set pinning for the second thread
2466 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2468 if len(vcpu_id_list
) == 0:
2471 #In case normal threads are requested
2472 elif requirements
['numa']['proc_req_type'] == 'threads':
2473 #Get/create the list of the vcpu_ids
2474 vcpu_id_list
= requirements
['numa']['proc_req_list']
2475 if vcpu_id_list
== None:
2476 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2478 for threads_index
in sorted(cores_dict
, key
=lambda k
: len(cores_dict
[k
])):
2479 threads
= cores_dict
[threads_index
]
2480 #set pinning for the first thread
2481 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2483 #if exists, set pinning for the second thread
2484 if len(threads
) == 2 and len(vcpu_id_list
) != 0:
2485 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2487 if len(vcpu_id_list
) == 0:
2490 #Get the source pci addresses for the selected numa
2491 used_sriov_ports
= []
2492 for port
in requirements
['numa']['sriov_list']:
2494 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2500 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2502 port
['pci'] = row
['pci']
2503 if 'mac_address' not in port
:
2504 port
['mac_address'] = row
['mac']
2506 port
['port_id']=row
['id']
2507 port
['Mbps_used'] = port
['bandwidth']
2508 used_sriov_ports
.append(row
['id'])
2511 for port
in requirements
['numa']['port_list']:
2512 port
['Mbps_used'] = None
2513 if port
['dedicated'] != "yes:sriov":
2514 port
['mac_address'] = port
['mac']
2518 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac', 'Mbps'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2523 port
['Mbps_used'] = content
[0]['Mbps']
2525 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2527 port
['pci'] = row
['pci']
2528 if 'mac_address' not in port
:
2529 port
['mac_address'] = row
['mac'] # mac cannot be set to passthrough ports
2531 port
['port_id']=row
['id']
2532 used_sriov_ports
.append(row
['id'])
2535 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2536 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2538 server
['host_id'] = host_id
2540 #Generate dictionary for saving in db the instance resources
2542 resources
['bridged-ifaces'] = []
2545 numa_dict
['interfaces'] = []
2547 numa_dict
['interfaces'] += requirements
['numa']['port_list']
2548 numa_dict
['interfaces'] += requirements
['numa']['sriov_list']
2550 #Check bridge information
2551 unified_dataplane_iface
=[]
2552 unified_dataplane_iface
+= requirements
['numa']['port_list']
2553 unified_dataplane_iface
+= requirements
['numa']['sriov_list']
2555 for control_iface
in server
.get('networks', []):
2556 control_iface
['net_id']=control_iface
.pop('uuid')
2557 #Get the brifge name
2559 result
, content
= db
.get_table(FROM
='nets',
2560 SELECT
=('name', 'type', 'vlan', 'provider', 'enable_dhcp','dhcp_first_ip',
2561 'dhcp_last_ip', 'cidr', 'gateway_ip', 'dns', 'links', 'routes'),
2562 WHERE
={'uuid': control_iface
['net_id']})
2567 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface
['net_id']
2570 if control_iface
.get("type", 'virtual') == 'virtual':
2571 if network
['type']!='bridge_data' and network
['type']!='bridge_man':
2572 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface
['net_id']
2573 resources
['bridged-ifaces'].append(control_iface
)
2574 if network
.get("provider") and network
["provider"][0:3] == "OVS":
2575 control_iface
["type"] = "instance:ovs"
2577 control_iface
["type"] = "instance:bridge"
2578 if network
.get("vlan"):
2579 control_iface
["vlan"] = network
["vlan"]
2581 if network
.get("enable_dhcp") == 'true':
2582 control_iface
["enable_dhcp"] = network
.get("enable_dhcp")
2583 control_iface
["dhcp_first_ip"] = network
["dhcp_first_ip"]
2584 control_iface
["dhcp_last_ip"] = network
["dhcp_last_ip"]
2585 control_iface
["cidr"] = network
["cidr"]
2587 if network
.get("dns"):
2588 control_iface
["dns"] = yaml
.safe_load(network
.get("dns"))
2589 if network
.get("links"):
2590 control_iface
["links"] = yaml
.safe_load(network
.get("links"))
2591 if network
.get("routes"):
2592 control_iface
["routes"] = yaml
.safe_load(network
.get("routes"))
2594 if network
['type']!='data' and network
['type']!='ptp':
2595 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface
['net_id']
2596 #dataplane interface, look for it in the numa tree and asign this network
2598 for dataplane_iface
in numa_dict
['interfaces']:
2599 if dataplane_iface
['name'] == control_iface
.get("name"):
2600 if (dataplane_iface
['dedicated'] == "yes" and control_iface
["type"] != "PF") or \
2601 (dataplane_iface
['dedicated'] == "no" and control_iface
["type"] != "VF") or \
2602 (dataplane_iface
['dedicated'] == "yes:sriov" and control_iface
["type"] != "VFnotShared") :
2603 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2604 (control_iface
.get("name"), dataplane_iface
['dedicated'], control_iface
["type"])
2605 dataplane_iface
['uuid'] = control_iface
['net_id']
2606 if dataplane_iface
['dedicated'] == "no":
2607 dataplane_iface
['vlan'] = network
['vlan']
2608 if dataplane_iface
['dedicated'] != "yes" and control_iface
.get("mac_address"):
2609 dataplane_iface
['mac_address'] = control_iface
.get("mac_address")
2610 if control_iface
.get("vpci"):
2611 dataplane_iface
['vpci'] = control_iface
.get("vpci")
2615 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface
.get("name")
2617 resources
['host_id'] = host_id
2618 resources
['image_id'] = server
['image_id']
2619 resources
['flavor_id'] = server
['flavor_id']
2620 resources
['tenant_id'] = server
['tenant_id']
2621 resources
['ram'] = requirements
['ram']
2622 resources
['vcpus'] = requirements
['vcpus']
2623 resources
['status'] = 'CREATING'
2625 if 'description' in server
: resources
['description'] = server
['description']
2626 if 'name' in server
: resources
['name'] = server
['name']
2628 resources
['extended'] = {} #optional
2629 resources
['extended']['numas'] = []
2630 numa_dict
['numa_id'] = numa_id
2631 numa_dict
['memory'] = requirements
['numa']['memory']
2632 numa_dict
['cores'] = []
2634 for core
in cpu_pinning
:
2635 numa_dict
['cores'].append({'id': core
[2], 'vthread': core
[0], 'paired': paired
})
2636 for core
in reserved_threads
:
2637 numa_dict
['cores'].append({'id': core
})
2638 resources
['extended']['numas'].append(numa_dict
)
2639 if extended
!=None and 'devices' in extended
: #TODO allow extra devices without numa
2640 resources
['extended']['devices'] = extended
['devices']
2643 # '===================================={'
2644 #print json.dumps(resources, indent=4)
2645 #print '====================================}'