1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openvim
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact with: nfvlabs@tid.es
25 This is the thread that interacts with the host and libvirt to manage VMs
26 One thread is launched per host
28 __author__
= "Pablo Montes, Alfonso Tierno, Leonardo Mirabal"
29 __date__
= "$10-jul-2014 12:07:15$"
43 from jsonschema
import validate
as js_v
, exceptions
as js_e
44 from vim_schema
import localinfo_schema
, hostinfo_schema
class RunCommandException(Exception):
    """Raised when a shell command run locally or over SSH fails."""
    pass
49 class host_thread(threading
.Thread
):
def __init__(self, name, host, user, db, test, image_path, host_id, version, develop_mode,
             develop_bridge_iface, password=None, keyfile=None, logger_name=None, debug=None, hypervisors=None):
    """Init a thread to communicate with compute node or ovs_controller.
    :param host_id: host identity
    :param name: name of the thread
    :param host: host ip or name to manage and user
    :param user, password, keyfile: user and credentials to connect to host
    :param db: database class, threading safe
    """
    # NOTE(review): source was whitespace-mangled with lines elided; reconstructed — verify against upstream openvim
    threading.Thread.__init__(self)
    self.name = name
    self.host = host
    self.user = user
    self.db = db
    self.test = test

    self.password = password
    self.keyfile = keyfile
    self.localinfo_dirty = False   # True when localinfo must be written back to the host
    self.connectivity = True

    # lazy-load libvirt only when managing a real host (skipped in test mode)
    if not test and not host_thread.lvirt_module:
        try:
            module_info = imp.find_module("libvirt")
            host_thread.lvirt_module = imp.load_module("libvirt", *module_info)
        except (IOError, ImportError) as e:
            raise ImportError("Cannot import python-libvirt. Openvim not properly installed" + str(e))

    if logger_name:
        self.logger_name = logger_name
    else:
        self.logger_name = "openvim.host." + name
    self.logger = logging.getLogger(self.logger_name)
    if debug:
        self.logger.setLevel(getattr(logging, debug))

    self.develop_mode = develop_mode
    self.develop_bridge_iface = develop_bridge_iface
    self.image_path = image_path
    self.empty_image_path = image_path
    self.host_id = host_id
    self.version = version

    self.xml_level = 0                      # indentation level used by tab()/inc_tab()/dec_tab()
    self.hostinfo = None                    # filled by load_hostinfo()

    self.server_status = {}                 # dictionary with pairs server_uuid:server_status
    self.pending_terminate_server = []      # list with pairs (time, server_uuid); time to send a terminate for a server being destroyed
    self.next_update_server_status = 0      # time when servers status must be checked

    ####### self.hypervisor = "kvm" #hypervisor flag (default: kvm)
    self.hypervisors = hypervisors
    if not self.hypervisors:
        self.hypervisors = "kvm"
    # presumably hypervisors is a comma-separated str or a list — TODO confirm against caller
    self.xen_hyp = True if "xen" in self.hypervisors else False

    self.queueLock = threading.Lock()
    self.taskQueue = Queue.Queue(2000)

    self.ssh_conn = None
    self.run_command_session = None
    self.localhost = True if host == 'localhost' else False

    if self.xen_hyp:
        self.lvirt_conn_uri = "xen+ssh://{user}@{host}/?no_tty=1&no_verify=1".format(
            user=self.user, host=self.host)
    else:
        self.lvirt_conn_uri = "qemu+ssh://{user}@{host}/system?no_tty=1&no_verify=1".format(
            user=self.user, host=self.host)
    if keyfile:
        self.lvirt_conn_uri += "&keyfile=" + keyfile

    self.remote_ip = None
    self.local_ip = None
def run_command(self, command, keep_session=False, ignore_exit_status=False):
    """Run a command passed as a str on a localhost or at remote machine.
    :param command: text with the command to execute.
    :param keep_session: if True it returns a <stdin> for sending input with '<stdin>.write("text\n")'.
        A command with keep_session=True MUST be followed by a command with keep_session=False in order to
        close the session and get the output
    :param ignore_exit_status: Return stdout and not raise an exception in case of error.
    :return: the output of the command if 'keep_session=False' or the <stdin> object if 'keep_session=True'
    :raises: RunCommandException if command fails
    """
    # NOTE(review): source was whitespace-mangled with lines elided; branch scaffolding reconstructed
    if self.run_command_session and keep_session:
        raise RunCommandException("Internal error. A command with keep_session=True must be followed by another "
                                  "command with keep_session=False to close session")
    try:
        if self.localhost:
            if self.run_command_session:
                # close the pending session and collect its output
                p = self.run_command_session
                self.run_command_session = None
                (output, outerror) = p.communicate()
                returncode = p.returncode
            elif keep_session:
                p = subprocess.Popen(('bash', "-c", command), stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                self.run_command_session = p
                return p.stdin
            else:
                if not ignore_exit_status:
                    output = subprocess.check_output(('bash', "-c", command))
                    returncode = 0
                else:
                    p = subprocess.Popen(('bash', "-c", command), stdout=subprocess.PIPE)
                    out, err = p.communicate()
                    return out
        else:
            if self.run_command_session:
                # close the pending remote session: shut stdin so the remote command terminates
                (i, o, e) = self.run_command_session
                self.run_command_session = None
                i.channel.shutdown_write()
            else:
                if not self.ssh_conn:
                    self.ssh_connect()
                (i, o, e) = self.ssh_conn.exec_command(command, timeout=10)
                if keep_session:
                    self.run_command_session = (i, o, e)
                    return i
            returncode = o.channel.recv_exit_status()
            output = o.read()
            outerror = e.read()
        if returncode != 0 and not ignore_exit_status:
            text = "run_command='{}' Error='{}'".format(command, outerror)
            self.logger.error(text)
            raise RunCommandException(text)

        self.logger.debug("run_command='{}' result='{}'".format(command, output))
        return output

    except RunCommandException:
        raise
    except subprocess.CalledProcessError as e:
        text = "run_command Exception '{}' '{}'".format(str(e), e.output)
    except (paramiko.ssh_exception.SSHException, Exception) as e:
        text = "run_command='{}' Exception='{}'".format(command, str(e))
    self.run_command_session = None
    raise RunCommandException(text)
def ssh_connect(self):
    """Open a paramiko SSH connection to self.host and record local/remote IPs.
    :raises: paramiko.ssh_exception.SSHException on any connection failure
    """
    # NOTE(review): source was whitespace-mangled with lines elided; error path reconstructed
    try:
        self.ssh_conn = paramiko.SSHClient()
        self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh_conn.load_system_host_keys()
        self.ssh_conn.connect(self.host, username=self.user, password=self.password, key_filename=self.keyfile,
                              timeout=10)  # auth_timeout=10)
        # remember both endpoints of the SSH socket (used e.g. for vxlan tunnels)
        self.remote_ip = self.ssh_conn.get_transport().sock.getpeername()[0]
        self.local_ip = self.ssh_conn.get_transport().sock.getsockname()[0]
    except (paramiko.ssh_exception.SSHException, Exception) as e:
        text = 'ssh connect Exception: {}'.format(e)
        self.ssh_conn = None
        self.logger.error(text)
        raise paramiko.ssh_exception.SSHException(text)
def check_connectivity(self):
    """Probe the host with a harmless command; set self.connectivity=False on failure."""
    if not self.test:
        try:
            command = 'sudo brctl show'
            self.run_command(command)
        except RunCommandException as e:
            self.connectivity = False
            self.logger.error("check_connectivity Exception: " + str(e))
def load_localinfo(self):
    """Load and validate {image_path}/.openvim.yaml from the host into self.localinfo.

    On any failure falls back to an empty default structure.
    """
    # NOTE(review): source was whitespace-mangled with lines elided; control flow reconstructed
    if not self.test:
        try:
            self.run_command('sudo mkdir -p ' + self.image_path)
            result = self.run_command('cat {}/.openvim.yaml'.format(self.image_path))
            # NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted data;
            # the file is written by save_localinfo with safe_dump, so SafeLoader would suffice
            self.localinfo = yaml.load(result)
            js_v(self.localinfo, localinfo_schema)
            self.localinfo_dirty = False
            if 'server_files' not in self.localinfo:
                self.localinfo['server_files'] = {}
            self.logger.debug("localinfo loaded from host")
            return
        except RunCommandException as e:
            self.logger.error("load_localinfo Exception: " + str(e))
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            self.logger.error("load_localinfo libvirt Exception: " + text)
        except yaml.YAMLError as exc:
            text = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                text = " at position: (%s:%s)" % (mark.line + 1, mark.column + 1)
            self.logger.error("load_localinfo yaml format Exception " + text)
        except js_e.ValidationError as e:
            text = ""
            if len(e.path) > 0:
                text = " at '" + ":".join(map(str, e.path)) + "'"
            self.logger.error("load_localinfo format Exception: %s %s", text, str(e))
        except Exception as e:
            text = str(e)
            self.logger.error("load_localinfo Exception: " + text)

    # not loaded, insert a default data and force saving by activating dirty flag
    self.localinfo = {'files': {}, 'server_files': {}}
    # self.localinfo_dirty=True
    self.localinfo_dirty = False
def load_hostinfo(self):
    """Load and validate {image_path}/hostinfo.yaml from the host into self.hostinfo.

    On any failure self.hostinfo is left as None (default).
    """
    # NOTE(review): source was whitespace-mangled with lines elided; control flow reconstructed
    if self.test:
        return
    try:
        result = self.run_command('cat {}/hostinfo.yaml'.format(self.image_path))
        self.hostinfo = yaml.load(result)
        js_v(self.hostinfo, hostinfo_schema)
        self.logger.debug("hostinfo load from host " + str(self.hostinfo))
        return
    except RunCommandException as e:
        self.logger.error("load_hostinfo ssh Exception: " + str(e))
    except host_thread.lvirt_module.libvirtError as e:
        text = e.get_error_message()
        self.logger.error("load_hostinfo libvirt Exception: " + text)
    except yaml.YAMLError as exc:
        text = ""
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            text = " at position: (%s:%s)" % (mark.line + 1, mark.column + 1)
        self.logger.error("load_hostinfo yaml format Exception " + text)
    except js_e.ValidationError as e:
        text = ""
        if len(e.path) > 0:
            text = " at '" + ":".join(map(str, e.path)) + "'"
        self.logger.error("load_hostinfo format Exception: %s %s", text, str(e))
    except Exception as e:
        text = str(e)
        self.logger.error("load_hostinfo Exception: " + text)

    # not loaded, insert a default data
    self.hostinfo = None
def save_localinfo(self, tries=3):
    """Write self.localinfo as YAML to {image_path}/.openvim.yaml on the host.

    :param tries: number of attempts before giving up
    """
    # NOTE(review): source was whitespace-mangled with lines elided; retry loop reconstructed from 'tries'
    if self.test:
        self.localinfo_dirty = False
        return

    while tries >= 0:
        tries -= 1
        try:
            # stream the yaml through a remote 'cat >' kept-open session
            command = 'cat > {}/.openvim.yaml'.format(self.image_path)
            in_stream = self.run_command(command, keep_session=True)
            yaml.safe_dump(self.localinfo, in_stream, explicit_start=True, indent=4, default_flow_style=False,
                           tags=False, encoding='utf-8', allow_unicode=True)
            result = self.run_command(command, keep_session=False)  # to end session

            self.localinfo_dirty = False
            break  # while tries
        except RunCommandException as e:
            self.logger.error("save_localinfo ssh Exception: " + str(e))
        except host_thread.lvirt_module.libvirtError as e:
            text = e.get_error_message()
            self.logger.error("save_localinfo libvirt Exception: " + text)
        except yaml.YAMLError as exc:
            text = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                text = " at position: (%s:%s)" % (mark.line + 1, mark.column + 1)
            self.logger.error("save_localinfo yaml format Exception " + text)
        except Exception as e:
            text = str(e)
            self.logger.error("save_localinfo Exception: " + text)
def load_servers_from_db(self):
    """Refresh self.server_status from the instances table and migrate the
    legacy 'inc_files' localinfo layout to 'server_files'."""
    # NOTE(review): source was whitespace-mangled with lines elided; reconstructed
    r, c = self.db.get_table(SELECT=('uuid', 'status', 'image_id'), FROM='instances',
                             WHERE={'host_id': self.host_id})

    self.server_status = {}
    if r < 0:
        self.logger.error("Error getting data from database: " + c)
        return
    for server in c:
        self.server_status[server['uuid']] = server['status']

        # convert from old version to new one
        if 'inc_files' in self.localinfo and server['uuid'] in self.localinfo['inc_files']:
            server_files_dict = {'source file': self.localinfo['inc_files'][server['uuid']][0],
                                 'file format': 'raw'}
            if server_files_dict['source file'][-5:] == 'qcow2':
                server_files_dict['file format'] = 'qcow2'
            self.localinfo['server_files'][server['uuid']] = {server['image_id']: server_files_dict}
    if 'inc_files' in self.localinfo:
        del self.localinfo['inc_files']
        self.localinfo_dirty = True
def delete_unused_files(self):
    '''Compares self.localinfo['server_files'] content with real servers running self.server_status obtained from database
    Deletes unused entries at self.loacalinfo and the corresponding local files.
    The only reason for this mismatch is the manual deletion of instances (VM) at database
    '''
    if self.test:
        return
    # iterate over a snapshot: deleting keys while iterating the live dict is unsafe
    for uuid, images in list(self.localinfo['server_files'].items()):
        if uuid not in self.server_status:
            for localfile in images.values():
                try:
                    self.logger.debug("deleting file '%s' of unused server '%s'", localfile['source file'], uuid)
                    self.delete_file(localfile['source file'])
                except RunCommandException as e:
                    self.logger.error("Exception deleting file '%s': %s", localfile['source file'], str(e))
            del self.localinfo['server_files'][uuid]
            self.localinfo_dirty = True
def insert_task(self, task, *aditional):
    """Queue a task tuple for the thread loop.
    :return: (1, None) on success; (-1, error text) when the queue stays full for 5s
    """
    try:
        self.queueLock.acquire()
        task = self.taskQueue.put((task,) + aditional, timeout=5)
        self.queueLock.release()
        return 1, None
    except Queue.Full:
        return -1, "timeout inserting a task over host " + self.name
373 self
.load_localinfo()
375 self
.load_servers_from_db()
376 self
.delete_unused_files()
379 self
.queueLock
.acquire()
380 if not self
.taskQueue
.empty():
381 task
= self
.taskQueue
.get()
384 self
.queueLock
.release()
388 if self
.localinfo_dirty
:
389 self
.save_localinfo()
390 elif self
.next_update_server_status
< now
:
391 self
.update_servers_status()
392 self
.next_update_server_status
= now
+ 5
393 elif len(self
.pending_terminate_server
)>0 and self
.pending_terminate_server
[0][0]<now
:
394 self
.server_forceoff()
399 if task
[0] == 'instance':
400 self
.logger
.debug("processing task instance " + str(task
[1]['action']))
404 r
= self
.action_on_server(task
[1], retry
==2)
407 elif task
[0] == 'image':
409 elif task
[0] == 'exit':
410 self
.logger
.debug("processing task exit")
413 elif task
[0] == 'reload':
414 self
.logger
.debug("processing task reload terminating and relaunching")
417 elif task
[0] == 'edit-iface':
418 self
.logger
.debug("processing task edit-iface port={}, old_net={}, new_net={}".format(
419 task
[1], task
[2], task
[3]))
420 self
.edit_iface(task
[1], task
[2], task
[3])
421 elif task
[0] == 'restore-iface':
422 self
.logger
.debug("processing task restore-iface={} mac={}".format(task
[1], task
[2]))
423 self
.restore_iface(task
[1], task
[2])
424 elif task
[0] == 'new-ovsbridge':
425 self
.logger
.debug("Creating compute OVS bridge")
426 self
.create_ovs_bridge()
427 elif task
[0] == 'new-vxlan':
428 self
.logger
.debug("Creating vxlan tunnel='{}', remote ip='{}'".format(task
[1], task
[2]))
429 self
.create_ovs_vxlan_tunnel(task
[1], task
[2])
430 elif task
[0] == 'del-ovsbridge':
431 self
.logger
.debug("Deleting OVS bridge")
432 self
.delete_ovs_bridge()
433 elif task
[0] == 'del-vxlan':
434 self
.logger
.debug("Deleting vxlan {} tunnel".format(task
[1]))
435 self
.delete_ovs_vxlan_tunnel(task
[1])
436 elif task
[0] == 'create-ovs-bridge-port':
437 self
.logger
.debug("Adding port ovim-{} to OVS bridge".format(task
[1]))
438 self
.create_ovs_bridge_port(task
[1])
439 elif task
[0] == 'del-ovs-port':
440 self
.logger
.debug("Delete bridge attached to ovs port vlan {} net {}".format(task
[1], task
[2]))
441 self
.delete_bridge_port_attached_to_ovs(task
[1], task
[2])
443 self
.logger
.debug("unknown task " + str(task
))
445 except Exception as e
:
446 self
.logger
.critical("Unexpected exception at run: " + str(e
), exc_info
=True)
def server_forceoff(self, wait_until_finished=False):
    """Send a forced terminate to every server queued in self.pending_terminate_server
    whose scheduled time has arrived.
    :param wait_until_finished: if True, sleep until every pending entry is due instead of returning
    """
    # NOTE(review): source was whitespace-mangled with lines elided; wait/skip branch reconstructed
    while len(self.pending_terminate_server) > 0:
        now = time.time()
        if self.pending_terminate_server[0][0] > now:
            # first entry not due yet
            if wait_until_finished:
                time.sleep(1)
                continue
            return
        req = {'uuid': self.pending_terminate_server[0][1],
               'action': {'terminate': 'force'},
               'status': None}
        self.action_on_server(req)
        self.pending_terminate_server.pop(0)
466 self
.server_forceoff(True)
467 if self
.localinfo_dirty
:
468 self
.save_localinfo()
470 self
.ssh_conn
.close()
471 except Exception as e
:
473 self
.logger
.error("terminate Exception: " + text
)
474 self
.logger
.debug("exit from host_thread")
def get_local_iface_name(self, generic_name):
    """Map a generic interface name to this host's local name via hostinfo; fall back to the generic name."""
    if self.hostinfo is not None and "iface_names" in self.hostinfo and generic_name in self.hostinfo["iface_names"]:
        return self.hostinfo["iface_names"][generic_name]
    # NOTE(review): fallthrough line elided in mangled source; returning the generic name unchanged
    return generic_name
481 def create_xml_server(self
, server
, dev_list
, server_metadata
={}):
482 """Function that implements the generation of the VM XML definition.
483 Additional devices are in dev_list list
484 The main disk is upon dev_list[0]"""
486 #get if operating system is Windows
488 os_type
= server_metadata
.get('os_type', None)
489 if os_type
== None and 'metadata' in dev_list
[0]:
490 os_type
= dev_list
[0]['metadata'].get('os_type', None)
491 if os_type
!= None and os_type
.lower() == "windows":
493 #get type of hard disk bus
494 bus_ide
= True if windows_os
else False
495 bus
= server_metadata
.get('bus', None)
496 if bus
== None and 'metadata' in dev_list
[0]:
497 bus
= dev_list
[0]['metadata'].get('bus', None)
499 bus_ide
= True if bus
=='ide' else False
502 hypervisor
= server
.get('hypervisor', 'kvm')
503 os_type_img
= server
.get('os_image_type', 'other')
505 if hypervisor
[:3] == 'xen':
506 text
= "<domain type='xen'>"
508 text
= "<domain type='kvm'>"
510 topo
= server_metadata
.get('topology', None)
511 if topo
== None and 'metadata' in dev_list
[0]:
512 topo
= dev_list
[0]['metadata'].get('topology', None)
514 name
= server
.get('name', '')[:28] + "_" + server
['uuid'][:28] #qemu impose a length limit of 59 chars or not start. Using 58
515 text
+= self
.inc_tab() + "<name>" + name
+ "</name>"
517 text
+= self
.tab() + "<uuid>" + server
['uuid'] + "</uuid>"
520 if 'extended' in server
and server
['extended']!=None and 'numas' in server
['extended']:
521 numa
= server
['extended']['numas'][0]
524 memory
= int(numa
.get('memory',0))*1024*1024 #in KiB
526 memory
= int(server
['ram'])*1024;
528 if not self
.develop_mode
:
531 return -1, 'No memory assigned to instance'
533 text
+= self
.tab() + "<memory unit='KiB'>" +memory
+"</memory>"
534 text
+= self
.tab() + "<currentMemory unit='KiB'>" +memory
+ "</currentMemory>"
536 text
+= self
.tab()+'<memoryBacking>'+ \
537 self
.inc_tab() + '<hugepages/>'+ \
538 self
.dec_tab()+ '</memoryBacking>'
541 use_cpu_pinning
=False
542 vcpus
= int(server
.get("vcpus",0))
544 if 'cores-source' in numa
:
546 for index
in range(0, len(numa
['cores-source'])):
547 cpu_pinning
.append( [ numa
['cores-id'][index
], numa
['cores-source'][index
] ] )
549 if 'threads-source' in numa
:
551 for index
in range(0, len(numa
['threads-source'])):
552 cpu_pinning
.append( [ numa
['threads-id'][index
], numa
['threads-source'][index
] ] )
554 if 'paired-threads-source' in numa
:
556 for index
in range(0, len(numa
['paired-threads-source'])):
557 cpu_pinning
.append( [numa
['paired-threads-id'][index
][0], numa
['paired-threads-source'][index
][0] ] )
558 cpu_pinning
.append( [numa
['paired-threads-id'][index
][1], numa
['paired-threads-source'][index
][1] ] )
561 if use_cpu_pinning
and not self
.develop_mode
:
562 text
+= self
.tab()+"<vcpu placement='static'>" +str(len(cpu_pinning
)) +"</vcpu>" + \
563 self
.tab()+'<cputune>'
565 for i
in range(0, len(cpu_pinning
)):
566 text
+= self
.tab() + "<vcpupin vcpu='" +str(cpu_pinning
[i
][0])+ "' cpuset='" +str(cpu_pinning
[i
][1]) +"'/>"
567 text
+= self
.dec_tab()+'</cputune>'+ \
568 self
.tab() + '<numatune>' +\
569 self
.inc_tab() + "<memory mode='strict' nodeset='" +str(numa
['source'])+ "'/>" +\
570 self
.dec_tab() + '</numatune>'
573 return -1, "Instance without number of cpus"
574 text
+= self
.tab()+"<vcpu>" + str(vcpus
) + "</vcpu>"
579 if dev
['type']=='cdrom' :
582 if hypervisor
== 'xenhvm':
583 text
+= self
.tab()+ '<os>' + \
584 self
.inc_tab() + "<type arch='x86_64' machine='xenfv'>hvm</type>"
585 text
+= self
.tab() + "<loader type='rom'>/usr/lib/xen/boot/hvmloader</loader>"
587 text
+= self
.tab() + "<boot dev='cdrom'/>"
588 text
+= self
.tab() + "<boot dev='hd'/>" + \
589 self
.dec_tab()+'</os>'
590 elif hypervisor
== 'xen-unik':
591 text
+= self
.tab()+ '<os>' + \
592 self
.inc_tab() + "<type arch='x86_64' machine='xenpv'>xen</type>"
593 text
+= self
.tab() + "<kernel>" + str(dev_list
[0]['source file']) + "</kernel>" + \
594 self
.dec_tab()+'</os>'
596 text
+= self
.tab()+ '<os>' + \
597 self
.inc_tab() + "<type arch='x86_64' machine='pc'>hvm</type>"
599 text
+= self
.tab() + "<boot dev='cdrom'/>"
600 text
+= self
.tab() + "<boot dev='hd'/>" + \
601 self
.dec_tab()+'</os>'
603 text
+= self
.tab()+'<features>'+\
604 self
.inc_tab()+'<acpi/>' +\
605 self
.tab()+'<apic/>' +\
606 self
.tab()+'<pae/>'+ \
607 self
.dec_tab() +'</features>'
608 if topo
== "oneSocket:hyperthreading":
610 return -1, 'Cannot expose hyperthreading with an odd number of vcpus'
611 text
+= self
.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>" % (vcpus
/2)
612 elif windows_os
or topo
== "oneSocket":
613 text
+= self
.tab() + "<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>" % vcpus
615 text
+= self
.tab() + "<cpu mode='host-model'></cpu>"
616 text
+= self
.tab() + "<clock offset='utc'/>" +\
617 self
.tab() + "<on_poweroff>preserve</on_poweroff>" + \
618 self
.tab() + "<on_reboot>restart</on_reboot>" + \
619 self
.tab() + "<on_crash>restart</on_crash>"
620 if hypervisor
== 'xenhvm':
621 text
+= self
.tab() + "<devices>" + \
622 self
.inc_tab() + "<emulator>/usr/bin/qemu-system-i386</emulator>" + \
623 self
.tab() + "<serial type='pty'>" +\
624 self
.inc_tab() + "<target port='0'/>" + \
625 self
.dec_tab() + "</serial>" +\
626 self
.tab() + "<console type='pty'>" + \
627 self
.inc_tab()+ "<target type='serial' port='0'/>" + \
628 self
.dec_tab()+'</console>' #In some libvirt version may be: <emulator>/usr/lib64/xen/bin/qemu-dm</emulator> (depends on distro)
629 elif hypervisor
== 'xen-unik':
630 text
+= self
.tab() + "<devices>" + \
631 self
.tab() + "<console type='pty'>" + \
632 self
.inc_tab()+ "<target type='xen' port='0'/>" + \
633 self
.dec_tab()+'</console>'
635 text
+= self
.tab() + "<devices>" + \
636 self
.inc_tab() + "<emulator>/usr/libexec/qemu-kvm</emulator>" + \
637 self
.tab() + "<serial type='pty'>" +\
638 self
.inc_tab() + "<target port='0'/>" + \
639 self
.dec_tab() + "</serial>" +\
640 self
.tab() + "<console type='pty'>" + \
641 self
.inc_tab()+ "<target type='serial' port='0'/>" + \
642 self
.dec_tab()+'</console>'
644 text
+= self
.tab() + "<controller type='usb' index='0'/>" + \
645 self
.tab() + "<controller type='ide' index='0'/>" + \
646 self
.tab() + "<input type='mouse' bus='ps2'/>" + \
647 self
.tab() + "<sound model='ich6'/>" + \
648 self
.tab() + "<video>" + \
649 self
.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
650 self
.dec_tab() + "</video>" + \
651 self
.tab() + "<memballoon model='virtio'/>" + \
652 self
.tab() + "<input type='tablet' bus='usb'/>" #TODO revisar
653 elif hypervisor
== 'xen-unik':
656 text
+= self
.tab() + "<controller type='ide' index='0'/>" + \
657 self
.tab() + "<input type='mouse' bus='ps2'/>" + \
658 self
.tab() + "<input type='keyboard' bus='ps2'/>" + \
659 self
.tab() + "<video>" + \
660 self
.inc_tab() + "<model type='cirrus' vram='9216' heads='1'/>" + \
661 self
.dec_tab() + "</video>"
663 #> self.tab()+'<alias name=\'hostdev0\'/>\n' +\
664 #> self.dec_tab()+'</hostdev>\n' +\
665 #> self.tab()+'<input type=\'tablet\' bus=\'usb\'/>\n'
667 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes'/>"
669 #If image contains 'GRAPH' include graphics
670 #if 'GRAPH' in image:
671 text
+= self
.tab() + "<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>" +\
672 self
.inc_tab() + "<listen type='address' address='0.0.0.0'/>" +\
673 self
.dec_tab() + "</graphics>"
677 bus_ide_dev
= bus_ide
678 if (dev
['type']=='cdrom' or dev
['type']=='disk') and hypervisor
!= 'xen-unik':
679 if dev
['type']=='cdrom':
681 text
+= self
.tab() + "<disk type='file' device='"+dev
['type']+"'>"
682 if 'file format' in dev
:
683 text
+= self
.inc_tab() + "<driver name='qemu' type='" +dev
['file format']+ "' cache='writethrough'/>"
684 if 'source file' in dev
:
685 text
+= self
.tab() + "<source file='" +dev
['source file']+ "'/>"
686 #elif v['type'] == 'block':
687 # text += self.tab() + "<source dev='" + v['source'] + "'/>"
689 # return -1, 'Unknown disk type ' + v['type']
690 vpci
= dev
.get('vpci',None)
691 if vpci
== None and 'metadata' in dev
:
692 vpci
= dev
['metadata'].get('vpci',None)
693 text
+= self
.pci2xml(vpci
)
696 text
+= self
.tab() + "<target dev='hd" +vd_index
+ "' bus='ide'/>" #TODO allows several type of disks
698 text
+= self
.tab() + "<target dev='vd" +vd_index
+ "' bus='virtio'/>"
699 text
+= self
.dec_tab() + '</disk>'
700 vd_index
= chr(ord(vd_index
)+1)
701 elif dev
['type']=='xml':
702 dev_text
= dev
['xml']
704 dev_text
= dev_text
.replace('__vpci__', dev
['vpci'])
705 if 'source file' in dev
:
706 dev_text
= dev_text
.replace('__file__', dev
['source file'])
707 if 'file format' in dev
:
708 dev_text
= dev_text
.replace('__format__', dev
['source file'])
709 if '__dev__' in dev_text
:
710 dev_text
= dev_text
.replace('__dev__', vd_index
)
711 vd_index
= chr(ord(vd_index
)+1)
713 elif hypervisor
== 'xen-unik':
716 return -1, 'Unknown device type ' + dev
['type']
719 bridge_interfaces
= server
.get('networks', [])
720 for v
in bridge_interfaces
:
722 result
, content
= self
.db
.get_table(FROM
='nets', SELECT
=('provider',),WHERE
={'uuid':v
['net_id']} )
724 self
.logger
.error("create_xml_server ERROR %d getting nets %s", result
, content
)
726 #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM
727 #I know it is not secure
728 #for v in sorted(desc['network interfaces'].itervalues()):
729 model
= v
.get("model", None)
730 if content
[0]['provider']=='default':
731 text
+= self
.tab() + "<interface type='network'>" + \
732 self
.inc_tab() + "<source network='" +content
[0]['provider']+ "'/>"
733 elif content
[0]['provider'][0:7]=='macvtap':
734 text
+= self
.tab()+"<interface type='direct'>" + \
735 self
.inc_tab() + "<source dev='" + self
.get_local_iface_name(content
[0]['provider'][8:]) + "' mode='bridge'/>" + \
736 self
.tab() + "<target dev='macvtap0'/>"
738 text
+= self
.tab() + "<alias name='net" + str(net_nb
) + "'/>"
741 elif content
[0]['provider'][0:6]=='bridge':
742 text
+= self
.tab() + "<interface type='bridge'>" + \
743 self
.inc_tab()+"<source bridge='" +self
.get_local_iface_name(content
[0]['provider'][7:])+ "'/>"
745 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
746 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
749 elif content
[0]['provider'][0:3] == "OVS":
750 vlan
= content
[0]['provider'].replace('OVS:', '')
751 text
+= self
.tab() + "<interface type='bridge'>" + \
752 self
.inc_tab() + "<source bridge='ovim-" + str(vlan
) + "'/>"
753 if hypervisor
== 'xenhvm' or hypervisor
== 'xen-unik':
754 text
+= self
.tab() + "<script path='vif-openvswitch'/>"
756 return -1, 'Unknown Bridge net provider ' + content
[0]['provider']
758 text
+= self
.tab() + "<model type='" +model
+ "'/>"
759 if v
.get('mac_address', None) != None:
760 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
761 text
+= self
.pci2xml(v
.get('vpci',None))
762 text
+= self
.dec_tab()+'</interface>'
766 interfaces
= numa
.get('interfaces', [])
770 if self
.develop_mode
: #map these interfaces to bridges
771 text
+= self
.tab() + "<interface type='bridge'>" + \
772 self
.inc_tab()+"<source bridge='" +self
.develop_bridge_iface
+ "'/>"
774 text
+= self
.tab() + "<target dev='vnet" + str(net_nb
)+ "'/>" +\
775 self
.tab() + "<alias name='net" + str(net_nb
)+ "'/>"
777 text
+= self
.tab() + "<model type='e1000'/>" #e1000 is more probable to be supported than 'virtio'
778 if v
.get('mac_address', None) != None:
779 text
+= self
.tab() +"<mac address='" +v
['mac_address']+ "'/>"
780 text
+= self
.pci2xml(v
.get('vpci',None))
781 text
+= self
.dec_tab()+'</interface>'
784 if v
['dedicated'] == 'yes': #passthrought
785 text
+= self
.tab() + "<hostdev mode='subsystem' type='pci' managed='yes'>" + \
786 self
.inc_tab() + "<source>"
788 text
+= self
.pci2xml(v
['source'])
789 text
+= self
.dec_tab()+'</source>'
790 text
+= self
.pci2xml(v
.get('vpci',None))
792 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
793 text
+= self
.dec_tab()+'</hostdev>'
795 else: #sriov_interfaces
796 #skip not connected interfaces
797 if v
.get("net_id") == None:
799 text
+= self
.tab() + "<interface type='hostdev' managed='yes'>"
801 if v
.get('mac_address', None) != None:
802 text
+= self
.tab() + "<mac address='" +v
['mac_address']+ "'/>"
803 text
+= self
.tab()+'<source>'
805 text
+= self
.pci2xml(v
['source'])
806 text
+= self
.dec_tab()+'</source>'
807 if v
.get('vlan',None) != None:
808 text
+= self
.tab() + "<vlan> <tag id='" + str(v
['vlan']) + "'/> </vlan>"
809 text
+= self
.pci2xml(v
.get('vpci',None))
811 text
+= self
.tab() + "<alias name='hostdev" + str(net_nb
) + "'/>"
812 text
+= self
.dec_tab()+'</interface>'
815 text
+= self
.dec_tab()+'</devices>'+\
816 self
.dec_tab()+'</domain>'
def pci2xml(self, pci):
    '''from a pci format text XXXX:XX:XX.X generates the xml content of <address>
    alows an empty pci text'''
    # NOTE(review): guard lines elided in mangled source; docstring says empty pci is allowed
    if not pci:
        return ""
    first_part = pci.split(':')
    second_part = first_part[2].split('.')
    return self.tab() + "<address type='pci' domain='0x" + first_part[0] + \
           "' bus='0x" + first_part[1] + "' slot='0x" + second_part[0] + \
           "' function='0x" + second_part[1] + "'/>"
831 """Return indentation according to xml_level"""
832 return "\n" + (' '*self
.xml_level
)
835 """Increment and return indentation according to xml_level"""
840 """Decrement and return indentation according to xml_level"""
def create_ovs_bridge(self):
    """
    Create a bridge in compute OVS to allocate VMs
    :return: True if success
    """
    if self.test or not self.connectivity:
        return True
    try:
        self.run_command('sudo ovs-vsctl --may-exist add-br br-int -- set Bridge br-int stp_enable=true')
        return True
    except RunCommandException as e:
        self.logger.error("create_ovs_bridge ssh Exception: " + str(e))
        # a dead SSH session is recoverable: reconnect so the next attempt can succeed
        if "SSH session not active" in str(e):
            self.ssh_connect()
        return False
def delete_port_to_ovs_bridge(self, vlan, net_uuid):
    """
    Delete linux bridge port attched to a OVS bridge, if port is not free the port is not removed
    :param vlan: vlan port id
    :param net_uuid: network id
    :return: True if success
    """
    if self.test or not self.connectivity:
        return True
    try:
        port_name = 'ovim-{}'.format(str(vlan))
        command = 'sudo ovs-vsctl del-port br-int {}'.format(port_name)
        self.run_command(command)
        return True
    except RunCommandException as e:
        self.logger.error("delete_port_to_ovs_bridge ssh Exception: {}".format(str(e)))
        return False
def delete_dhcp_server(self, vlan, net_uuid, dhcp_path):
    """
    Delete dhcp server process living in namespace
    :param vlan: segmentation id
    :param net_uuid: network uuid
    :param dhcp_path: conf file path that lives in namespace side
    :return: True if success
    """
    # NOTE(review): source was whitespace-mangled with lines elided; guards/returns reconstructed
    if self.test or not self.connectivity:
        return True
    if not self.is_dhcp_port_free(vlan, net_uuid):
        return True
    try:
        dhcp_namespace = '{}-dnsmasq'.format(str(vlan))
        dhcp_path = os.path.join(dhcp_path, dhcp_namespace)
        pid_file = os.path.join(dhcp_path, 'dnsmasq.pid')

        # read the dnsmasq pid inside the namespace and kill it
        command = 'sudo ip netns exec {} cat {}'.format(dhcp_namespace, pid_file)
        content = self.run_command(command, ignore_exit_status=True)
        dns_pid = content.replace('\n', '')
        command = 'sudo ip netns exec {} kill -9 {}'.format(dhcp_namespace, dns_pid)
        self.run_command(command, ignore_exit_status=True)
        return True
    except RunCommandException as e:
        self.logger.error("delete_dhcp_server ssh Exception: " + str(e))
        return False
def is_dhcp_port_free(self, host_id, net_uuid):
    """
    Check if any port attached to the a net in a vxlan mesh across computes nodes
    :param host_id: host id
    :param net_uuid: network id
    :return: True if is not free
    """
    # NOTE(review): FROM clause and return lines elided in mangled source; reconstructed
    result, content = self.db.get_table(
        FROM='ports',
        WHERE={'type': 'instance:ovs', 'net_id': net_uuid}
    )
    if len(content) > 0:
        return False
    return True
def is_port_free(self, host_id, net_uuid):
    """
    Check if there not ovs ports of a network in a compute host.
    :param host_id: host id
    :param net_uuid: network id
    :return: True if is not free
    """
    # NOTE(review): return lines elided in mangled source; reconstructed
    result, content = self.db.get_table(
        FROM='ports as p join instances as i on p.instance_id=i.uuid',
        WHERE={"i.host_id": self.host_id, 'p.type': 'instance:ovs', 'p.net_id': net_uuid}
    )
    if len(content) > 0:
        return False
    return True
def add_port_to_ovs_bridge(self, vlan):
    """
    Add a bridge linux as a port to a OVS bridge and set a vlan for an specific linux bridge
    :param vlan: vlan port id
    :return: True if success
    """
    if self.test:
        return True
    try:
        port_name = 'ovim-{}'.format(str(vlan))
        command = 'sudo ovs-vsctl add-port br-int {} tag={}'.format(port_name, str(vlan))
        self.run_command(command)
        return True
    except RunCommandException as e:
        self.logger.error("add_port_to_ovs_bridge Exception: " + str(e))
        return False
961 def delete_dhcp_port(self
, vlan
, net_uuid
, dhcp_path
):
963 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
964 :param vlan: segmentation id
965 :param net_uuid: network id
966 :return: True if success
972 if not self
.is_dhcp_port_free(vlan
, net_uuid
):
974 self
.delete_dhcp_interfaces(vlan
, dhcp_path
)
977 def delete_bridge_port_attached_to_ovs(self
, vlan
, net_uuid
):
979 Delete from an existing OVS bridge a linux bridge port attached and the linux bridge itself.
982 :return: True if success
987 if not self
.is_port_free(vlan
, net_uuid
):
989 self
.delete_port_to_ovs_bridge(vlan
, net_uuid
)
990 self
.delete_linux_bridge(vlan
)
993 def delete_linux_bridge(self
, vlan
):
995 Delete a linux bridge in a scpecific compute.
996 :param vlan: vlan port id
997 :return: True if success
1003 port_name
= 'ovim-{}'.format(str(vlan
))
1004 command
= 'sudo ip link set dev ovim-{} down'.format(str(vlan
))
1005 self
.run_command(command
)
1007 command
= 'sudo ip link delete {} && sudo brctl delbr {}'.format(port_name
, port_name
)
1008 self
.run_command(command
)
1010 except RunCommandException
as e
:
1011 self
.logger
.error("delete_linux_bridge Exception: {}".format(str(e
)))
1014 def remove_link_bridge_to_ovs(self
, vlan
, link
):
1016 Delete a linux provider net connection to tenatn net
1017 :param vlan: vlan port id
1018 :param link: link name
1019 :return: True if success
1025 br_tap_name
= '{}-vethBO'.format(str(vlan
))
1026 br_ovs_name
= '{}-vethOB'.format(str(vlan
))
1028 # Delete ovs veth pair
1029 command
= 'sudo ip link set dev {} down'.format(br_ovs_name
)
1030 self
.run_command(command
, ignore_exit_status
=True)
1032 command
= 'sudo ovs-vsctl del-port br-int {}'.format(br_ovs_name
)
1033 self
.run_command(command
)
1035 # Delete br veth pair
1036 command
= 'sudo ip link set dev {} down'.format(br_tap_name
)
1037 self
.run_command(command
, ignore_exit_status
=True)
1039 # Delete br veth interface form bridge
1040 command
= 'sudo brctl delif {} {}'.format(link
, br_tap_name
)
1041 self
.run_command(command
)
1043 # Delete br veth pair
1044 command
= 'sudo ip link set dev {} down'.format(link
)
1045 self
.run_command(command
, ignore_exit_status
=True)
1048 except RunCommandException
as e
:
1049 self
.logger
.error("remove_link_bridge_to_ovs Exception: {}".format(str(e
)))
1052 def create_ovs_bridge_port(self
, vlan
):
1054 Generate a linux bridge and attache the port to a OVS bridge
1055 :param vlan: vlan port id
1060 self
.create_linux_bridge(vlan
)
1061 self
.add_port_to_ovs_bridge(vlan
)
1063 def create_linux_bridge(self
, vlan
):
1065 Create a linux bridge with STP active
1066 :param vlan: netowrk vlan id
1073 port_name
= 'ovim-{}'.format(str(vlan
))
1074 command
= 'sudo brctl show | grep {}'.format(port_name
)
1075 result
= self
.run_command(command
, ignore_exit_status
=True)
1077 command
= 'sudo brctl addbr {}'.format(port_name
)
1078 self
.run_command(command
)
1080 command
= 'sudo brctl stp {} on'.format(port_name
)
1081 self
.run_command(command
)
1083 command
= 'sudo ip link set dev {} up'.format(port_name
)
1084 self
.run_command(command
)
1086 except RunCommandException
as e
:
1087 self
.logger
.error("create_linux_bridge ssh Exception: {}".format(str(e
)))
1090 def set_mac_dhcp_server(self
, ip
, mac
, vlan
, netmask
, first_ip
, dhcp_path
):
1092 Write into dhcp conf file a rule to assigned a fixed ip given to an specific MAC address
1093 :param ip: IP address asigned to a VM
1094 :param mac: VM vnic mac to be macthed with the IP received
1095 :param vlan: Segmentation id
1096 :param netmask: netmask value
1097 :param path: dhcp conf file path that live in namespace side
1098 :return: True if success
1104 dhcp_namespace
= '{}-dnsmasq'.format(str(vlan
))
1105 dhcp_path
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1106 dhcp_hostsdir
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1107 ns_interface
= '{}-vethDO'.format(str(vlan
))
1112 command
= 'sudo ip netns exec {} cat /sys/class/net/{}/address'.format(dhcp_namespace
, ns_interface
)
1113 iface_listen_mac
= self
.run_command(command
, ignore_exit_status
=True)
1115 if iface_listen_mac
> 0:
1116 command
= 'sudo ip netns exec {} cat {} | grep -i {}'.format(dhcp_namespace
,
1119 content
= self
.run_command(command
, ignore_exit_status
=True)
1121 ip_data
= iface_listen_mac
.upper().replace('\n', '') + ',' + first_ip
1122 dhcp_hostsdir
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1124 command
= 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace
,
1127 self
.run_command(command
)
1129 ip_data
= mac
.upper() + ',' + ip
1130 command
= 'sudo ip netns exec {} sudo bash -ec "echo {} >> {}"'.format(dhcp_namespace
,
1133 self
.run_command(command
, ignore_exit_status
=False)
1138 except RunCommandException
as e
:
1139 self
.logger
.error("set_mac_dhcp_server ssh Exception: " + str(e
))
1142 def delete_mac_dhcp_server(self
, ip
, mac
, vlan
, dhcp_path
):
1144 Delete into dhcp conf file the ip assigned to a specific MAC address
1146 :param ip: IP address asigned to a VM
1147 :param mac: VM vnic mac to be macthed with the IP received
1148 :param vlan: Segmentation id
1149 :param dhcp_path: dhcp conf file path that live in namespace side
1156 dhcp_namespace
= str(vlan
) + '-dnsmasq'
1157 dhcp_path
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1158 dhcp_hostsdir
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1163 ip_data
= mac
.upper() + ',' + ip
1165 command
= 'sudo ip netns exec ' + dhcp_namespace
+ ' sudo sed -i \'/' + ip_data
+ '/d\' ' + dhcp_hostsdir
1166 self
.run_command(command
)
1169 except RunCommandException
as e
:
1170 self
.logger
.error("delete_mac_dhcp_server Exception: " + str(e
))
1173 def launch_dhcp_server(self
, vlan
, ip_range
, netmask
, dhcp_path
, gateway
, dns_list
=None, routes
=None):
1175 Generate a linux bridge and attache the port to a OVS bridge
1177 :param vlan: Segmentation id
1178 :param ip_range: IP dhcp range
1179 :param netmask: network netmask
1180 :param dhcp_path: dhcp conf file path that live in namespace side
1181 :param gateway: Gateway address for dhcp net
1182 :param dns_list: dns list for dhcp server
1183 :param routes: routes list for dhcp server
1184 :return: True if success
1190 ns_interface
= str(vlan
) + '-vethDO'
1191 dhcp_namespace
= str(vlan
) + '-dnsmasq'
1192 dhcp_path
= os
.path
.join(dhcp_path
, dhcp_namespace
, '')
1193 leases_path
= os
.path
.join(dhcp_path
, "dnsmasq.leases")
1194 pid_file
= os
.path
.join(dhcp_path
, 'dnsmasq.pid')
1196 dhcp_range
= ip_range
[0] + ',' + ip_range
[1] + ',' + netmask
1198 command
= 'sudo ip netns exec {} mkdir -p {}'.format(dhcp_namespace
, dhcp_path
)
1199 self
.run_command(command
)
1201 # check if dnsmasq process is running
1202 dnsmasq_is_runing
= False
1203 pid_path
= os
.path
.join(dhcp_path
, 'dnsmasq.pid')
1204 command
= 'sudo ip netns exec ' + dhcp_namespace
+ ' ls ' + pid_path
1205 content
= self
.run_command(command
, ignore_exit_status
=True)
1207 # check if pid is runing
1209 pid_path
= content
.replace('\n', '')
1210 command
= "ps aux | awk '{print $2 }' | grep {}" + pid_path
1211 dnsmasq_is_runing
= self
.run_command(command
, ignore_exit_status
=True)
1213 gateway_option
= ' --dhcp-option=3,' + gateway
1215 dhcp_route_option
= ''
1217 dhcp_route_option
= ' --dhcp-option=121'
1218 for key
, value
in routes
.iteritems():
1219 if 'default' == key
:
1220 gateway_option
= ' --dhcp-option=3,' + value
1222 dhcp_route_option
+= ',' + key
+ ',' + value
1225 dns_data
= ' --dhcp-option=6'
1226 for dns
in dns_list
:
1227 dns_data
+= ',' + dns
1229 if not dnsmasq_is_runing
:
1230 command
= 'sudo ip netns exec ' + dhcp_namespace
+ ' /usr/sbin/dnsmasq --strict-order --except-interface=lo ' \
1231 '--interface=' + ns_interface
+ \
1232 ' --bind-interfaces --dhcp-hostsdir=' + dhcp_path
+ \
1233 ' --dhcp-range ' + dhcp_range
+ \
1234 ' --pid-file=' + pid_file
+ \
1235 ' --dhcp-leasefile=' + leases_path
+ \
1236 ' --listen-address ' + ip_range
[0] + \
1238 dhcp_route_option
+ \
1241 self
.run_command(command
)
1243 except RunCommandException
as e
:
1244 self
.logger
.error("launch_dhcp_server ssh Exception: " + str(e
))
1247 def delete_dhcp_interfaces(self
, vlan
, dhcp_path
):
1249 Delete a linux dnsmasq bridge and namespace
1250 :param vlan: netowrk vlan id
1257 br_veth_name
='{}-vethDO'.format(str(vlan
))
1258 ovs_veth_name
= '{}-vethOD'.format(str(vlan
))
1259 dhcp_namespace
= '{}-dnsmasq'.format(str(vlan
))
1261 dhcp_path
= os
.path
.join(dhcp_path
, dhcp_namespace
)
1262 command
= 'sudo ovs-vsctl del-port br-int {}'.format(ovs_veth_name
)
1263 self
.run_command(command
, ignore_exit_status
=True) # to end session
1265 command
= 'sudo ip link set dev {} down'.format(ovs_veth_name
)
1266 self
.run_command(command
, ignore_exit_status
=True) # to end session
1268 command
= 'sudo ip link delete {} '.format(ovs_veth_name
)
1269 self
.run_command(command
, ignore_exit_status
=True)
1271 command
= 'sudo ip netns exec {} ip link set dev {} down'.format(dhcp_namespace
, br_veth_name
)
1272 self
.run_command(command
, ignore_exit_status
=True)
1274 command
= 'sudo rm -rf {}'.format(dhcp_path
)
1275 self
.run_command(command
)
1277 command
= 'sudo ip netns del {}'.format(dhcp_namespace
)
1278 self
.run_command(command
)
1281 except RunCommandException
as e
:
1282 self
.logger
.error("delete_dhcp_interfaces ssh Exception: {}".format(str(e
)))
1285 def create_dhcp_interfaces(self
, vlan
, ip_listen_address
, netmask
):
1287 Create a linux bridge with STP active
1288 :param vlan: segmentation id
1289 :param ip_listen_address: Listen Ip address for the dhcp service, the tap interface living in namesapce side
1290 :param netmask: dhcp net CIDR
1291 :return: True if success
1296 ovs_veth_name
= '{}-vethOD'.format(str(vlan
))
1297 ns_veth
= '{}-vethDO'.format(str(vlan
))
1298 dhcp_namespace
= '{}-dnsmasq'.format(str(vlan
))
1300 command
= 'sudo ip netns add {}'.format(dhcp_namespace
)
1301 self
.run_command(command
)
1303 command
= 'sudo ip link add {} type veth peer name {}'.format(ns_veth
, ovs_veth_name
)
1304 self
.run_command(command
)
1306 command
= 'sudo ip link set {} netns {}'.format(ns_veth
, dhcp_namespace
)
1307 self
.run_command(command
)
1309 command
= 'sudo ip netns exec {} ip link set dev {} up'.format(dhcp_namespace
, ns_veth
)
1310 self
.run_command(command
)
1312 command
= 'sudo ovs-vsctl add-port br-int {} tag={}'.format(ovs_veth_name
, str(vlan
))
1313 self
.run_command(command
, ignore_exit_status
=True)
1315 command
= 'sudo ip link set dev {} up'.format(ovs_veth_name
)
1316 self
.run_command(command
)
1318 command
= 'sudo ip netns exec {} ip link set dev lo up'.format(dhcp_namespace
)
1319 self
.run_command(command
)
1321 command
= 'sudo ip netns exec {} ifconfig {} {} netmask {}'.format(dhcp_namespace
,
1325 self
.run_command(command
)
1327 except RunCommandException
as e
:
1328 self
.logger
.error("create_dhcp_interfaces ssh Exception: {}".format(str(e
)))
1331 def delete_qrouter_connection(self
, vlan
, link
):
1333 Delete qrouter Namesapce with all veth interfaces need it
1342 ns_qouter
= '{}-qrouter'.format(str(vlan
))
1343 qrouter_ovs_veth
= '{}-vethOQ'.format(str(vlan
))
1344 qrouter_ns_veth
= '{}-vethQO'.format(str(vlan
))
1345 qrouter_br_veth
= '{}-vethBQ'.format(str(vlan
))
1346 qrouter_ns_router_veth
= '{}-vethQB'.format(str(vlan
))
1348 command
= 'sudo ovs-vsctl del-port br-int {}'.format(qrouter_ovs_veth
)
1349 self
.run_command(command
, ignore_exit_status
=True)
1352 command
= 'sudo ip netns exec {} ip link set dev {} down'.format(ns_qouter
, qrouter_ns_veth
)
1353 self
.run_command(command
, ignore_exit_status
=True)
1355 command
= 'sudo ip netns exec {} ip link delete {} '.format(ns_qouter
, qrouter_ns_veth
)
1356 self
.run_command(command
, ignore_exit_status
=True)
1358 command
= 'sudo ip netns del ' + ns_qouter
1359 self
.run_command(command
)
1361 # down ovs veth interface
1362 command
= 'sudo ip link set dev {} down'.format(qrouter_br_veth
)
1363 self
.run_command(command
, ignore_exit_status
=True)
1365 # down br veth interface
1366 command
= 'sudo ip link set dev {} down'.format(qrouter_ovs_veth
)
1367 self
.run_command(command
, ignore_exit_status
=True)
1369 # delete veth interface
1370 command
= 'sudo ip link delete {} '.format(link
, qrouter_ovs_veth
)
1371 self
.run_command(command
, ignore_exit_status
=True)
1373 # down br veth interface
1374 command
= 'sudo ip link set dev {} down'.format(qrouter_ns_router_veth
)
1375 self
.run_command(command
, ignore_exit_status
=True)
1377 # delete veth interface
1378 command
= 'sudo ip link delete {} '.format(link
, qrouter_ns_router_veth
)
1379 self
.run_command(command
, ignore_exit_status
=True)
1381 # down br veth interface
1382 command
= 'sudo brctl delif {} {}'.format(link
, qrouter_br_veth
)
1383 self
.run_command(command
)
1387 except RunCommandException
as e
:
1388 self
.logger
.error("delete_qrouter_connection ssh Exception: {}".format(str(e
)))
1391 def create_qrouter_ovs_connection(self
, vlan
, gateway
, dhcp_cidr
):
1393 Create qrouter Namesapce with all veth interfaces need it between NS and OVS
1403 ns_qouter
= '{}-qrouter'.format(str(vlan
))
1404 qrouter_ovs_veth
='{}-vethOQ'.format(str(vlan
))
1405 qrouter_ns_veth
= '{}-vethQO'.format(str(vlan
))
1408 command
= 'sudo ip netns add {}'.format(ns_qouter
)
1409 self
.run_command(command
)
1412 command
= 'sudo ip link add {} type veth peer name {}'.format(qrouter_ns_veth
, qrouter_ovs_veth
)
1413 self
.run_command(command
, ignore_exit_status
=True)
1415 # up ovs veth interface
1416 command
= 'sudo ip link set dev {} up'.format(qrouter_ovs_veth
)
1417 self
.run_command(command
)
1419 # add ovs veth to ovs br-int
1420 command
= 'sudo ovs-vsctl add-port br-int {} tag={}'.format(qrouter_ovs_veth
, vlan
)
1421 self
.run_command(command
)
1424 command
= 'sudo ip link set {} netns {}'.format(qrouter_ns_veth
, ns_qouter
)
1425 self
.run_command(command
)
1428 command
= 'sudo ip netns exec {} ip link set dev lo up'.format(ns_qouter
)
1429 self
.run_command(command
)
1432 command
= 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter
, qrouter_ns_veth
)
1433 self
.run_command(command
)
1435 from netaddr
import IPNetwork
1436 ip_tools
= IPNetwork(dhcp_cidr
)
1437 cidr_len
= ip_tools
.prefixlen
1440 command
= 'sudo ip netns exec {} ip address add {}/{} dev {}'.format(ns_qouter
, gateway
, cidr_len
, qrouter_ns_veth
)
1441 self
.run_command(command
)
1445 except RunCommandException
as e
:
1446 self
.logger
.error("Create_dhcp_interfaces ssh Exception: {}".format(str(e
)))
1449 def add_ns_routes(self
, vlan
, routes
):
1461 ns_qouter
= '{}-qrouter'.format(str(vlan
))
1462 qrouter_ns_router_veth
= '{}-vethQB'.format(str(vlan
))
1464 for key
, value
in routes
.iteritems():
1466 if key
== 'default':
1467 command
= 'sudo ip netns exec {} ip route add {} via {} '.format(ns_qouter
, key
, value
)
1469 command
= 'sudo ip netns exec {} ip route add {} via {} dev {}'.format(ns_qouter
, key
, value
,
1470 qrouter_ns_router_veth
)
1472 self
.run_command(command
)
1476 except RunCommandException
as e
:
1477 self
.logger
.error("add_ns_routes, error adding routes to namesapce, {}".format(str(e
)))
1480 def create_qrouter_br_connection(self
, vlan
, cidr
, link
):
1482 Create veth interfaces between user bridge (link) and OVS
1492 ns_qouter
= '{}-qrouter'.format(str(vlan
))
1493 qrouter_ns_router_veth
= '{}-vethQB'.format(str(vlan
))
1494 qrouter_br_veth
= '{}-vethBQ'.format(str(vlan
))
1496 command
= 'sudo brctl show | grep {}'.format(link
['iface'])
1497 content
= self
.run_command(command
, ignore_exit_status
=True)
1501 command
= 'sudo ip link add {} type veth peer name {}'.format(qrouter_br_veth
, qrouter_ns_router_veth
)
1502 self
.run_command(command
)
1504 # up ovs veth interface
1505 command
= 'sudo ip link set dev {} up'.format(qrouter_br_veth
)
1506 self
.run_command(command
)
1509 command
= 'sudo ip link set {} netns {}'.format(qrouter_ns_router_veth
, ns_qouter
)
1510 self
.run_command(command
)
1513 command
= 'sudo ip netns exec {} ip link set dev {} up'.format(ns_qouter
, qrouter_ns_router_veth
)
1514 self
.run_command(command
)
1516 command
= 'sudo ip netns exec {} ip address add {} dev {}'.format(ns_qouter
,
1518 qrouter_ns_router_veth
)
1519 self
.run_command(command
)
1522 command
= 'sudo brctl addif {} {}'.format(link
['iface'], qrouter_br_veth
)
1523 self
.run_command(command
)
1526 command
= 'sudo ip netns exec {} iptables -t nat -A POSTROUTING -o {} -s {} -d {} -j MASQUERADE' \
1527 .format(ns_qouter
, qrouter_ns_router_veth
, link
['nat'], cidr
)
1528 self
.run_command(command
)
1533 self
.logger
.error('create_qrouter_br_connection, Bridge {} given by user not exist'.format(qrouter_br_veth
))
1536 except RunCommandException
as e
:
1537 self
.logger
.error("Error creating qrouter, {}".format(str(e
)))
1540 def create_link_bridge_to_ovs(self
, vlan
, link
):
1542 Create interfaces to connect a linux bridge with tenant net
1543 :param vlan: segmentation id
1544 :return: True if success
1550 br_tap_name
= '{}-vethBO'.format(str(vlan
))
1551 br_ovs_name
= '{}-vethOB'.format(str(vlan
))
1553 # is a bridge or a interface
1554 command
= 'sudo brctl show | grep {}'.format(link
)
1555 content
= self
.run_command(command
, ignore_exit_status
=True)
1557 command
= 'sudo ip link add {} type veth peer name {}'.format(br_tap_name
, br_ovs_name
)
1558 self
.run_command(command
)
1560 command
= 'sudo ip link set dev {} up'.format(br_tap_name
)
1561 self
.run_command(command
)
1563 command
= 'sudo ip link set dev {} up'.format(br_ovs_name
)
1564 self
.run_command(command
)
1566 command
= 'sudo ovs-vsctl add-port br-int {} tag={}'.format(br_ovs_name
, str(vlan
))
1567 self
.run_command(command
)
1569 command
= 'sudo brctl addif ' + link
+ ' {}'.format(br_tap_name
)
1570 self
.run_command(command
)
1573 self
.logger
.error('Link is not present, please check {}'.format(link
))
1576 except RunCommandException
as e
:
1577 self
.logger
.error("create_link_bridge_to_ovs, Error creating link to ovs, {}".format(str(e
)))
1580 def create_ovs_vxlan_tunnel(self
, vxlan_interface
, remote_ip
):
1582 Create a vlxn tunnel between to computes with an OVS installed. STP is also active at port level
1583 :param vxlan_interface: vlxan inteface name.
1584 :param remote_ip: tunnel endpoint remote compute ip.
1587 if self
.test
or not self
.connectivity
:
1589 if remote_ip
== 'localhost':
1591 return True # TODO: Cannot create a vxlan between localhost and localhost
1592 remote_ip
= self
.local_ip
1595 command
= 'sudo ovs-vsctl add-port br-int {} -- set Interface {} type=vxlan options:remote_ip={} ' \
1596 '-- set Port {} other_config:stp-path-cost=10'.format(vxlan_interface
,
1600 self
.run_command(command
)
1602 except RunCommandException
as e
:
1603 self
.logger
.error("create_ovs_vxlan_tunnel, error creating vxlan tunnel, {}".format(str(e
)))
1606 def delete_ovs_vxlan_tunnel(self
, vxlan_interface
):
1608 Delete a vlxan tunnel port from a OVS brdige.
1609 :param vxlan_interface: vlxan name to be delete it.
1610 :return: True if success.
1612 if self
.test
or not self
.connectivity
:
1615 command
= 'sudo ovs-vsctl del-port br-int {}'.format(vxlan_interface
)
1616 self
.run_command(command
)
1618 except RunCommandException
as e
:
1619 self
.logger
.error("delete_ovs_vxlan_tunnel, error deleting vxlan tunenl, {}".format(str(e
)))
1622 def delete_ovs_bridge(self
):
1624 Delete a OVS bridge from a compute.
1625 :return: True if success
1627 if self
.test
or not self
.connectivity
:
1630 command
= 'sudo ovs-vsctl del-br br-int'
1631 self
.run_command(command
)
1633 except RunCommandException
as e
:
1634 self
.logger
.error("delete_ovs_bridge ssh Exception: {}".format(str(e
)))
1637 def get_file_info(self
, path
):
1638 command
= 'ls -lL --time-style=+%Y-%m-%dT%H:%M:%S ' + path
1640 content
= self
.run_command(command
)
1641 return content
.split(" ") # (permission, 1, owner, group, size, date, file)
1642 except RunCommandException
as e
:
1643 return None # file does not exist
1645 def qemu_get_info(self
, path
):
1646 command
= 'qemu-img info ' + path
1647 content
= self
.run_command(command
)
1649 return yaml
.load(content
)
1650 except yaml
.YAMLError
as exc
:
1652 if hasattr(exc
, 'problem_mark'):
1653 mark
= exc
.problem_mark
1654 text
= " at position: (%s:%s)" % (mark
.line
+1, mark
.column
+1)
1655 self
.logger
.error("get_qemu_info yaml format Exception " + text
)
1656 raise RunCommandException("Error getting qemu_info yaml format" + text
)
1658 def qemu_change_backing(self
, inc_file
, new_backing_file
):
1659 command
= 'qemu-img rebase -u -b {} {}'.format(new_backing_file
, inc_file
)
1661 self
.run_command(command
)
1663 except RunCommandException
as e
:
1664 self
.logger
.error("qemu_change_backing error: " + str(e
))
1667 def qemu_create_empty_disk(self
, dev
):
1669 if not dev
and 'source' not in dev
and 'file format' not in dev
and 'image_size' not in dev
:
1670 self
.logger
.error("qemu_create_empty_disk error: missing image parameter")
1673 empty_disk_path
= dev
['source file']
1675 command
= 'qemu-img create -f qcow2 {} {}G'.format(empty_disk_path
, dev
['image_size'])
1677 self
.run_command(command
)
1679 except RunCommandException
as e
:
1680 self
.logger
.error("qemu_create_empty_disk error: " + str(e
))
1683 def get_notused_filename(self
, proposed_name
, suffix
=''):
1684 '''Look for a non existing file_name in the host
1685 proposed_name: proposed file name, includes path
1686 suffix: suffix to be added to the name, before the extention
1688 extension
= proposed_name
.rfind(".")
1689 slash
= proposed_name
.rfind("/")
1690 if extension
< 0 or extension
< slash
: # no extension
1691 extension
= len(proposed_name
)
1692 target_name
= proposed_name
[:extension
] + suffix
+ proposed_name
[extension
:]
1693 info
= self
.get_file_info(target_name
)
1698 while info
is not None:
1699 target_name
= proposed_name
[:extension
] + suffix
+ "-" + str(index
) + proposed_name
[extension
:]
1701 info
= self
.get_file_info(target_name
)
1704 def get_notused_path(self
, proposed_path
, suffix
=''):
1705 '''Look for a non existing path at database for images
1706 proposed_path: proposed file name, includes path
1707 suffix: suffix to be added to the name, before the extention
1709 extension
= proposed_path
.rfind(".")
1711 extension
= len(proposed_path
)
1713 target_path
= proposed_path
[:extension
] + suffix
+ proposed_path
[extension
:]
1716 r
,_
=self
.db
.get_table(FROM
="images",WHERE
={"path":target_path
})
1719 target_path
= proposed_path
[:extension
] + suffix
+ "-" + str(index
) + proposed_path
[extension
:]
1723 def delete_file(self
, file_name
):
1724 command
= 'rm -f ' + file_name
1725 self
.run_command(command
)
1727 def copy_file(self
, source
, destination
, perserve_time
=True):
1728 if source
[0:4]=="http":
1729 command
= "wget --no-verbose -O '{dst}' '{src}' 2>'{dst_result}' || cat '{dst_result}' >&2 && rm '{dst_result}'".format(
1730 dst
=destination
, src
=source
, dst_result
=destination
+ ".result" )
1732 command
= 'cp --no-preserve=mode'
1734 command
+= ' --preserve=timestamps'
1735 command
+= " '{}' '{}'".format(source
, destination
)
1736 self
.run_command(command
)
1738 def copy_remote_file(self
, remote_file
, use_incremental
):
1739 ''' Copy a file from the repository to local folder and recursively
1740 copy the backing files in case the remote file is incremental
1741 Read and/or modified self.localinfo['files'] that contain the
1742 unmodified copies of images in the local path
1744 remote_file: path of remote file
1745 use_incremental: None (leave the decision to this function), True, False
1747 local_file: name of local file
1748 qemu_info: dict with quemu information of local file
1749 use_incremental_out: True, False; same as use_incremental, but if None a decision is taken
1752 use_incremental_out
= use_incremental
1753 new_backing_file
= None
1755 file_from_local
= True
1757 #in case incremental use is not decided, take the decision depending on the image
1758 #avoid the use of incremental if this image is already incremental
1759 if remote_file
[0:4] == "http":
1760 file_from_local
= False
1762 qemu_remote_info
= self
.qemu_get_info(remote_file
)
1763 if use_incremental_out
==None:
1764 use_incremental_out
= not ( file_from_local
and 'backing file' in qemu_remote_info
)
1765 #copy recursivelly the backing files
1766 if file_from_local
and 'backing file' in qemu_remote_info
:
1767 new_backing_file
, _
, _
= self
.copy_remote_file(qemu_remote_info
['backing file'], True)
1769 #check if remote file is present locally
1770 if use_incremental_out
and remote_file
in self
.localinfo
['files']:
1771 local_file
= self
.localinfo
['files'][remote_file
]
1772 local_file_info
= self
.get_file_info(local_file
)
1774 remote_file_info
= self
.get_file_info(remote_file
)
1775 if local_file_info
== None:
1777 elif file_from_local
and (local_file_info
[4]!=remote_file_info
[4] or local_file_info
[5]!=remote_file_info
[5]):
1778 #local copy of file not valid because date or size are different.
1779 #TODO DELETE local file if this file is not used by any active virtual machine
1781 self
.delete_file(local_file
)
1782 del self
.localinfo
['files'][remote_file
]
1786 else: #check that the local file has the same backing file, or there are not backing at all
1787 qemu_info
= self
.qemu_get_info(local_file
)
1788 if new_backing_file
!= qemu_info
.get('backing file'):
1792 if local_file
== None: #copy the file
1793 img_name
= remote_file
.split('/') [-1]
1794 img_local
= self
.image_path
+ '/' + img_name
1795 local_file
= self
.get_notused_filename(img_local
)
1796 self
.copy_file(remote_file
, local_file
, use_incremental_out
)
1798 if use_incremental_out
:
1799 self
.localinfo
['files'][remote_file
] = local_file
1800 if new_backing_file
:
1801 self
.qemu_change_backing(local_file
, new_backing_file
)
1802 qemu_info
= self
.qemu_get_info(local_file
)
1804 return local_file
, qemu_info
, use_incremental_out
1806 def launch_server(self
, conn
, server
, rebuild
=False, domain
=None):
1808 time
.sleep(random
.randint(20,150)) #sleep random timeto be make it a bit more real
1811 server_id
= server
['uuid']
1812 paused
= server
.get('paused','no')
1814 if domain
!=None and rebuild
==False:
1816 #self.server_status[server_id] = 'ACTIVE'
1819 result
, server_data
= self
.db
.get_instance(server_id
)
1821 self
.logger
.error("launch_server ERROR getting server from DB %d %s", result
, server_data
)
1822 return result
, server_data
1824 self
.hypervisor
= str(server_data
['hypervisor'])
1826 #0: get image metadata
1827 server_metadata
= server
.get('metadata', {})
1828 use_incremental
= None
1830 if "use_incremental" in server_metadata
:
1831 use_incremental
= False if server_metadata
["use_incremental"] == "no" else True
1832 if self
.xen_hyp
== True:
1833 use_incremental
= False
1835 server_host_files
= self
.localinfo
['server_files'].get( server
['uuid'], {})
1837 #delete previous incremental files
1838 for file_
in server_host_files
.values():
1839 self
.delete_file(file_
['source file'] )
1840 server_host_files
={}
1842 #1: obtain aditional devices (disks)
1843 #Put as first device the main disk
1844 devices
= [ {"type":"disk", "image_id":server
['image_id'], "vpci":server_metadata
.get('vpci', None) } ]
1845 if 'extended' in server_data
and server_data
['extended']!=None and "devices" in server_data
['extended']:
1846 devices
+= server_data
['extended']['devices']
1849 image_id
= dev
.get('image_id')
1852 uuid_empty
= str(uuid
.uuid4())
1853 empty_path
= self
.empty_image_path
+ uuid_empty
+ '.qcow2' # local path for empty disk
1855 dev
['source file'] = empty_path
1856 dev
['file format'] = 'qcow2'
1857 self
.qemu_create_empty_disk(dev
)
1858 server_host_files
[uuid_empty
] = {'source file': empty_path
,
1859 'file format': dev
['file format']}
1863 result
, content
= self
.db
.get_table(FROM
='images', SELECT
=('path', 'metadata'),
1864 WHERE
={'uuid': image_id
})
1866 error_text
= "ERROR", result
, content
, "when getting image", dev
['image_id']
1867 self
.logger
.error("launch_server " + error_text
)
1868 return -1, error_text
1869 if content
[0]['metadata'] is not None:
1870 dev
['metadata'] = json
.loads(content
[0]['metadata'])
1872 dev
['metadata'] = {}
1874 if image_id
in server_host_files
:
1875 dev
['source file'] = server_host_files
[image_id
]['source file'] #local path
1876 dev
['file format'] = server_host_files
[image_id
]['file format'] # raw or qcow2
1879 #2: copy image to host
1881 remote_file
= content
[0]['path']
1883 remote_file
= empty_path
1884 use_incremental_image
= use_incremental
1885 if dev
['metadata'].get("use_incremental") == "no":
1886 use_incremental_image
= False
1887 local_file
, qemu_info
, use_incremental_image
= self
.copy_remote_file(remote_file
, use_incremental_image
)
1889 #create incremental image
1890 if use_incremental_image
:
1891 local_file_inc
= self
.get_notused_filename(local_file
, '.inc')
1892 command
= 'qemu-img create -f qcow2 {} -o backing_file={}'.format(local_file_inc
, local_file
)
1893 self
.run_command(command
)
1894 local_file
= local_file_inc
1895 qemu_info
= {'file format': 'qcow2'}
1897 server_host_files
[ dev
['image_id'] ] = {'source file': local_file
, 'file format': qemu_info
['file format']}
1899 dev
['source file'] = local_file
1900 dev
['file format'] = qemu_info
['file format']
1902 self
.localinfo
['server_files'][ server
['uuid'] ] = server_host_files
1903 self
.localinfo_dirty
= True
1906 result
, xml
= self
.create_xml_server(server_data
, devices
, server_metadata
) #local_file
1908 self
.logger
.error("create xml server error: " + xml
)
1910 self
.logger
.debug("create xml: " + xml
)
1911 atribute
= host_thread
.lvirt_module
.VIR_DOMAIN_START_PAUSED
if paused
== "yes" else 0
1913 if not rebuild
: #ensures that any pending destroying server is done
1914 self
.server_forceoff(True)
1915 #self.logger.debug("launching instance " + xml)
1916 conn
.createXML(xml
, atribute
)
1917 #self.server_status[server_id] = 'PAUSED' if paused == "yes" else 'ACTIVE'
1921 except paramiko
.ssh_exception
.SSHException
as e
:
1923 self
.logger
.error("launch_server id='%s' ssh Exception: %s", server_id
, text
)
1924 if "SSH session not active" in text
:
1926 except host_thread
.lvirt_module
.libvirtError
as e
:
1927 text
= e
.get_error_message()
1928 self
.logger
.error("launch_server id='%s' libvirt Exception: %s", server_id
, text
)
1929 except Exception as e
:
1931 self
.logger
.error("launch_server id='%s' Exception: %s", server_id
, text
)
1934 def update_servers_status(self
):
1936 # VIR_DOMAIN_NOSTATE = 0
1937 # VIR_DOMAIN_RUNNING = 1
1938 # VIR_DOMAIN_BLOCKED = 2
1939 # VIR_DOMAIN_PAUSED = 3
1940 # VIR_DOMAIN_SHUTDOWN = 4
1941 # VIR_DOMAIN_SHUTOFF = 5
1942 # VIR_DOMAIN_CRASHED = 6
1943 # VIR_DOMAIN_PMSUSPENDED = 7 #TODO suspended
1945 if self
.test
or len(self
.server_status
)==0:
1949 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
1950 domains
= conn
.listAllDomains()
1952 for domain
in domains
:
1953 uuid
= domain
.UUIDString() ;
1954 libvirt_status
= domain
.state()
1955 #print libvirt_status
1956 if libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_RUNNING
or libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_SHUTDOWN
:
1957 new_status
= "ACTIVE"
1958 elif libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_PAUSED
:
1959 new_status
= "PAUSED"
1960 elif libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_SHUTOFF
:
1961 new_status
= "INACTIVE"
1962 elif libvirt_status
[0] == host_thread
.lvirt_module
.VIR_DOMAIN_CRASHED
:
1963 new_status
= "ERROR"
1966 domain_dict
[uuid
] = new_status
1968 except host_thread
.lvirt_module
.libvirtError
as e
:
1969 self
.logger
.error("get_state() Exception " + e
.get_error_message())
1972 for server_id
, current_status
in self
.server_status
.iteritems():
1974 if server_id
in domain_dict
:
1975 new_status
= domain_dict
[server_id
]
1977 new_status
= "INACTIVE"
1979 if new_status
== None or new_status
== current_status
:
1981 if new_status
== 'INACTIVE' and current_status
== 'ERROR':
1982 continue #keep ERROR status, because obviously this machine is not running
1984 self
.logger
.debug("server id='%s' status change from '%s' to '%s'", server_id
, current_status
, new_status
)
1985 STATUS
={'progress':100, 'status':new_status
}
1986 if new_status
== 'ERROR':
1987 STATUS
['last_error'] = 'machine has crashed'
1988 r
,_
= self
.db
.update_rows('instances', STATUS
, {'uuid':server_id
}, log
=False)
1990 self
.server_status
[server_id
] = new_status
1992 def action_on_server(self
, req
, last_retry
=True):
1993 '''Perform an action on a req
1995 req: dictionary that contain:
1996 server properties: 'uuid','name','tenant_id','status'
1998 host properties: 'user', 'ip_name'
1999 return (error, text)
2000 0: No error. VM is updated to new state,
2001 -1: Invalid action, as trying to pause a PAUSED VM
2002 -2: Error accessing host
2004 -4: Error at DB access
2005 -5: Error while trying to perform action. VM is updated to ERROR
2007 server_id
= req
['uuid']
2010 old_status
= req
['status']
2014 if 'terminate' in req
['action']:
2015 new_status
= 'deleted'
2016 elif 'shutoff' in req
['action'] or 'shutdown' in req
['action'] or 'forceOff' in req
['action']:
2017 if req
['status']!='ERROR':
2019 new_status
= 'INACTIVE'
2020 elif 'start' in req
['action'] and req
['status']!='ERROR':
2021 new_status
= 'ACTIVE'
2022 elif 'resume' in req
['action'] and req
['status']!='ERROR' and req
['status']!='INACTIVE':
2023 new_status
= 'ACTIVE'
2024 elif 'pause' in req
['action'] and req
['status']!='ERROR':
2025 new_status
= 'PAUSED'
2026 elif 'reboot' in req
['action'] and req
['status']!='ERROR':
2027 new_status
= 'ACTIVE'
2028 elif 'rebuild' in req
['action']:
2029 time
.sleep(random
.randint(20,150))
2030 new_status
= 'ACTIVE'
2031 elif 'createImage' in req
['action']:
2033 self
.create_image(None, req
)
2036 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
2038 dom
= conn
.lookupByUUIDString(server_id
)
2039 except host_thread
.lvirt_module
.libvirtError
as e
:
2040 text
= e
.get_error_message()
2041 if 'LookupByUUIDString' in text
or 'Domain not found' in text
or 'No existe un dominio coincidente' in text
:
2044 self
.logger
.error("action_on_server id='%s' libvirt exception: %s", server_id
, text
)
2047 if 'forceOff' in req
['action']:
2049 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
2052 self
.logger
.debug("sending DESTROY to server id='%s'", server_id
)
2054 except Exception as e
:
2055 if "domain is not running" not in e
.get_error_message():
2056 self
.logger
.error("action_on_server id='%s' Exception while sending force off: %s",
2057 server_id
, e
.get_error_message())
2058 last_error
= 'action_on_server Exception while destroy: ' + e
.get_error_message()
2059 new_status
= 'ERROR'
2061 elif 'terminate' in req
['action']:
2063 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
2064 new_status
= 'deleted'
2067 if req
['action']['terminate'] == 'force':
2068 self
.logger
.debug("sending DESTROY to server id='%s'", server_id
)
2070 new_status
= 'deleted'
2072 self
.logger
.debug("sending SHUTDOWN to server id='%s'", server_id
)
2074 self
.pending_terminate_server
.append( (time
.time()+10,server_id
) )
2075 except Exception as e
:
2076 self
.logger
.error("action_on_server id='%s' Exception while destroy: %s",
2077 server_id
, e
.get_error_message())
2078 last_error
= 'action_on_server Exception while destroy: ' + e
.get_error_message()
2079 new_status
= 'ERROR'
2080 if "domain is not running" in e
.get_error_message():
2083 new_status
= 'deleted'
2085 self
.logger
.error("action_on_server id='%s' Exception while undefine: %s",
2086 server_id
, e
.get_error_message())
2087 last_error
= 'action_on_server Exception2 while undefine:', e
.get_error_message()
2088 #Exception: 'virDomainDetachDevice() failed'
2089 if new_status
=='deleted':
2090 if server_id
in self
.server_status
:
2091 del self
.server_status
[server_id
]
2092 if req
['uuid'] in self
.localinfo
['server_files']:
2093 for file_
in self
.localinfo
['server_files'][ req
['uuid'] ].values():
2095 self
.delete_file(file_
['source file'])
2098 del self
.localinfo
['server_files'][ req
['uuid'] ]
2099 self
.localinfo_dirty
= True
2101 elif 'shutoff' in req
['action'] or 'shutdown' in req
['action']:
2104 self
.logger
.debug("action_on_server id='%s' domain not running", server_id
)
2107 # new_status = 'INACTIVE'
2108 #TODO: check status for changing at database
2109 except Exception as e
:
2110 new_status
= 'ERROR'
2111 self
.logger
.error("action_on_server id='%s' Exception while shutdown: %s",
2112 server_id
, e
.get_error_message())
2113 last_error
= 'action_on_server Exception while shutdown: ' + e
.get_error_message()
2115 elif 'rebuild' in req
['action']:
2118 r
= self
.launch_server(conn
, req
, True, None)
2120 new_status
= 'ERROR'
2123 new_status
= 'ACTIVE'
2124 elif 'start' in req
['action']:
2125 # The instance is only create in DB but not yet at libvirt domain, needs to be create
2126 rebuild
= True if req
['action']['start'] == 'rebuild' else False
2127 r
= self
.launch_server(conn
, req
, rebuild
, dom
)
2129 new_status
= 'ERROR'
2132 new_status
= 'ACTIVE'
2134 elif 'resume' in req
['action']:
2140 # new_status = 'ACTIVE'
2141 except Exception as e
:
2142 self
.logger
.error("action_on_server id='%s' Exception while resume: %s",
2143 server_id
, e
.get_error_message())
2145 elif 'pause' in req
['action']:
2151 # new_status = 'PAUSED'
2152 except Exception as e
:
2153 self
.logger
.error("action_on_server id='%s' Exception while pause: %s",
2154 server_id
, e
.get_error_message())
2156 elif 'reboot' in req
['action']:
2162 self
.logger
.debug("action_on_server id='%s' reboot:", server_id
)
2163 #new_status = 'ACTIVE'
2164 except Exception as e
:
2165 self
.logger
.error("action_on_server id='%s' Exception while reboot: %s",
2166 server_id
, e
.get_error_message())
2167 elif 'createImage' in req
['action']:
2168 self
.create_image(dom
, req
)
2172 except host_thread
.lvirt_module
.libvirtError
as e
:
2173 if conn
is not None: conn
.close()
2174 text
= e
.get_error_message()
2175 new_status
= "ERROR"
2177 if 'LookupByUUIDString' in text
or 'Domain not found' in text
or 'No existe un dominio coincidente' in text
:
2178 self
.logger
.debug("action_on_server id='%s' Exception removed from host", server_id
)
2180 self
.logger
.error("action_on_server id='%s' Exception %s", server_id
, text
)
2181 #end of if self.test
2182 if new_status
== None:
2185 self
.logger
.debug("action_on_server id='%s' new status=%s %s",server_id
, new_status
, last_error
)
2186 UPDATE
= {'progress':100, 'status':new_status
}
2188 if new_status
=='ERROR':
2189 if not last_retry
: #if there will be another retry do not update database
2191 elif 'terminate' in req
['action']:
2192 #PUT a log in the database
2193 self
.logger
.error("PANIC deleting server id='%s' %s", server_id
, last_error
)
2194 self
.db
.new_row('logs',
2195 {'uuid':server_id
, 'tenant_id':req
['tenant_id'], 'related':'instances','level':'panic',
2196 'description':'PANIC deleting server from host '+self
.name
+': '+last_error
}
2198 if server_id
in self
.server_status
:
2199 del self
.server_status
[server_id
]
2202 UPDATE
['last_error'] = last_error
2203 if new_status
!= 'deleted' and (new_status
!= old_status
or new_status
== 'ERROR') :
2204 self
.db
.update_rows('instances', UPDATE
, {'uuid':server_id
}, log
=True)
2205 self
.server_status
[server_id
] = new_status
2206 if new_status
== 'ERROR':
2211 def restore_iface(self
, name
, mac
, lib_conn
=None):
2212 ''' make an ifdown, ifup to restore default parameter of na interface
2214 mac: mac address of the interface
2215 lib_conn: connection to the libvirt, if None a new connection is created
2216 Return 0,None if ok, -1,text if fails
2222 self
.logger
.debug("restore_iface '%s' %s", name
, mac
)
2226 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
2230 #wait to the pending VM deletion
2231 #TODO.Revise self.server_forceoff(True)
2233 iface
= conn
.interfaceLookupByMACString(mac
)
2234 if iface
.isActive():
2237 self
.logger
.debug("restore_iface '%s' %s", name
, mac
)
2238 except host_thread
.lvirt_module
.libvirtError
as e
:
2239 error_text
= e
.get_error_message()
2240 self
.logger
.error("restore_iface '%s' '%s' libvirt exception: %s", name
, mac
, error_text
)
2243 if lib_conn
is None and conn
is not None:
2245 return ret
, error_text
2248 def create_image(self
,dom
, req
):
2250 if 'path' in req
['action']['createImage']:
2251 file_dst
= req
['action']['createImage']['path']
2253 createImage
=req
['action']['createImage']
2254 img_name
= createImage
['source']['path']
2255 index
=img_name
.rfind('/')
2256 file_dst
= self
.get_notused_path(img_name
[:index
+1] + createImage
['name'] + '.qcow2')
2257 image_status
='ACTIVE'
2261 server_id
= req
['uuid']
2262 createImage
=req
['action']['createImage']
2263 file_orig
= self
.localinfo
['server_files'][server_id
] [ createImage
['source']['image_id'] ] ['source file']
2264 if 'path' in req
['action']['createImage']:
2265 file_dst
= req
['action']['createImage']['path']
2267 img_name
= createImage
['source']['path']
2268 index
=img_name
.rfind('/')
2269 file_dst
= self
.get_notused_filename(img_name
[:index
+1] + createImage
['name'] + '.qcow2')
2271 self
.copy_file(file_orig
, file_dst
)
2272 qemu_info
= self
.qemu_get_info(file_orig
)
2273 if 'backing file' in qemu_info
:
2274 for k
,v
in self
.localinfo
['files'].items():
2275 if v
==qemu_info
['backing file']:
2276 self
.qemu_change_backing(file_dst
, k
)
2278 image_status
='ACTIVE'
2280 except paramiko
.ssh_exception
.SSHException
as e
:
2281 image_status
='ERROR'
2282 error_text
= e
.args
[0]
2283 self
.logger
.error("create_image id='%s' ssh Exception: %s", server_id
, error_text
)
2284 if "SSH session not active" in error_text
and retry
==0:
2286 except Exception as e
:
2287 image_status
='ERROR'
2289 self
.logger
.error("create_image id='%s' Exception: %s", server_id
, error_text
)
2291 #TODO insert a last_error at database
2292 self
.db
.update_rows('images', {'status':image_status
, 'progress': 100, 'path':file_dst
},
2293 {'uuid':req
['new_image']['uuid']}, log
=True)
2295 def edit_iface(self
, port_id
, old_net
, new_net
):
2296 #This action imply remove and insert interface to put proper parameters
2301 r
,c
= self
.db
.get_table(FROM
='ports as p join resources_port as rp on p.uuid=rp.port_id',
2302 WHERE
={'port_id': port_id
})
2304 self
.logger
.error("edit_iface %s DDBB error: %s", port_id
, c
)
2307 self
.logger
.error("edit_iface %s port not found", port_id
)
2310 if port
["model"]!="VF":
2311 self
.logger
.error("edit_iface %s ERROR model must be VF", port_id
)
2313 #create xml detach file
2316 xml
.append("<interface type='hostdev' managed='yes'>")
2317 xml
.append(" <mac address='" +port
['mac']+ "'/>")
2318 xml
.append(" <source>"+ self
.pci2xml(port
['pci'])+"\n </source>")
2319 xml
.append('</interface>')
2324 conn
= host_thread
.lvirt_module
.open(self
.lvirt_conn_uri
)
2325 dom
= conn
.lookupByUUIDString(port
["instance_id"])
2328 self
.logger
.debug("edit_iface detaching SRIOV interface " + text
)
2329 dom
.detachDeviceFlags(text
, flags
=host_thread
.lvirt_module
.VIR_DOMAIN_AFFECT_LIVE
)
2331 xml
[-1] =" <vlan> <tag id='" + str(port
['vlan']) + "'/> </vlan>"
2333 xml
.append(self
.pci2xml(port
.get('vpci',None)) )
2334 xml
.append('</interface>')
2336 self
.logger
.debug("edit_iface attaching SRIOV interface " + text
)
2337 dom
.attachDeviceFlags(text
, flags
=host_thread
.lvirt_module
.VIR_DOMAIN_AFFECT_LIVE
)
2339 except host_thread
.lvirt_module
.libvirtError
as e
:
2340 text
= e
.get_error_message()
2341 self
.logger
.error("edit_iface %s libvirt exception: %s", port
["instance_id"], text
)
2344 if conn
is not None: conn
.close()
2347 def create_server(server
, db
, only_of_ports
):
2348 extended
= server
.get('extended', None)
2350 requirements
['numa']={'memory':0, 'proc_req_type': 'threads', 'proc_req_nb':0, 'port_list':[], 'sriov_list':[]}
2351 requirements
['ram'] = server
['flavor'].get('ram', 0)
2352 if requirements
['ram']== None:
2353 requirements
['ram'] = 0
2354 requirements
['vcpus'] = server
['flavor'].get('vcpus', 0)
2355 if requirements
['vcpus']== None:
2356 requirements
['vcpus'] = 0
2357 #If extended is not defined get requirements from flavor
2358 if extended
is None:
2359 #If extended is defined in flavor convert to dictionary and use it
2360 if 'extended' in server
['flavor'] and server
['flavor']['extended'] != None:
2361 json_acceptable_string
= server
['flavor']['extended'].replace("'", "\"")
2362 extended
= json
.loads(json_acceptable_string
)
2365 #print json.dumps(extended, indent=4)
2367 #For simplicity only one numa VM are supported in the initial implementation
2368 if extended
!= None:
2369 numas
= extended
.get('numas', [])
2371 return (-2, "Multi-NUMA VMs are not supported yet")
2373 # return (-1, "At least one numa must be specified")
2375 #a for loop is used in order to be ready to multi-NUMA VMs
2379 numa_req
['memory'] = numa
.get('memory', 0)
2381 numa_req
['proc_req_nb'] = numa
['cores'] #number of cores or threads to be reserved
2382 numa_req
['proc_req_type'] = 'cores' #indicates whether cores or threads must be reserved
2383 numa_req
['proc_req_list'] = numa
.get('cores-id', None) #list of ids to be assigned to the cores or threads
2384 elif 'paired-threads' in numa
:
2385 numa_req
['proc_req_nb'] = numa
['paired-threads']
2386 numa_req
['proc_req_type'] = 'paired-threads'
2387 numa_req
['proc_req_list'] = numa
.get('paired-threads-id', None)
2388 elif 'threads' in numa
:
2389 numa_req
['proc_req_nb'] = numa
['threads']
2390 numa_req
['proc_req_type'] = 'threads'
2391 numa_req
['proc_req_list'] = numa
.get('threads-id', None)
2393 numa_req
['proc_req_nb'] = 0 # by default
2394 numa_req
['proc_req_type'] = 'threads'
2398 #Generate a list of sriov and another for physical interfaces
2399 interfaces
= numa
.get('interfaces', [])
2402 for iface
in interfaces
:
2403 iface
['bandwidth'] = int(iface
['bandwidth'])
2404 if iface
['dedicated'][:3]=='yes':
2405 port_list
.append(iface
)
2407 sriov_list
.append(iface
)
2409 #Save lists ordered from more restrictive to less bw requirements
2410 numa_req
['sriov_list'] = sorted(sriov_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2411 numa_req
['port_list'] = sorted(port_list
, key
=lambda k
: k
['bandwidth'], reverse
=True)
2414 request
.append(numa_req
)
2416 # print "----------\n"+json.dumps(request[0], indent=4)
2417 # print '----------\n\n'
2419 #Search in db for an appropriate numa for each requested numa
2420 #at the moment multi-NUMA VMs are not supported
2422 requirements
['numa'].update(request
[0])
2423 if requirements
['numa']['memory']>0:
2424 requirements
['ram']=0 #By the moment I make incompatible ask for both Huge and non huge pages memory
2425 elif requirements
['ram']==0:
2426 return (-1, "Memory information not set neither at extended field not at ram")
2427 if requirements
['numa']['proc_req_nb']>0:
2428 requirements
['vcpus']=0 #By the moment I make incompatible ask for both Isolated and non isolated cpus
2429 elif requirements
['vcpus']==0:
2430 return (-1, "Processor information not set neither at extended field not at vcpus")
2432 if 'hypervisor' in server
: requirements
['hypervisor'] = server
['hypervisor'] #Unikernels extension
2434 result
, content
= db
.get_numas(requirements
, server
.get('host_id', None), only_of_ports
)
2437 return (-1, content
)
2439 numa_id
= content
['numa_id']
2440 host_id
= content
['host_id']
2442 #obtain threads_id and calculate pinning
2445 if requirements
['numa']['proc_req_nb']>0:
2446 result
, content
= db
.get_table(FROM
='resources_core',
2447 SELECT
=('id','core_id','thread_id'),
2448 WHERE
={'numa_id':numa_id
,'instance_id': None, 'status':'ok'} )
2453 #convert rows to a dictionary indexed by core_id
2456 if not row
['core_id'] in cores_dict
:
2457 cores_dict
[row
['core_id']] = []
2458 cores_dict
[row
['core_id']].append([row
['thread_id'],row
['id']])
2460 #In case full cores are requested
2462 if requirements
['numa']['proc_req_type'] == 'cores':
2463 #Get/create the list of the vcpu_ids
2464 vcpu_id_list
= requirements
['numa']['proc_req_list']
2465 if vcpu_id_list
== None:
2466 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2468 for threads
in cores_dict
.itervalues():
2470 if len(threads
) != 2:
2473 #set pinning for the first thread
2474 cpu_pinning
.append( [ vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1] ] )
2476 #reserve so it is not used the second thread
2477 reserved_threads
.append(threads
[1][1])
2479 if len(vcpu_id_list
) == 0:
2482 #In case paired threads are requested
2483 elif requirements
['numa']['proc_req_type'] == 'paired-threads':
2485 #Get/create the list of the vcpu_ids
2486 if requirements
['numa']['proc_req_list'] != None:
2488 for pair
in requirements
['numa']['proc_req_list']:
2490 return -1, "Field paired-threads-id not properly specified"
2492 vcpu_id_list
.append(pair
[0])
2493 vcpu_id_list
.append(pair
[1])
2495 vcpu_id_list
= range(0,2*int(requirements
['numa']['proc_req_nb']))
2497 for threads
in cores_dict
.itervalues():
2499 if len(threads
) != 2:
2501 #set pinning for the first thread
2502 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2504 #set pinning for the second thread
2505 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2507 if len(vcpu_id_list
) == 0:
2510 #In case normal threads are requested
2511 elif requirements
['numa']['proc_req_type'] == 'threads':
2512 #Get/create the list of the vcpu_ids
2513 vcpu_id_list
= requirements
['numa']['proc_req_list']
2514 if vcpu_id_list
== None:
2515 vcpu_id_list
= range(0,int(requirements
['numa']['proc_req_nb']))
2517 for threads_index
in sorted(cores_dict
, key
=lambda k
: len(cores_dict
[k
])):
2518 threads
= cores_dict
[threads_index
]
2519 #set pinning for the first thread
2520 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[0][0], threads
[0][1]])
2522 #if exists, set pinning for the second thread
2523 if len(threads
) == 2 and len(vcpu_id_list
) != 0:
2524 cpu_pinning
.append([vcpu_id_list
.pop(0), threads
[1][0], threads
[1][1]])
2526 if len(vcpu_id_list
) == 0:
2529 #Get the source pci addresses for the selected numa
2530 used_sriov_ports
= []
2531 for port
in requirements
['numa']['sriov_list']:
2532 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2537 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2539 port
['pci'] = row
['pci']
2540 if 'mac_address' not in port
:
2541 port
['mac_address'] = row
['mac']
2543 port
['port_id']=row
['id']
2544 port
['Mbps_used'] = port
['bandwidth']
2545 used_sriov_ports
.append(row
['id'])
2548 for port
in requirements
['numa']['port_list']:
2549 port
['Mbps_used'] = None
2550 if port
['dedicated'] != "yes:sriov":
2551 port
['mac_address'] = port
['mac']
2554 result
, content
= db
.get_table(FROM
='resources_port', SELECT
=('id', 'pci', 'mac', 'Mbps'),WHERE
={'numa_id':numa_id
,'root_id': port
['port_id'], 'port_id': None, 'Mbps_used': 0} )
2558 port
['Mbps_used'] = content
[0]['Mbps']
2560 if row
['id'] in used_sriov_ports
or row
['id']==port
['port_id']:
2562 port
['pci'] = row
['pci']
2563 if 'mac_address' not in port
:
2564 port
['mac_address'] = row
['mac'] # mac cannot be set to passthrough ports
2566 port
['port_id']=row
['id']
2567 used_sriov_ports
.append(row
['id'])
2570 # print '2. Physical ports assignation:'+json.dumps(requirements['port_list'], indent=4)
2571 # print '2. SR-IOV assignation:'+json.dumps(requirements['sriov_list'], indent=4)
2573 server
['host_id'] = host_id
2575 #Generate dictionary for saving in db the instance resources
2577 resources
['bridged-ifaces'] = []
2580 numa_dict
['interfaces'] = []
2582 numa_dict
['interfaces'] += requirements
['numa']['port_list']
2583 numa_dict
['interfaces'] += requirements
['numa']['sriov_list']
2585 #Check bridge information
2586 unified_dataplane_iface
=[]
2587 unified_dataplane_iface
+= requirements
['numa']['port_list']
2588 unified_dataplane_iface
+= requirements
['numa']['sriov_list']
2590 for control_iface
in server
.get('networks', []):
2591 control_iface
['net_id']=control_iface
.pop('uuid')
2592 #Get the brifge name
2593 result
, content
= db
.get_table(FROM
='nets',
2594 SELECT
=('name', 'type', 'vlan', 'provider', 'enable_dhcp','dhcp_first_ip',
2595 'dhcp_last_ip', 'cidr', 'gateway_ip', 'dns', 'links', 'routes'),
2596 WHERE
={'uuid': control_iface
['net_id']})
2600 return -1, "Error at field netwoks: Not found any network wit uuid %s" % control_iface
['net_id']
2603 if control_iface
.get("type", 'virtual') == 'virtual':
2604 if network
['type']!='bridge_data' and network
['type']!='bridge_man':
2605 return -1, "Error at field netwoks: network uuid %s for control interface is not of type bridge_man or bridge_data" % control_iface
['net_id']
2606 resources
['bridged-ifaces'].append(control_iface
)
2607 if network
.get("provider") and network
["provider"][0:3] == "OVS":
2608 control_iface
["type"] = "instance:ovs"
2610 control_iface
["type"] = "instance:bridge"
2611 if network
.get("vlan"):
2612 control_iface
["vlan"] = network
["vlan"]
2614 if network
.get("enable_dhcp") == 'true':
2615 control_iface
["enable_dhcp"] = network
.get("enable_dhcp")
2616 control_iface
["dhcp_first_ip"] = network
["dhcp_first_ip"]
2617 control_iface
["dhcp_last_ip"] = network
["dhcp_last_ip"]
2618 control_iface
["cidr"] = network
["cidr"]
2620 if network
.get("dns"):
2621 control_iface
["dns"] = yaml
.safe_load(network
.get("dns"))
2622 if network
.get("links"):
2623 control_iface
["links"] = yaml
.safe_load(network
.get("links"))
2624 if network
.get("routes"):
2625 control_iface
["routes"] = yaml
.safe_load(network
.get("routes"))
2627 if network
['type']!='data' and network
['type']!='ptp':
2628 return -1, "Error at field netwoks: network uuid %s for dataplane interface is not of type data or ptp" % control_iface
['net_id']
2629 #dataplane interface, look for it in the numa tree and asign this network
2631 for dataplane_iface
in numa_dict
['interfaces']:
2632 if dataplane_iface
['name'] == control_iface
.get("name"):
2633 if (dataplane_iface
['dedicated'] == "yes" and control_iface
["type"] != "PF") or \
2634 (dataplane_iface
['dedicated'] == "no" and control_iface
["type"] != "VF") or \
2635 (dataplane_iface
['dedicated'] == "yes:sriov" and control_iface
["type"] != "VFnotShared") :
2636 return -1, "Error at field netwoks: mismatch at interface '%s' from flavor 'dedicated=%s' and networks 'type=%s'" % \
2637 (control_iface
.get("name"), dataplane_iface
['dedicated'], control_iface
["type"])
2638 dataplane_iface
['uuid'] = control_iface
['net_id']
2639 if dataplane_iface
['dedicated'] == "no":
2640 dataplane_iface
['vlan'] = network
['vlan']
2641 if dataplane_iface
['dedicated'] != "yes" and control_iface
.get("mac_address"):
2642 dataplane_iface
['mac_address'] = control_iface
.get("mac_address")
2643 if control_iface
.get("vpci"):
2644 dataplane_iface
['vpci'] = control_iface
.get("vpci")
2648 return -1, "Error at field netwoks: interface name %s from network not found at flavor" % control_iface
.get("name")
2650 resources
['host_id'] = host_id
2651 resources
['image_id'] = server
['image_id']
2652 resources
['flavor_id'] = server
['flavor_id']
2653 resources
['tenant_id'] = server
['tenant_id']
2654 resources
['ram'] = requirements
['ram']
2655 resources
['vcpus'] = requirements
['vcpus']
2656 resources
['status'] = 'CREATING'
2658 if 'description' in server
: resources
['description'] = server
['description']
2659 if 'name' in server
: resources
['name'] = server
['name']
2660 if 'hypervisor' in server
: resources
['hypervisor'] = server
['hypervisor']
2661 if 'os_image_type' in server
: resources
['os_image_type'] = server
['os_image_type']
2663 resources
['extended'] = {} #optional
2664 resources
['extended']['numas'] = []
2665 numa_dict
['numa_id'] = numa_id
2666 numa_dict
['memory'] = requirements
['numa']['memory']
2667 numa_dict
['cores'] = []
2669 for core
in cpu_pinning
:
2670 numa_dict
['cores'].append({'id': core
[2], 'vthread': core
[0], 'paired': paired
})
2671 for core
in reserved_threads
:
2672 numa_dict
['cores'].append({'id': core
})
2673 resources
['extended']['numas'].append(numa_dict
)
2674 if extended
!=None and 'devices' in extended
: #TODO allow extra devices without numa
2675 resources
['extended']['devices'] = extended
['devices']
2678 # '===================================={'
2679 #print json.dumps(resources, indent=4)
2680 #print '====================================}'