import time
import yaml
import random
-import sys
import re
import copy
from neutronclient.neutron import client as neClient
from neutronclient.common import exceptions as neExceptions
from requests.exceptions import ConnectionError
-from email.mime.multipart import MIMEMultipart
-from email.mime.text import MIMEText
"""contain the openstack virtual machine status to openmano status"""
supportedClassificationTypes = ['legacy_flow_classifier']

# Global timeouts (seconds) used while creating/deleting volumes and servers;
# raised from the previous 60/300 to tolerate slow VIM backends.
volume_timeout = 600
server_timeout = 600
class vimconnector(vimconn.vimconnector):
def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
if self.config.get('APIversion'):
self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3'
else: # get from ending auth_url that end with v3 or with v2.0
- self.api_version3 = self.url.split("/")[-1] == "v3"
+ self.api_version3 = self.url.endswith("/v3") or self.url.endswith("/v3/")
self.session['api_version3'] = self.api_version3
if self.api_version3:
+ if self.config.get('project_domain_id') or self.config.get('project_domain_name'):
+ project_domain_id_default = None
+ else:
+ project_domain_id_default = 'default'
+ if self.config.get('user_domain_id') or self.config.get('user_domain_name'):
+ user_domain_id_default = None
+ else:
+ user_domain_id_default = 'default'
auth = v3.Password(auth_url=self.url,
username=self.user,
password=self.passwd,
project_name=self.tenant_name,
project_id=self.tenant_id,
- project_domain_id=self.config.get('project_domain_id', 'default'),
- user_domain_id=self.config.get('user_domain_id', 'default'))
+ project_domain_id=self.config.get('project_domain_id', project_domain_id_default),
+ user_domain_id=self.config.get('user_domain_id', user_domain_id_default),
+ project_domain_name=self.config.get('project_domain_name'),
+ user_domain_name=self.config.get('user_domain_name'))
else:
auth = v2.Password(auth_url=self.url,
username=self.user,
self._reload_connection()
if self.api_version3 and "tenant_id" in filter_dict:
filter_dict['project_id'] = filter_dict.pop('tenant_id') #TODO check
- net_dict=self.neutron.list_networks(**filter_dict)
- net_list=net_dict["networks"]
+ net_dict = self.neutron.list_networks(**filter_dict)
+ net_list = net_dict["networks"]
self.__net_os2mano(net_list)
return net_list
except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
self._format_exception(e)
-
def new_flavor(self, flavor_data, change_name_if_used=True):
'''Adds a tenant flavor to openstack VIM
if change_name_if_used is True, it will change name in case of conflict, because it is not supported name repetition
numa_properties["vmware:latency_sensitivity_level"] = "high"
for numa in numas:
#overwrite ram and vcpus
- ram = numa['memory']*1024
+ #check if key 'memory' is present in numa else use ram value at flavor
+ if 'memory' in numa:
+ ram = numa['memory']*1024
#See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
if 'paired-threads' in numa:
vcpus = numa['paired-threads']*2
new_flavor=self.nova.flavors.create(name,
ram,
vcpus,
- flavor_data.get('disk',1),
+ flavor_data.get('disk',0),
is_public=flavor_data.get('is_public', True)
)
#add metadata
filter_dict_os=filter_dict.copy()
#First we filter by the available filter fields: name, id. The others are removed.
filter_dict_os.pop('checksum',None)
- image_list=self.nova.images.findall(**filter_dict_os)
- if len(image_list)==0:
+ image_list = self.nova.images.findall(**filter_dict_os)
+ if len(image_list) == 0:
return []
#Then we filter by the rest of filter fields: checksum
filtered_list = []
for image in image_list:
- image_class=self.glance.images.get(image.id)
- if 'checksum' not in filter_dict or image_class['checksum']==filter_dict.get('checksum'):
- filtered_list.append(image_class.copy())
+ try:
+ image_class = self.glance.images.get(image.id)
+ if 'checksum' not in filter_dict or image_class['checksum']==filter_dict.get('checksum'):
+ filtered_list.append(image_class.copy())
+ except gl1Exceptions.HTTPNotFound:
+ pass
return filtered_list
except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
self._format_exception(e)
- @staticmethod
- def _create_mimemultipart(content_list):
- """Creates a MIMEmultipart text combining the content_list
- :param content_list: list of text scripts to be combined
- :return: str of the created MIMEmultipart. If the list is empty returns None, if the list contains only one
- element MIMEmultipart is not created and this content is returned
- """
- if not content_list:
- return None
- elif len(content_list) == 1:
- return content_list[0]
- combined_message = MIMEMultipart()
- for content in content_list:
- if content.startswith('#include'):
- format = 'text/x-include-url'
- elif content.startswith('#include-once'):
- format = 'text/x-include-once-url'
- elif content.startswith('#!'):
- format = 'text/x-shellscript'
- elif content.startswith('#cloud-config'):
- format = 'text/cloud-config'
- elif content.startswith('#cloud-config-archive'):
- format = 'text/cloud-config-archive'
- elif content.startswith('#upstart-job'):
- format = 'text/upstart-job'
- elif content.startswith('#part-handler'):
- format = 'text/part-handler'
- elif content.startswith('#cloud-boothook'):
- format = 'text/cloud-boothook'
- else: # by default
- format = 'text/x-shellscript'
- sub_message = MIMEText(content, format, sys.getdefaultencoding())
- combined_message.attach(sub_message)
- return combined_message.as_string()
-
def __wait_for_vm(self, vm_id, status):
"""wait until vm is in the desired status and return True.
If the VM gets in ERROR status, return false.
def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
availability_zone_index=None, availability_zone_list=None):
- '''Adds a VM instance to VIM
+ """Adds a VM instance to VIM
Params:
start: indicates if VM must start or boot in pause mode. Ignored
            image_id,flavor_id: image and flavor uuid
model: interface model, ignored #TODO
mac_address: used for SR-IOV ifaces #TODO for other types
use: 'data', 'bridge', 'mgmt'
- type: 'virtual', 'PF', 'VF', 'VFnotShared'
+ type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
vim_id: filled/added by this function
floating_ip: True/False (or it can be None)
'cloud_config': (optional) dictionary with:
availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
availability_zone_index is None
#TODO ip, security groups
- Returns the instance identifier
- '''
+ Returns a tuple with the instance identifier and created_items or raises an exception on error
+ created_items can be None or a dictionary where this method can include key-values that will be passed to
+ the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+ Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+ as not present.
+ """
self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list))
try:
server = None
- metadata={}
- net_list_vim=[]
- external_network=[] # list of external networks to be connected to instance, later on used to create floating_ip
+ created_items = {}
+ # metadata = {}
+ net_list_vim = []
+ external_network = [] # list of external networks to be connected to instance, later on used to create floating_ip
no_secured_ports = [] # List of port-is with port-security disabled
self._reload_connection()
- metadata_vpci={} # For a specific neutron plugin
+ # metadata_vpci = {} # For a specific neutron plugin
block_device_mapping = None
for net in net_list:
- if not net.get("net_id"): #skip non connected iface
+ if not net.get("net_id"): # skip non connected iface
continue
port_dict={
"admin_state_up": True
}
if net["type"]=="virtual":
- if "vpci" in net:
- metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
- elif net["type"]=="VF": # for VF
- if "vpci" in net:
- if "VF" not in metadata_vpci:
- metadata_vpci["VF"]=[]
- metadata_vpci["VF"].append([ net["vpci"], "" ])
+ pass
+ # if "vpci" in net:
+ # metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
+ elif net["type"] == "VF" or net["type"] == "SR-IOV": # for VF
+ # if "vpci" in net:
+ # if "VF" not in metadata_vpci:
+ # metadata_vpci["VF"]=[]
+ # metadata_vpci["VF"].append([ net["vpci"], "" ])
port_dict["binding:vnic_type"]="direct"
- ########## VIO specific Changes #######
+ # VIO specific Changes
if self.vim_type == "VIO":
- #Need to create port with port_security_enabled = False and no-security-groups
+ # Need to create port with port_security_enabled = False and no-security-groups
port_dict["port_security_enabled"]=False
port_dict["provider_security_groups"]=[]
port_dict["security_groups"]=[]
- else: #For PT
- ########## VIO specific Changes #######
- #Current VIO release does not support port with type 'direct-physical'
- #So no need to create virtual port in case of PCI-device.
- #Will update port_dict code when support gets added in next VIO release
+ else: # For PT PCI-PASSTHROUGH
+ # VIO specific Changes
+ # Current VIO release does not support port with type 'direct-physical'
+ # So no need to create virtual port in case of PCI-device.
+ # Will update port_dict code when support gets added in next VIO release
if self.vim_type == "VIO":
- raise vimconn.vimconnNotSupportedException("Current VIO release does not support full passthrough (PT)")
- if "vpci" in net:
- if "PF" not in metadata_vpci:
- metadata_vpci["PF"]=[]
- metadata_vpci["PF"].append([ net["vpci"], "" ])
+ raise vimconn.vimconnNotSupportedException(
+ "Current VIO release does not support full passthrough (PT)")
+ # if "vpci" in net:
+ # if "PF" not in metadata_vpci:
+ # metadata_vpci["PF"]=[]
+ # metadata_vpci["PF"].append([ net["vpci"], "" ])
port_dict["binding:vnic_type"]="direct-physical"
if not port_dict["name"]:
port_dict["name"]=name
if net.get("mac_address"):
port_dict["mac_address"]=net["mac_address"]
new_port = self.neutron.create_port({"port": port_dict })
+ created_items["port:" + str(new_port["port"]["id"])] = True
net["mac_adress"] = new_port["port"]["mac_address"]
net["vim_id"] = new_port["port"]["id"]
            # if trying to use a network without a subnetwork, it will return an empty list
if net.get("port_security") == False:
no_secured_ports.append(new_port["port"]["id"])
- if metadata_vpci:
- metadata = {"pci_assignement": json.dumps(metadata_vpci)}
- if len(metadata["pci_assignement"]) >255:
- #limit the metadata size
- #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
- self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
- metadata = {}
+ # if metadata_vpci:
+ # metadata = {"pci_assignement": json.dumps(metadata_vpci)}
+ # if len(metadata["pci_assignement"]) >255:
+ # #limit the metadata size
+ # #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
+ # self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
+ # metadata = {}
- self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s' metadata %s",
- name, image_id, flavor_id, str(net_list_vim), description, str(metadata))
+ self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
+ name, image_id, flavor_id, str(net_list_vim), description)
- security_groups = self.config.get('security_groups')
+ security_groups = self.config.get('security_groups')
if type(security_groups) is str:
security_groups = ( security_groups, )
- #cloud config
- userdata=None
- config_drive = None
- userdata_list = []
- if isinstance(cloud_config, dict):
- if cloud_config.get("user-data"):
- if isinstance(cloud_config["user-data"], str):
- userdata_list.append(cloud_config["user-data"])
- else:
- for u in cloud_config["user-data"]:
- userdata_list.append(u)
- if cloud_config.get("boot-data-drive") != None:
- config_drive = cloud_config["boot-data-drive"]
- if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
- userdata_dict={}
- #default user
- if cloud_config.get("key-pairs"):
- userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"]
- userdata_dict["users"] = [{"default": None, "ssh-authorized-keys": cloud_config["key-pairs"] }]
- if cloud_config.get("users"):
- if "users" not in userdata_dict:
- userdata_dict["users"] = [ "default" ]
- for user in cloud_config["users"]:
- user_info = {
- "name" : user["name"],
- "sudo": "ALL = (ALL)NOPASSWD:ALL"
- }
- if "user-info" in user:
- user_info["gecos"] = user["user-info"]
- if user.get("key-pairs"):
- user_info["ssh-authorized-keys"] = user["key-pairs"]
- userdata_dict["users"].append(user_info)
-
- if cloud_config.get("config-files"):
- userdata_dict["write_files"] = []
- for file in cloud_config["config-files"]:
- file_info = {
- "path" : file["dest"],
- "content": file["content"]
- }
- if file.get("encoding"):
- file_info["encoding"] = file["encoding"]
- if file.get("permissions"):
- file_info["permissions"] = file["permissions"]
- if file.get("owner"):
- file_info["owner"] = file["owner"]
- userdata_dict["write_files"].append(file_info)
- userdata_list.append("#cloud-config\n" + yaml.safe_dump(userdata_dict, indent=4,
- default_flow_style=False))
- userdata = self._create_mimemultipart(userdata_list)
- self.logger.debug("userdata: %s", userdata)
- elif isinstance(cloud_config, str):
- userdata = cloud_config
-
- #Create additional volumes in case these are present in disk_list
+ # cloud config
+ config_drive, userdata = self._create_user_data(cloud_config)
+
+ # Create additional volumes in case these are present in disk_list
base_disk_index = ord('b')
if disk_list != None:
block_device_mapping = {}
else:
volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
chr(base_disk_index))
+ created_items["volume:" + str(volume.id)] = True
block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
base_disk_index += 1
- #wait until volumes are with status available
+ # Wait until volumes are with status available
keep_waiting = True
elapsed_time = 0
while keep_waiting and elapsed_time < volume_timeout:
time.sleep(1)
elapsed_time += 1
- #if we exceeded the timeout rollback
+ # If we exceeded the timeout rollback
if elapsed_time >= volume_timeout:
- #delete the volumes we just created
- for volume_id in block_device_mapping.itervalues():
- self.cinder.volumes.delete(volume_id)
-
- #delete ports we just created
- for net_item in net_list_vim:
- if 'port-id' in net_item:
- self.neutron.delete_port(net_item['port-id'])
-
raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
http_code=vimconn.HTTP_Request_Timeout)
# get availability Zone
vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list)
- self.logger.debug("nova.servers.create({}, {}, {}, nics={}, meta={}, security_groups={}, "
+ self.logger.debug("nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
"availability_zone={}, key_name={}, userdata={}, config_drive={}, "
- "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim, metadata,
+ "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim,
security_groups, vm_av_zone, self.config.get('keypair'),
- userdata, config_drive, block_device_mapping))
- server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata,
+ userdata, config_drive, block_device_mapping))
+ server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim,
security_groups=security_groups,
availability_zone=vm_av_zone,
key_name=self.config.get('keypair'),
for port_id in no_secured_ports:
try:
self.neutron.update_port(port_id, {"port": {"port_security_enabled": False, "security_groups": None} })
-
except Exception as e:
self.logger.error("It was not possible to disable port security for port {}".format(port_id))
- self.delete_vminstance(server.id)
raise
- #print "DONE :-)", server
- pool_id = None
- floating_ips = self.neutron.list_floatingips().get("floatingips", ())
+ # print "DONE :-)", server
+ pool_id = None
if external_network:
+ floating_ips = self.neutron.list_floatingips().get("floatingips", ())
self.__wait_for_vm(server.id, 'ACTIVE')
for floating_network in external_network:
try:
assigned = False
- while(assigned == False):
+ while not assigned:
if floating_ips:
ip = floating_ips.pop(0)
if not ip.get("port_id", False) and ip.get('tenant_id') == server.tenant_id:
continue
raise
- return server.id
+ return server.id, created_items
# except nvExceptions.NotFound as e:
# error_value=-vimconn.HTTP_Not_Found
# error_text= "vm instance %s not found" % vm_id
# raise vimconn.vimconnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
except Exception as e:
- # delete the volumes we just created
- if block_device_mapping:
- for volume_id in block_device_mapping.itervalues():
- self.cinder.volumes.delete(volume_id)
-
- # Delete the VM
- if server != None:
- self.delete_vminstance(server.id)
- else:
- # delete ports we just created
- for net_item in net_list_vim:
- if 'port-id' in net_item:
- self.neutron.delete_port(net_item['port-id'])
+ server_id = None
+ if server:
+ server_id = server.id
+ try:
+ self.delete_vminstance(server_id, created_items)
+ except Exception as e2:
+ self.logger.error("new_vminstance rollback fail {}".format(e2))
self._format_exception(e)
except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.BadRequest, ConnectionError) as e:
self._format_exception(e)
- def delete_vminstance(self, vm_id):
+ def delete_vminstance(self, vm_id, created_items=None):
'''Removes a VM instance from VIM. Returns the old identifier
'''
#print "osconnector: Getting VM from VIM"
+ if created_items == None:
+ created_items = {}
try:
self._reload_connection()
- #delete VM ports attached to this networks before the virtual machine
- ports = self.neutron.list_ports(device_id=vm_id)
- for p in ports['ports']:
+ # delete VM ports attached to this networks before the virtual machine
+ for k, v in created_items.items():
+ if not v: # skip already deleted
+ continue
try:
- self.neutron.delete_port(p["id"])
+ k_item, _, k_id = k.partition(":")
+ if k_item == "port":
+ self.neutron.delete_port(k_id)
except Exception as e:
- self.logger.error("Error deleting port: " + type(e).__name__ + ": "+ str(e))
+ self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
- #commented because detaching the volumes makes the servers.delete not work properly ?!?
- #dettach volumes attached
- server = self.nova.servers.get(vm_id)
- volumes_attached_dict = server._info['os-extended-volumes:volumes_attached']
- #for volume in volumes_attached_dict:
- # self.cinder.volumes.detach(volume['id'])
+ # #commented because detaching the volumes makes the servers.delete not work properly ?!?
+ # #dettach volumes attached
+ # server = self.nova.servers.get(vm_id)
+ # volumes_attached_dict = server._info['os-extended-volumes:volumes_attached'] #volume['id']
+ # #for volume in volumes_attached_dict:
+ # # self.cinder.volumes.detach(volume['id'])
- self.nova.servers.delete(vm_id)
+ if vm_id:
+ self.nova.servers.delete(vm_id)
- #delete volumes.
- #Although having detached them should have them in active status
- #we ensure in this loop
+ # delete volumes. Although having detached, they should have in active status before deleting
+ # we ensure in this loop
keep_waiting = True
elapsed_time = 0
while keep_waiting and elapsed_time < volume_timeout:
keep_waiting = False
- for volume in volumes_attached_dict:
- if self.cinder.volumes.get(volume['id']).status != 'available':
- keep_waiting = True
- else:
- self.cinder.volumes.delete(volume['id'])
+ for k, v in created_items.items():
+ if not v: # skip already deleted
+ continue
+ try:
+ k_item, _, k_id = k.partition(":")
+ if k_item == "volume":
+ if self.cinder.volumes.get(k_id).status != 'available':
+ keep_waiting = True
+ else:
+ self.cinder.volumes.delete(k_id)
+ except Exception as e:
+ self.logger.error("Error deleting volume: {}: {}".format(type(e).__name__, e))
if keep_waiting:
time.sleep(1)
elapsed_time += 1
-
- return vm_id
+ return None
except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
self._format_exception(e)
- #TODO insert exception vimconn.HTTP_Unauthorized
- #if reaching here is because an exception
def refresh_vms_status(self, vm_list):
'''Get the status of the virtual machines and their interfaces/ports
vm_dict[vm_id] = vm
return vm_dict
- def action_vminstance(self, vm_id, action_dict):
+ def action_vminstance(self, vm_id, action_dict, created_items={}):
'''Send and action over a VM instance from VIM
- Returns the vm_id if the action was successfully sent to the VIM'''
+ Returns None or the console dict if the action was successfully sent to the VIM'''
self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
try:
self._reload_connection()
except Exception as e:
raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))
- return vm_id
+ return None
except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e:
self._format_exception(e)
#TODO insert exception vimconn.HTTP_Unauthorized