supportedClassificationTypes = ['legacy_flow_classifier']
#global var to have a timeout creating and deleting volumes
-volume_timeout = 60
-server_timeout = 300
+volume_timeout = 600
+server_timeout = 600
class vimconnector(vimconn.vimconnector):
def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
self._reload_connection()
if self.api_version3 and "tenant_id" in filter_dict:
filter_dict['project_id'] = filter_dict.pop('tenant_id') #TODO check
- net_dict=self.neutron.list_networks(**filter_dict)
- net_list=net_dict["networks"]
+ net_dict = self.neutron.list_networks(**filter_dict)
+ net_list = net_dict["networks"]
self.__net_os2mano(net_list)
return net_list
except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
numa_properties["vmware:latency_sensitivity_level"] = "high"
for numa in numas:
#overwrite ram and vcpus
- ram = numa['memory']*1024
+ # Check whether the 'memory' key is present in this numa entry; otherwise keep the flavor's ram value
+ if 'memory' in numa:
+ ram = numa['memory']*1024
#See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
if 'paired-threads' in numa:
vcpus = numa['paired-threads']*2
new_flavor=self.nova.flavors.create(name,
ram,
vcpus,
- flavor_data.get('disk',1),
+ flavor_data.get('disk',0),
is_public=flavor_data.get('is_public', True)
)
#add metadata
model: interface model, ignored #TODO
mac_address: used for SR-IOV ifaces #TODO for other types
use: 'data', 'bridge', 'mgmt'
- type: 'virtual', 'PF', 'VF', 'VFnotShared'
+ type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
vim_id: filled/added by this function
floating_ip: True/False (or it can be None)
'cloud_config': (optional) dictionary with:
try:
server = None
created_items = {}
- metadata = {}
+ # metadata = {}
net_list_vim = []
external_network = [] # list of external networks to be connected to instance, later on used to create floating_ip
no_secured_ports = [] # List of port-is with port-security disabled
self._reload_connection()
- metadata_vpci = {} # For a specific neutron plugin
+ # metadata_vpci = {} # For a specific neutron plugin
block_device_mapping = None
for net in net_list:
if not net.get("net_id"): # skip non connected iface
"admin_state_up": True
}
if net["type"]=="virtual":
- if "vpci" in net:
- metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
- elif net["type"]=="VF": # for VF
- if "vpci" in net:
- if "VF" not in metadata_vpci:
- metadata_vpci["VF"]=[]
- metadata_vpci["VF"].append([ net["vpci"], "" ])
+ pass
+ # if "vpci" in net:
+ # metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
+ elif net["type"] == "VF" or net["type"] == "SR-IOV": # for VF
+ # if "vpci" in net:
+ # if "VF" not in metadata_vpci:
+ # metadata_vpci["VF"]=[]
+ # metadata_vpci["VF"].append([ net["vpci"], "" ])
port_dict["binding:vnic_type"]="direct"
- ########## VIO specific Changes #######
+ # VIO specific Changes
if self.vim_type == "VIO":
- #Need to create port with port_security_enabled = False and no-security-groups
+ # Need to create port with port_security_enabled = False and no-security-groups
port_dict["port_security_enabled"]=False
port_dict["provider_security_groups"]=[]
port_dict["security_groups"]=[]
- else: #For PT
- ########## VIO specific Changes #######
- #Current VIO release does not support port with type 'direct-physical'
- #So no need to create virtual port in case of PCI-device.
- #Will update port_dict code when support gets added in next VIO release
+ else: # For PT (PCI-PASSTHROUGH)
+ # VIO specific Changes
+ # Current VIO release does not support port with type 'direct-physical'
+ # So no need to create virtual port in case of PCI-device.
+ # Will update port_dict code when support gets added in next VIO release
if self.vim_type == "VIO":
- raise vimconn.vimconnNotSupportedException("Current VIO release does not support full passthrough (PT)")
- if "vpci" in net:
- if "PF" not in metadata_vpci:
- metadata_vpci["PF"]=[]
- metadata_vpci["PF"].append([ net["vpci"], "" ])
+ raise vimconn.vimconnNotSupportedException(
+ "Current VIO release does not support full passthrough (PT)")
+ # if "vpci" in net:
+ # if "PF" not in metadata_vpci:
+ # metadata_vpci["PF"]=[]
+ # metadata_vpci["PF"].append([ net["vpci"], "" ])
port_dict["binding:vnic_type"]="direct-physical"
if not port_dict["name"]:
port_dict["name"]=name
if net.get("mac_address"):
port_dict["mac_address"]=net["mac_address"]
new_port = self.neutron.create_port({"port": port_dict })
- created_items[("port", str(new_port["port"]["id"]))] = True
+ created_items["port:" + str(new_port["port"]["id"])] = True
net["mac_adress"] = new_port["port"]["mac_address"]
net["vim_id"] = new_port["port"]["id"]
# if try to use a network without subnetwork, it will return a emtpy list
if net.get("port_security") == False:
no_secured_ports.append(new_port["port"]["id"])
- if metadata_vpci:
- metadata = {"pci_assignement": json.dumps(metadata_vpci)}
- if len(metadata["pci_assignement"]) >255:
- #limit the metadata size
- #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
- self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
- metadata = {}
+ # if metadata_vpci:
+ # metadata = {"pci_assignement": json.dumps(metadata_vpci)}
+ # if len(metadata["pci_assignement"]) >255:
+ # #limit the metadata size
+ # #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
+ # self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
+ # metadata = {}
- self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s' metadata %s",
- name, image_id, flavor_id, str(net_list_vim), description, str(metadata))
+ self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
+ name, image_id, flavor_id, str(net_list_vim), description)
- security_groups = self.config.get('security_groups')
+ security_groups = self.config.get('security_groups')
if type(security_groups) is str:
security_groups = ( security_groups, )
# cloud config
else:
volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
chr(base_disk_index))
- created_items[("volume", str(volume.id))] = True
+ created_items["volume:" + str(volume.id)] = True
block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
base_disk_index += 1
- # wait until volumes are with status available
+ # Wait until volumes are with status available
keep_waiting = True
elapsed_time = 0
while keep_waiting and elapsed_time < volume_timeout:
time.sleep(1)
elapsed_time += 1
- # if we exceeded the timeout rollback
+ # If we exceeded the timeout rollback
if elapsed_time >= volume_timeout:
raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
http_code=vimconn.HTTP_Request_Timeout)
# get availability Zone
vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list)
- self.logger.debug("nova.servers.create({}, {}, {}, nics={}, meta={}, security_groups={}, "
+ self.logger.debug("nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
"availability_zone={}, key_name={}, userdata={}, config_drive={}, "
- "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim, metadata,
+ "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim,
security_groups, vm_av_zone, self.config.get('keypair'),
- userdata, config_drive, block_device_mapping))
- server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata,
+ userdata, config_drive, block_device_mapping))
+ server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim,
security_groups=security_groups,
availability_zone=vm_av_zone,
key_name=self.config.get('keypair'),
if not v: # skip already deleted
continue
try:
- if k[0] == "port":
- self.neutron.delete_port(k[1])
+ k_item, _, k_id = k.partition(":")
+ if k_item == "port":
+ self.neutron.delete_port(k_id)
except Exception as e:
- self.logger.error("Error deleting port: " + type(e).__name__ + ": "+ str(e))
+ self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
# #commented because detaching the volumes makes the servers.delete not work properly ?!?
# #dettach volumes attached
if not v: # skip already deleted
continue
try:
- if k[0] == "volume":
- if self.cinder.volumes.get(k[1]).status != 'available':
+ k_item, _, k_id = k.partition(":")
+ if k_item == "volume":
+ if self.cinder.volumes.get(k_id).status != 'available':
keep_waiting = True
else:
- self.cinder.volumes.delete(k[1])
+ self.cinder.volumes.delete(k_id)
except Exception as e:
- self.logger.error("Error deleting volume: " + type(e).__name__ + ": " + str(e))
+ self.logger.error("Error deleting volume: {}: {}".format(type(e).__name__, e))
if keep_waiting:
time.sleep(1)
elapsed_time += 1