__date__ = "$22-sep-2017 23:59:59$"
import vimconn
-import json
+# import json
import logging
import netaddr
import time
supportedClassificationTypes = ['legacy_flow_classifier']
# global variables holding the timeouts (in seconds) used when creating and deleting volumes and servers
-volume_timeout = 60
-server_timeout = 300
+volume_timeout = 600
+server_timeout = 600
class vimconnector(vimconn.vimconnector):
def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
elif isinstance(exception, nvExceptions.Conflict):
raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception))
elif isinstance(exception, vimconn.vimconnException):
- raise
+ raise exception
else: # ()
self.logger.error("General Exception " + str(exception), exc_info=True)
raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
#create subnetwork, even if there is no profile
if not ip_profile:
ip_profile = {}
- if 'subnet_address' not in ip_profile:
+ if not ip_profile.get('subnet_address'):
#Fake subnet is required
subnet_rand = random.randint(0, 255)
ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
"cidr": ip_profile['subnet_address']
}
# Gateway should be set to None if not needed. Otherwise openstack assigns one by default
- subnet['gateway_ip'] = ip_profile.get('gateway_address')
+ if ip_profile.get('gateway_address'):
+ subnet['gateway_ip'] = ip_profile.get('gateway_address')
if ip_profile.get('dns_address'):
subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
if 'dhcp_enabled' in ip_profile:
- subnet['enable_dhcp'] = False if ip_profile['dhcp_enabled']=="false" else True
- if 'dhcp_start_address' in ip_profile:
+ subnet['enable_dhcp'] = False if \
+ ip_profile['dhcp_enabled']=="false" or ip_profile['dhcp_enabled']==False else True
+ if ip_profile.get('dhcp_start_address'):
subnet['allocation_pools'] = []
subnet['allocation_pools'].append(dict())
subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address']
- if 'dhcp_count' in ip_profile:
+ if ip_profile.get('dhcp_count'):
#parts = ip_profile['dhcp_start_address'].split('.')
#ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address']))
#self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
self.neutron.create_subnet({"subnet": subnet} )
return new_net["network"]["id"]
- except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+ except Exception as e:
if new_net:
self.neutron.delete_network(new_net['network']['id'])
self._format_exception(e)
self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
try:
self._reload_connection()
- if self.api_version3 and "tenant_id" in filter_dict:
- filter_dict['project_id'] = filter_dict.pop('tenant_id') #TODO check
- net_dict=self.neutron.list_networks(**filter_dict)
- net_list=net_dict["networks"]
+ filter_dict_os = filter_dict.copy()
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+            filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')  # TODO: check
+ net_dict = self.neutron.list_networks(**filter_dict_os)
+ net_list = net_dict["networks"]
self.__net_os2mano(net_list)
return net_list
except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
numa_properties["vmware:latency_sensitivity_level"] = "high"
for numa in numas:
#overwrite ram and vcpus
- ram = numa['memory']*1024
+ #check if key 'memory' is present in numa else use ram value at flavor
+ if 'memory' in numa:
+ ram = numa['memory']*1024
#See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
if 'paired-threads' in numa:
vcpus = numa['paired-threads']*2
new_flavor=self.nova.flavors.create(name,
ram,
vcpus,
- flavor_data.get('disk',1),
+ flavor_data.get('disk',0),
is_public=flavor_data.get('is_public', True)
)
#add metadata
self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
try:
self._reload_connection()
- filter_dict_os=filter_dict.copy()
+ filter_dict_os = filter_dict.copy()
#First we filter by the available filter fields: name, id. The others are removed.
- filter_dict_os.pop('checksum',None)
+ filter_dict_os.pop('checksum', None)
image_list = self.nova.images.findall(**filter_dict_os)
if len(image_list) == 0:
return []
for image in image_list:
try:
image_class = self.glance.images.get(image.id)
- if 'checksum' not in filter_dict or image_class['checksum']==filter_dict.get('checksum'):
+ if 'checksum' not in filter_dict or image_class['checksum'] == filter_dict.get('checksum'):
filtered_list.append(image_class.copy())
except gl1Exceptions.HTTPNotFound:
pass
type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
vim_id: filled/added by this function
floating_ip: True/False (or it can be None)
- 'cloud_config': (optional) dictionary with:
- 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
- 'users': (optional) list of users to be inserted, each item is a dict with:
- 'name': (mandatory) user name,
- 'key-pairs': (optional) list of strings with the public key to be inserted to the user
- 'user-data': (optional) string is a text script to be passed directly to cloud-init
- 'config-files': (optional). List of files to be transferred. Each item is a dict with:
- 'dest': (mandatory) string with the destination absolute path
- 'encoding': (optional, by default text). Can be one of:
- 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
- 'content' (mandatory): string with the content of the file
- 'permissions': (optional) string with file permissions, typically octal notation '0644'
- 'owner': (optional) file owner, string with the format 'owner:group'
- 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+ 'cloud_config': (optional) dictionary with:
+ 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+ 'users': (optional) list of users to be inserted, each item is a dict with:
+ 'name': (mandatory) user name,
+ 'key-pairs': (optional) list of strings with the public key to be inserted to the user
+ 'user-data': (optional) string is a text script to be passed directly to cloud-init
+ 'config-files': (optional). List of files to be transferred. Each item is a dict with:
+ 'dest': (mandatory) string with the destination absolute path
+ 'encoding': (optional, by default text). Can be one of:
+ 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+ 'content' (mandatory): string with the content of the file
+ 'permissions': (optional) string with file permissions, typically octal notation '0644'
+ 'owner': (optional) file owner, string with the format 'owner:group'
+ 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
'size': (mandatory) string with the size of the disk in GB
port_dict["name"]=name
if net.get("mac_address"):
port_dict["mac_address"]=net["mac_address"]
+ if net.get("ip_address"):
+ port_dict["fixed_ips"] = [{'ip_address': net["ip_address"]}]
+ # TODO add 'subnet_id': <subnet_id>
new_port = self.neutron.create_port({"port": port_dict })
- created_items[("port", str(new_port["port"]["id"]))] = True
+ created_items["port:" + str(new_port["port"]["id"])] = True
net["mac_adress"] = new_port["port"]["mac_address"]
net["vim_id"] = new_port["port"]["id"]
            # if you try to use a network without a subnetwork, it will return an empty list
else:
volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
chr(base_disk_index))
- created_items[("volume", str(volume.id))] = True
+ created_items["volume:" + str(volume.id)] = True
block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
base_disk_index += 1
if not v: # skip already deleted
continue
try:
- if k[0] == "port":
- self.neutron.delete_port(k[1])
+ k_item, _, k_id = k.partition(":")
+ if k_item == "port":
+ self.neutron.delete_port(k_id)
except Exception as e:
- self.logger.error("Error deleting port: " + type(e).__name__ + ": "+ str(e))
+ self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
# #commented because detaching the volumes makes the servers.delete not work properly ?!?
# #dettach volumes attached
if not v: # skip already deleted
continue
try:
- if k[0] == "volume":
- if self.cinder.volumes.get(k[1]).status != 'available':
+ k_item, _, k_id = k.partition(":")
+ if k_item == "volume":
+ if self.cinder.volumes.get(k_id).status != 'available':
keep_waiting = True
else:
- self.cinder.volumes.delete(k[1])
+ self.cinder.volumes.delete(k_id)
except Exception as e:
- self.logger.error("Error deleting volume: " + type(e).__name__ + ": " + str(e))
+ self.logger.error("Error deleting volume: {}: {}".format(type(e).__name__, e))
if keep_waiting:
time.sleep(1)
elapsed_time += 1
classification_dict = definition
classification_dict['name'] = name
- new_class = self.neutron.create_flow_classifier(
+ new_class = self.neutron.create_sfc_flow_classifier(
{'flow_classifier': classification_dict})
return new_class['flow_classifier']['id']
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
self.logger.debug("Getting Classifications from VIM filter: '%s'",
str(filter_dict))
try:
+ filter_dict_os = filter_dict.copy()
self._reload_connection()
- if self.api_version3 and "tenant_id" in filter_dict:
- filter_dict['project_id'] = filter_dict.pop('tenant_id')
- classification_dict = self.neutron.list_flow_classifier(
- **filter_dict)
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+ classification_dict = self.neutron.list_sfc_flow_classifiers(
+ **filter_dict_os)
classification_list = classification_dict["flow_classifiers"]
self.__classification_os2mano(classification_list)
return classification_list
self.logger.debug("Deleting Classification '%s' from VIM", class_id)
try:
self._reload_connection()
- self.neutron.delete_flow_classifier(class_id)
+ self.neutron.delete_sfc_flow_classifier(class_id)
return class_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,
self._reload_connection()
correlation = None
if sfc_encap:
- # TODO(igordc): must be changed to NSH in Queens
- # (MPLS is a workaround)
- correlation = 'mpls'
+ correlation = 'nsh'
if len(ingress_ports) != 1:
raise vimconn.vimconnNotSupportedException(
"OpenStack VIM connector can only have "
'egress': egress_ports[0],
'service_function_parameters': {
'correlation': correlation}}
- new_sfi = self.neutron.create_port_pair({'port_pair': sfi_dict})
+ new_sfi = self.neutron.create_sfc_port_pair({'port_pair': sfi_dict})
return new_sfi['port_pair']['id']
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
neExceptions.NeutronException, ConnectionError) as e:
if new_sfi:
try:
- self.neutron.delete_port_pair_group(
+ self.neutron.delete_sfc_port_pair(
new_sfi['port_pair']['id'])
except Exception:
self.logger.error(
"VIM filter: '%s'", str(filter_dict))
try:
self._reload_connection()
- if self.api_version3 and "tenant_id" in filter_dict:
- filter_dict['project_id'] = filter_dict.pop('tenant_id')
- sfi_dict = self.neutron.list_port_pair(**filter_dict)
+ filter_dict_os = filter_dict.copy()
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+ sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
sfi_list = sfi_dict["port_pairs"]
self.__sfi_os2mano(sfi_list)
return sfi_list
"from VIM", sfi_id)
try:
self._reload_connection()
- self.neutron.delete_port_pair(sfi_id)
+ self.neutron.delete_sfc_port_pair(sfi_id)
return sfi_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,
self._reload_connection()
correlation = None
if sfc_encap:
- # TODO(igordc): must be changed to NSH in Queens
- # (MPLS is a workaround)
- correlation = 'mpls'
+ correlation = 'nsh'
for instance in sfis:
sfi = self.get_sfi(instance)
- if sfi.get('sfc_encap') != correlation:
+ if sfi.get('sfc_encap') != sfc_encap:
raise vimconn.vimconnNotSupportedException(
"OpenStack VIM connector requires all SFIs of the "
"same SF to share the same SFC Encapsulation")
sf_dict = {'name': name,
'port_pairs': sfis}
- new_sf = self.neutron.create_port_pair_group({
+ new_sf = self.neutron.create_sfc_port_pair_group({
'port_pair_group': sf_dict})
return new_sf['port_pair_group']['id']
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
neExceptions.NeutronException, ConnectionError) as e:
if new_sf:
try:
- self.neutron.delete_port_pair_group(
+ self.neutron.delete_sfc_port_pair_group(
new_sf['port_pair_group']['id'])
except Exception:
self.logger.error(
str(filter_dict))
try:
self._reload_connection()
- if self.api_version3 and "tenant_id" in filter_dict:
- filter_dict['project_id'] = filter_dict.pop('tenant_id')
- sf_dict = self.neutron.list_port_pair_group(**filter_dict)
+ filter_dict_os = filter_dict.copy()
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+ sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
sf_list = sf_dict["port_pair_groups"]
self.__sf_os2mano(sf_list)
return sf_list
self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
try:
self._reload_connection()
- self.neutron.delete_port_pair_group(sf_id)
+ self.neutron.delete_sfc_port_pair_group(sf_id)
return sf_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,
try:
new_sfp = None
self._reload_connection()
- if not sfc_encap:
- raise vimconn.vimconnNotSupportedException(
- "OpenStack VIM connector only supports "
- "SFC-Encapsulated chains")
- # TODO(igordc): must be changed to NSH in Queens
- # (MPLS is a workaround)
- correlation = 'mpls'
+        # In networking-sfc, MPLS encapsulation is the legacy option and
+        # should be used when no full SFC encapsulation is intended
+ sfc_encap = 'mpls'
+ if sfc_encap:
+ correlation = 'nsh'
sfp_dict = {'name': name,
'flow_classifiers': classifications,
'port_pair_groups': sfs,
'chain_parameters': {'correlation': correlation}}
if spi:
sfp_dict['chain_id'] = spi
- new_sfp = self.neutron.create_port_chain({'port_chain': sfp_dict})
+ new_sfp = self.neutron.create_sfc_port_chain({'port_chain': sfp_dict})
return new_sfp["port_chain"]["id"]
except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
neExceptions.NeutronException, ConnectionError) as e:
if new_sfp:
try:
- self.neutron.delete_port_chain(new_sfp['port_chain']['id'])
+ self.neutron.delete_sfc_port_chain(new_sfp['port_chain']['id'])
except Exception:
self.logger.error(
'Creation of Service Function Path failed, with '
"'%s'", str(filter_dict))
try:
self._reload_connection()
- if self.api_version3 and "tenant_id" in filter_dict:
- filter_dict['project_id'] = filter_dict.pop('tenant_id')
- sfp_dict = self.neutron.list_port_chain(**filter_dict)
+ filter_dict_os = filter_dict.copy()
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+ sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
sfp_list = sfp_dict["port_chains"]
self.__sfp_os2mano(sfp_list)
return sfp_list
"Deleting Service Function Path '%s' from VIM", sfp_id)
try:
self._reload_connection()
- self.neutron.delete_port_chain(sfp_id)
+ self.neutron.delete_sfc_port_chain(sfp_id)
return sfp_id
except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
ksExceptions.ClientException, neExceptions.NeutronException,