X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_ro%2Fvimconn_openstack.py;h=0d2603f69df6c23d636a48f06e3e3d9dcf13a708;hb=e72710b0ac189586e822a71a611f87fdce6a917d;hp=43fdbc516ee6ddbea5b812cddb2f1266e5fd339f;hpb=b42fd9bdcea865bd3c6d4a546a6f294ff69e1ef4;p=osm%2FRO.git

diff --git a/osm_ro/vimconn_openstack.py b/osm_ro/vimconn_openstack.py
index 43fdbc51..0d2603f6 100644
--- a/osm_ro/vimconn_openstack.py
+++ b/osm_ro/vimconn_openstack.py
@@ -118,7 +118,7 @@ class vimconnector(vimconn.vimconnector):
         self.neutron = self.session.get('neutron')
         self.cinder = self.session.get('cinder')
         self.glance = self.session.get('glance')
-        self.glancev1 = self.session.get('glancev1')
+        # self.glancev1 = self.session.get('glancev1')
         self.keystone = self.session.get('keystone')
         self.api_version3 = self.session.get('api_version3')
         self.vim_type = self.config.get("vim_type")
@@ -219,8 +219,8 @@ class vimconnector(vimconn.vimconnector):
                 glance_endpoint = None
             self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint)
             #using version 1 of glance client in new_image()
-            self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
-                                                                       endpoint=glance_endpoint)
+            # self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
+            #                                                            endpoint=glance_endpoint)
             self.session['reload_client'] = False
             self.persistent_info['session'] = self.session
             # add availablity zone info inside self.persistent_info
@@ -463,7 +463,9 @@ class vimconnector(vimconn.vimconnector):
                         }
                 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
                 if ip_profile.get('gateway_address'):
-                    subnet['gateway_ip'] = ip_profile.get('gateway_address')
+                    subnet['gateway_ip'] = ip_profile['gateway_address']
+                else:
+                    subnet['gateway_ip'] = None
                 if ip_profile.get('dns_address'):
                     subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
                 if 'dhcp_enabled' in ip_profile:
@@ -770,40 +772,38 @@ class vimconnector(vimconn.vimconnector):
             if "disk_format" in image_dict:
                 disk_format=image_dict["disk_format"]
             else: #autodiscover based on extension
-                if image_dict['location'][-6:]==".qcow2":
+                if image_dict['location'].endswith(".qcow2"):
                     disk_format="qcow2"
-                elif image_dict['location'][-4:]==".vhd":
+                elif image_dict['location'].endswith(".vhd"):
                     disk_format="vhd"
-                elif image_dict['location'][-5:]==".vmdk":
+                elif image_dict['location'].endswith(".vmdk"):
                     disk_format="vmdk"
-                elif image_dict['location'][-4:]==".vdi":
+                elif image_dict['location'].endswith(".vdi"):
                     disk_format="vdi"
-                elif image_dict['location'][-4:]==".iso":
+                elif image_dict['location'].endswith(".iso"):
                     disk_format="iso"
-                elif image_dict['location'][-4:]==".aki":
+                elif image_dict['location'].endswith(".aki"):
                     disk_format="aki"
-                elif image_dict['location'][-4:]==".ari":
+                elif image_dict['location'].endswith(".ari"):
                     disk_format="ari"
-                elif image_dict['location'][-4:]==".ami":
+                elif image_dict['location'].endswith(".ami"):
                     disk_format="ami"
                 else:
                     disk_format="raw"
             self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
-            if image_dict['location'][0:4]=="http":
-                new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
-                        container_format="bare", location=image_dict['location'], disk_format=disk_format)
+            new_image = self.glance.images.create(name=image_dict['name'])
+            if image_dict['location'].startswith("http"):
+                # TODO there is not a method to direct download. It must be downloaded locally with requests
+                raise vimconn.vimconnNotImplemented("Cannot create image from URL")
             else: #local path
                 with open(image_dict['location']) as fimage:
-                    new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
-                            container_format="bare", data=fimage, disk_format=disk_format)
-            #insert metadata. We cannot use 'new_image.properties.setdefault'
-            #because nova and glance are "INDEPENDENT" and we are using nova for reading metadata
-            new_image_nova=self.nova.images.find(id=new_image.id)
-            new_image_nova.metadata.setdefault('location',image_dict['location'])
+                    self.glance.images.upload(new_image.id, fimage)
+                    #new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
+                    #        container_format="bare", data=fimage, disk_format=disk_format)
             metadata_to_load = image_dict.get('metadata')
-            if metadata_to_load:
-                for k,v in yaml.load(metadata_to_load).iteritems():
-                    new_image_nova.metadata.setdefault(k,v)
+            #TODO location is a reserved word for current openstack versions. Use another word
+            metadata_to_load['location'] = image_dict['location']
+            self.glance.images.update(new_image.id, **metadata_to_load)
             return new_image.id
         except (nvExceptions.Conflict, ksExceptions.ClientException, nvExceptions.ClientException) as e:
             self._format_exception(e)
@@ -820,7 +820,7 @@ class vimconnector(vimconn.vimconnector):
         '''
         try:
            self._reload_connection()
-            self.nova.images.delete(image_id)
+            self.glance.images.delete(image_id)
            return image_id
        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e: #TODO remove
            self._format_exception(e)
@@ -829,7 +829,7 @@ class vimconnector(vimconn.vimconnector):
         '''Get the image id from image path in the VIM database. Returns the image_id'''
         try:
             self._reload_connection()
-            images = self.nova.images.list()
+            images = self.glance.images.list()
             for image in images:
                 if image.metadata.get("location")==path:
                     return image.id
@@ -852,17 +852,18 @@ class vimconnector(vimconn.vimconnector):
             self._reload_connection()
             filter_dict_os = filter_dict.copy()
             #First we filter by the available filter fields: name, id. The others are removed.
-            filter_dict_os.pop('checksum', None)
-            image_list = self.nova.images.findall(**filter_dict_os)
-            if len(image_list) == 0:
-                return []
-            #Then we filter by the rest of filter fields: checksum
+            image_list = self.glance.images.list()
             filtered_list = []
             for image in image_list:
                 try:
-                    image_class = self.glance.images.get(image.id)
-                    if 'checksum' not in filter_dict or image_class['checksum'] == filter_dict.get('checksum'):
-                        filtered_list.append(image_class.copy())
+                    if filter_dict.get("name") and image["name"] != filter_dict["name"]:
+                        continue
+                    if filter_dict.get("id") and image["id"] != filter_dict["id"]:
+                        continue
+                    if filter_dict.get("checksum") and image["checksum"] != filter_dict["checksum"]:
+                        continue
+
+                    filtered_list.append(image.copy())
                 except gl1Exceptions.HTTPNotFound:
                     pass
             return filtered_list
@@ -880,8 +881,8 @@ class vimconnector(vimconn.vimconnector):
                 return True
             if vm_status == 'ERROR':
                 return False
-            time.sleep(1)
-            elapsed_time += 1
+            time.sleep(5)
+            elapsed_time += 5
 
         # if we exceeded the timeout rollback
         if elapsed_time >= server_timeout:
@@ -979,6 +980,7 @@ class vimconnector(vimconn.vimconnector):
         'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
             'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
             'size': (mandatory) string with the size of the disk in GB
+            'vim_id' (optional) should use this existing volume id
         availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
         availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
             availability_zone_index is None
@@ -1093,31 +1095,34 @@ class vimconnector(vimconn.vimconnector):
 
             # Create additional volumes in case these are present in disk_list
             base_disk_index = ord('b')
-            if disk_list != None:
+            if disk_list:
                 block_device_mapping = {}
                 for disk in disk_list:
-                    if 'image_id' in disk:
-                        volume = self.cinder.volumes.create(size = disk['size'],name = name + '_vd' +
-                            chr(base_disk_index), imageRef = disk['image_id'])
+                    if disk.get('vim_id'):
+                        block_device_mapping['_vd' + chr(base_disk_index)] = disk['vim_id']
                     else:
-                        volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
-                            chr(base_disk_index))
-                    created_items["volume:" + str(volume.id)] = True
-                    block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
+                        if 'image_id' in disk:
+                            volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                                                chr(base_disk_index), imageRef=disk['image_id'])
+                        else:
+                            volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                                                chr(base_disk_index))
+                        created_items["volume:" + str(volume.id)] = True
+                        block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
                     base_disk_index += 1
 
-                # Wait until volumes are with status available
-                keep_waiting = True
+                # Wait until created volumes are with status available
                 elapsed_time = 0
-                while keep_waiting and elapsed_time < volume_timeout:
-                    keep_waiting = False
-                    for volume_id in block_device_mapping.itervalues():
-                        if self.cinder.volumes.get(volume_id).status != 'available':
-                            keep_waiting = True
-                    if keep_waiting:
-                        time.sleep(1)
-                        elapsed_time += 1
-
+                while elapsed_time < volume_timeout:
+                    for created_item in created_items:
+                        v, _, volume_id = created_item.partition(":")
+                        if v == 'volume':
+                            if self.cinder.volumes.get(volume_id).status != 'available':
+                                break
+                    else: # all ready: break from while
+                        break
+                    time.sleep(5)
+                    elapsed_time += 5
                 # If we exceeded the timeout rollback
                 if elapsed_time >= volume_timeout:
                     raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
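For reference, the glance v1 to v2 migration above replaces the single glancev1.images.create() call with a create/upload/update sequence. The snippet below is a minimal standalone sketch of that v2 flow, not the driver code itself: the keystone credentials, image name and file path are placeholders, and it assumes python-glanceclient 2.x driven by a keystoneauth1 session (the driver builds its own session inside _reload_connection).

    # Minimal sketch of the glance v2 sequence introduced by this patch:
    # create the image record, upload its bits, then attach metadata.
    # Credentials, names and paths below are placeholders, not values from the patch.
    from keystoneauth1.identity import v3
    from keystoneauth1 import session as ks_session
    from glanceclient import client as glClient

    auth = v3.Password(auth_url='https://keystone.example:5000/v3',
                       username='admin', password='secret', project_name='admin',
                       user_domain_name='Default', project_domain_name='Default')
    sess = ks_session.Session(auth=auth)
    glance = glClient.Client(2, session=sess)

    # v2 splits image creation from the data upload (v1 did both in one call)
    image = glance.images.create(name='test-image', disk_format='qcow2',
                                 container_format='bare')
    with open('/tmp/test-image.qcow2', 'rb') as fimage:
        glance.images.upload(image.id, fimage)

    # extra keyword arguments become custom image properties; note that
    # 'location' is reserved in current glance versions, hence the TODO above
    glance.images.update(image.id, made_by='example')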
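The reworked volume handling in new_vminstance follows the same pattern: reuse a volume when the disk carries a 'vim_id', otherwise create one, record it in created_items under a "volume:<uuid>" key, and poll cinder every 5 seconds until every created volume is 'available'. Below is a rough standalone approximation of that wait loop; the helper name, the 600-second default and the plain Exception are illustrative choices, not values taken from the patch (the driver raises vimconn.vimconnException and uses its own volume_timeout).

    import time

    def wait_for_created_volumes(cinder, created_items, volume_timeout=600):
        """Poll cinder until every 'volume:<id>' entry in created_items is available.

        'cinder' is a python-cinderclient Client; timeout and exception type
        are illustrative, not the driver's own.
        """
        elapsed_time = 0
        while elapsed_time < volume_timeout:
            for created_item in created_items:
                kind, _, volume_id = created_item.partition(":")
                if kind == 'volume' and cinder.volumes.get(volume_id).status != 'available':
                    break           # this volume is still building; poll again
            else:                   # no break: every tracked volume is available
                return
            time.sleep(5)
            elapsed_time += 5
        raise Exception("Timeout waiting for volumes after %ds" % volume_timeout)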