From: chamarty
Date: Mon, 13 Mar 2017 20:12:39 +0000 (+0000)
Subject: Volume ref support
X-Git-Tag: v2.0.0~40
X-Git-Url: https://osm.etsi.org/gitweb/?a=commitdiff_plain;h=5ca290edab221b3a2cca985eeb8cfd1b8b4a5823;p=osm%2FSO.git

Volume ref support

Change-Id: I52c328564fa9eb2d5d95049f033ba5c6c2af19a5
Signed-off-by: chamarty
---
diff --git a/models/plugins/yang/mano-types.yang b/models/plugins/yang/mano-types.yang
index 03c27a17..10135955 100644
--- a/models/plugins/yang/mano-types.yang
+++ b/models/plugins/yang/mano-types.yang
@@ -2029,7 +2029,7 @@ module mano-types
       }
 
-      leaf device_bus {
+      leaf device-bus {
        description "Type of disk-bus on which this disk is exposed to guest";
        type enumeration {
          enum ide;
@@ -2039,7 +2039,7 @@ module mano-types
        }
      }
 
-      leaf device_type {
+      leaf device-type {
        description "The type of device as exposed to guest";
        type enumeration {
          enum disk;
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
index ffab0147..c7ed119c 100644
--- a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
@@ -223,8 +223,15 @@ class OpenstackDriver(object):
         self.log.info("Discovering images")
         self.glance_cache['images'] = self._cache_populate(self.glance_image_list,
                                                            list())
+        return self.glance_cache['images']
 
+    def _build_cinder_volume_list(self):
+        self.log.info("Discovering volumes")
+        vollist = self.cinder_volume_list()
+        self.cinder_cache['volumes'] = self._cache_populate(self.cinder_volume_list,
+                                                            list())
+        return self.cinder_cache['volumes']
 
     def build_nova_resource_cache(self):
         self.log.info("Building nova resource cache")
@@ -239,8 +246,8 @@ class OpenstackDriver(object):
         self._build_neutron_subnet_prefix_list()
 
     def build_cinder_resource_cache(self):
-        pass
-
+        self.log.info("Building cinder resource cache")
+        self._build_cinder_volume_list()
 
     def build_glance_resource_cache(self):
         self.log.info("Building glance resource cache")
@@ -293,6 +300,13 @@ class OpenstackDriver(object):
         else:
             return self._build_glance_image_list()
 
+    @property
+    def _cinder_volume_list(self):
+        if 'volumes' in self.cinder_cache:
+            return self.cinder_cache['volumes']
+        else:
+            return self._build_cinder_volume_list()
+
     def validate_account_creds(self):
         try:
             self.sess_drv.invalidate_auth_token()
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/compute.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/compute.py
index 55382017..6a800de3 100644
--- a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/compute.py
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/compute.py
@@ -173,6 +173,30 @@ class ComputeUtils(object):
                                        %(image_name, checksum))
         return image['id']
 
+    def resolve_volume_n_validate(self, volume_ref):
+        """
+        Resolve the volume reference
+
+        Arguments:
+          volume_ref (string): Name of volume reference
+
+        Raises VolumeValidateError in case of Errors
+        """
+
+        for vol in self.driver._cinder_volume_list:
+            voldict = vol.to_dict()
+            if voldict['display_name'] == volume_ref:
+                if 'status' in voldict and voldict['status'] == 'available':
+                    return voldict['id']
+                else:
+                    self.log.error("Volume %s not in available state. Current state: %s",
+                                   volume_ref, voldict['status'])
+                    raise VolumeValidateError("Volume with name %s found in incorrect (%s) state"
+                                              %(volume_ref, vol['status']))
+        self.log.info("No volume found with matching name: %s ", volume_ref)
+        raise VolumeValidateError("No volume found with matching name: %s " %(volume_ref))
+
     def make_vdu_volume_args(self, volume, vdu_params):
         """
         Arguments:
           volume:     Volume object
           vdu_params: VDUInitParams object
 
         Returns:
           A dictionary required to create volume for VDU
 
         Raises:
           VolumeValidateError in case of Errors
         """
         kwargs = dict()
 
-        if volume.has_field('volume_ref'):
-            self.log.error("Unsupported option found for volume: %s", volume.name)
-            raise VolumeValidateError("Unsupported option found for volume: %s"
-                                      %(volume.name))
-
         kwargs['boot_index'] = volume.boot_priority
         if volume.has_field("image"):
             # Support image->volume
             kwargs['source_type'] = "image"
             kwargs['uuid'] = self.resolve_image_n_validate(volume.image, volume.image_checksum)
+            kwargs['delete_on_termination'] = True
+        elif "volume_ref" in volume:
+            # Support volume-ref->volume (only ref)
+            kwargs['source_type'] = "volume"
+            kwargs['uuid'] = self.resolve_volume_n_validate(volume.volume_ref)
+            kwargs['delete_on_termination'] = False
         else:
             # Support blank->volume
             kwargs['source_type'] = "blank"
+            kwargs['delete_on_termination'] = True
         kwargs['device_name'] = volume.name
         kwargs['destination_type'] = "volume"
         kwargs['volume_size'] = volume.size
-        kwargs['delete_on_termination'] = True
 
         if volume.has_field('device_type'):
             if volume.device_type in ['cdrom', 'disk']:
diff --git a/rwcal/test/test_rwcal_openstack.py b/rwcal/test/test_rwcal_openstack.py
index 654e1a30..339a2b4d 100644
--- a/rwcal/test/test_rwcal_openstack.py
+++ b/rwcal/test/test_rwcal_openstack.py
@@ -973,6 +973,136 @@ class OpenStackTest(unittest.TestCase):
 
         return vdu
 
+    def _get_rbsh_vdu_request_info(self, vlink_list):
+        """
+        Returns object of type RwcalYang.VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams()
+        vdu.name = "cal_rbsh_vdu"
+        vdu.vm_flavor.memory_mb = 2048
+        vdu.vm_flavor.vcpu_count = 1
+        vdu.vm_flavor.storage_gb = 10
+        vdu.flavor_id = self._flavor.id
+        vdu.allocate_public_address = True
+        ctr = 0
+        for vl in vlink_list:
+            c1 = vdu.connection_points.add()
+            c1.name = "c_point" + str(ctr)
+            ctr += 1
+            c1.virtual_link_id = vl
+            c1.type_yang = 'VIRTIO'
+
+        vol0 = vdu.volumes.add()
+        vol0.name = "vda"
+        vol0.image = "mgmt.img"
+        vol0.size = 40
+        vol0.boot_priority = 0
+        vol0.device_bus = "virtio"
+        vol0.device_type = "disk"
+
+        vol1 = vdu.volumes.add()
+        vol1.name = "vdb"
+        vol1.image = "segstore.img"
+        vol1.size = 60
+        vol1.boot_priority = 1
+        vol1.device_bus = "virtio"
+        vol1.device_type = "disk"
+
+        # blank disk
+        vol2 = vdu.volumes.add()
+        vol2.name = "vdc"
+        vol2.size = 10
+        vol2.boot_priority = 2
+        vol2.device_bus = "virtio"
+        vol2.device_type = "disk"
+
+        # existing volume disk
+        vol3 = vdu.volumes.add()
+        vol3.name = "vdd"
+        vol3.size = 10
+        vol3.volume_ref = "volume-ref1"
+        vol3.boot_priority = 3
+        vol3.device_bus = "virtio"
+        vol3.device_type = "disk"
+        return vdu
+
+    @unittest.skip("Skipping test_create_rbsh_vdu")
+    def test_create_rbsh_vdu(self):
+        """
+        Test to create VDU with mgmt port and 3 additional connection points
+        """
+        logger.info("Openstack-CAL-Test: Test Create Virtual Link API")
+        vlink_list = []
+        for ctr in range(3):
+            vlink = RwcalYang.VirtualLinkReqParams()
+            vlink.name = 'rift.cal.virtual_link' + str(ctr)
+            vlink.subnet = '11.0.{}.0/24'.format(str(1 + ctr))
+
+            rc, rsp = self.cal.create_virtual_link(self._acct, vlink)
+            self.assertEqual(rc.status, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
+            vlink_id = rsp
+
+            #Check if virtual_link create is successful
+            rc, rsp = self.cal.get_virtual_link(self._acct, rsp)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            self.assertEqual(rsp.virtual_link_id, vlink_id)
+            vlink_list.append(vlink_id)
+
+
+        # Now create VDU
+        vdu_req = self._get_rbsh_vdu_request_info(vlink_list)
+        logger.info("Openstack-CAL-Test: Test Create RB steelhead VDU API (w/ mgmt port) and 3 CPs")
+
+        rc, rsp = self.cal.create_vdu(self._acct, vdu_req)
+        logger.debug("Openstack-CAL-Test: rc %s rsp %s" % (rc, rsp))
+        self.assertEqual(rc.status, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Created vdu with Id: %s" %rsp)
+
+        test_vdu_id = rsp
+
+        ## Check if VDU get is successful
+        rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id)
+        logger.debug("Get VDU response %s", rsp)
+        self.assertEqual(rsp.vdu_id, test_vdu_id)
+
+        ### Wait until vdu_state is active
+        logger.debug("Waiting 10 secs")
+        time.sleep(10)
+        #{'name': 'dp0vhost7', 'connection_point_id': 'dp0vhost7', 'state': 'active', 'virtual_link_id': 'rift.cal.virtual_link', 'ip_address': '192.168.100.6'}
+        vdu_state = 'inactive'
+        cp_state = 'inactive'
+        for i in range(15):
+            rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: Iter %d VDU with id : %s. Reached State : %s, mgmt ip %s" %(i, test_vdu_id, rsp.state, rsp.management_ip))
+            if (rsp.state == 'active') and ('management_ip' in rsp) and ('public_ip' in rsp):
+                vdu_state = 'active'
+                #'connection_points': [{'name': 'dp0vhost7', 'connection_point_id': 'dp0vhost7', 'state': 'active', 'virtual_link_id': 'rift.cal.virtual_link', 'ip_address': '192.168.100.6'}]
+                for cp in rsp.connection_points:
+                    logger.info("Openstack-CAL-Test: Iter %d VDU with id : %s. Reached State : %s CP state %s" %(i, test_vdu_id, rsp.state, cp))
+            logger.debug("Waiting another 5 secs")
+            time.sleep(5)
+
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rsp.state, 'active')
+        self.assertEqual(vdu_state, 'active')
+        logger.info("Openstack-CAL-Test: VDU with id : %s reached expected state : %s IP: %s" %(test_vdu_id, rsp.state, rsp.management_ip))
+        logger.info("Openstack-CAL-Test: VDUInfo: %s" %(rsp))
+        logger.info("Waiting for 30 secs before deletion")
+        time.sleep(30)
+
+        ### Check vdu list as well
+        rc, rsp = self.cal.get_vdu_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        found = False
+        logger.debug("Get VDU response %s", rsp)
+        for vdu in rsp.vdu_info_list:
+            if vdu.vdu_id == test_vdu_id:
+                found = True
+        self.assertEqual(found, True)
+        logger.info("Openstack-CAL-Test: Passed VDU list" )
+
     #@unittest.skip("Skipping test_create_delete_virtual_link_and_vdu")
     def test_create_delete_virtual_link_and_vdu(self):
         """
@@ -1027,10 +1157,10 @@ class OpenStackTest(unittest.TestCase):
         vlink_id2= rsp
 
         ### Now exercise the modify_vdu_api
-        vdu_modify = self._get_vdu_modify_request_info(vdu_id, vlink_id2)
-        rc = self.cal.modify_vdu(self._acct, vdu_modify)
-        self.assertEqual(rc, RwStatus.SUCCESS)
-        logger.info("Openstack-CAL-Test: Modified vdu with Id: %s" %vdu_id)
+        #vdu_modify = self._get_vdu_modify_request_info(vdu_id, vlink_id2)
+        #rc = self.cal.modify_vdu(self._acct, vdu_modify)
+        #self.assertEqual(rc, RwStatus.SUCCESS)
+        #logger.info("Openstack-CAL-Test: Modified vdu with Id: %s" %vdu_id)
 
         ### Lets delete the VDU
         self.cal.delete_vdu(self._acct, vdu_id)
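
As a quick reference, below is a minimal illustrative sketch (not part of this commit) of the
block-device-mapping dictionaries that make_vdu_volume_args() above is expected to build for the
three volume sources exercised by the new test: the image-backed disk "vda", the pre-existing
cinder volume "volume-ref1" attached as "vdd", and the blank disk "vdc". The keys are exactly the
kwargs set in compute.py; the uuid values are placeholders, since the real values come from
resolve_image_n_validate() and resolve_volume_n_validate() at runtime.

    # Illustrative only -- sizes, names and boot priorities are the fixtures
    # from _get_rbsh_vdu_request_info(); uuids are placeholders.
    image_backed = {
        'boot_index': 0,
        'device_name': 'vda',
        'source_type': 'image',            # glance image "mgmt.img"
        'uuid': '<glance-image-uuid>',
        'destination_type': 'volume',
        'volume_size': 40,
        'delete_on_termination': True,     # image-backed volume is disposable
    }

    volume_ref_backed = {
        'boot_index': 3,
        'device_name': 'vdd',
        'source_type': 'volume',           # cinder volume named "volume-ref1", must be 'available'
        'uuid': '<cinder-volume-uuid>',
        'destination_type': 'volume',
        'volume_size': 10,
        'delete_on_termination': False,    # referenced volume must survive VDU deletion
    }

    blank_disk = {
        'boot_index': 2,
        'device_name': 'vdc',
        'source_type': 'blank',
        'destination_type': 'volume',
        'volume_size': 10,
        'delete_on_termination': True,
    }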