rw.core.rwvx-rwdts=${PLATFORM_VERSION} \
rw.automation.core-RWAUTO=${PLATFORM_VERSION} \
rw.core.rwvx-rwha-1.0=${PLATFORM_VERSION}
+
+ sudo apt-get install python-cinderclient
sudo chmod 777 /usr/rift /usr/rift/usr/share
"Derived from earlier versions of base YANG files";
}
+ typedef meta-data-type {
+ type enumeration {
+ enum STRING;
+ enum JSON;
+ }
+ }
+
typedef parameter-data-type {
type enumeration {
enum STRING;
}
}
+ grouping custom-config-files {
+ description "Grouping for files needed to be mounted into an additional drive";
+ list custom-config-files {
+ description
+ "List of configuration files to be written on an additional drive";
+ key "source";
+ leaf source {
+ description "Name of the configuration file";
+ type string;
+ }
+ leaf dest {
+ description "Full path of the destination in the guest";
+ type string;
+ }
+ }
+ }
+
+ grouping custom-meta-data {
+ description "Grouping for instance-specific meta data";
+ list custom-meta-data {
+ description
+ "List of meta-data to be associated with the instance";
+ key "name";
+ leaf name {
+ description "Name of the meta-data parameter";
+ type string;
+ }
+
+ leaf data-type {
+ description "Data-type the meta-data parameter";
+ type manotypes:meta-data-type;
+ default "STRING";
+ }
+
+ leaf value {
+ description "Value of the meta-data parameter";
+ type string;
+ }
+ }
+ }
+
+ grouping custom-boot-data {
+ description "Grouping for custom vim data";
+ container custom-boot-data {
+ uses manotypes:custom-config-files;
+ uses manotypes:custom-meta-data;
+ leaf custom-drive {
+ description "Some VIMs implement custom drives to host custom-files or meta-data";
+ type boolean;
+ default false;
+ }
+ }
+ }
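For reference, the data carried by these three groupings ultimately feeds the
nova boot call in the CAL driver changes below; a minimal Python sketch with
hypothetical values:

    # Hypothetical custom-boot-data instance and the nova boot kwargs it maps to
    custom_boot_data = {
        'custom_meta_data':    [{'name': 'EMS_IP', 'data_type': 'STRING', 'value': '10.5.6.6'}],
        'custom_config_files': [{'source': '<file contents>', 'dest': '/tmp/tempfile.txt'}],
        'custom_drive': True,
    }
    kwargs = {}
    kwargs['metadata']     = {m['name']: m['value'] for m in custom_boot_data['custom_meta_data']}
    kwargs['files']        = {f['dest']: f['source'] for f in custom_boot_data['custom_config_files']}
    kwargs['config_drive'] = custom_boot_data['custom_drive']
    # (JSON-typed meta-data values are additionally json-decoded by the driver)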
+
grouping volume-info {
description "Grouping for Volume-info";
enum lun;
}
}
+
+ uses custom-meta-data;
}
}
}
}
}
+ uses manotypes:custom-boot-data;
+
list internal-connection-point {
key "id";
description
uses manotypes:hypervisor-epa;
uses manotypes:host-epa;
+ uses manotypes:custom-boot-data;
+
list volumes {
key "name";
from neutronclient.neutron import client as ntclient
from glanceclient.v2 import client as glclient
from ceilometerclient import client as ceilo_client
+from cinderclient.v2 import client as cinder_client
# Exceptions
import novaclient.exceptions as NovaException
import keystoneclient.exceptions as KeystoneExceptions
import neutronclient.common.exceptions as NeutronException
import glanceclient.exc as GlanceException
+import cinderclient.exceptions as CinderException
logger = logging.getLogger('rwcal.openstack.drv')
logger.setLevel(logging.DEBUG)
kwargs['image_id'],
kwargs['flavor_id'],
meta = kwargs['metadata'],
- files = None,
+ files = kwargs['files'],
reservation_id = None,
min_count = None,
max_count = None,
block_device_mapping_v2 = kwargs['block_device_mapping_v2'],
nics = nics,
scheduler_hints = kwargs['scheduler_hints'],
- config_drive = None)
+ config_drive = kwargs['config_drive'])
except Exception as e:
logger.info("OpenstackDriver: Create Server operation failed. Exception: %s" %(str(e)))
raise
self.nova_drv = NovaDriverV21(self.ks_drv)
self.neutron_drv = NeutronDriverV2(self.ks_drv)
self.ceilo_drv = CeilometerDriverV2(self.ks_drv)
+ self.cinder_drv = CinderDriverV2(self.ks_drv)
elif auth_url.find('/v2') != -1:
self.ks_drv = KeystoneDriverV2(username, password, auth_url, tenant_name, insecure)
self.glance_drv = GlanceDriverV2(self.ks_drv)
self.nova_drv = NovaDriverV2(self.ks_drv)
self.neutron_drv = NeutronDriverV2(self.ks_drv)
self.ceilo_drv = CeilometerDriverV2(self.ks_drv)
+ self.cinder_drv = CinderDriverV2(self.ks_drv)
else:
logger.error("Could not identity the version information for openstack service endpoints. Auth_URL should contain \"/v2\" or \"/v3\" string in it")
raise NotImplementedError("Auth URL is wrong or invalid. Only Keystone v2 & v3 supported")
def ceilo_alarm_delete(self, alarm_id):
self.ceilo_drv.client.alarms.delete(alarm_id)
+
+ def cinder_volume_list(self):
+ return self.cinder_drv.volume_list()
+
+    def cinder_volume_get(self, vol_id):
+ return self.cinder_drv.volume_get(vol_id)
+
+ def cinder_volume_set_metadata(self, volumeid, metadata):
+ return self.cinder_drv.volume_set_metadata(volumeid, metadata)
+
+ def cinder_volume_delete_metadata(self, volumeid, metadata):
+ return self.cinder_drv.volume_delete_metadata(volumeid, metadata)
+
+
+class CinderDriver(object):
+ """
+ Driver for openstack cinder-client
+ """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for CinderDriver
+        Arguments:
+           ks_drv       - KeystoneDriver class object
+           service_name - cinder service name (e.g. 'volumev2')
+           version      - cinder API version
+        """
+ self.ks_drv = ks_drv
+ self._service_name = service_name
+ self._version = version
+
+ def _get_cinder_credentials(self):
+ """
+ Returns a dictionary of kwargs required to instantiate python-cinderclient class
+
+ Arguments: None
+
+ Returns:
+ A dictionary object of arguments
+ """
+ creds = {}
+ creds['version'] = self._version
+ creds['username'] = self.ks_drv.get_username()
+ creds['api_key'] = self.ks_drv.get_password()
+ creds['auth_url'] = self.ks_drv.get_service_endpoint("identity", "publicURL")
+ creds['project_id'] = self.ks_drv.get_tenant_name()
+ creds['insecure'] = self.ks_drv.get_security_mode()
+
+ return creds
+
+ def _get_cinder_connection(self):
+ """
+        Returns an instance of the python-cinderclient Client class
+ """
+ if not hasattr(self, '_cinder_connection'):
+ self._cinder_connection = cinder_client.Client(**self._get_cinder_credentials())
+ else:
+ # Reinitialize if auth_token is no longer valid
+ if not self.ks_drv.is_auth_token_valid():
+ self._cinder_connection = cinder_client.Client(**self._get_cinder_credentials())
+ return self._cinder_connection
+
+    def volume_list(self):
+        """
+        Returns a list of volume objects, one per volume associated with the tenant
+
+        Arguments: None
+
+        Returns: List of volume objects
+        """
+        cinderconn = self._get_cinder_connection()
+        try:
+            volumes = list(cinderconn.volumes.list())
+        except Exception as e:
+            logger.error("OpenstackDriver: List volumes operation failed. Exception: %s" %(str(e)))
+            raise
+        return volumes
+
+    def volume_get(self, volume_id):
+        """
+        Get details of a single volume
+
+        Arguments:
+           volume_id - id of the volume
+
+        Returns: A volume object
+        """
+ cinderconn = self._get_cinder_connection()
+ try:
+ vol = cinderconn.volumes.get(volume_id)
+ except Exception as e:
+ logger.error("OpenstackDriver: Get volume operation failed. Exception: %s" %(str(e)))
+ raise
+ return vol
+
+    def volume_set_metadata(self, volume_id, metadata):
+        """
+        Set metadata on a volume
+
+        Arguments:
+           volume_id - id of the volume
+           metadata  - dictionary of key-value pairs to set
+
+        Returns: None
+        """
+ cinderconn = self._get_cinder_connection()
+ try:
+ cinderconn.volumes.set_metadata(volume_id, metadata)
+ except Exception as e:
+ logger.error("OpenstackDriver: Set metadata operation failed. Exception: %s" %(str(e)))
+ raise
+
+    def volume_delete_metadata(self, volume_id, metadata):
+        """
+        Delete metadata from a volume
+
+        Arguments:
+           volume_id - id of the volume
+           metadata  - dictionary of key-value pairs to delete
+
+        Returns: None
+        """
+ cinderconn = self._get_cinder_connection()
+ try:
+ cinderconn.volumes.delete_metadata(volume_id, metadata)
+ except Exception as e:
+ logger.error("OpenstackDriver: Delete metadata operation failed. Exception: %s" %(str(e)))
+ raise
+
+class CinderDriverV2(CinderDriver):
+ """
+ Driver for openstack cinder-client V2
+ """
+ def __init__(self, ks_drv):
+ super(CinderDriverV2, self).__init__(ks_drv, 'volumev2', 2)
+
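A minimal usage sketch for the new driver classes, assuming an
already-constructed KeystoneDriver instance (ks_drv) and an existing volume id
(vol_id is hypothetical):

    cinder_drv = CinderDriverV2(ks_drv)
    volumes = cinder_drv.volume_list()             # list of volume objects
    vol = cinder_drv.volume_get(vol_id)            # a single volume object
    cinder_drv.volume_set_metadata(vol_id, {'EMS_IP': '10.5.6.6'})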
import argparse
import sys, os, time
import rwlogger
+import yaml
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
nvconn = drv.nova_drv._get_nova_connection()
nvconn.servers.set_meta(argument.server_id, meta_data)
+
+def get_volume_id(server_vol_list, name):
+    if server_vol_list is None:
+        return None
+
+    for os_volume in server_vol_list:
+        try:
+            # Device name is of the format /dev/vda
+            vol_name = (os_volume['device']).split('/')[2]
+        except Exception:
+            continue
+        if name == vol_name:
+            return os_volume['volumeId']
+    return None
+
+def create_volume_metadata(drv, argument):
+ if argument.vol_metadata is None:
+ return
+
+ yaml_vol_str = argument.vol_metadata.read()
+    yaml_vol_cfg = yaml.safe_load(yaml_vol_str)
+
+ srv_volume_list = drv.nova_volume_list(argument.server_id)
+ for volume in yaml_vol_cfg:
+ if 'guest_params' not in volume:
+ continue
+ if 'custom_meta_data' not in volume['guest_params']:
+ continue
+ vmd = dict()
+ for vol_md_item in volume['guest_params']['custom_meta_data']:
+ if 'value' not in vol_md_item:
+ continue
+ vmd[vol_md_item['name']] = vol_md_item['value']
+
+ # Get volume id
+ vol_id = get_volume_id(srv_volume_list, volume['name'])
+ if vol_id is None:
+ logger.error("Server %s Could not find volume %s" %(argument.server_id, volume['name']))
+ sys.exit(3)
+ drv.cinder_volume_set_metadata(vol_id, vmd)
+
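For reference, the --vol_metadata file parsed above is a YAML dump of the VDU
volumes list written by prepare_vdu_on_boot; yaml.safe_load() on a minimal
hypothetical file would yield a structure like:

    # volume name and meta-data values here are hypothetical
    yaml_vol_cfg = [
        {'name': 'vdb',
         'guest_params': {
             'custom_meta_data': [{'name': 'EMS_IP', 'value': '10.5.6.6'}]}},
    ]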
def prepare_vm_after_boot(drv,argument):
### Important to call create_port_metadata before assign_floating_ip_address
### since assign_floating_ip_address can wait thus delaying port_metadata creation
-    ### Wait for a max of 5 minute for server to come up -- Needs fine tuning
-    wait_time = 300
+    ### Wait for a max of ~8 minutes for server to come up -- Needs fine tuning
+    wait_time = 500
sleep_time = 2
for i in range(int(wait_time/sleep_time)):
server = drv.nova_server_get(argument.server_id)
sys.exit(4)
#create_port_metadata(drv, argument)
+ create_volume_metadata(drv, argument)
assign_floating_ip_address(drv, argument)
default = False,
help = "Create Port Metadata")
+ parser.add_argument("--vol_metadata", type=argparse.FileType('r'))
+
argument = parser.parse_args()
if not argument.auth_url:
import os
import subprocess
import uuid
+import tempfile
+import yaml
import rift.rwcal.openstack as openstack_drv
import rw_status
import rwlogger
import neutronclient.common.exceptions as NeutronException
import keystoneclient.exceptions as KeystoneExceptions
+import tornado
+import gi
+
+gi.require_version('RwSdn', '1.0')
from gi.repository import (
GObject,
image = drv.glance_image_get(image_id)
return RwcalOpenstackPlugin._fill_image_info(image)
+ # This is being deprecated. Please do not use for new SW development
@rwstatus(ret_on_failure=[""])
def do_create_vm(self, account, vminfo):
"""Create a new virtual machine.
return link
@staticmethod
- def _fill_vdu_info(vm_info, flavor_info, mgmt_network, port_list, server_group, volume_list = None):
+ def _fill_vdu_info(drv, vm_info, flavor_info, mgmt_network, port_list, server_group, volume_list = None):
"""Create a GI object for VDUInfoParams
Converts VM information dictionary object returned by openstack
for key, value in vm_info['metadata'].items():
if key == 'node_id':
vdu.node_id = value
+ else:
+ custommetadata = vdu.custom_boot_data.custom_meta_data.add()
+ custommetadata.name = key
+ custommetadata.value = str(value)
+
+ # Look for config_drive
+ if ('config_drive' in vm_info):
+ vdu.custom_boot_data.custom_drive = vm_info['config_drive']
if ('image' in vm_info) and ('id' in vm_info['image']):
vdu.image_id = vm_info['image']['id']
if ('flavor' in vm_info) and ('id' in vm_info['flavor']):
continue
volr.name = vol_name
volr.volume_id = os_volume['volumeId']
+ try:
+ vol_details = drv.cinder_volume_get(volr.volume_id)
+            except Exception:
+ continue
+ if vol_details is None:
+ continue
+ for key, value in vol_details.metadata.items():
+ volmd = volr.custom_meta_data.add()
+ volmd.name = key
+ volmd.value = value
return vdu
kwargs['network_list'] = network_list
metadata = {}
+ files = {}
+ config_drive = False
# Add all metadata related fields
if vduinfo.has_field('node_id'):
metadata['node_id'] = vduinfo.node_id
if pci_assignement is not None:
metadata['pci_assignement'] = pci_assignement
+ if vduinfo.has_field('custom_boot_data'):
+ if vduinfo.custom_boot_data.has_field('custom_meta_data'):
+ for custom_meta_item in vduinfo.custom_boot_data.custom_meta_data:
+ if custom_meta_item.data_type == "STRING":
+ metadata[custom_meta_item.name] = custom_meta_item.value
+ elif custom_meta_item.data_type == "JSON":
+ metadata[custom_meta_item.name] = tornado.escape.json_decode(custom_meta_item.value)
+ else:
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Unsupported data-type {} for custom-meta-data name {} ".format(custom_meta_item.data_type, custom_meta_item.name))
+ if vduinfo.custom_boot_data.has_field('custom_config_files'):
+ for custom_config_file in vduinfo.custom_boot_data.custom_config_files:
+ files[custom_config_file.dest] = custom_config_file.source
+
+ if vduinfo.custom_boot_data.has_field('custom_drive'):
+ if vduinfo.custom_boot_data.custom_drive is True:
+ config_drive = True
+
kwargs['metadata'] = metadata
+ kwargs['files'] = files
+ kwargs['config_drive'] = config_drive
if vduinfo.has_field('availability_zone') and vduinfo.availability_zone.has_field('name'):
kwargs['availability_zone'] = vduinfo.availability_zone
kwargs['scheduler_hints'] = None
kwargs['block_device_mapping_v2'] = None
+ vol_metadata = False
if vduinfo.has_field('volumes') :
kwargs['block_device_mapping_v2'] = []
with self._use_driver(account) as drv:
with self._use_driver(account) as drv:
vm_id = drv.nova_server_create(**kwargs)
if floating_ip:
- self.prepare_vdu_on_boot(account, vm_id, floating_ip)
+ self.prepare_vdu_on_boot(account, vm_id, floating_ip, vduinfo.volumes)
return vm_id
vm.user_tags.pci_assignement = pci_assignement
vm_id = self._create_vm(account, vdu_init, pci_assignement=pci_assignement, server_group=server_group, port_list=port_list, network_list=vm_network_list, imageinfo_list = imageinfo_list)
- self.prepare_vdu_on_boot(account, vm_id, floating_ip)
return vm_id
def prepare_vpci_metadata(self, drv, vdu_init):
- def prepare_vdu_on_boot(self, account, server_id, floating_ip):
+ def prepare_vdu_on_boot(self, account, server_id, floating_ip, volumes=None):
cmd = PREPARE_VM_CMD.format(auth_url = account.openstack.auth_url,
username = account.openstack.key,
password = account.openstack.secret,
if floating_ip is not None:
cmd += (" --floating_ip "+ floating_ip.ip)
+ vol_metadata = False
+ if volumes is not None:
+ for volume in volumes:
+ if volume.guest_params.has_field('custom_meta_data'):
+ vol_metadata = True
+ break
+
+ if vol_metadata is True:
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
+ vol_list = list()
+ for volume in volumes:
+ vol_dict = volume.as_dict()
+ vol_list.append(vol_dict)
+
+ yaml.dump(vol_list, tmp_file)
+ cmd += (" --vol_metadata {}").format(tmp_file.name)
+
exec_path = 'python3 ' + os.path.dirname(openstack_drv.__file__)
exec_cmd = exec_path+'/'+cmd
self.log.info("Running command: %s" %(exec_cmd))
openstack_group_list = drv.nova_server_group_list()
server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
- vdu_info = RwcalOpenstackPlugin._fill_vdu_info(vm,
+ vdu_info = RwcalOpenstackPlugin._fill_vdu_info(drv, vm,
flavor_info,
account.openstack.mgmt_network,
port_list,
server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
- vdu = RwcalOpenstackPlugin._fill_vdu_info(vm,
+ vdu = RwcalOpenstackPlugin._fill_vdu_info(drv, vm,
flavor_info,
account.openstack.mgmt_network,
port_list,
}
}
+ uses manotypes:custom-boot-data;
+
list volumes {
key "name";
description "Console URL from the VIM, if available";
}
+ uses manotypes:custom-boot-data;
+
list volumes {
key "name";
rwpb:field-string-max 64;
type string;
}
-
+ uses manotypes:custom-meta-data;
}
}
logger = logging.getLogger('rwcal-openstack')
+PING_USERDATA = '''
+#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+'''
+
#
# Important information about openstack installation. This needs to be manually verified
#
vdu.node_id = OpenStackTest.NodeID
vdu.image_id = self._image.id
vdu.flavor_id = self._flavor.id
- vdu.vdu_init.userdata = ''
+ vdu.vdu_init.userdata = PING_USERDATA
vdu.allocate_public_address = True
+ meta1 = vdu.custom_boot_data.custom_meta_data.add()
+ meta1.name = "EMS_IP"
+ meta1.data_type = "STRING"
+ meta1.value = "10.5.6.6"
+ #meta2 = vdu.custom_boot_data.custom_meta_data.add()
+ #meta2.name = "Cluster_data"
+ #meta2.data_type = "JSON"
+ #meta2.value = '''{ "cluster_id": "12" , "vnfc_id": "112" }'''
+ #vdu.custom_boot_data.custom_drive = True
+ customfile1 = vdu.custom_boot_data.custom_config_files.add()
+ customfile1.source = "abcdef124"
+ customfile1.dest = "/tmp/tempfile.txt"
+ customfile2 = vdu.custom_boot_data.custom_config_files.add()
+ customfile2.source = "123456"
+ customfile2.dest = "/tmp/tempfile2.txt"
c1 = vdu.connection_points.add()
c1.name = "c_point1"
c1.virtual_link_id = virtual_link_id
pkg.extract_file(script_file, dest_path)
except package.ExtractError as e:
raise ScriptExtractionError("Failed to extract script %s" % script_name) from e
+
+    def read_script(self, pkg, filename):
+        """ Read the named script file from the package and return its contents as a string """
+ script_files = PackageScriptExtractor.package_script_files(pkg)
+
+ for script_name, script_file in script_files.items():
+ if script_name == filename:
+ self._log.debug("Found %s script file in package at %s", filename, script_file)
+
+ try:
+ with pkg.open(script_file) as f:
+ userdata = f.read()
+ self._log.info("Custom script read from file %s", userdata)
+ # File contents are read in binary string, decode to regular string and return
+ return userdata.decode()
+ except package.ExtractError as e:
+ raise ScriptExtractionError("Failed to extract script %s" % script_name) from e
+
+ # If we've reached this point but not found a matching script,
+ # raise an Exception, since we got here only because there was supposed
+ # to be a script in the VDU
+ errmsg = "No script file found in the descriptor package"
+ self._log.error(errmsg)
+ raise ScriptExtractionError(errmsg)
+
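A usage sketch for the new method (the stored package object and the script
name are hypothetical; this mirrors the call made from process_custom_bootdata
below):

    extractor = rift.package.script.PackageScriptExtractor(log)
    contents = extractor.read_script(stored_package, "set_hostname.py")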
import rift.tasklets
import rift.package.store
import rift.package.cloud_init
+import rift.package.script
import rift.mano.dts as mano_dts
@property
def msg(self):
- """ VDU message """
+ """ Process VDU message from resmgr"""
vdu_fields = ["vm_flavor",
"guest_epa",
"vswitch_epa",
vdurvol_data = [vduvol for vduvol in vdur_dict['volumes'] if vduvol['name'] == opvolume.name]
if len(vdurvol_data) == 1:
vdurvol_data[0]["volume_id"] = opvolume.volume_id
+ if opvolume.has_field('custom_meta_data'):
+ metadata_list = list()
+ for metadata_item in opvolume.custom_meta_data:
+ metadata_list.append(metadata_item.as_dict())
+ if 'guest_params' not in vdurvol_data[0]:
+ vdurvol_data[0]['guest_params'] = dict()
+ vdurvol_data[0]['guest_params']['custom_meta_data'] = metadata_list
+
+ if self._vm_resp.has_field('custom_boot_data'):
+ vdur_dict['custom_boot_data'] = dict()
+ if self._vm_resp.custom_boot_data.has_field('custom_drive'):
+ vdur_dict['custom_boot_data']['custom_drive'] = self._vm_resp.custom_boot_data.custom_drive
+ if self._vm_resp.custom_boot_data.has_field('custom_meta_data'):
+ metadata_list = list()
+ for metadata_item in self._vm_resp.custom_boot_data.custom_meta_data:
+ metadata_list.append(metadata_item.as_dict())
+ vdur_dict['custom_boot_data']['custom_meta_data'] = metadata_list
+ if self._vm_resp.custom_boot_data.has_field('custom_config_files'):
+ file_list = list()
+ for file_item in self._vm_resp.custom_boot_data.custom_config_files:
+ file_list.append(file_item.as_dict())
+ vdur_dict['custom_boot_data']['custom_config_files'] = file_list
icp_list = []
ii_list = []
self._log.info("Ignoring placement group with cloud construct for cloud-type: %s", cloud_type)
return
+ def process_custom_bootdata(self, vm_create_msg_dict):
+ """Process the custom boot data"""
+ if 'custom_config_files' not in vm_create_msg_dict['custom_boot_data']:
+ return
+
+ stored_package = self._vnfd_package_store.get_package(self._vnfr.vnfd_id)
+ script_extractor = rift.package.script.PackageScriptExtractor(self._log)
+ for custom_file_item in vm_create_msg_dict['custom_boot_data']['custom_config_files']:
+ if 'source' not in custom_file_item or 'dest' not in custom_file_item:
+ continue
+ source = custom_file_item['source']
+ # Find source file in scripts dir of VNFD
+ self._vnfd_package_store.refresh()
+ self._log.debug("Checking for source config file at %s", source)
+ try:
+ source_file_str = script_extractor.read_script(stored_package, source)
+ except rift.package.script.ScriptExtractionError as e:
+ raise VirtualDeploymentUnitRecordError(e)
+ # Update source file location with file contents
+ custom_file_item['source'] = source_file_str
+
+ return
+
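In effect, each custom_config_files entry has its 'source' field replaced in
place by the contents of the matching script from the VNFD package; a
hypothetical before/after for one entry:

    # before: {'source': 'set_hostname.py', 'dest': '/tmp/tempfile.txt'}
    # after:  {'source': '#!/bin/sh\necho hello\n', 'dest': '/tmp/tempfile.txt'}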
def resmgr_msg(self, config=None):
vdu_fields = ["vm_flavor",
"guest_epa",
"vswitch_epa",
"hypervisor_epa",
- "host_epa"]
+ "host_epa",
+ "volumes",
+ "custom_boot_data"]
self._log.debug("Creating params based on VDUD: %s", self._vdud)
vdu_copy_dict = {k: v for k, v in self._vdud.as_dict().items() if k in vdu_fields}
vm_create_msg_dict.update(vdu_copy_dict)
self.process_placement_groups(vm_create_msg_dict)
+ if 'custom_boot_data' in vm_create_msg_dict:
+ self.process_custom_bootdata(vm_create_msg_dict)
msg = RwResourceMgrYang.VDUEventData()
msg.event_id = self._request_id
msg.cloud_account = self.cloud_account_name
msg.request_info.from_dict(vm_create_msg_dict)
- for volume in self._vdud.volumes:
- v = msg.request_info.volumes.add()
- v.from_dict(volume.as_dict())
return msg
@asyncio.coroutine