RIFT-15099 Model changes and implementation for passing meta-data and custom files to Openstack VMs

Change-Id: I47624b9643e66a193d0b3eca040a71eb154f11b4
Signed-off-by: chamarty <ravi.chamarty@riftio.com>
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
index 943cdd5..ce27e6a 100644
--- a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
@@ -26,12 +26,14 @@
 from neutronclient.neutron import client as ntclient
 from glanceclient.v2 import client as glclient
 from ceilometerclient import client as ceilo_client
+from cinderclient.v2 import client as cinder_client
 
 # Exceptions
 import novaclient.exceptions as NovaException
 import keystoneclient.exceptions as KeystoneExceptions
 import neutronclient.common.exceptions as NeutronException
 import glanceclient.exc as GlanceException
+import cinderclient.exceptions as CinderException
 
 logger = logging.getLogger('rwcal.openstack.drv')
 logger.setLevel(logging.DEBUG)
@@ -559,7 +561,7 @@
                                            kwargs['image_id'],
                                            kwargs['flavor_id'],
                                            meta                 = kwargs['metadata'],
-                                           files                = None,
+                                           files                = kwargs['files'],
                                            reservation_id       = None,
                                            min_count            = None,
                                            max_count            = None,
@@ -569,7 +571,7 @@
                                            block_device_mapping_v2 = kwargs['block_device_mapping_v2'],
                                            nics                 = nics,
                                            scheduler_hints      = kwargs['scheduler_hints'],
-                                           config_drive         = None)
+                                           config_drive         = kwargs['config_drive'])
         except Exception as e:
             logger.info("OpenstackDriver: Create Server operation failed. Exception: %s" %(str(e)))
             raise
@@ -1659,12 +1661,14 @@
             self.nova_drv      = NovaDriverV21(self.ks_drv)
             self.neutron_drv   = NeutronDriverV2(self.ks_drv)
             self.ceilo_drv     = CeilometerDriverV2(self.ks_drv)
+            self.cinder_drv    = CinderDriverV2(self.ks_drv)
         elif auth_url.find('/v2') != -1:
             self.ks_drv        = KeystoneDriverV2(username, password, auth_url, tenant_name, insecure)
             self.glance_drv    = GlanceDriverV2(self.ks_drv)
             self.nova_drv      = NovaDriverV2(self.ks_drv)
             self.neutron_drv   = NeutronDriverV2(self.ks_drv)
             self.ceilo_drv     = CeilometerDriverV2(self.ks_drv)
+            self.cinder_drv    = CinderDriverV2(self.ks_drv)
         else:
             logger.error("Could not identity the version information for openstack service endpoints. Auth_URL should contain \"/v2\" or \"/v3\" string in it")
             raise NotImplementedError("Auth URL is wrong or invalid. Only Keystone v2 & v3 supported")
@@ -2034,3 +2038,136 @@
 
     def ceilo_alarm_delete(self, alarm_id):
         self.ceilo_drv.client.alarms.delete(alarm_id)
+
+    def cinder_volume_list(self):
+        return self.cinder_drv.volume_list()
+  
+    def cinder_volume_get(self,vol_id):
+        return self.cinder_drv.volume_get(vol_id)
+  
+    def cinder_volume_set_metadata(self, volumeid, metadata):
+        return self.cinder_drv.volume_set_metadata(volumeid, metadata)
+  
+    def cinder_volume_delete_metadata(self, volumeid, metadata):
+        return self.cinder_drv.volume_delete_metadata(volumeid, metadata)
+          
+              
+          
+class CinderDriver(object):
+      """
+      Driver for openstack cinder-client
+      """
+      def __init__(self, ks_drv, service_name, version):
+          """
+          Constructor for CinderDriver
+          Arguments: KeystoneDriver class object
+          """
+          self.ks_drv = ks_drv
+          self._service_name = service_name
+          self._version = version
+  
+      def _get_cinder_credentials(self):
+          """
+          Returns a dictionary of kwargs required to instantiate python-cinderclient class
+  
+          Arguments: None
+  
+          Returns:
+             A dictionary object of arguments
+          """
+          creds             = {}
+          creds['version']  = self._version 
+          creds['username']   = self.ks_drv.get_username() 
+          creds['api_key']   = self.ks_drv.get_password() 
+          creds['auth_url'] = self.ks_drv.get_service_endpoint("identity", "publicURL") 
+          creds['project_id'] = self.ks_drv.get_tenant_name() 
+          creds['insecure']   = self.ks_drv.get_security_mode()
+  
+          return creds
+
+      def _get_cinder_connection(self):
+          """
+          Returns a object of class python-cinderclient
+          """
+          if not hasattr(self, '_cinder_connection'):
+              self._cinder_connection = cinder_client.Client(**self._get_cinder_credentials())
+          else:
+              # Reinitialize if auth_token is no longer valid
+              if not self.ks_drv.is_auth_token_valid():
+                  self._cinder_connection = cinder_client.Client(**self._get_cinder_credentials())
+          return self._cinder_connection
+  
+      def volume_list(self):
+          """
+          Returns list of dictionaries. Each dictionary contains attributes associated with
+          volumes
+  
+          Arguments: None
+  
+          Returns: List of dictionaries.
+          """
+          cinderconn = self._get_cinder_connection()
+          volumes = []
+          try:
+              volume_info = cinderconn.volumes.list()
+          except Exception as e:
+              logger.error("OpenstackDriver: List volumes operation failed. Exception: %s" %(str(e)))
+              raise
+          volumes = [ volume for volume in volume_info ]
+          return volumes
+  
+      def volume_get(self, volume_id):
+          """
+          Get details of a single volume
+  
+          Arguments: volume_id - identifier of the volume
+  
+          Returns: a volume object
+          """
+          cinderconn = self._get_cinder_connection()
+          try:
+              vol = cinderconn.volumes.get(volume_id)
+          except Exception as e:
+              logger.error("OpenstackDriver: Get volume operation failed. Exception: %s" %(str(e)))
+              raise
+          return vol
+
+      def volume_set_metadata(self, volume_id, metadata):
+          """
+          Set metadata for volume
+          Metadata is a dictionary of key-value pairs
+  
+          Arguments: volume_id, metadata (dict of key-value pairs to set)
+  
+          Returns: None
+          """
+          cinderconn = self._get_cinder_connection()
+          try:
+              cinderconn.volumes.set_metadata(volume_id, metadata)
+          except Exception as e:
+              logger.error("OpenstackDriver: Set metadata operation failed. Exception: %s" %(str(e)))
+              raise
+  
+      def volume_delete_metadata(self, volume_id, metadata):
+          """
+          Delete metadata for volume
+          Metadata is a dictionary of key-value pairs
+  
+          Arguments: volume_id, metadata (dict whose keys are removed)
+  
+          Returns: None
+          """
+          cinderconn = self._get_cinder_connection()
+          try:
+              cinderconn.volumes.delete_metadata(volume_id, metadata)
+          except Exception as e:
+              logger.error("OpenstackDriver: Delete metadata operation failed. Exception: %s" %(str(e)))
+              raise
+  
+class CinderDriverV2(CinderDriver):
+      """
+      Driver for openstack cinder-client V2
+      """
+      def __init__(self, ks_drv):
+          super(CinderDriverV2, self).__init__(ks_drv, 'volumev2', 2)
+  
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py
index 0d658f1..78ec3a6 100644
--- a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py
@@ -21,6 +21,7 @@
 import argparse
 import sys, os, time
 import rwlogger
+import yaml
 
 logging.basicConfig(level=logging.DEBUG)
 logger = logging.getLogger()
@@ -90,14 +91,53 @@
         
     nvconn = drv.nova_drv._get_nova_connection()
     nvconn.servers.set_meta(argument.server_id, meta_data)
+
+def get_volume_id(server_vol_list, name):
+    if server_vol_list is None:
+        return
+
+    for os_volume in server_vol_list:
+        try:
+            " Device name is of format /dev/vda"
+            vol_name = (os_volume['device']).split('/')[2]
+        except Exception:
+            continue
+        if name == vol_name:
+            return os_volume['volumeId']
     
+def create_volume_metadata(drv, argument):
+    if argument.vol_metadata is None:
+        return
+
+    yaml_vol_str = argument.vol_metadata.read()
+    yaml_vol_cfg = yaml.safe_load(yaml_vol_str)
+
+    srv_volume_list = drv.nova_volume_list(argument.server_id)
+    for volume in yaml_vol_cfg:
+        if 'guest_params' not in volume:
+            continue
+        if 'custom_meta_data' not in volume['guest_params']:
+            continue
+        vmd = dict()
+        for vol_md_item in volume['guest_params']['custom_meta_data']:
+            if 'value' not in vol_md_item:
+                continue
+            vmd[vol_md_item['name']] = vol_md_item['value']
+
+        # Get volume id
+        vol_id = get_volume_id(srv_volume_list, volume['name'])
+        if vol_id is None:
+            logger.error("Server %s Could not find volume %s" %(argument.server_id, volume['name']))
+            sys.exit(3)
+        drv.cinder_volume_set_metadata(vol_id, vmd)
+
         
 def prepare_vm_after_boot(drv,argument):
     ### Important to call create_port_metadata before assign_floating_ip_address
     ### since assign_floating_ip_address can wait thus delaying port_metadata creation
 
     ### Wait for a max of 5 minute for server to come up -- Needs fine tuning
-    wait_time = 300
+    wait_time = 500   # NOTE: ~8.3 minutes; the comment above still says 5
     sleep_time = 2
     for i in range(int(wait_time/sleep_time)):
         server = drv.nova_server_get(argument.server_id)
@@ -115,6 +155,7 @@
         sys.exit(4)
     
     #create_port_metadata(drv, argument)
+    create_volume_metadata(drv, argument)
     assign_floating_ip_address(drv, argument)
     
 
@@ -171,6 +212,8 @@
                         default = False,
                         help = "Create Port Metadata")
 
+    parser.add_argument("--vol_metadata", type=argparse.FileType('r'))
+
     argument = parser.parse_args()
 
     if not argument.auth_url:
diff --git a/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py b/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py
index 936db92..df477e6 100644
--- a/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py
+++ b/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py
@@ -20,6 +20,8 @@
 import os
 import subprocess
 import uuid
+import tempfile
+import yaml
 
 import rift.rwcal.openstack as openstack_drv
 import rw_status
@@ -27,6 +29,10 @@
 import rwlogger
 import neutronclient.common.exceptions as NeutronException
 import keystoneclient.exceptions as KeystoneExceptions
+import tornado
+import gi
+
+gi.require_version('RwSdn', '1.0')
 
 from gi.repository import (
     GObject,
@@ -362,6 +368,7 @@
             image = drv.glance_image_get(image_id)
         return RwcalOpenstackPlugin._fill_image_info(image)
 
+    # This is being deprecated. Please do not use for new SW development
     @rwstatus(ret_on_failure=[""])
     def do_create_vm(self, account, vminfo):
         """Create a new virtual machine.
@@ -1230,7 +1237,7 @@
         return link
 
     @staticmethod
-    def _fill_vdu_info(vm_info, flavor_info, mgmt_network, port_list, server_group, volume_list = None):
+    def _fill_vdu_info(drv, vm_info, flavor_info, mgmt_network, port_list, server_group, volume_list = None):
         """Create a GI object for VDUInfoParams
 
         Converts VM information dictionary object returned by openstack
@@ -1261,6 +1268,14 @@
         for key, value in vm_info['metadata'].items():
             if key == 'node_id':
                 vdu.node_id = value
+            else:
+                custommetadata = vdu.custom_boot_data.custom_meta_data.add()
+                custommetadata.name = key
+                custommetadata.value = str(value)
+
+        # Look for config_drive
+        if ('config_drive' in vm_info):
+            vdu.custom_boot_data.custom_drive = vm_info['config_drive']
         if ('image' in vm_info) and ('id' in vm_info['image']):
             vdu.image_id = vm_info['image']['id']
         if ('flavor' in vm_info) and ('id' in vm_info['flavor']):
@@ -1299,6 +1314,16 @@
                    continue
                 volr.name = vol_name
                 volr.volume_id = os_volume['volumeId']
+                try:
+                   vol_details = drv.cinder_volume_get(volr.volume_id)
+                except:
+                   continue
+                if vol_details is None:
+                   continue
+                for key, value in vol_details.metadata.items():
+                      volmd = volr.custom_meta_data.add()
+                      volmd.name = key
+                      volmd.value = value
 
         return vdu
 
@@ -1911,12 +1936,33 @@
         kwargs['network_list'] = network_list
 
         metadata = {}
+        files = {}
+        config_drive = False
         # Add all metadata related fields
         if vduinfo.has_field('node_id'):
             metadata['node_id'] = vduinfo.node_id
         if pci_assignement is not None:
             metadata['pci_assignement'] = pci_assignement
+        if vduinfo.has_field('custom_boot_data'):
+            if vduinfo.custom_boot_data.has_field('custom_meta_data'):
+                for custom_meta_item in vduinfo.custom_boot_data.custom_meta_data:
+                    if custom_meta_item.data_type == "STRING":
+                       metadata[custom_meta_item.name] = custom_meta_item.value
+                    elif custom_meta_item.data_type == "JSON":
+                       metadata[custom_meta_item.name] = tornado.escape.json_decode(custom_meta_item.value)
+                    else:
+                       raise OpenstackCALOperationFailure("Create-vdu operation failed. Unsupported data-type {} for custom-meta-data name {} ".format(custom_meta_item.data_type, custom_meta_item.name))
+            if vduinfo.custom_boot_data.has_field('custom_config_files'):
+                for custom_config_file in vduinfo.custom_boot_data.custom_config_files:
+                    files[custom_config_file.dest] = custom_config_file.source
+
+            if vduinfo.custom_boot_data.has_field('custom_drive'):
+                if vduinfo.custom_boot_data.custom_drive is True:
+                     config_drive = True
+                     
         kwargs['metadata'] = metadata
+        kwargs['files'] = files
+        kwargs['config_drive'] = config_drive
 
         if vduinfo.has_field('availability_zone') and vduinfo.availability_zone.has_field('name'):
             kwargs['availability_zone']  = vduinfo.availability_zone
@@ -1929,6 +1975,7 @@
             kwargs['scheduler_hints'] = None
 
         kwargs['block_device_mapping_v2'] = None
+        vol_metadata = False
         if vduinfo.has_field('volumes') :
             kwargs['block_device_mapping_v2'] = []
             with self._use_driver(account) as drv:
@@ -1965,7 +2012,7 @@
         with self._use_driver(account) as drv:
             vm_id = drv.nova_server_create(**kwargs)
             if floating_ip:
-                self.prepare_vdu_on_boot(account, vm_id, floating_ip)
+                self.prepare_vdu_on_boot(account, vm_id, floating_ip, vduinfo.volumes)
 
         return vm_id
 
@@ -2134,7 +2181,6 @@
                 vm.user_tags.pci_assignement = pci_assignement
 
             vm_id = self._create_vm(account, vdu_init, pci_assignement=pci_assignement, server_group=server_group, port_list=port_list, network_list=vm_network_list, imageinfo_list = imageinfo_list)
-            self.prepare_vdu_on_boot(account, vm_id, floating_ip)
             return vm_id
 
     def prepare_vpci_metadata(self, drv, vdu_init):
@@ -2180,7 +2226,7 @@
 
 
 
-    def prepare_vdu_on_boot(self, account, server_id, floating_ip):
+    def prepare_vdu_on_boot(self, account, server_id, floating_ip,  volumes=None):
         cmd = PREPARE_VM_CMD.format(auth_url     = account.openstack.auth_url,
                                     username     = account.openstack.key,
                                     password     = account.openstack.secret,
@@ -2191,6 +2237,24 @@
         if floating_ip is not None:
             cmd += (" --floating_ip "+ floating_ip.ip)
 
+        vol_metadata = False
+        if volumes is not None:
+            for volume in volumes:
+                if volume.guest_params.has_field('custom_meta_data'):
+                    vol_metadata = True
+                    break
+        
+        if vol_metadata is True:       
+            tmp_file = None
+            with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
+                 vol_list = list()
+                 for volume in volumes:
+                    vol_dict = volume.as_dict()
+                    vol_list.append(vol_dict)
+
+                 yaml.dump(vol_list, tmp_file)
+            cmd += (" --vol_metadata {}").format(tmp_file.name)
+
         exec_path = 'python3 ' + os.path.dirname(openstack_drv.__file__)
         exec_cmd = exec_path+'/'+cmd
         self.log.info("Running command: %s" %(exec_cmd))
@@ -2286,7 +2350,7 @@
             openstack_group_list = drv.nova_server_group_list()
             server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
             openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
-            vdu_info = RwcalOpenstackPlugin._fill_vdu_info(vm,
+            vdu_info = RwcalOpenstackPlugin._fill_vdu_info(drv, vm,
                                                            flavor_info,
                                                            account.openstack.mgmt_network,
                                                            port_list,
@@ -2335,7 +2399,7 @@
                 server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
 
                 openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
-                vdu = RwcalOpenstackPlugin._fill_vdu_info(vm,
+                vdu = RwcalOpenstackPlugin._fill_vdu_info(drv, vm,
                                                           flavor_info,
                                                           account.openstack.mgmt_network,
                                                           port_list,
diff --git a/rwcal/plugins/yang/rwcal.yang b/rwcal/plugins/yang/rwcal.yang
index 2e768f9..5e85468 100644
--- a/rwcal/plugins/yang/rwcal.yang
+++ b/rwcal/plugins/yang/rwcal.yang
@@ -999,6 +999,8 @@
       }
     }
  
+    uses manotypes:custom-boot-data;
+
     list volumes {
       key "name";
 
@@ -1231,6 +1233,8 @@
       description "Console URL from the VIM, if available";
     }
 
+    uses manotypes:custom-boot-data;
+
     list volumes {
       key "name";
 
@@ -1245,7 +1249,7 @@
         rwpb:field-string-max 64;
         type string;
       }
-
+      uses manotypes:custom-meta-data;
     } 
   }
 
diff --git a/rwcal/test/test_rwcal_openstack.py b/rwcal/test/test_rwcal_openstack.py
index 960beb9..e15718c 100644
--- a/rwcal/test/test_rwcal_openstack.py
+++ b/rwcal/test/test_rwcal_openstack.py
@@ -33,6 +33,13 @@
 
 logger = logging.getLogger('rwcal-openstack')
 
+PING_USERDATA = '''
+#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+'''
+
 #
 # Important information about openstack installation. This needs to be manually verified
 #
@@ -831,8 +838,23 @@
         vdu.node_id = OpenStackTest.NodeID
         vdu.image_id = self._image.id
         vdu.flavor_id = self._flavor.id
-        vdu.vdu_init.userdata = ''
+        vdu.vdu_init.userdata = PING_USERDATA
         vdu.allocate_public_address = True
+        meta1 = vdu.custom_boot_data.custom_meta_data.add()
+        meta1.name = "EMS_IP"
+        meta1.data_type = "STRING"
+        meta1.value = "10.5.6.6"
+        #meta2 = vdu.custom_boot_data.custom_meta_data.add()
+        #meta2.name = "Cluster_data"
+        #meta2.data_type = "JSON"
+        #meta2.value = '''{ "cluster_id": "12" , "vnfc_id": "112" }'''
+        #vdu.custom_boot_data.custom_drive = True
+        customfile1 = vdu.custom_boot_data.custom_config_files.add()
+        customfile1.source = "abcdef124"
+        customfile1.dest = "/tmp/tempfile.txt"
+        customfile2 = vdu.custom_boot_data.custom_config_files.add()
+        customfile2.source = "123456"
+        customfile2.dest = "/tmp/tempfile2.txt"
         c1 = vdu.connection_points.add()
         c1.name = "c_point1"
         c1.virtual_link_id = virtual_link_id