+ #cloud config
+ userdata = None
+ config_drive = None
+ if isinstance(cloud_config, dict):
+     if cloud_config.get("user-data"):
+         userdata = cloud_config["user-data"]
+     if cloud_config.get("boot-data-drive") is not None:
+         config_drive = cloud_config["boot-data-drive"]
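+     #a structured cloud-config (config-files/users/key-pairs) is rendered into a
+     #cloud-init document below and is mutually exclusive with raw user-data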
+ if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
+ if userdata:
+ raise vimconn.vimconnConflictException("Cloud-config cannot contain both 'userdata' and 'config-files'/'users'/'key-pairs'")
+ userdata_dict={}
+ #default user
+ if cloud_config.get("key-pairs"):
+ userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"]
+ userdata_dict["users"] = [{"default": None, "ssh-authorized-keys": cloud_config["key-pairs"] }]
+ if cloud_config.get("users"):
+ if "users" not in userdata_dict:
+ userdata_dict["users"] = [ "default" ]
+ for user in cloud_config["users"]:
+ user_info = {
+ "name" : user["name"],
+ "sudo": "ALL = (ALL)NOPASSWD:ALL"
+ }
+ if "user-info" in user:
+ user_info["gecos"] = user["user-info"]
+ if user.get("key-pairs"):
+ user_info["ssh-authorized-keys"] = user["key-pairs"]
+ userdata_dict["users"].append(user_info)
+
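+         #files to inject into the instance are rendered as cloud-init write_files entries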
+ if cloud_config.get("config-files"):
+ userdata_dict["write_files"] = []
+ for file in cloud_config["config-files"]:
+ file_info = {
+ "path" : file["dest"],
+ "content": file["content"]
+ }
+ if file.get("encoding"):
+ file_info["encoding"] = file["encoding"]
+ if file.get("permissions"):
+ file_info["permissions"] = file["permissions"]
+ if file.get("owner"):
+ file_info["owner"] = file["owner"]
+ userdata_dict["write_files"].append(file_info)
+ userdata = "#cloud-config\n"
+ userdata += yaml.safe_dump(userdata_dict, indent=4, default_flow_style=False)
+ self.logger.debug("userdata: %s", userdata)
+ elif isinstance(cloud_config, str):
+ userdata = cloud_config
+
+ #Create additional volumes in case these are present in disk_list
+ block_device_mapping = None
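+ #extra volume device suffixes start at 'b' (vdb, vdc, ...); 'a' is skipped, presumably left for the root disk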
+ base_disk_index = ord('b')
+ if disk_list is not None:
+     block_device_mapping = dict()
+     for disk in disk_list:
+         if 'image_id' in disk:
+             volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                                 chr(base_disk_index), imageRef=disk['image_id'])
+         else:
+             volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                                 chr(base_disk_index))
+         block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
+         base_disk_index += 1
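+     #block_device_mapping (device suffix -> volume id) is presumably handed to the server create call later on (not part of this hunk)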
+
+     #wait until volumes are with status available
+     keep_waiting = True
+     elapsed_time = 0
+     while keep_waiting and elapsed_time < volume_timeout:
+         keep_waiting = False
+         for volume_id in block_device_mapping.values():
+             if self.cinder.volumes.get(volume_id).status != 'available':
+                 keep_waiting = True
+         if keep_waiting:
+             time.sleep(1)
+             elapsed_time += 1
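+     #at this point either every volume reported 'available' or about volume_timeout seconds of polling have elapsed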
+
+     #if we exceeded the timeout rollback
+     if elapsed_time >= volume_timeout:
+         #delete the volumes we just created
+         for volume_id in block_device_mapping.values():
+             self.cinder.volumes.delete(volume_id)
+
+         #delete ports we just created
+         for net_item in net_list_vim:
+             if 'port-id' in net_item:
+                 self.neutron.delete_port(net_item['port-id'])
+
+         raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
+                                        http_code=vimconn.HTTP_Request_Timeout)
+